diff --git a/.gitattributes b/.gitattributes deleted file mode 100644 index d9794962b..000000000 --- a/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -artiq/_version.py export-subst diff --git a/.github/ISSUE_TEMPLATE/1_Bug_Report.md b/.github/ISSUE_TEMPLATE/1_Bug_Report.md new file mode 100644 index 000000000..263b34f86 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_Bug_Report.md @@ -0,0 +1,49 @@ +--- +name: Bug report +about: Report a bug in ARTIQ + +--- + + + +# Bug Report + + + +## One-Line Summary + +Short summary. + +## Issue Details + +### Steps to Reproduce + +1. Step 1. +2. Step 2. +3. Step 3. + +### Expected Behavior + +Behavior + +### Actual (undesired) Behavior + +* Text description +* Log message, tracebacks, screen shots where relevant + +### Your System (omit irrelevant parts) + +* Operating System: +* ARTIQ version: (with recent versions of ARTIQ, run ``artiq_client --version``) +* Version of the gateware and runtime loaded in the core device: (in the output of ``artiq_coremgmt -D .... log``) +* If using Conda, output of `conda list` (please submit as a file attachment, as this tends to be long) +* Hardware involved: + + diff --git a/.github/ISSUE_TEMPLATE/2_Feature_Request.md b/.github/ISSUE_TEMPLATE/2_Feature_Request.md new file mode 100644 index 000000000..d0d02f1e0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_Feature_Request.md @@ -0,0 +1,28 @@ +--- +name: Feature request +about: Suggest an idea for ARTIQ + +--- + + + +# ARTIQ Feature Request + +## Problem this request addresses + +A clear and concise description of what the problem is. + +## Describe the solution you'd like + +A clear and concise description of what you want to happen. + +## Additional context + +Add any other context about the feature request here. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/3_Question.md b/.github/ISSUE_TEMPLATE/3_Question.md new file mode 100644 index 000000000..80e7a2ae3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/3_Question.md @@ -0,0 +1,29 @@ +--- +name: Support question +about: Questions about ARTIQ that are not covered in the documentation + +--- + +# Question + + + +## Category: FILL_IN + + + +## Description + +Question text diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..0df6f0f36 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,69 @@ + + +# ARTIQ Pull Request + +## Description of Changes + +### Related Issue + + + +## Type of Changes + + +| | Type | +| ------------- | ------------- | +| ✓ | :bug: Bug fix | +| ✓ | :sparkles: New feature | +| ✓ | :hammer: Refactoring | +| ✓ | :scroll: Docs | + +## Steps (Choose relevant, delete irrelevant before submitting) + +### All Pull Requests + +- [x] Use correct spelling and grammar. +- [ ] Update [RELEASE_NOTES.rst](../RELEASE_NOTES.rst) if there are noteworthy changes, especially if there are changes to existing APIs. +- [ ] Close/update issues. +- [ ] Check the copyright situation of your changes and sign off your patches (`git commit --signoff`, see [copyright](../CONTRIBUTING.rst#copyright-and-sign-off)). + +### Code Changes + +- [ ] Run `flake8` to check code style (follow PEP-8 style). `flake8` has issues with parsing Migen/gateware code, ignore as necessary. +- [ ] Test your changes or have someone test them. Mention what was tested and how. 
+- [ ] Add and check docstrings and comments +- [ ] Check, test, and update the [unittests in /artiq/test/](../artiq/test/) or [gateware simulations in /artiq/gateware/test](../artiq/gateware/test) + +### Documentation Changes + +- [ ] Check, test, and update the documentation in [doc/](../doc/). Build documentation (`cd doc/manual/; make html`) to ensure no errors. + +### Git Logistics + +- [ ] Split your contribution into logically separate changes (`git rebase --interactive`). Merge/squash/fixup commits that just fix or amend previous commits. Remove unintended changes & cleanup. See [tutorial](https://www.atlassian.com/git/tutorials/rewriting-history/git-rebase). +- [ ] Write short & meaningful commit messages. Review each commit for messages (`git show`). Format: + ``` + topic: description. < 50 characters total. + + Longer description. < 70 characters per line + ``` + +### Licensing + +See [copyright & licensing for more info](https://github.com/m-labs/artiq/blob/master/CONTRIBUTING.rst#copyright-and-sign-off). +ARTIQ files that do not contain a license header are copyrighted by M-Labs Limited and are licensed under LGPLv3+. diff --git a/.gitignore b/.gitignore index 91ffc1c07..77178c15e 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,7 @@ __pycache__/ *.elf *.fbi *.pcap +*.prof .ipynb_checkpoints /doc/manual/_build /build @@ -17,6 +18,7 @@ __pycache__/ /artiq/binaries /artiq/firmware/target/ /misoc_*/ +/artiq_*/ /artiq/test/results /artiq/examples/*/results diff --git a/.readthedocs.yml b/.readthedocs.yml deleted file mode 100644 index 7bbd24a41..000000000 --- a/.readthedocs.yml +++ /dev/null @@ -1,5 +0,0 @@ -python: - version: 3 - pip_install: false -conda: - file: conda/artiq-doc.yaml diff --git a/.gitmodules b/BETA similarity index 100% rename from .gitmodules rename to BETA diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 14cdaa256..8fa7d0336 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -8,8 +8,8 @@ Reporting Issues/Bugs Thanks for `reporting issues to ARTIQ `_! You can also discuss issues and ask questions on IRC (the `#m-labs channel on freenode -`_) or on the `mailing list -`_. +`_), the `Mattermost chat +`_, or on the `forum `_. The best bug reports are those which contain sufficient information. With accurate and comprehensive context, an issue can be resolved quickly and @@ -17,19 +17,20 @@ efficiently. Please consider adding the following data to your issue report if possible: * A clear and unique summary that fits into one line. Also check that - this issue has not jet been reported. If it has, add additional information there. + this issue has not yet been reported. If it has, add additional information there. * Precise steps to reproduce (list of actions that leads to the issue) * Expected behavior (what should happen) * Actual behavior (what happens instead) * Logging message, trace backs, screen shots where relevant -* Components involved: +* Components involved (omit irrelevant parts): - * Operating system - * Conda version - * ARTIQ version (package or git commit id, versions for bitstream, BIOS, - runtime and host software) + * Operating System + * ARTIQ version (with recent versions of ARTIQ, run ``artiq_client --version``) + * Version of the gateware and runtime loaded in the core device (in the output of ``artiq_coremgmt -D .... 
log``) + * If using Conda, output of `conda list` * Hardware involved + For in-depth information on bug reporting, see: http://www.chiark.greenend.org.uk/~sgtatham/bugs.html @@ -42,10 +43,31 @@ Contributing Code ARTIQ welcomes contributions. Write bite-sized patches that can stand alone, clean them up, write proper commit messages, add docstrings and unittests. Then ``git rebase`` them onto the current master or merge the current master. Verify -that the testsuite passes. Then prepare a pull request or send patches to the -`mailing list `_ to be -discussed. Expect your contribution to be held up to coding standards (e.g. use -``flake8`` to check yourself). +that the testsuite passes. Then submit a pull request. Expect your contribution +to be held up to coding standards (e.g. use ``flake8`` to check yourself). + +Checklist for Code Contributions +-------------------------------- + +- Test your changes or have someone test them. Mention what was tested and how. +- Use correct spelling and grammar. Use your code editor to help you with + syntax, spelling, and style. +- Style: PEP-8 (``flake8``) +- Add and check docstrings and comments +- Split your contribution into logically separate changes (``git rebase + --interactive``). Merge (squash, fixup) commits that just fix previous commits + or amend them. Remove unintended changes. Clean up your commits. +- Check the copyright situation of your changes and sign off your patches + (``git commit --signoff``, see also below) +- Write meaningful commit messages containing the area of the change + and a concise description (50 characters or less) in the first line. + Describe everything else in the long explanation. +- Review each of your commits for the above items (``git show``) +- Update ``RELEASE_NOTES.rst`` if there are noteworthy changes, especially if + there are changes to existing APIs +- Check, test, and update the documentation in `doc/` +- Check, test, and update the unittests +- Close and/or update issues Copyright and Sign-Off ---------------------- @@ -95,4 +117,4 @@ then you just add a line saying using your legal name (sorry, no pseudonyms or anonymous contributions.) ARTIQ files that do not contain a license header are copyrighted by M-Labs Limited -and are licensed under GNU GPL version 3. +and are licensed under GNU LGPL version 3 or later. diff --git a/DEVELOPER_NOTES.rst b/DEVELOPER_NOTES.rst index 460a16d05..8c141fbf9 100644 --- a/DEVELOPER_NOTES.rst +++ b/DEVELOPER_NOTES.rst @@ -1,69 +1,17 @@ -Release process -=============== - -Maintain ``RELEASE_NOTES.rst`` with a list of new features and API changes in each major release. - -Major releases --------------- - -1. Create branch release-X from master. -2. Tag the next commit in master X+1.0.dev. -3. Ensure that release versions of all packages required are available under the ``main`` label in conda. Ensure that new packages in ``main`` do not break older ARTIQ releases. -4. In the release-X branch, remove any unfinished features. -5. Test and fix any problems found. Apply fixes to both master and release-X. -6. If you have willing testers for release candidates, tag X.0rc1 in the release-X branch (generally use signed annotated tags, i.e. ``git tag -sa X.0rc1``), have it build, and point testers there. Iterate over the previous points with new release candidates if necessary. -7. Tag X.0 in the release-X branch, build it, and copy its packages to ``main`` channel. -8. Mint a new DOI from Zenodo and update the README/introduction. -9. 
Update the m-labs.hk/artiq/manual redirect to point to m-labs.hk/artiq/manual-release-X (edit /artiq/.htaccess). -10. "Draft a new release" and close the milestone on GitHub. -11. Deprecate the old release documentation with a banner in - doc/manual/_templates/layout.html in the old ``release-(X-1)`` branch. - -Minor (bugfix) releases ------------------------ - -1. Backport bugfixes from the master branch or fix bugs specific to old releases into the currently maintained release-X branch(es). -2. When significant bugs have been fixed, tag X.Y+1. -3. To help dealing with regressions, no new features or refactorings should be implemented in release-X branches. Those happen in the master branch, and then a new release-X+1 branch is created. -4. "Draft a new release" and close the milestone on GitHub. - Sharing development boards ========================== -To avoid conflicts for development boards on the server, while using a board you must hold the corresponding lock file present in ``/run/board``. Holding the lock file grants you exclusive access to the board. +To avoid conflicts for development boards on the server, while using a board you must hold the corresponding lock file present in the ``/tmp`` folder of the machine to which the board is connected. Holding the lock file grants you exclusive access to the board. -To lock the KC705 for 30 minutes or until Ctrl-C is pressed: +For example, to lock the KC705 until ENTER is pressed: :: - flock --verbose /run/boards/kc705 sleep 1800 -Check that the command acquires the lock, i.e. prints something such as: -:: - flock: getting lock took 0.000003 seconds - flock: executing sleep - -To lock the KC705 for the duration of the execution of a shell: -:: - flock /run/boards/kc705 bash - -You may also use this script: -:: - #!/bin/bash - exec flock /run/boards/$1 bash --rcfile <(cat ~/.bashrc; echo PS1=\"[$1\ lock]\ \$PS1\") + ssh rpi-1.m-labs.hk "flock /tmp/board_lock-kc705-1 -c 'echo locked; read; echo unlocked'" If the board is already locked by another user, the ``flock`` commands above will wait for the lock to be released. -To determine which user is locking a board, use: +To determine which user is locking a board, use a command such as: :: - fuser -v /run/boards/kc705 - - -Selecting a development board with artiq_flash -============================================== - -Use the ``bus:port`` notation:: - - artiq_flash --preinit-command "ftdi_location 5:2" # Sayma 1 - artiq_flash --preinit-command "ftdi_location 3:10" # Sayma 2 - artiq_flash --preinit-command "ftdi_location 5:1" # Sayma 3 + ssh rpi-1.m-labs.hk "fuser -v /tmp/board_lock-kc705-1" Deleting git branches diff --git a/MAJOR_VERSION b/MAJOR_VERSION new file mode 100644 index 000000000..1e8b31496 --- /dev/null +++ b/MAJOR_VERSION @@ -0,0 +1 @@ +6 diff --git a/README.rst b/README.rst index 3b380833a..0d4859a3a 100644 --- a/README.rst +++ b/README.rst @@ -4,25 +4,23 @@ .. image:: https://raw.githubusercontent.com/m-labs/artiq/master/doc/logo/artiq.png :target: https://m-labs.hk/artiq -ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is the next-generation control system for quantum information experiments. -It is maintained and developed by `M-Labs `_ and the initial development was for and in partnership with the `Ion Storage Group at NIST `_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Several other laboratories (e.g. 
at the University of Oxford, the Army Research Lab, and the University of Maryland) have later adopted ARTIQ as their control system and have contributed to it. +ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is a leading-edge control and data acquisition system for quantum information experiments. +It is maintained and developed by `M-Labs `_ and the initial development was for and in partnership with the `Ion Storage Group at NIST `_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Many laboratories around the world have adopted ARTIQ as their control system, with over a hundred Sinara hardware crates deployed, and some have `contributed `_ to it. The system features a high-level programming language that helps describing complex experiments, which is compiled and executed on dedicated hardware with nanosecond timing resolution and sub-microsecond latency. It includes graphical user interfaces to parametrize and schedule experiments and to visualize and explore the results. -ARTIQ uses FPGA hardware to perform its time-critical tasks. -It is designed to be portable to hardware platforms from different vendors and FPGA manufacturers. -Currently, several different configurations of a `high-end FPGA evaluation kit `_ are used and supported. This FPGA platform can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort. +ARTIQ uses FPGA hardware to perform its time-critical tasks. The `Sinara hardware `_, and in particular the Kasli FPGA carrier, is designed to work with ARTIQ. +ARTIQ is designed to be portable to hardware platforms from different vendors and FPGA manufacturers. +Several different configurations of a `FPGA evaluation kit `_ and of a `Zynq evaluation kit `_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort. -Custom hardware components with widely extended capabilities and advanced support for scalable and fully distributed real-time control of experiments `are being designed `_. - -ARTIQ and its dependencies are available in the form of `conda packages `_ for both Linux and Windows. +ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and Conda packages (for Windows and Linux). See `the manual `_ for installation instructions. Packages containing pre-compiled binary images to be loaded onto the hardware platforms are supplied for each configuration. Like any open source software ARTIQ can equally be built and installed directly from `source `_. ARTIQ is supported by M-Labs and developed openly. -Components, features, fixes, improvements, and extensions are funded by and developed for the partnering research groups. +Components, features, fixes, improvements, and extensions are often `funded `_ by and developed for the partnering research groups. -Technologies employed include `Python `_, `Migen `_, `MiSoC `_/`mor1kx `_, `LLVM `_/`llvmlite `_, and `Qt5 `_. +Core technologies employed include `Python `_, `Migen `_, `Migen-AXI `_, `Rust `_, `MiSoC `_/`mor1kx `_, `LLVM `_/`llvmlite `_, and `Qt5 `_. Website: https://m-labs.hk/artiq @@ -31,7 +29,7 @@ Website: https://m-labs.hk/artiq License ======= -Copyright (C) 2014-2017 M-Labs Limited. +Copyright (C) 2014-2020 M-Labs Limited. 
ARTIQ is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by @@ -50,9 +48,10 @@ The ARTIQ manifesto =================== The free and open dissemination of methods and results is central to scientific progress. -The ARTIQ authors, contributors, and supporters consider the free and open exchange of scientific tools to be equally important and have chosen the licensing terms of ARTIQ accordingly. -ARTIQ, including its gateware, the firmware, and the ARTIQ tools and libraries are licensed as LGPLv3+. -This ensures that a user of ARTIQ obtains broad rights to use, redistribute, and modify it. + +The ARTIQ and Sinara authors, contributors, and supporters consider the free and open exchange of scientific tools to be equally important and have chosen the licensing terms of ARTIQ and Sinara accordingly. ARTIQ, including its gateware, the firmware, and the ARTIQ tools and libraries are licensed as LGPLv3+. The Sinara hardware designs are licensed under CERN OHL. +This ensures that a user of ARTIQ or Sinara hardware designs obtains broad rights to use, redistribute, study, and modify them. + The following statements are intended to clarify the interpretation and application of the licensing terms: * There is no requirement to distribute any unmodified, modified, or extended versions of ARTIQ. Only when distributing ARTIQ the source needs to be made available. diff --git a/RELEASE_NOTES.rst b/RELEASE_NOTES.rst index 890552f1b..4ab9f0537 100644 --- a/RELEASE_NOTES.rst +++ b/RELEASE_NOTES.rst @@ -3,25 +3,250 @@ Release notes ============= -4.0 (unreleased) ----------------- +ARTIQ-6 +------- +Highlights: + +* New hardware support: + - Zynq SoC core devices, enabling kernels to run on a 1 GHz CPU core with a floating-point + unit for faster computations. This currently requires an external + repository (https://git.m-labs.hk/m-labs/artiq-zynq) and only supports the ZC706. + - Mirny 4-channel wide-band PLL/VCO-based microwave frequency synthesiser + - Fastino 32-channel, 3MS/s per channel, 16-bit DAC EEM + - Kasli 2.0 +* ARTIQ Python (core device kernels): + - Multidimensional arrays are now available on the core device, using NumPy syntax. + Elementwise operations (e.g. ``+``, ``/``), matrix multiplication (``@``) and + multidimensional indexing are supported; slices and views are not yet. + - Trigonometric and other common math functions from NumPy are now available on the + core device (e.g. ``numpy.sin``), both for scalar arguments and implicitly + broadcast across multidimensional arrays. + - Failed assertions now raise ``AssertionError``\ s instead of aborting kernel + execution. +* Performance improvements: + - SERDES TTL inputs can now detect edges on pulses that are shorter + than the RTIO period (https://github.com/m-labs/artiq/issues/1432) + - Improved performance for kernel RPC involving list and array. +* Coredevice SI to mu conversions now always return valid codes, or raise a ``ValueError``. +* Zotino now exposes ``voltage_to_mu()`` +* ``ad9910``: The maximum amplitude scale factor is now ``0x3fff`` (was ``0x3ffe`` + before). +* Dashboard: + - Applets now restart if they are running and a ccb call changes their spec + - A "Quick Open" dialog to open experiments by typing part of their name can + be brought up with Ctrl-P (Ctrl+Return to immediately submit the selected entry + with the default arguments). +* Experiment results are now always saved to HDF5, even if run() fails. 
+* Core device: ``panic_reset 1`` now correctly resets the kernel CPU as well if + a communication CPU panic occurs. +* NumberValue accepts a ``type`` parameter specifying the output as ``int`` or ``float`` +* A parameter ``--identifier-str`` has been added to many targets to aid + with reproducible builds. +* Python 3.7 support in Conda packages. + +Breaking changes: + +* ``artiq_netboot`` has been moved to its own repository at + https://git.m-labs.hk/m-labs/artiq-netboot +* Core device watchdogs have been removed. +* The ARTIQ compiler now implements arrays following NumPy semantics, rather than as a + thin veneer around lists. Most prior use cases of NumPy arrays in kernels should work + unchanged with the new implementation, but the behavior might differ slightly in some + cases (for instance, non-rectangular arrays are not currently supported). +* ``quamash`` has been replaced with ``qasync``. + + +ARTIQ-5 +------- + +Highlights: + +* Performance improvements: + - Faster RTIO event submission (1.5x improvement in pulse rate test) + See: https://github.com/m-labs/artiq/issues/636 + - Faster compilation times (3 seconds saved on kernel compilation time on a typical + medium-size experiment) + See: https://github.com/m-labs/artiq/commit/611bcc4db4ed604a32d9678623617cd50e968cbf +* Improved packaging and build system: + - new continuous integration/delivery infrastructure based on Nix and Hydra, + providing reproducibility, speed and independence. + - rolling release process (https://github.com/m-labs/artiq/issues/1326). + - firmware, gateware and device database templates are automatically built for all + supported Kasli variants. + - new JSON description format for generic Kasli systems. + - Nix packages are now supported. + - many Conda problems worked around. + - controllers are now out-of-tree. + - split packages that enable lightweight applications that communicate with ARTIQ, + e.g. controllers running on non-x86 single-board computers. +* Improved Urukul support: + - AD9910 RAM mode. + - Configurable refclk divider and PLL bypass. + - More reliable phase synchronization at high sample rates. + - Synchronization calibration data can be read from EEPROM. +* A gateware-level input edge counter has been added, which offers higher + throughput and increased flexibility over the usual TTL input PHYs where + edge timestamps are not required. See ``artiq.coredevice.edge_counter`` for + the core device driver and ``artiq.gateware.rtio.phy.edge_counter``/ + ``artiq.gateware.eem.DIO.add_std`` for the gateware components. +* With DRTIO, Siphaser uses a better calibration mechanism. + See: https://github.com/m-labs/artiq/commit/cc58318500ecfa537abf24127f2c22e8fe66e0f8 +* Schedule updates can be sent to influxdb (artiq_influxdb_schedule). +* Experiments can now programmatically set their default pipeline, priority, and flush flag. +* List datasets can now be efficiently appended to from experiments using + ``artiq.language.environment.HasEnvironment.append_to_dataset``. +* The core device now supports IPv6. +* To make development easier, the bootloader can receive firmware and secondary FPGA + gateware from the network. +* Python 3.7 compatibility (Nix and source builds only, no Conda). +* Various other bugs from 4.0 fixed. +* Preliminary Sayma v2 and Metlino hardware support. 
+ +Breaking changes: + +* The ``artiq.coredevice.ad9910.AD9910`` and + ``artiq.coredevice.ad9914.AD9914`` phase reference timestamp parameters + have been renamed to ``ref_time_mu`` for consistency, as they are in machine + units. +* The controller manager now ignores device database entries without the + ``command`` key set to facilitate sharing of devices between multiple + masters. +* The meaning of the ``-d/--dir`` and ``--srcbuild`` options of ``artiq_flash`` + has changed. +* Controllers for third-party devices are now out-of-tree. +* ``aqctl_corelog`` now filters log messages below the ``WARNING`` level by default. + This behavior can be changed using the ``-v`` and ``-q`` options like the other + programs. +* On Kasli the firmware now starts with a unique default MAC address + from EEPROM if `mac` is absent from the flash config. +* The ``-e/--experiment`` switch of ``artiq_run`` and ``artiq_compile`` + has been renamed ``-c/--class-name``. +* ``artiq_devtool`` has been removed. +* Much of ``artiq.protocols`` has been moved to a separate package ``sipyco``. + ``artiq_rpctool`` has been renamed to ``sipyco_rpctool``. + + +ARTIQ-4 +------- + +4.0 +*** + +* The ``artiq.coredevice.ttl`` drivers no longer track the timestamps of + submitted events in software, requiring the user to explicitly specify the + timeout for ``count()``/``timestamp_mu()``. Support for ``sync()`` has been dropped. + + Now that RTIO has gained DMA support, there is no longer a reliable way for + the kernel CPU to track the individual events submitted on any one channel. + Requiring the timeouts to be specified explicitly ensures consistent API + behavior. To make this more convenient, the ``TTLInOut.gate_*()`` functions + now return the cursor position at the end of the gate, e.g.:: + + ttl_input.count(ttl_input.gate_rising(100 * us)) + + In most situations – that is, unless the timeline cursor is rewound after the + respective ``gate_*()`` call – simply passing ``now_mu()`` is also a valid + upgrade path:: + + ttl_input.count(now_mu()) + + The latter might use up more timeline slack than necessary, though. + + In place of ``TTL(In)Out.sync``, the new ``Core.wait_until_mu()`` method can + be used, which blocks execution until the hardware RTIO cursor reaches the + given timestamp:: + + ttl_output.pulse(10 * us) + self.core.wait_until_mu(now_mu()) +* RTIO outputs use a new architecture called Scalable Event Dispatcher (SED), + which allows building systems with large number of RTIO channels more + efficiently. + From the user perspective, collision errors become asynchronous, and non- + monotonic timestamps on any combination of channels are generally allowed + (instead of producing sequence errors). + RTIO inputs are not affected. * The DDS channel number for the NIST CLOCK target has changed. * The dashboard configuration files are now stored one-per-master, keyed by the server address argument and the notify port. * The master now has a ``--name`` argument. If given, the dashboard is labelled with this name rather than the server address. -* ``artiq_flash --adapter`` has been changed to ``artiq_flash --variant``. +* ``artiq_flash`` targets Kasli by default. Use ``-t kc705`` to flash a KC705 + instead. +* ``artiq_flash -m/--adapter`` has been changed to ``artiq_flash -V/--variant``. +* The ``proxy`` action of ``artiq_flash`` is determined automatically and should + not be specified manually anymore. +* ``kc705_dds`` has been renamed ``kc705``. 
+* The ``-H/--hw-adapter`` option of ``kc705`` has been renamed ``-V/--variant``. +* SPI masters have been switched from misoc-spi to misoc-spi2. This affects + all out-of-tree RTIO core device drivers using those buses. See the various + commits on e.g. the ``ad53xx`` driver for an example of how to port from the old + to the new bus. +* The ``ad5360`` coredevice driver has been renamed to ``ad53xx`` and the API + has changed to better support Zotino. +* ``artiq.coredevice.dds`` has been renamed to ``artiq.coredevice.ad9914`` and + simplified. DDS batch mode is no longer supported. The ``core_dds`` device + is no longer necessary. +* The configuration entry ``startup_clock`` is renamed ``rtio_clock``. Switching + clocks dynamically (i.e. without device restart) is no longer supported. +* ``set_dataset(..., save=True)`` has been renamed + ``set_dataset(..., archive=True)``. +* On the AD9914 DDS, when switching to ``PHASE_MODE_CONTINUOUS`` from another mode, + use the returned value of the last ``set_mu`` call as the phase offset for + ``PHASE_MODE_CONTINUOUS`` to avoid a phase discontinuity. This is no longer done + automatically. If one phase glitch when entering ``PHASE_MODE_CONTINUOUS`` is not + an issue, this recommendation can be ignored. + + +ARTIQ-3 +------- + +3.7 +*** + +No further notes. + + +3.6 +*** + +No further notes. + + +3.5 +*** + +No further notes. + + +3.4 +*** + +No further notes. + + +3.3 +*** + +No further notes. + + +3.2 +*** + +* To accommodate larger runtimes, the flash layout has changed. As a result, the + contents of the flash storage will be lost when upgrading. Set the values back + (IP, MAC address, startup kernel, etc.) after the upgrade. 3.1 ---- +*** No further notes. 3.0 ---- +*** * The ``--embed`` option of applets is replaced with the environment variable ``ARTIQ_APPLET_EMBED``. The GUI sets this enviroment variable itself and the @@ -74,50 +299,53 @@ No further notes. * Packages are no longer available for 32-bit Windows. +ARTIQ-2 +------- + 2.5 ---- +*** No further notes. 2.4 ---- +*** No further notes. 2.3 ---- +*** * When using conda, add the conda-forge channel before installing ARTIQ. 2.2 ---- +*** No further notes. 2.1 ---- +*** No further notes. 2.0 ---- +*** No further notes. 2.0rc2 ------- +****** No further notes. 2.0rc1 ------- +****** * The format of the influxdb pattern file is simplified. The procedure to edit patterns is also changed to modifying the pattern file and calling: @@ -166,39 +394,42 @@ No further notes. receives a numpy type. +ARTIQ-1 +------- + 1.3 ---- +*** No further notes. 1.2 ---- +*** No further notes. 1.1 ---- +*** * TCA6424A.set converts the "outputs" value to little-endian before programming it into the registers. 1.0 ---- +*** No further notes. 1.0rc4 ------- +****** * setattr_argument and setattr_device add their key to kernel_invariants. 1.0rc3 ------- +****** * The HDF5 format has changed. @@ -212,7 +443,7 @@ No further notes. 1.0rc2 ------- +****** * The CPU speed in the pipistrello gateware has been reduced from 83 1/3 MHz to 75 MHz. This will reduce the achievable sustained pulse rate and latency 
1.0rc1 ------- +****** * Experiments (your code) should use ``from artiq.experiment import *`` (and not ``from artiq import *`` as previously) diff --git a/artiq/__init__.py b/artiq/__init__.py index 4536d3b00..cbb01c9e5 100644 --- a/artiq/__init__.py +++ b/artiq/__init__.py @@ -1,11 +1,7 @@ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions +from ._version import get_version +__version__ = get_version() +del get_version import os __artiq_dir__ = os.path.dirname(os.path.abspath(__file__)) del os - -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions diff --git a/artiq/_version.py b/artiq/_version.py index 0452994dd..b3c3d5be4 100644 --- a/artiq/_version.py +++ b/artiq/_version.py @@ -1,520 +1,13 @@ - -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno import os -import re -import subprocess -import sys - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "" - cfg.parentdir_prefix = "artiq-" - cfg.versionfile_source = "artiq/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() 
- if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. 
By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", "--abbrev=8", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:8] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} +def get_version(): + override = os.getenv("VERSIONEER_OVERRIDE") + if override: + return override + srcroot = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) + with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f: + version = f.read().strip() + version += ".unknown" + if os.path.exists(os.path.join(srcroot, "BETA")): + version += ".beta" + return version diff --git a/artiq/applets/simple.py b/artiq/applets/simple.py index c05d86384..e5776310a 100644 --- a/artiq/applets/simple.py +++ b/artiq/applets/simple.py @@ -4,11 +4,11 @@ import asyncio import os import string -from quamash import QEventLoop, QtWidgets, QtCore +from qasync import QEventLoop, QtWidgets, QtCore -from artiq.protocols.sync_struct import Subscriber, process_mod -from artiq.protocols import pyon -from artiq.protocols.pipe_ipc import AsyncioChildComm +from sipyco.sync_struct import Subscriber, process_mod +from sipyco import pyon +from sipyco.pipe_ipc import AsyncioChildComm logger = logging.getLogger(__name__) @@ -114,7 +114,7 @@ class SimpleApplet: self.datasets = {getattr(self.args, arg.replace("-", "_")) for arg in self.dataset_args} - def quamash_init(self): + def qasync_init(self): app = QtWidgets.QApplication([]) self.loop = QEventLoop(app) asyncio.set_event_loop(self.loop) @@ -212,7 +212,7 @@ class SimpleApplet: def run(self): self.args_init() - self.quamash_init() + self.qasync_init() try: self.ipc_init() try: diff --git a/artiq/browser/datasets.py b/artiq/browser/datasets.py index 4f3e9e181..b66b18216 100644 --- a/artiq/browser/datasets.py +++ b/artiq/browser/datasets.py @@ -3,10 +3,11 @@ import asyncio from PyQt5 import QtCore, QtWidgets +from sipyco.pc_rpc import AsyncioClient as RPCClient + from artiq.tools import short_format from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel from artiq.gui.models import DictSyncTreeSepModel -from artiq.protocols.pc_rpc import AsyncioClient as RPCClient # reduced read-only version of artiq.dashboard.datasets diff --git a/artiq/browser/experiments.py b/artiq/browser/experiments.py index c1a531152..6b580a7ed 100644 --- a/artiq/browser/experiments.py +++ b/artiq/browser/experiments.py @@ -7,10 +7,11 @@ from collections import OrderedDict from PyQt5 import QtCore, QtGui, QtWidgets import h5py +from sipyco import pyon + from artiq import __artiq_dir__ as artiq_dir from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name from artiq.gui.entries import procdesc_to_entry -from artiq.protocols import pyon from artiq.master.worker import Worker, log_worker_exception logger = logging.getLogger(__name__) @@ -258,8 +259,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): def dropEvent(self, ev): for uri in ev.mimeData().urls(): if uri.scheme() == "file": - logger.debug("Loading HDF5 arguments from %s", uri.path()) - asyncio.ensure_future(self.load_hdf5_task(uri.path())) + filename = 
QtCore.QDir.toNativeSeparators(uri.toLocalFile()) + logger.debug("Loading HDF5 arguments from %s", filename) + asyncio.ensure_future(self.load_hdf5_task(filename)) break async def compute_arginfo(self): diff --git a/artiq/browser/files.py b/artiq/browser/files.py index 1dfbac28e..103ca28db 100644 --- a/artiq/browser/files.py +++ b/artiq/browser/files.py @@ -5,7 +5,8 @@ from datetime import datetime import h5py from PyQt5 import QtCore, QtWidgets, QtGui -from artiq.protocols import pyon +from sipyco import pyon + logger = logging.getLogger(__name__) @@ -41,7 +42,7 @@ class ThumbnailIconProvider(QtWidgets.QFileIconProvider): except KeyError: return try: - img = QtGui.QImage.fromData(t.value) + img = QtGui.QImage.fromData(t[()]) except: logger.warning("unable to read thumbnail from %s", info.filePath(), exc_info=True) @@ -101,13 +102,13 @@ class Hdf5FileSystemModel(QtWidgets.QFileSystemModel): h5 = open_h5(info) if h5 is not None: try: - expid = pyon.decode(h5["expid"].value) - start_time = datetime.fromtimestamp(h5["start_time"].value) + expid = pyon.decode(h5["expid"][()]) + start_time = datetime.fromtimestamp(h5["start_time"][()]) v = ("artiq_version: {}\nrepo_rev: {}\nfile: {}\n" "class_name: {}\nrid: {}\nstart_time: {}").format( - h5["artiq_version"].value, expid["repo_rev"], + h5["artiq_version"][()], expid["repo_rev"], expid["file"], expid["class_name"], - h5["rid"].value, start_time) + h5["rid"][()], start_time) return v except: logger.warning("unable to read metadata from %s", @@ -173,14 +174,14 @@ class FilesDock(QtWidgets.QDockWidget): logger.debug("loading datasets from %s", info.filePath()) with f: try: - expid = pyon.decode(f["expid"].value) - start_time = datetime.fromtimestamp(f["start_time"].value) + expid = pyon.decode(f["expid"][()]) + start_time = datetime.fromtimestamp(f["start_time"][()]) v = { - "artiq_version": f["artiq_version"].value, + "artiq_version": f["artiq_version"][()], "repo_rev": expid["repo_rev"], "file": expid["file"], "class_name": expid["class_name"], - "rid": f["rid"].value, + "rid": f["rid"][()], "start_time": start_time, } self.metadata_changed.emit(v) @@ -189,13 +190,13 @@ class FilesDock(QtWidgets.QDockWidget): info.filePath(), exc_info=True) rd = dict() if "archive" in f: - rd = {k: (True, v.value) for k, v in f["archive"].items()} + rd = {k: (True, v[()]) for k, v in f["archive"].items()} if "datasets" in f: for k, v in f["datasets"].items(): if k in rd: logger.warning("dataset '%s' is both in archive and " "outputs", k) - rd[k] = (True, v.value) + rd[k] = (True, v[()]) if rd: self.datasets.init(rd) self.dataset_changed.emit(info.filePath()) diff --git a/artiq/build_soc.py b/artiq/build_soc.py new file mode 100644 index 000000000..e3a1f3360 --- /dev/null +++ b/artiq/build_soc.py @@ -0,0 +1,72 @@ +import os +import subprocess + +from migen import * +from misoc.interconnect.csr import * +from misoc.integration.builder import * + +from artiq.gateware.amp import AMPSoC +from artiq import __version__ as artiq_version +from artiq import __artiq_dir__ as artiq_dir + + +__all__ = ["add_identifier", "build_artiq_soc"] + + +def get_identifier_string(soc, suffix="", add_class_name=True): + r = artiq_version + if suffix or add_class_name: + r += ";" + if add_class_name: + r += getattr(soc, "class_name_override", soc.__class__.__name__.lower()) + r += suffix + return r + + +class ReprogrammableIdentifier(Module, AutoCSR): + def __init__(self, ident): + self.address = CSRStorage(8) + self.data = CSRStatus(8) + + contents = list(ident.encode()) + l = 
len(contents) + if l > 255: + raise ValueError("Identifier string must be 255 characters or less") + contents.insert(0, l) + + for i in range(8): + self.specials += Instance("ROM256X1", name="identifier_str"+str(i), + i_A0=self.address.storage[0], i_A1=self.address.storage[1], + i_A2=self.address.storage[2], i_A3=self.address.storage[3], + i_A4=self.address.storage[4], i_A5=self.address.storage[5], + i_A6=self.address.storage[6], i_A7=self.address.storage[7], + o_O=self.data.status[i], + p_INIT=sum(1 << j if c & (1 << i) else 0 for j, c in enumerate(contents))) + + +def add_identifier(soc, *args, gateware_identifier_str=None, **kwargs): + if hasattr(soc, "identifier"): + raise ValueError + identifier_str = get_identifier_string(soc, *args, **kwargs) + soc.submodules.identifier = ReprogrammableIdentifier(gateware_identifier_str or identifier_str) + soc.config["IDENTIFIER_STR"] = identifier_str + + +def build_artiq_soc(soc, argdict): + firmware_dir = os.path.join(artiq_dir, "firmware") + builder = Builder(soc, **argdict) + builder.software_packages = [] + builder.add_software_package("bootloader", os.path.join(firmware_dir, "bootloader")) + if isinstance(soc, AMPSoC): + builder.add_software_package("libm") + builder.add_software_package("libprintf") + builder.add_software_package("libunwind") + builder.add_software_package("ksupport", os.path.join(firmware_dir, "ksupport")) + builder.add_software_package("runtime", os.path.join(firmware_dir, "runtime")) + else: + # Assume DRTIO satellite. + builder.add_software_package("satman", os.path.join(firmware_dir, "satman")) + try: + builder.build() + except subprocess.CalledProcessError as e: + raise SystemExit("Command {} failed".format(" ".join(e.cmd))) diff --git a/artiq/compiler/builtins.py b/artiq/compiler/builtins.py index 77e102d3b..b5662aec6 100644 --- a/artiq/compiler/builtins.py +++ b/artiq/compiler/builtins.py @@ -82,13 +82,27 @@ class TList(types.TMono): super().__init__("list", {"elt": elt}) class TArray(types.TMono): - def __init__(self, elt=None): + def __init__(self, elt=None, num_dims=1): if elt is None: elt = types.TVar() - super().__init__("array", {"elt": elt}) + if isinstance(num_dims, int): + # Make TArray more convenient to instantiate from (ARTIQ) user code. + num_dims = types.TValue(num_dims) + # For now, enforce number of dimensions to be known, as we'd otherwise + # need to implement custom unification logic for the type of `shape`. + # Default to 1 to keep compatibility with old user code from before + # multidimensional array support. 
+ assert isinstance(num_dims.value, int), "Number of dimensions must be resolved" + + super().__init__("array", {"elt": elt, "num_dims": num_dims}) + self.attributes = OrderedDict([ + ("buffer", types._TPointer(elt)), + ("shape", types.TTuple([TInt32()] * num_dims.value)), + ]) def _array_printer(typ, printer, depth, max_depth): - return "numpy.array(elt={})".format(printer.name(typ["elt"], depth, max_depth)) + return "numpy.array(elt={}, num_dims={})".format( + printer.name(typ["elt"], depth, max_depth), typ["num_dims"].value) types.TypePrinter.custom_printers["array"] = _array_printer class TRange(types.TMono): @@ -169,6 +183,9 @@ def fn_ValueError(): def fn_ZeroDivisionError(): return types.TExceptionConstructor(TException("ZeroDivisionError")) +def fn_RuntimeError(): + return types.TExceptionConstructor(TException("RuntimeError")) + def fn_range(): return types.TBuiltinFunction("range") @@ -178,6 +195,9 @@ def fn_len(): def fn_round(): return types.TBuiltinFunction("round") +def fn_abs(): + return types.TBuiltinFunction("abs") + def fn_min(): return types.TBuiltinFunction("min") @@ -202,9 +222,6 @@ def obj_interleave(): def obj_sequential(): return types.TBuiltin("sequential") -def fn_watchdog(): - return types.TBuiltinFunction("watchdog") - def fn_delay(): return types.TBuiltinFunction("delay") @@ -300,7 +317,7 @@ def is_iterable(typ): def get_iterable_elt(typ): if is_str(typ) or is_bytes(typ) or is_bytearray(typ): return TInt(types.TValue(8)) - elif is_iterable(typ): + elif types._is_pointer(typ) or is_iterable(typ): return typ.find()["elt"].find() else: assert False @@ -314,6 +331,6 @@ def is_allocated(typ): return not (is_none(typ) or is_bool(typ) or is_int(typ) or is_float(typ) or is_range(typ) or types._is_pointer(typ) or types.is_function(typ) or - types.is_c_function(typ) or types.is_rpc(typ) or + types.is_external_function(typ) or types.is_rpc(typ) or types.is_method(typ) or types.is_tuple(typ) or types.is_value(typ)) diff --git a/artiq/compiler/embedding.py b/artiq/compiler/embedding.py index 6cee76bf9..c327392f8 100644 --- a/artiq/compiler/embedding.py +++ b/artiq/compiler/embedding.py @@ -14,7 +14,7 @@ from pythonparser import lexer as source_lexer, parser as source_parser from Levenshtein import ratio as similarity, jaro_winkler from ..language import core as language_core -from . import types, builtins, asttyped, prelude +from . 
import types, builtins, asttyped, math_fns, prelude from .transforms import ASTTypedRewriter, Inferencer, IntMonomorphizer, TypedtreePrinter from .transforms.asttyped_rewriter import LocalExtractor @@ -166,6 +166,10 @@ class ASTSynthesizer: typ = builtins.TBool() return asttyped.NameConstantT(value=value, type=typ, loc=self._add(repr(value))) + elif value is numpy.float: + typ = builtins.fn_float() + return asttyped.NameConstantT(value=None, type=typ, + loc=self._add("numpy.float")) elif value is numpy.int32: typ = builtins.fn_int32() return asttyped.NameConstantT(value=None, type=typ, @@ -221,21 +225,23 @@ class ASTSynthesizer: return asttyped.ListT(elts=elts, ctx=None, type=builtins.TList(), begin_loc=begin_loc, end_loc=end_loc, loc=begin_loc.join(end_loc)) - elif isinstance(value, numpy.ndarray): - begin_loc = self._add("numpy.array([") + elif isinstance(value, tuple): + begin_loc = self._add("(") elts = [] for index, elt in enumerate(value): elts.append(self.quote(elt)) - if index < len(value) - 1: - self._add(", ") - end_loc = self._add("])") - - return asttyped.ListT(elts=elts, ctx=None, type=builtins.TArray(), - begin_loc=begin_loc, end_loc=end_loc, - loc=begin_loc.join(end_loc)) + self._add(", ") + end_loc = self._add(")") + return asttyped.TupleT(elts=elts, ctx=None, + type=types.TTuple([e.type for e in elts]), + begin_loc=begin_loc, end_loc=end_loc, + loc=begin_loc.join(end_loc)) + elif isinstance(value, numpy.ndarray): + return self.call(numpy.array, [list(value)], {}) elif inspect.isfunction(value) or inspect.ismethod(value) or \ isinstance(value, pytypes.BuiltinFunctionType) or \ - isinstance(value, SpecializedFunction): + isinstance(value, SpecializedFunction) or \ + isinstance(value, numpy.ufunc): if inspect.ismethod(value): quoted_self = self.quote(value.__self__) function_type = self.quote_function(value.__func__, self.expanded_from) @@ -749,6 +755,14 @@ class Stitcher: quote_function=self._quote_function) def _function_loc(self, function): + if isinstance(function, SpecializedFunction): + function = function.host_function + if hasattr(function, 'artiq_embedded') and function.artiq_embedded.function: + function = function.artiq_embedded.function + + if isinstance(function, str): + return source.Range(source.Buffer(function, ""), 0, 0) + filename = function.__code__.co_filename line = function.__code__.co_firstlineno name = function.__code__.co_name @@ -832,10 +846,20 @@ class Stitcher: # Extract function source. embedded_function = host_function.artiq_embedded.function - source_code = inspect.getsource(embedded_function) - filename = embedded_function.__code__.co_filename - module_name = embedded_function.__globals__['__name__'] - first_line = embedded_function.__code__.co_firstlineno + if isinstance(embedded_function, str): + # This is a function to be eval'd from the given source code in string form. + # Mangle the host function's id() into the fully qualified name to make sure + # there are no collisions. + source_code = embedded_function + embedded_function = host_function + filename = "" + module_name = "__eval_{}".format(id(host_function)) + first_line = 1 + else: + source_code = inspect.getsource(embedded_function) + filename = embedded_function.__code__.co_filename + module_name = embedded_function.__globals__['__name__'] + first_line = embedded_function.__code__.co_firstlineno # Extract function annotation. 
signature = inspect.signature(embedded_function) @@ -921,6 +945,9 @@ class Stitcher: return function_node def _extract_annot(self, function, annot, kind, call_loc, fn_kind): + if annot is None: + annot = builtins.TNone() + if not isinstance(annot, types.Type): diag = diagnostic.Diagnostic("error", "type annotation for {kind}, '{annot}', is not an ARTIQ type", @@ -969,9 +996,9 @@ class Stitcher: self.engine.process(diag) ret_type = types.TVar() - function_type = types.TCFunction(arg_types, ret_type, - name=function.artiq_embedded.syscall, - flags=function.artiq_embedded.flags) + function_type = types.TExternalFunction(arg_types, ret_type, + name=function.artiq_embedded.syscall, + flags=function.artiq_embedded.flags) self.functions[function] = function_type return function_type @@ -1014,7 +1041,7 @@ class Stitcher: function_type = types.TRPC(ret_type, service=self.embedding_map.store_object(host_function), - async=is_async) + is_async=is_async) self.functions[function] = function_type return function_type @@ -1025,7 +1052,11 @@ class Stitcher: host_function = function if function in self.functions: - pass + return self.functions[function] + + math_type = math_fns.match(function) + if math_type is not None: + self.functions[function] = math_type elif not hasattr(host_function, "artiq_embedded") or \ (host_function.artiq_embedded.core_name is None and host_function.artiq_embedded.portable is False and diff --git a/artiq/compiler/ir.py b/artiq/compiler/ir.py index 6ae9bdf76..3f984606f 100644 --- a/artiq/compiler/ir.py +++ b/artiq/compiler/ir.py @@ -36,6 +36,48 @@ class TKeyword(types.TMono): def is_keyword(typ): return isinstance(typ, TKeyword) + +# See rpc_proto.rs and comm_kernel.py:_{send,receive}_rpc_value. +def rpc_tag(typ, error_handler): + typ = typ.find() + if types.is_tuple(typ): + assert len(typ.elts) < 256 + return b"t" + bytes([len(typ.elts)]) + \ + b"".join([rpc_tag(elt_type, error_handler) + for elt_type in typ.elts]) + elif builtins.is_none(typ): + return b"n" + elif builtins.is_bool(typ): + return b"b" + elif builtins.is_int(typ, types.TValue(32)): + return b"i" + elif builtins.is_int(typ, types.TValue(64)): + return b"I" + elif builtins.is_float(typ): + return b"f" + elif builtins.is_str(typ): + return b"s" + elif builtins.is_bytes(typ): + return b"B" + elif builtins.is_bytearray(typ): + return b"A" + elif builtins.is_list(typ): + return b"l" + rpc_tag(builtins.get_iterable_elt(typ), error_handler) + elif builtins.is_array(typ): + num_dims = typ["num_dims"].value + return b"a" + bytes([num_dims]) + rpc_tag(typ["elt"], error_handler) + elif builtins.is_range(typ): + return b"r" + rpc_tag(builtins.get_iterable_elt(typ), error_handler) + elif is_keyword(typ): + return b"k" + rpc_tag(typ.params["value"], error_handler) + elif types.is_function(typ) or types.is_method(typ) or types.is_rpc(typ): + raise ValueError("RPC tag for functional value") + elif '__objectid__' in typ.attributes: + return b"O" + else: + error_handler(typ) + + class Value: """ An SSA value that keeps track of its uses. 
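As a rough illustration of the encoding implemented by rpc_tag above (a sketch only, not taken from the patch; it assumes the builtins/types constructors used elsewhere in the compiler package):

    # Sketch: tags produced by rpc_tag() for a few simple types.
    from artiq.compiler import builtins, types
    from artiq.compiler.ir import rpc_tag

    def fail(typ):
        raise TypeError("no RPC tag for {}".format(typ))

    assert rpc_tag(builtins.TList(builtins.TInt32()), fail) == b"li"
    assert rpc_tag(builtins.TArray(builtins.TFloat(), 2), fail) == b"a\x02f"
    assert rpc_tag(types.TTuple([builtins.TBool(), builtins.TStr()]), fail) == b"t\x02bs"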
@@ -303,6 +345,7 @@ class BasicBlock(NamedValue): :ivar instructions: (list of :class:`Instruction`) """ + _dump_loc = True def __init__(self, instructions, name=""): super().__init__(TBasicBlock(), name) @@ -378,12 +421,12 @@ class BasicBlock(NamedValue): lines = ["{}:".format(escape_name(self.name))] if self.function is not None: lines[0] += " ; predecessors: {}".format( - ", ".join([escape_name(pred.name) for pred in self.predecessors()])) + ", ".join(sorted([escape_name(pred.name) for pred in self.predecessors()]))) # Annotated instructions loc = None for insn in self.instructions: - if loc != insn.loc: + if self._dump_loc and loc != insn.loc: loc = insn.loc if loc is None: @@ -409,7 +452,13 @@ class BasicBlock(NamedValue): class Argument(NamedValue): """ A function argument. + + :ivar loc: (:class:`pythonparser.source.Range` or None) + source location """ + def __init__(self, typ, name): + super().__init__(typ, name) + self.loc = None def as_entity(self, type_printer): return self.as_operand(type_printer) @@ -731,6 +780,33 @@ class SetAttr(Instruction): def value(self): return self.operands[1] +class Offset(Instruction): + """ + An intruction that adds an offset to a pointer (indexes into a list). + + This is used to represent internally generated pointer arithmetic, and must + remain inside the same object (see :class:`GetElem` and LLVM's GetElementPtr). + """ + + """ + :param lst: (:class:`Value`) list + :param index: (:class:`Value`) index + """ + def __init__(self, base, offset, name=""): + assert isinstance(base, Value) + assert isinstance(offset, Value) + typ = types._TPointer(builtins.get_iterable_elt(base.type)) + super().__init__([base, offset], typ, name) + + def opcode(self): + return "offset" + + def base(self): + return self.operands[0] + + def index(self): + return self.operands[1] + class GetElem(Instruction): """ An intruction that loads an element from a list. @@ -748,7 +824,7 @@ class GetElem(Instruction): def opcode(self): return "getelem" - def list(self): + def base(self): return self.operands[0] def index(self): @@ -774,7 +850,7 @@ class SetElem(Instruction): def opcode(self): return "setelem" - def list(self): + def base(self): return self.operands[0] def index(self): @@ -833,6 +909,7 @@ class Arith(Instruction): def rhs(self): return self.operands[1] + class Compare(Instruction): """ A comparison operation on numbers. diff --git a/artiq/compiler/math_fns.py b/artiq/compiler/math_fns.py new file mode 100644 index 000000000..35d23b534 --- /dev/null +++ b/artiq/compiler/math_fns.py @@ -0,0 +1,132 @@ +r""" +The :mod:`math_fns` module lists math-related functions from NumPy recognized +by the ARTIQ compiler so host function objects can be :func:`match`\ ed to +the compiler type metadata describing their core device analogue. +""" + +from collections import OrderedDict +import numpy +from . import builtins, types + +# Some special mathematical functions are exposed via their scipy.special +# equivalents. Since the rest of the ARTIQ core does not depend on SciPy, +# gracefully handle it not being present, making the functions simply not +# available. +try: + import scipy.special as scipy_special +except ImportError: + scipy_special = None + +#: float -> float numpy.* math functions for which llvm.* intrinsics exist. +unary_fp_intrinsics = [(name, "llvm." 
+ name + ".f64") for name in [
+    "sin",
+    "cos",
+    "exp",
+    "exp2",
+    "log",
+    "log10",
+    "log2",
+    "fabs",
+    "floor",
+    "ceil",
+    "trunc",
+    "sqrt",
+]] + [
+    # numpy.rint() seems to (NumPy 1.19.0, Python 3.8.5, Linux x86_64)
+    # implement round-to-even, but unfortunately, rust-lang/libm only
+    # provides round(), which always rounds away from zero.
+    #
+    # As there is no equivalent of the latter in NumPy (nor any other
+    # basic rounding function), expose round() as numpy.rint anyway,
+    # even if the rounding modes don't match up, so there is some way
+    # to do rounding on the core device. (numpy.round() has entirely
+    # different semantics; it rounds to a configurable number of
+    # decimals.)
+    ("rint", "llvm.round.f64"),
+]
+
+#: float -> float numpy.* math functions lowered to runtime calls.
+unary_fp_runtime_calls = [
+    ("tan", "tan"),
+    ("arcsin", "asin"),
+    ("arccos", "acos"),
+    ("arctan", "atan"),
+    ("sinh", "sinh"),
+    ("cosh", "cosh"),
+    ("tanh", "tanh"),
+    ("arcsinh", "asinh"),
+    ("arccosh", "acosh"),
+    ("arctanh", "atanh"),
+    ("expm1", "expm1"),
+    ("cbrt", "cbrt"),
+]
+
+scipy_special_unary_runtime_calls = [
+    ("erf", "erf"),
+    ("erfc", "erfc"),
+    ("gamma", "tgamma"),
+    ("gammaln", "lgamma"),
+    ("j0", "j0"),
+    ("j1", "j1"),
+    ("y0", "y0"),
+    ("y1", "y1"),
+]
+# Not mapped: jv/yv, libm only supports integer orders.
+
+#: (float, float) -> float numpy.* math functions lowered to runtime calls.
+binary_fp_runtime_calls = [
+    ("arctan2", "atan2"),
+    ("copysign", "copysign"),
+    ("fmax", "fmax"),
+    ("fmin", "fmin"),
+    # ("ldexp", "ldexp"), # One argument is an int; would need a bit more plumbing.
+    ("hypot", "hypot"),
+    ("nextafter", "nextafter"),
+]
+
+#: Array handling builtins (special treatment due to allocations).
+numpy_builtins = ["transpose"]
+
+
+def fp_runtime_type(name, arity):
+    args = [("arg{}".format(i), builtins.TFloat()) for i in range(arity)]
+    return types.TExternalFunction(
+        OrderedDict(args),
+        builtins.TFloat(),
+        name,
+        # errno isn't observable from ARTIQ Python.
+        flags={"nounwind", "nowrite"},
+        broadcast_across_arrays=True)
+
+
+math_fn_map = {
+    getattr(numpy, symbol): fp_runtime_type(mangle, arity=1)
+    for symbol, mangle in (unary_fp_intrinsics + unary_fp_runtime_calls)
+}
+for symbol, mangle in binary_fp_runtime_calls:
+    math_fn_map[getattr(numpy, symbol)] = fp_runtime_type(mangle, arity=2)
+for name in numpy_builtins:
+    math_fn_map[getattr(numpy, name)] = types.TBuiltinFunction("numpy."
+ name) +if scipy_special is not None: + for symbol, mangle in scipy_special_unary_runtime_calls: + math_fn_map[getattr(scipy_special, symbol)] = fp_runtime_type(mangle, arity=1) + + +def match(obj): + return math_fn_map.get(obj, None) diff --git a/artiq/compiler/module.py b/artiq/compiler/module.py index 052a176ac..d43404b20 100644 --- a/artiq/compiler/module.py +++ b/artiq/compiler/module.py @@ -60,12 +60,14 @@ class Module: ref_period=ref_period) dead_code_eliminator = transforms.DeadCodeEliminator(engine=self.engine) local_access_validator = validators.LocalAccessValidator(engine=self.engine) + local_demoter = transforms.LocalDemoter() + constant_hoister = transforms.ConstantHoister() devirtualization = analyses.Devirtualization() interleaver = transforms.Interleaver(engine=self.engine) invariant_detection = analyses.InvariantDetection(engine=self.engine) - cast_monomorphizer.visit(src.typedtree) int_monomorphizer.visit(src.typedtree) + cast_monomorphizer.visit(src.typedtree) inferencer.visit(src.typedtree) monomorphism_validator.visit(src.typedtree) escape_validator.visit(src.typedtree) @@ -77,6 +79,8 @@ class Module: dead_code_eliminator.process(self.artiq_ir) interleaver.process(self.artiq_ir) local_access_validator.process(self.artiq_ir) + local_demoter.process(self.artiq_ir) + constant_hoister.process(self.artiq_ir) if remarks: invariant_detection.process(self.artiq_ir) diff --git a/artiq/compiler/prelude.py b/artiq/compiler/prelude.py index 24a7bd1fa..13f319650 100644 --- a/artiq/compiler/prelude.py +++ b/artiq/compiler/prelude.py @@ -25,10 +25,12 @@ def globals(): "IndexError": builtins.fn_IndexError(), "ValueError": builtins.fn_ValueError(), "ZeroDivisionError": builtins.fn_ZeroDivisionError(), + "RuntimeError": builtins.fn_RuntimeError(), # Built-in Python functions "len": builtins.fn_len(), "round": builtins.fn_round(), + "abs": builtins.fn_abs(), "min": builtins.fn_min(), "max": builtins.fn_max(), "print": builtins.fn_print(), @@ -42,7 +44,6 @@ def globals(): "parallel": builtins.obj_parallel(), "interleave": builtins.obj_interleave(), "sequential": builtins.obj_sequential(), - "watchdog": builtins.fn_watchdog(), # ARTIQ time management functions "delay": builtins.fn_delay(), diff --git a/artiq/compiler/targets.py b/artiq/compiler/targets.py index b3cfdec8d..9ebc7907d 100644 --- a/artiq/compiler/targets.py +++ b/artiq/compiler/targets.py @@ -1,5 +1,5 @@ -import os, sys, tempfile, subprocess -from artiq.compiler import types +import os, sys, tempfile, subprocess, io +from artiq.compiler import types, ir from llvmlite_artiq import ir as ll, binding as llvm llvm.initialize() @@ -8,40 +8,44 @@ llvm.initialize_all_asmprinters() class RunTool: def __init__(self, pattern, **tempdata): - self.files = [] - self.pattern = pattern - self.tempdata = tempdata - - def maketemp(self, data): - f = tempfile.NamedTemporaryFile() - f.write(data) - f.flush() - self.files.append(f) - return f + self._pattern = pattern + self._tempdata = tempdata + self._tempnames = {} + self._tempfiles = {} def __enter__(self): - tempfiles = {} - tempnames = {} - for key in self.tempdata: - tempfiles[key] = self.maketemp(self.tempdata[key]) - tempnames[key] = tempfiles[key].name + for key, data in self._tempdata.items(): + if data is None: + fd, filename = tempfile.mkstemp() + os.close(fd) + self._tempnames[key] = filename + else: + with tempfile.NamedTemporaryFile(delete=False) as f: + f.write(data) + self._tempnames[key] = f.name cmdline = [] - for argument in self.pattern: - 
cmdline.append(argument.format(**tempnames)) + for argument in self._pattern: + cmdline.append(argument.format(**self._tempnames)) - process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) stdout, stderr = process.communicate() if process.returncode != 0: raise Exception("{} invocation failed: {}". - format(cmdline[0], stderr.decode('utf-8'))) + format(cmdline[0], stderr)) - tempfiles["__stdout__"] = stdout.decode('utf-8') - return tempfiles + self._tempfiles["__stdout__"] = io.StringIO(stdout) + for key in self._tempdata: + if self._tempdata[key] is None: + self._tempfiles[key] = open(self._tempnames[key], "rb") + return self._tempfiles def __exit__(self, exc_typ, exc_value, exc_trace): - for f in self.files: - f.close() + for file in self._tempfiles.values(): + file.close() + for filename in self._tempnames.values(): + os.unlink(filename) def _dump(target, kind, suffix, content): if target is not None: @@ -71,12 +75,23 @@ class Target: :var print_function: (string) Name of a formatted print functions (with the signature of ``printf``) provided by the target, e.g. ``"printf"``. + :var little_endian: (boolean) + Whether the code will be executed on a little-endian machine. This cannot be always + determined from data_layout due to JIT. + :var now_pinning: (boolean) + Whether the target implements the now-pinning RTIO optimization. """ triple = "unknown" data_layout = "" features = [] print_function = "printf" + little_endian = False + now_pinning = True + tool_ld = "ld.lld" + tool_strip = "llvm-strip" + tool_addr2line = "llvm-addr2line" + tool_cxxfilt = "llvm-cxxfilt" def __init__(self): self.llcontext = ll.Context() @@ -127,6 +142,9 @@ class Target: print("====== MODULE_SIGNATURE DUMP ======", file=sys.stderr) print(module, file=sys.stderr) + if os.getenv("ARTIQ_IR_NO_LOC") is not None: + ir.BasicBlock._dump_loc = False + type_printer = types.TypePrinter() _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt", lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir)) @@ -163,10 +181,11 @@ class Target: def link(self, objects): """Link the relocatable objects into a shared library for this target.""" - with RunTool([self.triple + "-ld", "-shared", "--eh-frame-hdr"] + + with RunTool([self.tool_ld, "-shared", "--eh-frame-hdr"] + ["{{obj{}}}".format(index) for index in range(len(objects))] + + ["-x"] + ["-o", "{output}"], - output=b"", + output=None, **{"obj{}".format(index): obj for index, obj in enumerate(objects)}) \ as results: library = results["output"].read() @@ -180,8 +199,8 @@ class Target: return self.link([self.assemble(self.compile(module)) for module in modules]) def strip(self, library): - with RunTool([self.triple + "-strip", "--strip-debug", "{library}", "-o", "{output}"], - library=library, output=b"") \ + with RunTool([self.tool_strip, "--strip-debug", "{library}", "-o", "{output}"], + library=library, output=None) \ as results: return results["output"].read() @@ -194,11 +213,11 @@ class Target: # inside the call instruction (or its delay slot), since that's what # the backtrace entry should point at. 
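+        # (Sketch: a return address of e.g. 0x2300 is therefore handed to
+        # addr2line as 0x22ff, so the reported source line is that of the call
+        # itself rather than of the instruction after it.)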
offset_addresses = [hex(addr - 1) for addr in addresses] - with RunTool([self.triple + "-addr2line", "--addresses", "--functions", "--inlines", + with RunTool([self.tool_addr2line, "--addresses", "--functions", "--inlines", "--demangle", "--exe={library}"] + offset_addresses, library=library) \ as results: - lines = iter(results["__stdout__"].rstrip().split("\n")) + lines = iter(results["__stdout__"].read().rstrip().split("\n")) backtrace = [] while True: try: @@ -216,18 +235,25 @@ class Target: filename, line = location.rsplit(":", 1) if filename == "??" or filename == "": continue + if line == "?": + line = -1 + else: + line = int(line) # can't get column out of addr2line D: - backtrace.append((filename, int(line), -1, function, address)) + backtrace.append((filename, line, -1, function, address)) return backtrace def demangle(self, names): - with RunTool([self.triple + "-c++filt"] + names) as results: - return results["__stdout__"].rstrip().split("\n") + with RunTool([self.tool_cxxfilt] + names) as results: + return results["__stdout__"].read().rstrip().split("\n") class NativeTarget(Target): def __init__(self): super().__init__() self.triple = llvm.get_default_triple() + host_data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data) + assert host_data_layout[0] in "eE" + self.little_endian = host_data_layout[0] == "e" class OR1KTarget(Target): triple = "or1k-linux" @@ -235,3 +261,23 @@ class OR1KTarget(Target): "f64:32:32-v64:32:32-v128:32:32-a0:0:32-n32" features = ["mul", "div", "ffl1", "cmov", "addc"] print_function = "core_log" + little_endian = False + now_pinning = True + + tool_ld = "or1k-linux-ld" + tool_strip = "or1k-linux-strip" + tool_addr2line = "or1k-linux-addr2line" + tool_cxxfilt = "or1k-linux-c++filt" + +class CortexA9Target(Target): + triple = "armv7-unknown-linux-gnueabihf" + data_layout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" + features = ["dsp", "fp16", "neon", "vfp3"] + print_function = "core_log" + little_endian = True + now_pinning = False + + tool_ld = "armv7-unknown-linux-gnueabihf-ld" + tool_strip = "armv7-unknown-linux-gnueabihf-strip" + tool_addr2line = "armv7-unknown-linux-gnueabihf-addr2line" + tool_cxxfilt = "armv7-unknown-linux-gnueabihf-c++filt" diff --git a/artiq/compiler/testbench/embedding.py b/artiq/compiler/testbench/embedding.py index 65435a7db..c637c4b3a 100644 --- a/artiq/compiler/testbench/embedding.py +++ b/artiq/compiler/testbench/embedding.py @@ -1,4 +1,4 @@ -import sys, os +import sys, os, tokenize from artiq.master.databases import DeviceDB from artiq.master.worker_db import DeviceManager @@ -27,7 +27,7 @@ def main(): ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py") dmgr = DeviceManager(DeviceDB(ddb_path)) - with open(sys.argv[1]) as f: + with tokenize.open(sys.argv[1]) as f: testcase_code = compile(f.read(), f.name, "exec") testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr} exec(testcase_code, testcase_vars) @@ -38,8 +38,6 @@ def main(): core.compile(testcase_vars["entrypoint"], (), {}) else: core.run(testcase_vars["entrypoint"], (), {}) - print(core.comm.get_log()) - core.comm.clear_log() except CompileError as error: if not diag: exit(1) diff --git a/artiq/compiler/testbench/irgen.py b/artiq/compiler/testbench/irgen.py index fa1608e12..275f11c94 100644 --- a/artiq/compiler/testbench/irgen.py +++ b/artiq/compiler/testbench/irgen.py @@ -1,8 +1,12 @@ -import sys, fileinput +import sys, os, fileinput from pythonparser import diagnostic +from .. 
import ir from ..module import Module, Source def main(): + if os.getenv("ARTIQ_IR_NO_LOC") is not None: + ir.BasicBlock._dump_loc = False + def process_diagnostic(diag): print("\n".join(diag.render())) if diag.level in ("fatal", "error"): diff --git a/artiq/compiler/testbench/perf_embedding.py b/artiq/compiler/testbench/perf_embedding.py index b94b88735..41f09cb04 100644 --- a/artiq/compiler/testbench/perf_embedding.py +++ b/artiq/compiler/testbench/perf_embedding.py @@ -1,4 +1,4 @@ -import sys, os +import sys, os, tokenize from pythonparser import diagnostic from ...language.environment import ProcessArgumentManager from ...master.databases import DeviceDB, DatasetDB @@ -22,7 +22,7 @@ def main(): engine = diagnostic.Engine() engine.process = process_diagnostic - with open(sys.argv[1]) as f: + with tokenize.open(sys.argv[1]) as f: testcase_code = compile(f.read(), f.name, "exec") testcase_vars = {'__name__': 'testbench'} exec(testcase_code, testcase_vars) diff --git a/artiq/compiler/transforms/__init__.py b/artiq/compiler/transforms/__init__.py index 305cf614e..5696be95f 100644 --- a/artiq/compiler/transforms/__init__.py +++ b/artiq/compiler/transforms/__init__.py @@ -5,6 +5,8 @@ from .cast_monomorphizer import CastMonomorphizer from .iodelay_estimator import IODelayEstimator from .artiq_ir_generator import ARTIQIRGenerator from .dead_code_eliminator import DeadCodeEliminator -from .llvm_ir_generator import LLVMIRGenerator +from .local_demoter import LocalDemoter +from .constant_hoister import ConstantHoister from .interleaver import Interleaver from .typedtree_printer import TypedtreePrinter +from .llvm_ir_generator import LLVMIRGenerator diff --git a/artiq/compiler/transforms/artiq_ir_generator.py b/artiq/compiler/transforms/artiq_ir_generator.py index a1bc76956..afa75b61b 100644 --- a/artiq/compiler/transforms/artiq_ir_generator.py +++ b/artiq/compiler/transforms/artiq_ir_generator.py @@ -7,6 +7,7 @@ semantics explicitly. """ from collections import OrderedDict, defaultdict +from functools import reduce from pythonparser import algorithm, diagnostic, ast from .. import types, builtins, asttyped, ir, iodelay @@ -52,12 +53,6 @@ class ARTIQIRGenerator(algorithm.Visitor): a component of a composite right-hand side when visiting a composite left-hand side, such as, in ``x, y = z``, the 2nd tuple element when visting ``y`` - :ivar current_assert_env: (:class:`ir.Alloc` of type :class:`ir.TEnvironment`) - the environment where the individual components of current assert - statement are stored until display - :ivar current_assert_subexprs: (list of (:class:`ast.AST`, string)) - the mapping from components of current assert statement to the names - their values have in :ivar:`current_assert_env` :ivar break_target: (:class:`ir.BasicBlock` or None) the basic block to which ``break`` will transfer control :ivar continue_target: (:class:`ir.BasicBlock` or None) @@ -82,6 +77,13 @@ class ARTIQIRGenerator(algorithm.Visitor): :ivar method_map: (map of :class:`ast.AttributeT` to :class:`ir.GetAttribute`) the map from method resolution nodes to instructions retrieving the called function inside a translated :class:`ast.CallT` node + + Finally, functions that implement array operations are instantiated on the fly as + necessary. 
They are kept track of in global dictionaries, with a mangled name + containing types and operations as key: + + :ivar array_op_funcs: the map from mangled name to implementation of + operations on/between arrays """ _size_type = builtins.TInt32() @@ -100,8 +102,6 @@ class ARTIQIRGenerator(algorithm.Visitor): self.current_private_env = None self.current_args = None self.current_assign = None - self.current_assert_env = None - self.current_assert_subexprs = None self.break_target = None self.continue_target = None self.return_target = None @@ -110,6 +110,8 @@ class ARTIQIRGenerator(algorithm.Visitor): self.function_map = dict() self.variable_map = dict() self.method_map = defaultdict(lambda: []) + self.array_op_funcs = dict() + self.raise_assert_func = None def annotate_calls(self, devirtualization): for var_node in devirtualization.variable_map: @@ -268,6 +270,9 @@ class ARTIQIRGenerator(algorithm.Visitor): self.current_args[arg_name] = arg optargs.append(arg) + for (arg, arg_node) in zip(args + optargs, node.args.args): + arg.loc = arg_node.loc + func = ir.Function(typ, ".".join(self.name), [env_arg] + args + optargs, loc=node.lambda_loc if is_lambda else node.keyword_loc) func.is_internal = is_internal @@ -405,6 +410,8 @@ class ARTIQIRGenerator(algorithm.Visitor): length = self.iterable_len(insn) return self.append(ir.Compare(ast.NotEq(loc=None), length, ir.Constant(0, length.type)), block=block) + elif builtins.is_none(insn.type): + return ir.Constant(False, builtins.TBool()) else: note = diagnostic.Diagnostic("note", "this expression has type {type}", @@ -506,7 +513,28 @@ class ARTIQIRGenerator(algorithm.Visitor): def iterable_get(self, value, index): # Assuming the value is within bounds. - if builtins.is_listish(value.type): + if builtins.is_array(value.type): + # Scalar indexing into ndarray. 
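+            # Sketch of the behaviour implemented below: for a (3, 4) array,
+            # ``value[i]`` yields a 1D view of length 4 that aliases the same
+            # buffer at element offset 4*i; for a 1D array it is a plain
+            # element load instead.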
+ num_dims = value.type.find()["num_dims"].value + if num_dims > 1: + old_shape = self.append(ir.GetAttr(value, "shape")) + lengths = [self.append(ir.GetAttr(old_shape, i)) for i in range(1, num_dims)] + new_shape = self._make_array_shape(lengths) + + stride = reduce( + lambda l, r: self.append(ir.Arith(ast.Mult(loc=None), l, r)), + lengths[1:], lengths[0]) + offset = self.append(ir.Arith(ast.Mult(loc=None), stride, index)) + old_buffer = self.append(ir.GetAttr(value, "buffer")) + new_buffer = self.append(ir.Offset(old_buffer, offset)) + + result_type = builtins.TArray(value.type.find()["elt"], + types.TValue(num_dims - 1)) + return self.append(ir.Alloc([new_buffer, new_shape], result_type)) + else: + buffer = self.append(ir.GetAttr(value, "buffer")) + return self.append(ir.GetElem(buffer, index)) + elif builtins.is_listish(value.type): return self.append(ir.GetElem(value, index)) elif builtins.is_range(value.type): start = self.append(ir.GetAttr(value, "start")) @@ -823,35 +851,23 @@ class ARTIQIRGenerator(algorithm.Visitor): cleanup = [] for item_node in node.items: + # user-defined context manager context_expr_node = item_node.context_expr optional_vars_node = item_node.optional_vars + context_mgr = self.visit(context_expr_node) + enter_fn = self.append(ir.GetAttr(context_mgr, '__enter__')) + exit_fn = self.append(ir.GetAttr(context_mgr, '__exit__')) - if isinstance(context_expr_node, asttyped.CallT) and \ - types.is_builtin(context_expr_node.func.type, "watchdog"): - timeout = self.visit(context_expr_node.args[0]) - timeout_ms = self.append(ir.Arith(ast.Mult(loc=None), timeout, - ir.Constant(1000, builtins.TFloat()))) - timeout_ms_int = self.append(ir.Coerce(timeout_ms, builtins.TInt64())) + try: + self.current_assign = self._user_call(enter_fn, [], {}) + if optional_vars_node is not None: + self.visit(optional_vars_node) + finally: + self.current_assign = None - watchdog_id = self.append(ir.Builtin("watchdog_set", [timeout_ms_int], - builtins.TInt32())) - cleanup.append(lambda: - self.append(ir.Builtin("watchdog_clear", [watchdog_id], builtins.TNone()))) - else: # user-defined context manager - context_mgr = self.visit(context_expr_node) - enter_fn = self.append(ir.GetAttr(context_mgr, '__enter__')) - exit_fn = self.append(ir.GetAttr(context_mgr, '__exit__')) - - try: - self.current_assign = self._user_call(enter_fn, [], {}) - if optional_vars_node is not None: - self.visit(optional_vars_node) - finally: - self.current_assign = None - - none = self.append(ir.Alloc([], builtins.TNone())) - cleanup.append(lambda: - self._user_call(exit_fn, [none, none, none], {})) + none = self.append(ir.Alloc([], builtins.TNone())) + cleanup.append(lambda: + self._user_call(exit_fn, [none, none, none], {})) self._try_finally( body_gen=lambda: self.visit(node.body), @@ -983,19 +999,30 @@ class ARTIQIRGenerator(algorithm.Visitor): cond_block = self.current_block self.current_block = body_block = self.add_block("check.body") - closure = self.append(ir.Closure(func, ir.Constant(None, ir.TEnvironment("check", {})))) + self._invoke_raising_func(func, params, "check") + + self.current_block = tail_block = self.add_block("check.tail") + cond_block.append(ir.BranchIf(cond, tail_block, body_block)) + + def _invoke_raising_func(self, func, params, block_name): + """Emit a call/invoke instruction as appropriate to terminte the current + basic block with a call to a helper function that always raises an + exception. 
+ + (This is done for compiler-inserted checks and assertions to keep the + generated code tight for the normal case.) + """ + closure = self.append(ir.Closure(func, + ir.Constant(None, ir.TEnvironment("raise", {})))) if self.unwind_target is None: insn = self.append(ir.Call(closure, params, {})) else: - after_invoke = self.add_block("check.invoke") + after_invoke = self.add_block(block_name + ".invoke") insn = self.append(ir.Invoke(closure, params, {}, after_invoke, self.unwind_target)) self.current_block = after_invoke insn.is_cold = True self.append(ir.Unreachable()) - self.current_block = tail_block = self.add_block("check.tail") - cond_block.append(ir.BranchIf(cond, tail_block, body_block)) - def _map_index(self, length, index, one_past_the_end=False, loc=None): lt_0 = self.append(ir.Compare(ast.Lt(loc=None), index, ir.Constant(0, index.type))) @@ -1056,16 +1083,31 @@ class ARTIQIRGenerator(algorithm.Visitor): finally: self.current_assign = old_assign - length = self.iterable_len(value, index.type) - mapped_index = self._map_index(length, index, - loc=node.begin_loc) - if self.current_assign is None: - result = self.iterable_get(value, mapped_index) - result.set_name("{}.at.{}".format(value.name, _readable_name(index))) - return result + # For multi-dimensional indexes, just apply them sequentially. This + # works, as they are only supported for types where we do not + # immediately need to distinguish between the Get and Set cases + # (i.e. arrays, which are reference types). + if types.is_tuple(index.type): + num_idxs = len(index.type.find().elts) + indices = [ + self.append(ir.GetAttr(index, i)) for i in range(num_idxs) + ] else: - self.append(ir.SetElem(value, mapped_index, self.current_assign, - name="{}.at.{}".format(value.name, _readable_name(index)))) + indices = [index] + indexed = value + for i, idx in enumerate(indices): + length = self.iterable_len(indexed, idx.type) + mapped_index = self._map_index(length, idx, loc=node.begin_loc) + if self.current_assign is None or i < len(indices) - 1: + indexed = self.iterable_get(indexed, mapped_index) + indexed.set_name("{}.at.{}".format(indexed.name, + _readable_name(idx))) + else: + self.append(ir.SetElem(indexed, mapped_index, self.current_assign, + name="{}.at.{}".format(value.name, + _readable_name(index)))) + if self.current_assign is None: + return indexed else: # Slice length = self.iterable_len(value, node.slice.type) @@ -1262,7 +1304,6 @@ class ARTIQIRGenerator(algorithm.Visitor): for value_node in node.values: value_head = self.current_block value = self.visit(value_node) - self.instrument_assert(value_node, value) value_tail = self.current_block blocks.append((value, value_head, value_tail)) @@ -1283,6 +1324,69 @@ class ARTIQIRGenerator(algorithm.Visitor): value_tail.append(ir.Branch(tail)) return phi + def _make_array_unaryop(self, name, make_op, result_type, arg_type): + try: + result = ir.Argument(result_type, "result") + arg = ir.Argument(arg_type, "arg") + + # TODO: We'd like to use a "C function" here to be able to supply + # specialised implementations in a library in the future (and e.g. avoid + # passing around the context argument), but the code generator currently + # doesn't allow emitting them. 
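+            # Conceptually, the helper emitted here is (sketch):
+            #     def _array_USub_...(result, arg):
+            #         for i in range(number of elements):
+            #             result.buffer[i] = make_op(arg.buffer[i])
+            # i.e. it fills a preallocated result array element by element.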
+ args = [result, arg] + typ = types.TFunction(args=OrderedDict([(arg.name, arg.type) + for arg in args]), + optargs=OrderedDict(), + ret=builtins.TNone()) + env_args = [ir.EnvironmentArgument(self.current_env.type, "ARG.ENV")] + + old_loc, self.current_loc = self.current_loc, None + func = ir.Function(typ, name, env_args + args) + func.is_internal = True + func.is_generated = True + self.functions.append(func) + old_func, self.current_function = self.current_function, func + + entry = self.add_block("entry") + old_block, self.current_block = self.current_block, entry + + old_final_branch, self.final_branch = self.final_branch, None + old_unwind, self.unwind_target = self.unwind_target, None + + shape = self.append(ir.GetAttr(arg, "shape")) + + result_buffer = self.append(ir.GetAttr(result, "buffer")) + arg_buffer = self.append(ir.GetAttr(arg, "buffer")) + num_total_elts = self._get_total_array_len(shape) + + def body_gen(index): + a = self.append(ir.GetElem(arg_buffer, index)) + self.append( + ir.SetElem(result_buffer, index, make_op(a))) + return self.append( + ir.Arith(ast.Add(loc=None), index, ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, num_total_elts)), body_gen) + + self.append(ir.Return(ir.Constant(None, builtins.TNone()))) + return func + finally: + self.current_loc = old_loc + self.current_function = old_func + self.current_block = old_block + self.final_branch = old_final_branch + self.unwind_target = old_unwind + + def _get_array_unaryop(self, name, make_op, result_type, arg_type): + name = "_array_{}_{}".format( + name, self._mangle_arrayop_types([result_type, arg_type])) + if name not in self.array_op_funcs: + self.array_op_funcs[name] = self._make_array_unaryop( + name, make_op, result_type, arg_type) + return self.array_op_funcs[name] + def visit_UnaryOpT(self, node): if isinstance(node.op, ast.Not): cond = self.coerce_to_bool(self.visit(node.operand)) @@ -1294,9 +1398,18 @@ class ARTIQIRGenerator(algorithm.Visitor): return self.append(ir.Arith(ast.BitXor(loc=None), ir.Constant(-1, operand.type), operand)) elif isinstance(node.op, ast.USub): + def make_sub(val): + return self.append(ir.Arith(ast.Sub(loc=None), + ir.Constant(0, val.type), val)) operand = self.visit(node.operand) - return self.append(ir.Arith(ast.Sub(loc=None), - ir.Constant(0, operand.type), operand)) + if builtins.is_array(operand.type): + shape = self.append(ir.GetAttr(operand, "shape")) + result, _ = self._allocate_new_array(node.type.find()["elt"], shape) + func = self._get_array_unaryop("USub", make_sub, node.type, operand.type) + self._invoke_arrayop(func, [result, operand]) + return result + else: + return make_sub(operand) elif isinstance(node.op, ast.UAdd): # No-op. 
return self.visit(node.operand) @@ -1308,12 +1421,340 @@ class ARTIQIRGenerator(algorithm.Visitor): if node.type.find() == value.type: return value else: - return self.append(ir.Coerce(value, node.type, - name="{}.{}".format(_readable_name(value), - node.type.name))) + if builtins.is_array(node.type): + result_elt = node.type.find()["elt"] + shape = self.append(ir.GetAttr(value, "shape")) + result, _ = self._allocate_new_array(result_elt, shape) + func = self._get_array_unaryop( + "Coerce", lambda v: self.append(ir.Coerce(v, result_elt)), + node.type, value.type) + self._invoke_arrayop(func, [result, value]) + return result + else: + return self.append( + ir.Coerce(value, + node.type, + name="{}.{}".format(_readable_name(value), + node.type.name))) + + def _get_total_array_len(self, shape): + lengths = [ + self.append(ir.GetAttr(shape, i)) for i in range(len(shape.type.elts)) + ] + return reduce(lambda l, r: self.append(ir.Arith(ast.Mult(loc=None), l, r)), + lengths[1:], lengths[0]) + + def _allocate_new_array(self, elt, shape): + total_length = self._get_total_array_len(shape) + buffer = self.append(ir.Alloc([total_length], types._TPointer(elt=elt))) + result_type = builtins.TArray(elt, types.TValue(len(shape.type.elts))) + return self.append(ir.Alloc([buffer, shape], result_type)), total_length + + def _make_array_binop(self, name, result_type, lhs_type, rhs_type, body_gen): + try: + result = ir.Argument(result_type, "result") + lhs = ir.Argument(lhs_type, "lhs") + rhs = ir.Argument(rhs_type, "rhs") + + # TODO: We'd like to use a "C function" here to be able to supply + # specialised implementations in a library in the future (and e.g. avoid + # passing around the context argument), but the code generator currently + # doesn't allow emitting them. + args = [result, lhs, rhs] + typ = types.TFunction(args=OrderedDict([(arg.name, arg.type) + for arg in args]), + optargs=OrderedDict(), + ret=builtins.TNone()) + env_args = [ir.EnvironmentArgument(self.current_env.type, "ARG.ENV")] + + old_loc, self.current_loc = self.current_loc, None + func = ir.Function(typ, name, env_args + args) + func.is_internal = True + func.is_generated = True + self.functions.append(func) + old_func, self.current_function = self.current_function, func + + entry = self.add_block("entry") + old_block, self.current_block = self.current_block, entry + + old_final_branch, self.final_branch = self.final_branch, None + old_unwind, self.unwind_target = self.unwind_target, None + + body_gen(result, lhs, rhs) + + self.append(ir.Return(ir.Constant(None, builtins.TNone()))) + return func + finally: + self.current_loc = old_loc + self.current_function = old_func + self.current_block = old_block + self.final_branch = old_final_branch + self.unwind_target = old_unwind + + def _make_array_elementwise_binop(self, name, result_type, lhs_type, + rhs_type, make_op): + def body_gen(result, lhs, rhs): + # At this point, shapes are assumed to match; could just pass buffer + # pointer for two of the three arrays as well. 
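+            # Sketch of the generated loop: for c = a + b over arrays of equal
+            # shape, every flat index i executes
+            #     c.buffer[i] = a.buffer[i] + b.buffer[i]
+            # and a scalar operand is simply reused for every i (broadcast).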
+ result_buffer = self.append(ir.GetAttr(result, "buffer")) + shape = self.append(ir.GetAttr(result, "shape")) + num_total_elts = self._get_total_array_len(shape) + + if builtins.is_array(lhs.type): + lhs_buffer = self.append(ir.GetAttr(lhs, "buffer")) + def get_left(index): + return self.append(ir.GetElem(lhs_buffer, index)) + else: + def get_left(index): + return lhs + + if builtins.is_array(rhs.type): + rhs_buffer = self.append(ir.GetAttr(rhs, "buffer")) + def get_right(index): + return self.append(ir.GetElem(rhs_buffer, index)) + else: + def get_right(index): + return rhs + + def loop_gen(index): + l = get_left(index) + r = get_right(index) + result = make_op(l, r) + self.append(ir.SetElem(result_buffer, index, result)) + return self.append( + ir.Arith(ast.Add(loc=None), index, + ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, num_total_elts)), + loop_gen) + + return self._make_array_binop(name, result_type, lhs_type, rhs_type, + body_gen) + + def _mangle_arrayop_types(self, types): + def name_error(typ): + assert False, "Internal compiler error: No RPC tag for {}".format(typ) + + def mangle_name(typ): + typ = typ.find() + # rpc_tag is used to turn element types into mangled names for no + # particularly good reason apart from not having to invent yet another + # string representation. + if builtins.is_array(typ): + return mangle_name(typ["elt"]) + str(typ["num_dims"].find().value) + return ir.rpc_tag(typ, name_error).decode() + + return "_".join(mangle_name(t) for t in types) + + def _get_array_elementwise_binop(self, name, make_op, result_type, lhs_type, rhs_type): + # Currently, we always have any type coercions resolved explicitly in the AST. + # In the future, this might no longer be true and the three types might all + # differ. + name = "_array_{}_{}".format( + name, + self._mangle_arrayop_types([result_type, lhs_type, rhs_type])) + if name not in self.array_op_funcs: + self.array_op_funcs[name] = self._make_array_elementwise_binop( + name, result_type, lhs_type, rhs_type, make_op) + return self.array_op_funcs[name] + + def _invoke_arrayop(self, func, params): + closure = self.append( + ir.Closure(func, ir.Constant(None, ir.TEnvironment("arrayop", {})))) + if self.unwind_target is None: + self.append(ir.Call(closure, params, {})) + else: + after_invoke = self.add_block("arrayop.invoke") + self.append(ir.Invoke(func, params, {}, after_invoke, self.unwind_target)) + self.current_block = after_invoke + + def _get_array_offset(self, shape, indices): + result = indices[0] + for dim, index in zip(shape[1:], indices[1:]): + result = self.append(ir.Arith(ast.Mult(loc=None), result, dim)) + result = self.append(ir.Arith(ast.Add(loc=None), result, index)) + return result + + def _get_matmult(self, result_type, lhs_type, rhs_type): + name = "_array_MatMult_" + self._mangle_arrayop_types( + [result_type, lhs_type, rhs_type]) + if name not in self.array_op_funcs: + + def body_gen(result, lhs, rhs): + assert builtins.is_array(result.type), \ + "vec @ vec should have been normalised into array result" + + # We assume result has correct shape; could just pass buffer pointer + # as well. 
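+                # Loop structure emitted below (sketch), in NumPy terms:
+                #     for row in range(num_rows):
+                #         for col in range(num_cols):
+                #             result[row, col] = sum(lhs[row, k] * rhs[k, col]
+                #                                    for k in range(num_summands))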
+ result_buffer = self.append(ir.GetAttr(result, "buffer")) + lhs_buffer = self.append(ir.GetAttr(lhs, "buffer")) + rhs_buffer = self.append(ir.GetAttr(rhs, "buffer")) + + num_rows, num_summands, _, num_cols = self._get_matmult_shapes(lhs, rhs) + + elt = result.type["elt"].find() + env_type = ir.TEnvironment("loop", {"$total": elt}) + env = self.append(ir.Alloc([], env_type)) + + def row_loop(row_idx): + lhs_base_offset = self.append( + ir.Arith(ast.Mult(loc=None), row_idx, num_summands)) + lhs_base = self.append(ir.Offset(lhs_buffer, lhs_base_offset)) + result_base_offset = self.append( + ir.Arith(ast.Mult(loc=None), row_idx, num_cols)) + result_base = self.append( + ir.Offset(result_buffer, result_base_offset)) + + def col_loop(col_idx): + rhs_base = self.append(ir.Offset(rhs_buffer, col_idx)) + + self.append( + ir.SetLocal(env, "$total", ir.Constant(elt.zero(), elt))) + + def sum_loop(sum_idx): + lhs_elem = self.append(ir.GetElem(lhs_base, sum_idx)) + rhs_offset = self.append( + ir.Arith(ast.Mult(loc=None), sum_idx, num_cols)) + rhs_elem = self.append(ir.GetElem(rhs_base, rhs_offset)) + product = self.append( + ir.Arith(ast.Mult(loc=None), lhs_elem, rhs_elem)) + prev_total = self.append(ir.GetLocal(env, "$total")) + total = self.append( + ir.Arith(ast.Add(loc=None), prev_total, product)) + self.append(ir.SetLocal(env, "$total", total)) + return self.append( + ir.Arith(ast.Add(loc=None), sum_idx, + ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, num_summands)), + sum_loop) + + total = self.append(ir.GetLocal(env, "$total")) + self.append(ir.SetElem(result_base, col_idx, total)) + + return self.append( + ir.Arith(ast.Add(loc=None), col_idx, + ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, num_cols)), col_loop) + return self.append( + ir.Arith(ast.Add(loc=None), row_idx, + ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, num_rows)), row_loop) + + self.array_op_funcs[name] = self._make_array_binop( + name, result_type, lhs_type, rhs_type, body_gen) + return self.array_op_funcs[name] + + def _get_matmult_shapes(self, lhs, rhs): + lhs_shape = self.append(ir.GetAttr(lhs, "shape")) + if lhs.type["num_dims"].value == 1: + lhs_shape_outer = ir.Constant(1, self._size_type) + lhs_shape_inner = self.append(ir.GetAttr(lhs_shape, 0)) + else: + lhs_shape_outer = self.append(ir.GetAttr(lhs_shape, 0)) + lhs_shape_inner = self.append(ir.GetAttr(lhs_shape, 1)) + + rhs_shape = self.append(ir.GetAttr(rhs, "shape")) + if rhs.type["num_dims"].value == 1: + rhs_shape_inner = self.append(ir.GetAttr(rhs_shape, 0)) + rhs_shape_outer = ir.Constant(1, self._size_type) + else: + rhs_shape_inner = self.append(ir.GetAttr(rhs_shape, 0)) + rhs_shape_outer = self.append(ir.GetAttr(rhs_shape, 1)) + + return lhs_shape_outer, lhs_shape_inner, rhs_shape_inner, rhs_shape_outer + + def _make_array_shape(self, dims): + return self.append(ir.Alloc(dims, types.TTuple([self._size_type] * len(dims)))) + + def _emit_matmult(self, node, left, right): + # TODO: Also expose as numpy.dot. 
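+        # Shape semantics implemented here (sketch), following the @ operator:
+        # (n, k) @ (k, m) -> (n, m); a 1D operand is treated as a 1-row or
+        # 1-column matrix, so vector @ matrix and matrix @ vector give 1D
+        # results and vector @ vector reduces to a scalar.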
+ lhs = self.visit(left) + rhs = self.visit(right) + + num_rows, lhs_inner, rhs_inner, num_cols = self._get_matmult_shapes(lhs, rhs) + self._make_check( + self.append(ir.Compare(ast.Eq(loc=None), lhs_inner, rhs_inner)), + lambda lhs_inner, rhs_inner: self.alloc_exn( + builtins.TException("ValueError"), + ir.Constant( + "inner dimensions for matrix multiplication do not match ({0} vs. {1})", + builtins.TStr()), lhs_inner, rhs_inner), + params=[lhs_inner, rhs_inner], + loc=node.loc) + result_shape = self._make_array_shape([num_rows, num_cols]) + + final_type = node.type.find() + if not builtins.is_array(final_type): + elt = node.type + result_dims = 0 + else: + elt = final_type["elt"] + result_dims = final_type["num_dims"].value + + result, _ = self._allocate_new_array(elt, result_shape) + func = self._get_matmult(result.type, left.type, right.type) + self._invoke_arrayop(func, [result, lhs, rhs]) + + if result_dims == 2: + return result + result_buffer = self.append(ir.GetAttr(result, "buffer")) + if result_dims == 1: + shape = self._make_array_shape( + [num_cols if lhs.type["num_dims"].value == 1 else num_rows]) + return self.append(ir.Alloc([result_buffer, shape], node.type)) + return self.append(ir.GetElem(result_buffer, ir.Constant(0, self._size_type))) + + def _broadcast_binop(self, name, make_op, result_type, lhs, rhs): + # Broadcast scalars (broadcasting higher dimensions is not yet allowed in the + # language). + broadcast = False + array_arg = lhs + if not builtins.is_array(lhs.type): + broadcast = True + array_arg = rhs + elif not builtins.is_array(rhs.type): + broadcast = True + + shape = self.append(ir.GetAttr(array_arg, "shape")) + + if not broadcast: + rhs_shape = self.append(ir.GetAttr(rhs, "shape")) + self._make_check( + self.append(ir.Compare(ast.Eq(loc=None), shape, rhs_shape)), + lambda: self.alloc_exn( + builtins.TException("ValueError"), + ir.Constant("operands could not be broadcast together", + builtins.TStr()))) + + elt = result_type.find()["elt"] + result, _ = self._allocate_new_array(elt, shape) + func = self._get_array_elementwise_binop(name, make_op, result_type, lhs.type, + rhs.type) + self._invoke_arrayop(func, [result, lhs, rhs]) + return result def visit_BinOpT(self, node): - if builtins.is_numeric(node.type): + if isinstance(node.op, ast.MatMult): + return self._emit_matmult(node, node.left, node.right) + elif builtins.is_array(node.type): + lhs = self.visit(node.left) + rhs = self.visit(node.right) + name = type(node.op).__name__ + def make_op(l, r): + return self.append(ir.Arith(node.op, l, r)) + return self._broadcast_binop(name, make_op, node.type, lhs, rhs) + elif builtins.is_numeric(node.type): lhs = self.visit(node.left) rhs = self.visit(node.right) if isinstance(node.op, (ast.LShift, ast.RShift)): @@ -1423,7 +1864,7 @@ class ARTIQIRGenerator(algorithm.Visitor): for index in range(len(lhs.type.elts)): lhs_elt = self.append(ir.GetAttr(lhs, index)) rhs_elt = self.append(ir.GetAttr(rhs, index)) - elt_result = self.append(ir.Compare(op, lhs_elt, rhs_elt)) + elt_result = self.polymorphic_compare_pair(op, lhs_elt, rhs_elt) if result is None: result = elt_result else: @@ -1450,6 +1891,7 @@ class ARTIQIRGenerator(algorithm.Visitor): lhs_elt = self.append(ir.GetElem(lhs, index_phi)) rhs_elt = self.append(ir.GetElem(rhs, index_phi)) body_result = self.polymorphic_compare_pair(op, lhs_elt, rhs_elt) + body_end = self.current_block loop_body2 = self.add_block("compare.body2") self.current_block = loop_body2 @@ -1465,8 +1907,8 @@ class 
ARTIQIRGenerator(algorithm.Visitor): phi.add_incoming(compare_length, head) loop_head.append(ir.BranchIf(loop_cond, loop_body, tail)) phi.add_incoming(ir.Constant(True, builtins.TBool()), loop_head) - loop_body.append(ir.BranchIf(body_result, loop_body2, tail)) - phi.add_incoming(body_result, loop_body) + body_end.append(ir.BranchIf(body_result, loop_body2, tail)) + phi.add_incoming(body_result, body_end) if isinstance(op, ast.NotEq): result = self.append(ir.Select(phi, @@ -1476,9 +1918,15 @@ class ARTIQIRGenerator(algorithm.Visitor): return result else: - assert False + loc = lhs.loc + loc.end = rhs.loc.end + diag = diagnostic.Diagnostic("error", + "Custom object comparison is not supported", + {}, + loc) + self.engine.process(diag) - def polymorphic_compare_pair_inclusion(self, op, needle, haystack): + def polymorphic_compare_pair_inclusion(self, needle, haystack): if builtins.is_range(haystack.type): # Optimized range `in` operator start = self.append(ir.GetAttr(haystack, "start")) @@ -1520,34 +1968,46 @@ class ARTIQIRGenerator(algorithm.Visitor): result = phi else: - assert False - - if isinstance(op, ast.NotIn): - result = self.append(ir.Select(result, - ir.Constant(False, builtins.TBool()), - ir.Constant(True, builtins.TBool()))) + loc = needle.loc + loc.end = haystack.loc.end + diag = diagnostic.Diagnostic("error", + "Custom object inclusion test is not supported", + {}, + loc) + self.engine.process(diag) return result + def invert(self, value): + return self.append(ir.Select(value, + ir.Constant(False, builtins.TBool()), + ir.Constant(True, builtins.TBool()))) + def polymorphic_compare_pair(self, op, lhs, rhs): if isinstance(op, (ast.Is, ast.IsNot)): # The backend will handle equality of aggregates. return self.append(ir.Compare(op, lhs, rhs)) - elif isinstance(op, (ast.In, ast.NotIn)): - return self.polymorphic_compare_pair_inclusion(op, lhs, rhs) - else: # Eq, NotEq, Lt, LtE, Gt, GtE + elif isinstance(op, ast.In): + return self.polymorphic_compare_pair_inclusion(lhs, rhs) + elif isinstance(op, ast.NotIn): + result = self.polymorphic_compare_pair_inclusion(lhs, rhs) + return self.invert(result) + elif isinstance(op, (ast.Eq, ast.Lt, ast.LtE, ast.Gt, ast.GtE)): return self.polymorphic_compare_pair_order(op, lhs, rhs) + elif isinstance(op, ast.NotEq): + result = self.polymorphic_compare_pair_order(ast.Eq(loc=op.loc), lhs, rhs) + return self.invert(result) + else: + assert False def visit_CompareT(self, node): # Essentially a sequence of `and`s performed over results # of comparisons. 
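+        # For example, ``a < b <= c`` evaluates as ``a < b and b <= c``, with
+        # each pairwise result produced by polymorphic_compare_pair below.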
blocks = [] lhs = self.visit(node.left) - self.instrument_assert(node.left, lhs) for op, rhs_node in zip(node.ops, node.comparators): result_head = self.current_block rhs = self.visit(rhs_node) - self.instrument_assert(rhs_node, rhs) result = self.polymorphic_compare_pair(op, lhs, rhs) result_tail = self.current_block @@ -1625,8 +2085,8 @@ class ARTIQIRGenerator(algorithm.Visitor): return self.append(ir.Coerce(arg, node.type)) else: assert False - elif (types.is_builtin(typ, "list") or types.is_builtin(typ, "array") or - types.is_builtin(typ, "bytearray")): + elif (types.is_builtin(typ, "list") or + types.is_builtin(typ, "bytearray") or types.is_builtin(typ, "bytes")): if len(node.args) == 0 and len(node.keywords) == 0: length = ir.Constant(0, builtins.TInt32()) return self.append(ir.Alloc([length], node.type)) @@ -1637,6 +2097,7 @@ class ARTIQIRGenerator(algorithm.Visitor): def body_gen(index): elt = self.iterable_get(arg, index) + elt = self.append(ir.Coerce(elt, builtins.get_iterable_elt(node.type))) self.append(ir.SetElem(result, index, elt)) return self.append(ir.Arith(ast.Add(loc=None), index, ir.Constant(1, length.type))) @@ -1647,6 +2108,68 @@ class ARTIQIRGenerator(algorithm.Visitor): return result else: assert False + elif types.is_builtin(typ, "array"): + if len(node.args) == 1 and len(node.keywords) in (0, 1): + result_type = node.type.find() + arg = self.visit(node.args[0]) + + result_elt = result_type["elt"].find() + num_dims = result_type["num_dims"].value + + # Derive shape from first element on each level (and fail later if the + # array is in fact jagged). + first_elt = None + lengths = [] + for dim_idx in range(num_dims): + if first_elt is None: + first_elt = arg + else: + first_elt = self.iterable_get(first_elt, + ir.Constant(0, self._size_type)) + lengths.append(self.iterable_len(first_elt)) + + shape = self.append(ir.Alloc(lengths, result_type.attributes["shape"])) + num_total_elts = self._get_total_array_len(shape) + + # Assign buffer from nested iterables. + buffer = self.append( + ir.Alloc([num_total_elts], result_type.attributes["buffer"])) + + def assign_elems(outer_indices, indexed_arg): + if len(outer_indices) == num_dims: + dest_idx = self._get_array_offset(lengths, outer_indices) + coerced = self.append(ir.Coerce(indexed_arg, result_elt)) + self.append(ir.SetElem(buffer, dest_idx, coerced)) + else: + this_level_len = self.iterable_len(indexed_arg) + dim_idx = len(outer_indices) + if dim_idx > 0: + # Check for rectangularity (outermost index is never jagged, + # by definition). + result_len = self.append(ir.GetAttr(shape, dim_idx)) + self._make_check( + self.append(ir.Compare(ast.Eq(loc=None), this_level_len, result_len)), + lambda a, b: self.alloc_exn( + builtins.TException("ValueError"), + ir.Constant( + "arrays must be rectangular (lengths were {0} vs. 
{1})", + builtins.TStr()), a, b), + params=[this_level_len, result_len], + loc=node.loc) + + def body_gen(index): + elem = self.iterable_get(indexed_arg, index) + assign_elems(outer_indices + [index], elem) + return self.append( + ir.Arith(ast.Add(loc=None), index, + ir.Constant(1, self._size_type))) + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, this_level_len)), body_gen) + assign_elems([], arg) + return self.append(ir.Alloc([buffer, shape], node.type)) + else: + assert False elif types.is_builtin(typ, "range"): elt_typ = builtins.get_iterable_elt(node.type) if len(node.args) == 1 and len(node.keywords) == 0: @@ -1687,6 +2210,16 @@ class ARTIQIRGenerator(algorithm.Visitor): return self.append(ir.Builtin("round", [arg], node.type)) else: assert False + elif types.is_builtin(typ, "abs"): + if len(node.args) == 1 and len(node.keywords) == 0: + arg = self.visit(node.args[0]) + neg = self.append( + ir.Arith(ast.Sub(loc=None), ir.Constant(0, arg.type), arg)) + cond = self.append( + ir.Compare(ast.Lt(loc=None), arg, ir.Constant(0, arg.type))) + return self.append(ir.Select(cond, neg, arg)) + else: + assert False elif types.is_builtin(typ, "min"): if len(node.args) == 2 and len(node.keywords) == 0: arg0, arg1 = map(self.visit, node.args) @@ -1705,14 +2238,71 @@ class ARTIQIRGenerator(algorithm.Visitor): if len(node.args) == 2 and len(node.keywords) == 0: arg0, arg1 = map(self.visit, node.args) - result = self.append(ir.Alloc([arg0], node.type)) + num_dims = node.type.find()["num_dims"].value + if types.is_tuple(arg0.type): + lens = [self.append(ir.GetAttr(arg0, i)) for i in range(num_dims)] + else: + assert num_dims == 1 + lens = [arg0] + + shape = self._make_array_shape(lens) + result, total_len = self._allocate_new_array(node.type.find()["elt"], + shape) + def body_gen(index): self.append(ir.SetElem(result, index, arg1)) - return self.append(ir.Arith(ast.Add(loc=None), index, - ir.Constant(1, arg0.type))) - self._make_loop(ir.Constant(0, self._size_type), - lambda index: self.append(ir.Compare(ast.Lt(loc=None), index, arg0)), - body_gen) + return self.append( + ir.Arith(ast.Add(loc=None), index, + ir.Constant(1, self._size_type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda index: self.append( + ir.Compare(ast.Lt(loc=None), index, total_len)), body_gen) + return result + else: + assert False + elif types.is_builtin(typ, "numpy.transpose"): + if len(node.args) == 1 and len(node.keywords) == 0: + arg, = map(self.visit, node.args) + + num_dims = arg.type.find()["num_dims"].value + if num_dims == 1: + # No-op as per NumPy semantics. 
+ return arg + assert num_dims == 2 + arg_shape = self.append(ir.GetAttr(arg, "shape")) + dim0 = self.append(ir.GetAttr(arg_shape, 0)) + dim1 = self.append(ir.GetAttr(arg_shape, 1)) + shape = self._make_array_shape([dim1, dim0]) + result, _ = self._allocate_new_array(node.type.find()["elt"], shape) + arg_buffer = self.append(ir.GetAttr(arg, "buffer")) + result_buffer = self.append(ir.GetAttr(result, "buffer")) + + def outer_gen(idx1): + arg_base = self.append(ir.Offset(arg_buffer, idx1)) + result_offset = self.append(ir.Arith(ast.Mult(loc=None), idx1, + dim0)) + result_base = self.append(ir.Offset(result_buffer, result_offset)) + + def inner_gen(idx0): + arg_offset = self.append( + ir.Arith(ast.Mult(loc=None), idx0, dim1)) + val = self.append(ir.GetElem(arg_base, arg_offset)) + self.append(ir.SetElem(result_base, idx0, val)) + return self.append( + ir.Arith(ast.Add(loc=None), idx0, ir.Constant(1, + idx0.type))) + + self._make_loop( + ir.Constant(0, self._size_type), lambda idx0: self.append( + ir.Compare(ast.Lt(loc=None), idx0, dim0)), inner_gen) + return self.append( + ir.Arith(ast.Add(loc=None), idx1, ir.Constant(1, idx1.type))) + + self._make_loop( + ir.Constant(0, self._size_type), + lambda idx1: self.append(ir.Compare(ast.Lt(loc=None), idx1, dim1)), + outer_gen) return result else: assert False @@ -1812,7 +2402,8 @@ class ARTIQIRGenerator(algorithm.Visitor): assert None not in args - if self.unwind_target is None: + if self.unwind_target is None or \ + types.is_external_function(callee.type) and "nounwind" in callee.type.flags: insn = self.append(ir.Call(func, args, arg_exprs)) else: after_invoke = self.add_block("invoke") @@ -1836,12 +2427,34 @@ class ARTIQIRGenerator(algorithm.Visitor): if types.is_builtin(node.func.type): insn = self.visit_builtin_call(node) + elif (types.is_broadcast_across_arrays(node.func.type) and len(args) >= 1 + and any(builtins.is_array(arg.type) for arg in args)): + # The iodelay machinery set up in the surrounding code was + # deprecated/a relic from the past when array broadcasting support + # was added, so no attempt to keep the delay tracking intact is + # made. + def make_call(*args): + return self._user_call(ir.Constant(None, callee.type), args, {}, + node.arg_exprs) + # TODO: Generate more generically if non-externals are allowed. 
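# Illustrative aside, not part of the patch: a call to a function marked as
# broadcasting across arrays is lowered to elementwise application, with any
# scalar operand repeated across the array operand(s).  A minimal plain-Python
# model of those semantics (names are placeholders, not compiler API):
def broadcast_call(func, *operands):
    arrays = [op for op in operands if isinstance(op, list)]
    if not arrays:
        return func(*operands)
    length = len(arrays[0])
    assert all(len(a) == length for a in arrays)  # shape agreement is checked elsewhere
    pick = lambda op, i: op[i] if isinstance(op, list) else op
    return [func(*(pick(op, i) for op in operands)) for i in range(length)]

# broadcast_call(lambda x, y: x + y, [1, 2, 3], 10) == [11, 12, 13]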
+ name = node.func.type.find().name + + if len(args) == 1: + shape = self.append(ir.GetAttr(args[0], "shape")) + result, _ = self._allocate_new_array(node.type.find()["elt"], shape) + func = self._get_array_unaryop(name, make_call, node.type, args[0].type) + self._invoke_arrayop(func, [result, args[0]]) + insn = result + elif len(args) == 2: + insn = self._broadcast_binop(name, make_call, node.type, *args) + else: + assert False, "Broadcasting for {} arguments not implemented".format(len) else: insn = self._user_call(callee, args, keywords, node.arg_exprs) - if isinstance(node.func, asttyped.AttributeT): attr_node = node.func - self.method_map[(attr_node.value.type.find(), attr_node.attr)].append(insn) + self.method_map[(attr_node.value.type.find(), + attr_node.attr)].append(insn) if node.iodelay is not None and not iodelay.is_const(node.iodelay, 0): after_delay = self.add_block("delay.tail") @@ -1853,78 +2466,70 @@ class ARTIQIRGenerator(algorithm.Visitor): def visit_QuoteT(self, node): return self.append(ir.Quote(node.value, node.type)) - def instrument_assert(self, node, value): - if self.current_assert_env is not None: - if isinstance(value, ir.Constant): - return # don't display the values of constants + def _get_raise_assert_func(self): + """Emit the helper function that constructs AssertionErrors and raises + them, if it does not already exist in the current module. - if any([algorithm.compare(node, subexpr) - for (subexpr, name) in self.current_assert_subexprs]): - return # don't display the same subexpression twice + A separate function is used for code size reasons. (This could also be + compiled into a stand-alone support library instead.) + """ + if self.raise_assert_func: + return self.raise_assert_func + try: + msg = ir.Argument(builtins.TStr(), "msg") + file = ir.Argument(builtins.TStr(), "file") + line = ir.Argument(builtins.TInt32(), "line") + col = ir.Argument(builtins.TInt32(), "col") + function = ir.Argument(builtins.TStr(), "function") - name = self.current_assert_env.type.add("$subexpr", ir.TOption(node.type)) - value_opt = self.append(ir.Alloc([value], ir.TOption(node.type)), - loc=node.loc) - self.append(ir.SetLocal(self.current_assert_env, name, value_opt), - loc=node.loc) - self.current_assert_subexprs.append((node, name)) + args = [msg, file, line, col, function] + typ = types.TFunction(args=OrderedDict([(arg.name, arg.type) + for arg in args]), + optargs=OrderedDict(), + ret=builtins.TNone()) + env = ir.TEnvironment(name="raise", vars={}) + env_arg = ir.EnvironmentArgument(env, "ARG.ENV") + func = ir.Function(typ, "_artiq_raise_assert", [env_arg] + args) + func.is_internal = True + func.is_cold = True + func.is_generated = True + self.functions.append(func) + old_func, self.current_function = self.current_function, func + + entry = self.add_block("entry") + old_block, self.current_block = self.current_block, entry + old_final_branch, self.final_branch = self.final_branch, None + old_unwind, self.unwind_target = self.unwind_target, None + + exn = self.alloc_exn(builtins.TException("AssertionError"), message=msg) + self.append(ir.SetAttr(exn, "__file__", file)) + self.append(ir.SetAttr(exn, "__line__", line)) + self.append(ir.SetAttr(exn, "__col__", col)) + self.append(ir.SetAttr(exn, "__func__", function)) + self.append(ir.Raise(exn)) + finally: + self.current_function = old_func + self.current_block = old_block + self.final_branch = old_final_branch + self.unwind_target = old_unwind + + self.raise_assert_func = func + return self.raise_assert_func def 
visit_Assert(self, node): - try: - assert_suffix = ".assert@{}:{}".format(node.loc.line(), node.loc.column()) - assert_env_type = ir.TEnvironment(name=self.current_function.name + assert_suffix, - vars={}) - assert_env = self.current_assert_env = \ - self.append(ir.Alloc([], assert_env_type, name="assertenv")) - assert_subexprs = self.current_assert_subexprs = [] - init = self.current_block - - prehead = self.current_block = self.add_block("assert.prehead") - cond = self.visit(node.test) - head = self.current_block - finally: - self.current_assert_env = None - self.current_assert_subexprs = None - - for subexpr_node, subexpr_name in assert_subexprs: - empty = init.append(ir.Alloc([], ir.TOption(subexpr_node.type))) - init.append(ir.SetLocal(assert_env, subexpr_name, empty)) - init.append(ir.Branch(prehead)) + cond = self.visit(node.test) + head = self.current_block if_failed = self.current_block = self.add_block("assert.fail") - - if node.msg: - explanation = node.msg.s - else: - explanation = node.loc.source() - self.append(ir.Builtin("printf", [ - ir.Constant("assertion failed at %.*s: %.*s\n\x00", builtins.TStr()), - ir.Constant(str(node.loc.begin()), builtins.TStr()), - ir.Constant(str(explanation), builtins.TStr()), - ], builtins.TNone())) - - for subexpr_node, subexpr_name in assert_subexprs: - subexpr_head = self.current_block - subexpr_value_opt = self.append(ir.GetLocal(assert_env, subexpr_name)) - subexpr_cond = self.append(ir.Builtin("is_some", [subexpr_value_opt], - builtins.TBool())) - - subexpr_body = self.current_block = self.add_block("assert.subexpr.body") - self.append(ir.Builtin("printf", [ - ir.Constant(" (%.*s) = \x00", builtins.TStr()), - ir.Constant(subexpr_node.loc.source(), builtins.TStr()) - ], builtins.TNone())) - subexpr_value = self.append(ir.Builtin("unwrap", [subexpr_value_opt], - subexpr_node.type)) - self.polymorphic_print([subexpr_value], separator="", suffix="\n") - subexpr_postbody = self.current_block - - subexpr_tail = self.current_block = self.add_block("assert.subexpr.tail") - self.append(ir.Branch(subexpr_tail), block=subexpr_postbody) - self.append(ir.BranchIf(subexpr_cond, subexpr_body, subexpr_tail), block=subexpr_head) - - self.append(ir.Builtin("abort", [], builtins.TNone())) - self.append(ir.Unreachable()) + text = str(node.msg.s) if node.msg else "AssertionError" + msg = ir.Constant(text, builtins.TStr()) + loc_file = ir.Constant(node.loc.source_buffer.name, builtins.TStr()) + loc_line = ir.Constant(node.loc.line(), builtins.TInt32()) + loc_column = ir.Constant(node.loc.column(), builtins.TInt32()) + loc_function = ir.Constant(".".join(self.name), builtins.TStr()) + self._invoke_raising_func(self._get_raise_assert_func(), [ + msg, loc_file, loc_line, loc_column, loc_function + ], "assert.fail") tail = self.current_block = self.add_block("assert.tail") self.append(ir.BranchIf(cond, tail, if_failed), block=head) @@ -1933,8 +2538,7 @@ class ARTIQIRGenerator(algorithm.Visitor): def printf(format_string, *args): format = ir.Constant(format_string, builtins.TStr()) if as_rtio: - now_mu = self.append(ir.Builtin("now_mu", [], builtins.TInt64())) - self.append(ir.Builtin("rtio_log", [now_mu, format, *args], builtins.TNone())) + self.append(ir.Builtin("rtio_log", [format, *args], builtins.TNone())) else: self.append(ir.Builtin("printf", [format, *args], builtins.TNone())) diff --git a/artiq/compiler/transforms/cast_monomorphizer.py b/artiq/compiler/transforms/cast_monomorphizer.py index 50e2bda4f..c0935ff1f 100644 --- 
a/artiq/compiler/transforms/cast_monomorphizer.py +++ b/artiq/compiler/transforms/cast_monomorphizer.py @@ -11,13 +11,12 @@ class CastMonomorphizer(algorithm.Visitor): self.engine = engine def visit_CallT(self, node): - self.generic_visit(node) - if (types.is_builtin(node.func.type, "int") or types.is_builtin(node.func.type, "int32") or types.is_builtin(node.func.type, "int64")): typ = node.type.find() if (not types.is_var(typ["width"]) and + len(node.args) == 1 and builtins.is_int(node.args[0].type) and types.is_var(node.args[0].type.find()["width"])): if isinstance(node.args[0], asttyped.BinOpT): @@ -29,3 +28,20 @@ class CastMonomorphizer(algorithm.Visitor): node.args[0].type.unify(typ) + if types.is_builtin(node.func.type, "int") or \ + types.is_builtin(node.func.type, "round"): + typ = node.type.find() + if types.is_var(typ["width"]): + typ["width"].unify(types.TValue(32)) + + self.generic_visit(node) + + def visit_CoerceT(self, node): + if isinstance(node.value, asttyped.NumT) and \ + builtins.is_int(node.type) and \ + builtins.is_int(node.value.type) and \ + not types.is_var(node.type["width"]) and \ + types.is_var(node.value.type["width"]): + node.value.type.unify(node.type) + + self.generic_visit(node) diff --git a/artiq/compiler/transforms/constant_hoister.py b/artiq/compiler/transforms/constant_hoister.py new file mode 100644 index 000000000..ea51046aa --- /dev/null +++ b/artiq/compiler/transforms/constant_hoister.py @@ -0,0 +1,43 @@ +""" +:class:`ConstantHoister` is a code motion transform: +it moves any invariant loads to the earliest point where +they may be executed. +""" + +from .. import types, ir + +class ConstantHoister: + def process(self, functions): + for func in functions: + self.process_function(func) + + def process_function(self, func): + entry = func.entry() + worklist = set(func.instructions()) + moved = set() + while len(worklist) > 0: + insn = worklist.pop() + + if (isinstance(insn, ir.GetAttr) and insn not in moved and + types.is_instance(insn.object().type) and + insn.attr in insn.object().type.constant_attributes): + has_variant_operands = False + index_in_entry = 0 + for operand in insn.operands: + if isinstance(operand, ir.Argument): + pass + elif isinstance(operand, ir.Instruction) and operand.basic_block == entry: + index_in_entry = entry.index(operand) + 1 + else: + has_variant_operands = True + break + + if has_variant_operands: + continue + + insn.remove_from_parent() + entry.instructions.insert(index_in_entry, insn) + moved.add(insn) + + for use in insn.uses: + worklist.add(use) diff --git a/artiq/compiler/transforms/dead_code_eliminator.py b/artiq/compiler/transforms/dead_code_eliminator.py index 2aceebeec..608a46d55 100644 --- a/artiq/compiler/transforms/dead_code_eliminator.py +++ b/artiq/compiler/transforms/dead_code_eliminator.py @@ -33,7 +33,8 @@ class DeadCodeEliminator: # it also has to run after the interleaver, but interleaver # doesn't like to work with IR before DCE. if isinstance(insn, (ir.Phi, ir.Alloc, ir.GetAttr, ir.GetElem, ir.Coerce, - ir.Arith, ir.Compare, ir.Select, ir.Quote, ir.Closure)) \ + ir.Arith, ir.Compare, ir.Select, ir.Quote, ir.Closure, + ir.Offset)) \ and not any(insn.uses): insn.erase() modified = True diff --git a/artiq/compiler/transforms/inferencer.py b/artiq/compiler/transforms/inferencer.py index c4fb07b94..684c575de 100644 --- a/artiq/compiler/transforms/inferencer.py +++ b/artiq/compiler/transforms/inferencer.py @@ -7,6 +7,7 @@ from pythonparser import algorithm, diagnostic, ast from .. 
import asttyped, types, builtins from .typedtree_printer import TypedtreePrinter + class Inferencer(algorithm.Visitor): """ :class:`Inferencer` infers types by recursively applying the unification @@ -183,6 +184,14 @@ class Inferencer(algorithm.Visitor): if builtins.is_bytes(collection.type) or builtins.is_bytearray(collection.type): self._unify(element.type, builtins.get_iterable_elt(collection.type), element.loc, None) + elif builtins.is_array(collection.type): + array_type = collection.type.find() + elem_dims = array_type["num_dims"].value - 1 + if elem_dims > 0: + elem_type = builtins.TArray(array_type["elt"], types.TValue(elem_dims)) + else: + elem_type = array_type["elt"] + self._unify(element.type, elem_type, element.loc, collection.loc) elif builtins.is_iterable(collection.type) and not builtins.is_str(collection.type): rhs_type = collection.type.find() rhs_wrapped_lhs_type = types.TMono(rhs_type.name, {"elt": element.type}) @@ -199,10 +208,9 @@ class Inferencer(algorithm.Visitor): self.generic_visit(node) value = node.value if types.is_tuple(value.type): - diag = diagnostic.Diagnostic("error", - "multi-dimensional slices are not supported", {}, - node.loc, []) - self.engine.process(diag) + for elt in value.type.find().elts: + self._unify(elt, builtins.TInt(), + value.loc, None) else: self._unify(value.type, builtins.TInt(), value.loc, None) @@ -228,12 +236,41 @@ class Inferencer(algorithm.Visitor): def visit_SubscriptT(self, node): self.generic_visit(node) if isinstance(node.slice, ast.Index): - self._unify_iterable(element=node, collection=node.value) + if types.is_tuple(node.slice.value.type): + if types.is_var(node.value.type): + return + if not builtins.is_array(node.value.type): + diag = diagnostic.Diagnostic( + "error", + "multi-dimensional indexing only supported for arrays, not {type}", + {"type": types.TypePrinter().name(node.value.type)}, + node.loc, []) + self.engine.process(diag) + return + num_idxs = len(node.slice.value.type.find().elts) + array_type = node.value.type.find() + num_dims = array_type["num_dims"].value + remaining_dims = num_dims - num_idxs + if remaining_dims < 0: + diag = diagnostic.Diagnostic( + "error", + "too many indices for array of dimension {num_dims}", + {"num_dims": num_dims}, node.slice.loc, []) + self.engine.process(diag) + return + if remaining_dims == 0: + self._unify(node.type, array_type["elt"], node.loc, + node.value.loc) + else: + self._unify( + node.type, + builtins.TArray(array_type["elt"], remaining_dims)) + else: + self._unify_iterable(element=node, collection=node.value) elif isinstance(node.slice, ast.Slice): - self._unify(node.type, node.value.type, - node.loc, node.value.loc) - else: # ExtSlice - pass # error emitted above + self._unify(node.type, node.value.type, node.loc, node.value.loc) + else: # ExtSlice + pass # error emitted above def visit_IfExpT(self, node): self.generic_visit(node) @@ -265,21 +302,36 @@ class Inferencer(algorithm.Visitor): node.operand.loc) self.engine.process(diag) else: # UAdd, USub + if types.is_var(operand_type): + return + if builtins.is_numeric(operand_type): - self._unify(node.type, operand_type, - node.loc, None) - elif not types.is_var(operand_type): - diag = diagnostic.Diagnostic("error", - "expected unary '{op}' operand to be of numeric type, not {type}", - {"op": node.op.loc.source(), - "type": types.TypePrinter().name(operand_type)}, - node.operand.loc) - self.engine.process(diag) + self._unify(node.type, operand_type, node.loc, None) + return + + if builtins.is_array(operand_type): + elt 
= operand_type.find()["elt"] + if builtins.is_numeric(elt): + self._unify(node.type, operand_type, node.loc, None) + return + if types.is_var(elt): + return + + diag = diagnostic.Diagnostic("error", + "expected unary '{op}' operand to be of numeric type, not {type}", + {"op": node.op.loc.source(), + "type": types.TypePrinter().name(operand_type)}, + node.operand.loc) + self.engine.process(diag) def visit_CoerceT(self, node): self.generic_visit(node) if builtins.is_numeric(node.type) and builtins.is_numeric(node.value.type): pass + elif (builtins.is_array(node.type) and builtins.is_array(node.value.type) + and builtins.is_numeric(node.type.find()["elt"]) + and builtins.is_numeric(node.value.type.find()["elt"])): + pass else: printer = types.TypePrinter() note = diagnostic.Diagnostic("note", @@ -305,14 +357,23 @@ class Inferencer(algorithm.Visitor): self.visit(node) return node - def _coerce_numeric(self, nodes, map_return=lambda typ: typ): + def _coerce_numeric(self, nodes, map_return=lambda typ: typ, map_node_type =lambda typ:typ): # See https://docs.python.org/3/library/stdtypes.html#numeric-types-int-float-complex. node_types = [] for node in nodes: if isinstance(node, asttyped.CoerceT): - node_types.append(node.value.type) + # If we already know exactly what we coerce this value to, use that type, + # or we'll get an unification error in case the coerced type is not the same + # as the type of the coerced value. + # Otherwise, use the potentially more specific subtype when considering possible + # coercions, or we may get stuck. + if node.type.fold(False, lambda acc, ty: acc or types.is_var(ty)): + node_types.append(node.value.type) + else: + node_types.append(node.type) else: node_types.append(node.type) + node_types = [map_node_type(typ) for typ in node_types] if any(map(types.is_var, node_types)): # not enough info yet return elif not all(map(builtins.is_numeric, node_types)): @@ -344,8 +405,125 @@ class Inferencer(algorithm.Visitor): else: assert False + def _coerce_binary_broadcast_op(self, left, right, map_return_elt, op_loc): + def num_dims(typ): + if builtins.is_array(typ): + # TODO: If number of dimensions is ever made a non-fixed parameter, + # need to acutally unify num_dims in _coerce_binop/…. + return typ.find()["num_dims"].value + return 0 + + left_dims = num_dims(left.type) + right_dims = num_dims(right.type) + if left_dims != right_dims and left_dims != 0 and right_dims != 0: + # Mismatch (only scalar broadcast supported for now). + note1 = diagnostic.Diagnostic("note", "operand of dimension {num_dims}", + {"num_dims": left_dims}, left.loc) + note2 = diagnostic.Diagnostic("note", "operand of dimension {num_dims}", + {"num_dims": right_dims}, right.loc) + diag = diagnostic.Diagnostic( + "error", "dimensions of '{op}' array operands must match", + {"op": op_loc.source()}, op_loc, [left.loc, right.loc], [note1, note2]) + self.engine.process(diag) + return + + def map_node_type(typ): + if not builtins.is_array(typ): + # This is a single value broadcast across the array. + return typ + return typ.find()["elt"] + + # Figure out result type, handling broadcasts. 
+ result_dims = left_dims if left_dims else right_dims + def map_return(typ): + elt = map_return_elt(typ) + result = builtins.TArray(elt=elt, num_dims=result_dims) + left = builtins.TArray(elt=elt, num_dims=left_dims) if left_dims else elt + right = builtins.TArray(elt=elt, num_dims=right_dims) if right_dims else elt + return (result, left, right) + + return self._coerce_numeric((left, right), + map_return=map_return, + map_node_type=map_node_type) + def _coerce_binop(self, op, left, right): - if isinstance(op, (ast.BitAnd, ast.BitOr, ast.BitXor, + if isinstance(op, ast.MatMult): + if types.is_var(left.type) or types.is_var(right.type): + return + + def num_dims(operand): + if not builtins.is_array(operand.type): + diag = diagnostic.Diagnostic( + "error", + "expected matrix multiplication operand to be of array type, not {type}", + { + "op": op.loc.source(), + "type": types.TypePrinter().name(operand.type) + }, op.loc, [operand.loc]) + self.engine.process(diag) + return + num_dims = operand.type.find()["num_dims"].value + if num_dims not in (1, 2): + diag = diagnostic.Diagnostic( + "error", + "expected matrix multiplication operand to be 1- or 2-dimensional, not {type}", + { + "op": op.loc.source(), + "type": types.TypePrinter().name(operand.type) + }, op.loc, [operand.loc]) + self.engine.process(diag) + return + return num_dims + + left_dims = num_dims(left) + if not left_dims: + return + right_dims = num_dims(right) + if not right_dims: + return + + def map_node_type(typ): + return typ.find()["elt"] + + def map_return(typ): + if left_dims == 1: + if right_dims == 1: + result_dims = 0 + else: + result_dims = 1 + elif right_dims == 1: + result_dims = 1 + else: + result_dims = 2 + result = typ if result_dims == 0 else builtins.TArray( + typ, result_dims) + return (result, builtins.TArray(typ, left_dims), + builtins.TArray(typ, right_dims)) + + return self._coerce_numeric((left, right), + map_return=map_return, + map_node_type=map_node_type) + elif builtins.is_array(left.type) or builtins.is_array(right.type): + # Operations on arrays are element-wise (possibly using broadcasting). + + # TODO: Allow only for integer arrays. 
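# Illustrative aside, not part of the patch: a minimal model of the
# dimensionality rule enforced by _coerce_binary_broadcast_op above -- a
# scalar (rank 0) broadcasts against an array of any rank, while two array
# operands must have the same number of dimensions:
def broadcast_result_dims(left_dims, right_dims):
    if left_dims and right_dims and left_dims != right_dims:
        raise TypeError("dimensions of array operands must match")
    return left_dims if left_dims else right_dims

# broadcast_result_dims(2, 0) == 2   (2-D array op scalar -> 2-D array)
# broadcast_result_dims(0, 1) == 1   (scalar op 1-D array -> 1-D array)
# broadcast_result_dims(1, 1) == 1   (equal ranks -> elementwise)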
+ # allowed_int_array_ops = (ast.BitAnd, ast.BitOr, ast.BitXor, ast.LShift, + # ast.RShift) + allowed_array_ops = (ast.Add, ast.Mult, ast.FloorDiv, ast.Mod, + ast.Pow, ast.Sub, ast.Div) + if not isinstance(op, allowed_array_ops): + diag = diagnostic.Diagnostic( + "error", "operator '{op}' not valid for array types", + {"op": op.loc.source()}, op.loc) + self.engine.process(diag) + return + + def map_result(typ): + if isinstance(op, ast.Div): + return builtins.TFloat() + return typ + return self._coerce_binary_broadcast_op(left, right, map_result, op.loc) + elif isinstance(op, (ast.BitAnd, ast.BitOr, ast.BitXor, ast.LShift, ast.RShift)): # bitwise operators require integers for operand in (left, right): @@ -444,7 +622,7 @@ class Inferencer(algorithm.Visitor): # division always returns a float return self._coerce_numeric((left, right), lambda typ: (builtins.TFloat(), builtins.TFloat(), builtins.TFloat())) - else: # MatMult + else: diag = diagnostic.Diagnostic("error", "operator '{op}' is not supported", {"op": op.loc.source()}, op.loc) @@ -682,28 +860,99 @@ class Inferencer(algorithm.Visitor): pass else: diagnose(valid_forms()) - elif types.is_builtin(typ, "list") or types.is_builtin(typ, "array"): - if types.is_builtin(typ, "list"): - valid_forms = lambda: [ - valid_form("list() -> list(elt='a)"), - valid_form("list(x:'a) -> list(elt='b) where 'a is iterable") - ] + elif types.is_builtin(typ, "str"): + diag = diagnostic.Diagnostic("error", + "strings currently cannot be constructed", {}, + node.loc) + self.engine.process(diag) + elif types.is_builtin(typ, "array"): + valid_forms = lambda: [ + valid_form("array(x:'a) -> array(elt='b) where 'a is iterable"), + valid_form("array(x:'a, dtype:'b) -> array(elt='b) where 'a is iterable") + ] - self._unify(node.type, builtins.TList(), - node.loc, None) - elif types.is_builtin(typ, "array"): - valid_forms = lambda: [ - valid_form("array() -> array(elt='a)"), - valid_form("array(x:'a) -> array(elt='b) where 'a is iterable") - ] + explicit_dtype = None + keywords_acceptable = False + if len(node.keywords) == 0: + keywords_acceptable = True + elif len(node.keywords) == 1: + if node.keywords[0].arg == "dtype": + keywords_acceptable = True + explicit_dtype = node.keywords[0].value + if len(node.args) == 1 and keywords_acceptable: + arg, = node.args - self._unify(node.type, builtins.TArray(), - node.loc, None) + # In the absence of any other information (there currently isn't a way + # to specify any), assume that all iterables are expandable into a + # (runtime-checked) rectangular array of the innermost element type. + elt = arg.type + num_dims = 0 + result_dims = (node.type.find()["num_dims"].value + if builtins.is_array(node.type) else -1) + while True: + if num_dims == result_dims: + # If we already know the number of dimensions of the result, + # stop so we can disambiguate the (innermost) element type of + # the argument if it is still unknown (e.g. empty array). + break + if types.is_var(elt): + return # undetermined yet + if not builtins.is_iterable(elt) or builtins.is_str(elt): + break + if builtins.is_array(elt): + num_dims += elt.find()["num_dims"].value + else: + num_dims += 1 + elt = builtins.get_iterable_elt(elt) + + if explicit_dtype is not None: + # TODO: Factor out type detection; support quoted type constructors + # (TList(TInt32), …)? 
+ typ = explicit_dtype.type + if types.is_builtin(typ, "int32"): + elt = builtins.TInt32() + elif types.is_builtin(typ, "int64"): + elt = builtins.TInt64() + elif types.is_constructor(typ): + elt = typ.find().instance + else: + diag = diagnostic.Diagnostic( + "error", + "dtype argument of {builtin}() must be a valid constructor", + {"builtin": typ.find().name}, + node.func.loc, + notes=[note]) + self.engine.process(diag) + return + + if num_dims == 0: + note = diagnostic.Diagnostic( + "note", "this expression has type {type}", + {"type": types.TypePrinter().name(arg.type)}, arg.loc) + diag = diagnostic.Diagnostic( + "error", + "the argument of {builtin}() must be of an iterable type", + {"builtin": typ.find().name}, + node.func.loc, + notes=[note]) + self.engine.process(diag) + return + + self._unify(node.type, + builtins.TArray(elt, types.TValue(num_dims)), + node.loc, arg.loc) else: - assert False + diagnose(valid_forms()) + elif types.is_builtin(typ, "list"): + valid_forms = lambda: [ + valid_form("list() -> list(elt='a)"), + valid_form("list(x:'a) -> list(elt='b) where 'a is iterable") + ] + + self._unify(node.type, builtins.TList(), node.loc, None) if len(node.args) == 0 and len(node.keywords) == 0: - pass # [] + pass # [] elif len(node.args) == 1 and len(node.keywords) == 0: arg, = node.args @@ -798,6 +1047,28 @@ class Inferencer(algorithm.Visitor): arg.loc, None) else: diagnose(valid_forms()) + elif types.is_builtin(typ, "abs"): + fn = typ.name + + valid_forms = lambda: [ + valid_form("abs(x:numpy.int?) -> numpy.int?"), + valid_form("abs(x:float) -> float") + ] + + if len(node.args) == 1 and len(node.keywords) == 0: + (arg,) = node.args + if builtins.is_int(arg.type) or builtins.is_float(arg.type): + self._unify(arg.type, node.type, + arg.loc, node.loc) + elif types.is_var(arg.type): + pass # undetermined yet + else: + diag = diagnostic.Diagnostic("error", + "the arguments of abs() must be of a numeric type", {}, + node.func.loc) + self.engine.process(diag) + else: + diagnose(valid_forms()) elif types.is_builtin(typ, "min") or types.is_builtin(typ, "max"): fn = typ.name @@ -844,21 +1115,69 @@ class Inferencer(algorithm.Visitor): diagnose(valid_forms()) elif types.is_builtin(typ, "make_array"): valid_forms = lambda: [ - valid_form("numpy.full(count:int32, value:'a) -> numpy.array(elt='a)") + valid_form("numpy.full(count:int32, value:'a) -> array(elt='a, num_dims=1)"), + valid_form("numpy.full(shape:(int32,)*'b, value:'a) -> array(elt='a, num_dims='b)"), ] - self._unify(node.type, builtins.TArray(), - node.loc, None) - if len(node.args) == 2 and len(node.keywords) == 0: arg0, arg1 = node.args - self._unify(arg0.type, builtins.TInt32(), - arg0.loc, None) + if types.is_var(arg0.type): + return # undetermined yet + elif types.is_tuple(arg0.type): + num_dims = len(arg0.type.find().elts) + self._unify(arg0.type, types.TTuple([builtins.TInt32()] * num_dims), + arg0.loc, None) + else: + num_dims = 1 + self._unify(arg0.type, builtins.TInt32(), + arg0.loc, None) + + self._unify(node.type, builtins.TArray(num_dims=num_dims), + node.loc, None) self._unify(arg1.type, node.type.find()["elt"], arg1.loc, None) else: diagnose(valid_forms()) + elif types.is_builtin(typ, "numpy.transpose"): + valid_forms = lambda: [ + valid_form("transpose(x: array(elt='a, num_dims=1)) -> array(elt='a, num_dims=1)"), + valid_form("transpose(x: array(elt='a, num_dims=2)) -> array(elt='a, num_dims=2)") + ] + + if len(node.args) == 1 and len(node.keywords) == 0: + arg, = node.args + + if types.is_var(arg.type): + pass # 
undetermined yet + elif not builtins.is_array(arg.type): + note = diagnostic.Diagnostic( + "note", "this expression has type {type}", + {"type": types.TypePrinter().name(arg.type)}, arg.loc) + diag = diagnostic.Diagnostic( + "error", + "the argument of {builtin}() must be an array", + {"builtin": typ.find().name}, + node.func.loc, + notes=[note]) + self.engine.process(diag) + else: + num_dims = arg.type.find()["num_dims"].value + if num_dims not in (1, 2): + note = diagnostic.Diagnostic( + "note", "argument is {num_dims}-dimensional", + {"num_dims": num_dims}, arg.loc) + diag = diagnostic.Diagnostic( + "error", + "{builtin}() is currently only supported for up to " + "two-dimensional arrays", {"builtin": typ.find().name}, + node.func.loc, + notes=[note]) + self.engine.process(diag) + else: + self._unify(node.type, arg.type, node.loc, None) + else: + diagnose(valid_forms()) elif types.is_builtin(typ, "rtio_log"): valid_forms = lambda: [ valid_form("rtio_log(channel:str, args...) -> None"), @@ -892,9 +1211,6 @@ class Inferencer(algorithm.Visitor): elif types.is_builtin(typ, "at_mu"): simple_form("at_mu(time_mu:numpy.int64) -> None", [builtins.TInt64()]) - elif types.is_builtin(typ, "watchdog"): - simple_form("watchdog(time:float) -> [builtin context manager]", - [builtins.TFloat()], builtins.TNone()) elif types.is_constructor(typ): # An user-defined class. self._unify(node.type, typ.find().instance, @@ -978,6 +1294,27 @@ class Inferencer(algorithm.Visitor): self.engine.process(diag) return + # Array broadcasting for functions explicitly marked as such. + if len(node.args) == typ_arity and types.is_broadcast_across_arrays(typ): + if typ_arity == 1: + arg_type = node.args[0].type.find() + if builtins.is_array(arg_type): + typ_arg, = typ_args.values() + self._unify(typ_arg, arg_type["elt"], node.args[0].loc, None) + self._unify(node.type, builtins.TArray(typ_ret, arg_type["num_dims"]), + node.loc, None) + return + elif typ_arity == 2: + if any(builtins.is_array(arg.type) for arg in node.args): + ret, arg0, arg1 = self._coerce_binary_broadcast_op( + node.args[0], node.args[1], lambda t: typ_ret, node.loc) + node.args[0] = self._coerce_one(arg0, node.args[0], + other_node=node.args[1]) + node.args[1] = self._coerce_one(arg1, node.args[1], + other_node=node.args[0]) + self._unify(node.type, ret, node.loc, None) + return + for actualarg, (formalname, formaltyp) in \ zip(node.args, list(typ_args.items()) + list(typ_optargs.items())): self._unify(actualarg.type, formaltyp, @@ -999,6 +1336,17 @@ class Inferencer(algorithm.Visitor): elif keyword.arg in typ_optargs: self._unify(keyword.value.type, typ_optargs[keyword.arg], keyword.value.loc, None) + else: + note = diagnostic.Diagnostic("note", + "extraneous argument", {}, + keyword.loc) + diag = diagnostic.Diagnostic("error", + "this function of type {type} does not accept argument '{name}'", + {"type": types.TypePrinter().name(node.func.type), + "name": keyword.arg}, + node.func.loc, [], [note]) + self.engine.process(diag) + return passed_args[keyword.arg] = keyword.arg_loc for formalname in typ_args: @@ -1107,9 +1455,7 @@ class Inferencer(algorithm.Visitor): typ = node.context_expr.type if (types.is_builtin(typ, "interleave") or types.is_builtin(typ, "sequential") or - types.is_builtin(typ, "parallel") or - (isinstance(node.context_expr, asttyped.CallT) and - types.is_builtin(node.context_expr.func.type, "watchdog"))): + types.is_builtin(typ, "parallel")): # builtin context managers if node.optional_vars is not None: 
self._unify(node.optional_vars.type, builtins.TNone(), diff --git a/artiq/compiler/transforms/int_monomorphizer.py b/artiq/compiler/transforms/int_monomorphizer.py index 5002bb086..adab3b165 100644 --- a/artiq/compiler/transforms/int_monomorphizer.py +++ b/artiq/compiler/transforms/int_monomorphizer.py @@ -26,22 +26,3 @@ class IntMonomorphizer(algorithm.Visitor): return node.type["width"].unify(types.TValue(width)) - - def visit_CallT(self, node): - self.generic_visit(node) - - if types.is_builtin(node.func.type, "int") or \ - types.is_builtin(node.func.type, "round"): - typ = node.type.find() - if types.is_var(typ["width"]): - typ["width"].unify(types.TValue(32)) - - def visit_CoerceT(self, node): - if isinstance(node.value, asttyped.NumT) and \ - builtins.is_int(node.type) and \ - builtins.is_int(node.value.type) and \ - not types.is_var(node.type["width"]) and \ - types.is_var(node.value.type["width"]): - node.value.type.unify(node.type) - - self.generic_visit(node) diff --git a/artiq/compiler/transforms/llvm_ir_generator.py b/artiq/compiler/transforms/llvm_ir_generator.py index 1fdad4319..ce2530bf4 100644 --- a/artiq/compiler/transforms/llvm_ir_generator.py +++ b/artiq/compiler/transforms/llvm_ir_generator.py @@ -9,6 +9,7 @@ from pythonparser import ast, diagnostic from llvmlite_artiq import ir as ll, binding as llvm from ...language import core as language_core from .. import types, builtins, ir +from ..embedding import SpecializedFunction llvoid = ll.VoidType() @@ -22,7 +23,7 @@ llptr = ll.IntType(8).as_pointer() llptrptr = ll.IntType(8).as_pointer().as_pointer() llslice = ll.LiteralStructType([llptr, lli32]) llsliceptr = ll.LiteralStructType([llptr, lli32]).as_pointer() -llmetadata = ll.MetaData() +llmetadata = ll.MetaDataType() def memoize(generator): @@ -123,6 +124,35 @@ class DebugInfoEmitter: "scope": scope }) + +class ABILayoutInfo: + """Caches DataLayout size/alignment lookup results. + + llvmlite's Type.get_abi_{size, alignment}() are implemented in a very + inefficient way, in particular _get_ll_pointer_type() used to construct the + corresponding llvm::Type is. We thus cache the results, optionally directly + using the compiler type as a key. + + (This is a separate class for use with @memoize.) 
+ """ + + def __init__(self, lldatalayout, llcontext, llty_of_type): + self.cache = {} + self.lldatalayout = lldatalayout + self.llcontext = llcontext + self.llty_of_type = llty_of_type + + @memoize + def get_size_align(self, llty): + lowered = llty._get_ll_pointer_type(self.lldatalayout, self.llcontext) + return (self.lldatalayout.get_pointee_abi_size(lowered), + self.lldatalayout.get_pointee_abi_alignment(lowered)) + + @memoize + def get_size_align_for_type(self, typ): + return self.get_size_align(self.llty_of_type(typ)) + + class LLVMIRGenerator: def __init__(self, engine, module_name, target, embedding_map): self.engine = engine @@ -133,6 +163,8 @@ class LLVMIRGenerator: self.llmodule.triple = target.triple self.llmodule.data_layout = target.data_layout self.lldatalayout = llvm.create_target_data(self.llmodule.data_layout) + self.abi_layout_info = ABILayoutInfo(self.lldatalayout, self.llcontext, + self.llty_of_type) self.function_flags = None self.llfunction = None self.llmap = {} @@ -148,10 +180,6 @@ class LLVMIRGenerator: self.tbaa_tree, ll.Constant(lli64, 1) ]) - self.tbaa_now = self.llmodule.add_metadata([ - ll.MetaDataString(self.llmodule, "timeline position"), - self.tbaa_tree - ]) def needs_sret(self, lltyp, may_be_large=True): if isinstance(lltyp, ll.VoidType): @@ -177,13 +205,13 @@ class LLVMIRGenerator: typ = typ.find() if types.is_tuple(typ): return ll.LiteralStructType([self.llty_of_type(eltty) for eltty in typ.elts]) - elif types.is_rpc(typ) or types.is_c_function(typ): + elif types.is_rpc(typ) or types.is_external_function(typ): if for_return: return llvoid else: - return ll.LiteralStructType([]) + return llunit elif types._is_pointer(typ): - return llptr + return ll.PointerType(self.llty_of_type(typ["elt"])) elif types.is_function(typ): sretarg = [] llretty = self.llty_of_type(typ.ret, for_return=True) @@ -211,13 +239,17 @@ class LLVMIRGenerator: if for_return: return llvoid else: - return ll.LiteralStructType([]) + return llunit elif builtins.is_bool(typ): return lli1 elif builtins.is_int(typ): return ll.IntType(builtins.get_int_width(typ)) elif builtins.is_float(typ): return lldouble + elif builtins.is_array(typ): + llshapety = self.llty_of_type(typ.attributes["shape"]) + llbufferty = self.llty_of_type(typ.attributes["buffer"]) + return ll.LiteralStructType([llbufferty, llshapety]) elif builtins.is_listish(typ): lleltty = self.llty_of_type(builtins.get_iterable_elt(typ)) return ll.LiteralStructType([lleltty.as_pointer(), lli32]) @@ -270,7 +302,7 @@ class LLVMIRGenerator: sanitized_str = re.sub(rb"[^a-zA-Z0-9_.]", b"", as_bytes[:20]).decode('ascii') name = self.llmodule.get_unique_name("S.{}".format(sanitized_str)) - llstr = self.llmodule.get_global(name) + llstr = self.llmodule.globals.get(name) if llstr is None: llstrty = ll.ArrayType(lli8, len(as_bytes)) llstr = ll.GlobalVariable(self.llmodule, llstrty, name) @@ -306,7 +338,7 @@ class LLVMIRGenerator: assert False def llbuiltin(self, name): - llglobal = self.llmodule.get_global(name) + llglobal = self.llmodule.globals.get(name) if llglobal is not None: return llglobal @@ -335,15 +367,13 @@ class LLVMIRGenerator: elif name == self.target.print_function: llty = ll.FunctionType(llvoid, [llptr], var_arg=True) elif name == "rtio_log": - llty = ll.FunctionType(llvoid, [lli64, llptr], var_arg=True) + llty = ll.FunctionType(llvoid, [llptr], var_arg=True) elif name == "__artiq_personality": llty = ll.FunctionType(lli32, [], var_arg=True) elif name == "__artiq_raise": llty = ll.FunctionType(llvoid, 
[self.llty_of_type(builtins.TException())]) elif name == "__artiq_reraise": llty = ll.FunctionType(llvoid, []) - elif name in "abort": - llty = ll.FunctionType(llvoid, []) elif name == "memcmp": llty = ll.FunctionType(lli32, [llptr, llptr, lli32]) elif name == "rpc_send": @@ -352,12 +382,19 @@ class LLVMIRGenerator: llty = ll.FunctionType(llvoid, [lli32, llsliceptr, llptrptr]) elif name == "rpc_recv": llty = ll.FunctionType(lli32, [llptr]) + + # with now-pinning elif name == "now": llty = lli64 - elif name == "watchdog_set": - llty = ll.FunctionType(lli32, [lli64]) - elif name == "watchdog_clear": - llty = ll.FunctionType(llvoid, [lli32]) + + # without now-pinning + elif name == "now_mu": + llty = ll.FunctionType(lli64, []) + elif name == "at_mu": + llty = ll.FunctionType(llvoid, [lli64]) + elif name == "delay_mu": + llty = ll.FunctionType(llvoid, [lli64]) + else: assert False @@ -366,7 +403,6 @@ class LLVMIRGenerator: if name in ("__artiq_raise", "__artiq_reraise", "llvm.trap"): llglobal.attributes.add("noreturn") if name in ("rtio_log", "rpc_send", "rpc_send_async", - "watchdog_set", "watchdog_clear", self.target.print_function): llglobal.attributes.add("nounwind") if name.find("__py_") == 0: @@ -458,7 +494,7 @@ class LLVMIRGenerator: assert False def get_function(self, typ, name): - llfun = self.llmodule.get_global(name) + llfun = self.llmodule.globals.get(name) if llfun is None: llfunty = self.llty_of_type(typ, bare=True) llfun = ll.Function(self.llmodule, llfunty, name) @@ -499,7 +535,7 @@ class LLVMIRGenerator: llobjects = defaultdict(lambda: []) for obj_id, obj_ref, obj_typ in self.embedding_map.iter_objects(): - llobject = self.llmodule.get_global("O.{}".format(obj_id)) + llobject = self.llmodule.globals.get("O.{}".format(obj_id)) if llobject is not None: llobjects[obj_typ].append(llobject.bitcast(llptr)) @@ -524,11 +560,10 @@ class LLVMIRGenerator: print(typ) assert False - if not (types.is_function(typ) or types.is_method(typ) or types.is_rpc(typ) or - name == "__objectid__"): - rpctag = b"Os" + self._rpc_tag(typ, error_handler=rpc_tag_error) + b":n" - else: + if name == "__objectid__": rpctag = b"" + else: + rpctag = b"Os" + ir.rpc_tag(typ, error_handler=rpc_tag_error) + b":n" llrpcattrinit = ll.Constant(llrpcattrty, [ ll.Constant(lli32, offset), @@ -551,17 +586,17 @@ class LLVMIRGenerator: offset = 0 llrpcattrs = [] for attr in typ.attributes: - attrtyp = typ.attributes[attr] - size = self.llty_of_type(attrtyp). \ - get_abi_size(self.lldatalayout, context=self.llcontext) - alignment = self.llty_of_type(attrtyp). 
\ - get_abi_alignment(self.lldatalayout, context=self.llcontext) + attrtyp = typ.attributes[attr] + size, alignment = self.abi_layout_info.get_size_align_for_type(attrtyp) if offset % alignment != 0: offset += alignment - (offset % alignment) if types.is_instance(typ) and attr not in typ.constant_attributes: - llrpcattrs.append(llrpcattr_of_attr(offset, attr, attrtyp)) + try: + llrpcattrs.append(llrpcattr_of_attr(offset, attr, attrtyp)) + except ValueError: + pass offset += size @@ -695,15 +730,19 @@ class LLVMIRGenerator: name=insn.name) else: assert False - elif builtins.is_listish(insn.type): + elif types._is_pointer(insn.type) or (builtins.is_listish(insn.type) + and not builtins.is_array(insn.type)): llsize = self.map(insn.operands[0]) lleltty = self.llty_of_type(builtins.get_iterable_elt(insn.type)) llalloc = self.llbuilder.alloca(lleltty, size=llsize) + if types._is_pointer(insn.type): + return llalloc llvalue = ll.Constant(self.llty_of_type(insn.type), ll.Undefined) llvalue = self.llbuilder.insert_value(llvalue, llalloc, 0, name=insn.name) llvalue = self.llbuilder.insert_value(llvalue, llsize, 1) return llvalue - elif not builtins.is_allocated(insn.type) or ir.is_keyword(insn.type): + elif (not builtins.is_allocated(insn.type) or ir.is_keyword(insn.type) + or builtins.is_array(insn.type)): llvalue = ll.Constant(self.llty_of_type(insn.type), ll.Undefined) for index, elt in enumerate(insn.operands): llvalue = self.llbuilder.insert_value(llvalue, self.map(elt), index) @@ -720,9 +759,8 @@ class LLVMIRGenerator: self.llbuilder.store(lloperand, llfieldptr) return llalloc - def llptr_to_var(self, llenv, env_ty, var_name, var_type=None): - if var_name in env_ty.params and (var_type is None or - env_ty.params[var_name] == var_type): + def llptr_to_var(self, llenv, env_ty, var_name): + if var_name in env_ty.params: var_index = list(env_ty.params.keys()).index(var_name) return self.llbuilder.gep(llenv, [self.llindex(0), self.llindex(var_index)], inbounds=True) @@ -731,15 +769,15 @@ class LLVMIRGenerator: llptr = self.llbuilder.gep(llenv, [self.llindex(0), self.llindex(outer_index)], inbounds=True) llouterenv = self.llbuilder.load(llptr) - llouterenv.set_metadata('unconditionally.invariant.load', self.empty_metadata) + llouterenv.set_metadata('invariant.load', self.empty_metadata) llouterenv.set_metadata('nonnull', self.empty_metadata) return self.llptr_to_var(llouterenv, env_ty.params["$outer"], var_name) def mark_dereferenceable(self, load): assert isinstance(load, ll.LoadInstr) and isinstance(load.type, ll.PointerType) - pointee_size = load.type.pointee.get_abi_size(self.lldatalayout, context=self.llcontext) + pointee_size, _ = self.abi_layout_info.get_size_align(load.type.pointee) metadata = self.llmodule.add_metadata([ll.Constant(lli64, pointee_size)]) - load.set_metadata('unconditionally_dereferenceable', metadata) + load.set_metadata('dereferenceable', metadata) def process_GetLocal(self, insn): env = insn.environment() @@ -793,7 +831,7 @@ class LLVMIRGenerator: closure_type = typ.attributes[attr] assert types.is_constructor(typ) assert types.is_function(closure_type) or types.is_rpc(closure_type) - if types.is_c_function(closure_type) or types.is_rpc(closure_type): + if types.is_external_function(closure_type) or types.is_rpc(closure_type): return None llty = self.llty_of_type(typ.attributes[attr]) @@ -839,7 +877,7 @@ class LLVMIRGenerator: if types.is_tuple(typ): return self.llbuilder.extract_value(self.map(insn.object()), attr, name=insn.name) - elif not builtins.is_allocated(typ): + 
elif builtins.is_array(typ) or not builtins.is_allocated(typ): return self.llbuilder.extract_value(self.map(insn.object()), self.attr_index(typ, attr), name=insn.name) @@ -875,7 +913,7 @@ class LLVMIRGenerator: inbounds=True, name="ptr.{}".format(insn.name)) llvalue = self.llbuilder.load(llptr, name="val.{}".format(insn.name)) if types.is_instance(typ) and attr in typ.constant_attributes: - llvalue.set_metadata('unconditionally.invariant.load', self.empty_metadata) + llvalue.set_metadata('invariant.load', self.empty_metadata) if isinstance(llvalue.type, ll.PointerType): self.mark_dereferenceable(llvalue) return llvalue @@ -902,20 +940,28 @@ class LLVMIRGenerator: inbounds=True, name=insn.name) return self.llbuilder.store(llvalue, llptr) - def process_GetElem(self, insn): - lst, idx = insn.list(), insn.index() - lllst, llidx = map(self.map, (lst, idx)) - llelts = self.llbuilder.extract_value(lllst, 0) + def process_Offset(self, insn): + base, idx = insn.base(), insn.index() + llelts, llidx = map(self.map, (base, idx)) + if not types._is_pointer(base.type): + # This is list-ish. + llelts = self.llbuilder.extract_value(llelts, 0) llelt = self.llbuilder.gep(llelts, [llidx], inbounds=True) + return llelt + + def process_GetElem(self, insn): + llelt = self.process_Offset(insn) llvalue = self.llbuilder.load(llelt) if isinstance(llvalue.type, ll.PointerType): self.mark_dereferenceable(llvalue) return llvalue def process_SetElem(self, insn): - lst, idx = insn.list(), insn.index() - lllst, llidx = map(self.map, (lst, idx)) - llelts = self.llbuilder.extract_value(lllst, 0) + base, idx = insn.base(), insn.index() + llelts, llidx = map(self.map, (base, idx)) + if not types._is_pointer(base.type): + # This is list-ish. + llelts = self.llbuilder.extract_value(llelts, 0) llelt = self.llbuilder.gep(llelts, [llidx], inbounds=True) return self.llbuilder.store(self.map(insn.value()), llelt) @@ -1090,8 +1136,6 @@ class LLVMIRGenerator: def process_Builtin(self, insn): if insn.op == "nop": return self.llbuilder.call(self.llbuiltin("llvm.donothing"), []) - if insn.op == "abort": - return self.llbuilder.call(self.llbuiltin("abort"), []) elif insn.op == "is_some": lloptarg = self.map(insn.operands[0]) return self.llbuilder.extract_value(lloptarg, 0, @@ -1118,7 +1162,7 @@ class LLVMIRGenerator: llptr = self.llbuilder.gep(llenv, [self.llindex(0), self.llindex(outer_index)], inbounds=True) llouterenv = self.llbuilder.load(llptr) - llouterenv.set_metadata('unconditionally.invariant.load', self.empty_metadata) + llouterenv.set_metadata('invariant.load', self.empty_metadata) llouterenv.set_metadata('nonnull', self.empty_metadata) return self.llptr_to_var(llouterenv, env_ty.params["$outer"], var_name) else: @@ -1128,13 +1172,18 @@ class LLVMIRGenerator: return get_outer(self.map(env), env.type) elif insn.op == "len": collection, = insn.operands + if builtins.is_array(collection.type): + # Return length of outermost dimension. + shape = self.llbuilder.extract_value(self.map(collection), + self.attr_index(collection.type, "shape")) + return self.llbuilder.extract_value(shape, 0) return self.llbuilder.extract_value(self.map(collection), 1) elif insn.op in ("printf", "rtio_log"): # We only get integers, floats, pointers and strings here. 
lloperands = [] for i, operand in enumerate(insn.operands): lloperand = self.map(operand) - if i == 0 and insn.op == "printf" or i == 1 and insn.op == "rtio_log": + if i == 0 and (insn.op == "printf" or insn.op == "rtio_log"): lloperands.append(self.llbuilder.extract_value(lloperand, 0)) elif builtins.is_str(operand.type) or builtins.is_bytes(operand.type): lloperands.append(self.llbuilder.extract_value(lloperand, 1)) @@ -1148,27 +1197,43 @@ class LLVMIRGenerator: # This is an identity cast at LLVM IR level. return self.map(insn.operands[0]) elif insn.op == "now_mu": - llnow = self.llbuilder.load(self.llbuiltin("now"), name=insn.name) - llnow.set_metadata("tbaa", self.tbaa_now) - return llnow + if self.target.now_pinning: + return self.llbuilder.load(self.llbuiltin("now"), name=insn.name) + else: + return self.llbuilder.call(self.llbuiltin("now_mu"), []) elif insn.op == "at_mu": time, = insn.operands - return self.llbuilder.store(self.map(time), self.llbuiltin("now")) + lltime = self.map(time) + if self.target.now_pinning: + lltime_hi = self.llbuilder.trunc(self.llbuilder.lshr(lltime, ll.Constant(lli64, 32)), lli32) + lltime_lo = self.llbuilder.trunc(lltime, lli32) + llnow_hiptr = self.llbuilder.bitcast(self.llbuiltin("now"), lli32.as_pointer()) + llnow_loptr = self.llbuilder.gep(llnow_hiptr, [self.llindex(1)]) + if self.target.little_endian: + lltime_hi, lltime_lo = lltime_lo, lltime_hi + llstore_hi = self.llbuilder.store_atomic(lltime_hi, llnow_hiptr, ordering="seq_cst", align=4) + llstore_lo = self.llbuilder.store_atomic(lltime_lo, llnow_loptr, ordering="seq_cst", align=4) + return llstore_lo + else: + return self.llbuilder.call(self.llbuiltin("at_mu"), [lltime]) elif insn.op == "delay_mu": interval, = insn.operands - llnowptr = self.llbuiltin("now") - llnow = self.llbuilder.load(llnowptr, name="now.old") - llnow.set_metadata("tbaa", self.tbaa_now) - lladjusted = self.llbuilder.add(llnow, self.map(interval), name="now.new") - llnowstore = self.llbuilder.store(lladjusted, llnowptr) - llnowstore.set_metadata("tbaa", self.tbaa_now) - return llnowstore - elif insn.op == "watchdog_set": - interval, = insn.operands - return self.llbuilder.call(self.llbuiltin("watchdog_set"), [self.map(interval)]) - elif insn.op == "watchdog_clear": - id, = insn.operands - return self.llbuilder.call(self.llbuiltin("watchdog_clear"), [self.map(id)]) + llinterval = self.map(interval) + if self.target.now_pinning: + llnowptr = self.llbuiltin("now") + llnow = self.llbuilder.load(llnowptr, name="now.old") + lladjusted = self.llbuilder.add(llnow, llinterval, name="now.new") + lladjusted_hi = self.llbuilder.trunc(self.llbuilder.lshr(lladjusted, ll.Constant(lli64, 32)), lli32) + lladjusted_lo = self.llbuilder.trunc(lladjusted, lli32) + llnow_hiptr = self.llbuilder.bitcast(llnowptr, lli32.as_pointer()) + llnow_loptr = self.llbuilder.gep(llnow_hiptr, [self.llindex(1)]) + if self.target.little_endian: + lladjusted_hi, lladjusted_lo = lladjusted_lo, lladjusted_hi + llstore_hi = self.llbuilder.store_atomic(lladjusted_hi, llnow_hiptr, ordering="seq_cst", align=4) + llstore_lo = self.llbuilder.store_atomic(lladjusted_lo, llnow_loptr, ordering="seq_cst", align=4) + return llstore_lo + else: + return self.llbuilder.call(self.llbuiltin("delay_mu"), [llinterval]) else: assert False @@ -1209,7 +1274,7 @@ class LLVMIRGenerator: llargs.append(llarg) llfunname = insn.target_function().type.name - llfun = self.llmodule.get_global(llfunname) + llfun = self.llmodule.globals.get(llfunname) if llfun is None: llretty = 
self.llty_of_type(insn.type, for_return=True) if self.needs_sret(llretty): @@ -1230,47 +1295,6 @@ class LLVMIRGenerator: return llfun, list(llargs) - # See session.c:{send,receive}_rpc_value and comm_generic.py:_{send,receive}_rpc_value. - def _rpc_tag(self, typ, error_handler): - typ = typ.find() - if types.is_tuple(typ): - assert len(typ.elts) < 256 - return b"t" + bytes([len(typ.elts)]) + \ - b"".join([self._rpc_tag(elt_type, error_handler) - for elt_type in typ.elts]) - elif builtins.is_none(typ): - return b"n" - elif builtins.is_bool(typ): - return b"b" - elif builtins.is_int(typ, types.TValue(32)): - return b"i" - elif builtins.is_int(typ, types.TValue(64)): - return b"I" - elif builtins.is_float(typ): - return b"f" - elif builtins.is_str(typ): - return b"s" - elif builtins.is_bytes(typ): - return b"B" - elif builtins.is_bytearray(typ): - return b"A" - elif builtins.is_list(typ): - return b"l" + self._rpc_tag(builtins.get_iterable_elt(typ), - error_handler) - elif builtins.is_array(typ): - return b"a" + self._rpc_tag(builtins.get_iterable_elt(typ), - error_handler) - elif builtins.is_range(typ): - return b"r" + self._rpc_tag(builtins.get_iterable_elt(typ), - error_handler) - elif ir.is_keyword(typ): - return b"k" + self._rpc_tag(typ.params["value"], - error_handler) - elif '__objectid__' in typ.attributes: - return b"O" - else: - error_handler(typ) - def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock): llservice = ll.Constant(lli32, fun_type.service) @@ -1288,7 +1312,7 @@ class LLVMIRGenerator: {"type": printer.name(arg.type)}, arg.loc) self.engine.process(diag) - tag += self._rpc_tag(arg.type, arg_error_handler) + tag += ir.rpc_tag(arg.type, arg_error_handler) tag += b":" def ret_error_handler(typ): @@ -1302,7 +1326,7 @@ class LLVMIRGenerator: {"type": printer.name(fun_type.ret)}, fun_loc) self.engine.process(diag) - tag += self._rpc_tag(fun_type.ret, ret_error_handler) + tag += ir.rpc_tag(fun_type.ret, ret_error_handler) lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr())) lltagptr = self.llbuilder.alloca(lltag.type) @@ -1315,7 +1339,7 @@ class LLVMIRGenerator: name="rpc.args") for index, arg in enumerate(args): if builtins.is_none(arg.type): - llargslot = self.llbuilder.alloca(ll.LiteralStructType([]), + llargslot = self.llbuilder.alloca(llunit, name="rpc.arg{}".format(index)) else: llarg = self.map(arg) @@ -1327,7 +1351,7 @@ class LLVMIRGenerator: llargptr = self.llbuilder.gep(llargs, [ll.Constant(lli32, index)]) self.llbuilder.store(llargslot, llargptr) - if fun_type.async: + if fun_type.is_async: self.llbuilder.call(self.llbuiltin("rpc_send_async"), [llservice, lltagptr, llargs]) else: @@ -1337,7 +1361,13 @@ class LLVMIRGenerator: # Don't waste stack space on saved arguments. self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) - if fun_type.async: + if fun_type.is_async: + # If this RPC is called using an `invoke` ARTIQ IR instruction, there will be + # no other instructions in this basic block. Since this RPC is async, it cannot + # possibly raise an exception, so add an explicit jump to the normal successor. 
+ if llunwindblock: + self.llbuilder.branch(llnormalblock) + return ll.Undefined # T result = { @@ -1383,7 +1413,7 @@ class LLVMIRGenerator: self.llbuilder.position_at_end(lltail) llret = self.llbuilder.load(llslot, name="rpc.ret") - if not builtins.is_allocated(fun_type.ret): + if not fun_type.ret.fold(False, lambda r, t: r or builtins.is_allocated(t)): # We didn't allocate anything except the slot for the value itself. # Don't waste stack space. self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr]) @@ -1398,7 +1428,7 @@ class LLVMIRGenerator: functiontyp, insn.arguments(), llnormalblock=None, llunwindblock=None) - elif types.is_c_function(functiontyp): + elif types.is_external_function(functiontyp): llfun, llargs = self._prepare_ffi_call(insn) else: llfun, llargs = self._prepare_closure_call(insn) @@ -1414,9 +1444,14 @@ class LLVMIRGenerator: else: llcall = llresult = self.llbuilder.call(llfun, llargs, name=insn.name) + if isinstance(llresult.type, ll.VoidType): + # We have NoneType-returning functions return void, but None is + # {} elsewhere. + llresult = ll.Constant(llunit, []) + # Never add TBAA nowrite metadata to a functon with sret! # This leads to miscompilations. - if types.is_c_function(functiontyp) and 'nowrite' in functiontyp.flags: + if types.is_external_function(functiontyp) and 'nowrite' in functiontyp.flags: llcall.set_metadata('tbaa', self.tbaa_nowrite_call) return llresult @@ -1430,7 +1465,7 @@ class LLVMIRGenerator: functiontyp, insn.arguments(), llnormalblock, llunwindblock) - elif types.is_c_function(functiontyp): + elif types.is_external_function(functiontyp): llfun, llargs = self._prepare_ffi_call(insn) else: llfun, llargs = self._prepare_closure_call(insn) @@ -1448,12 +1483,22 @@ class LLVMIRGenerator: llcall = self.llbuilder.invoke(llfun, llargs, llnormalblock, llunwindblock, name=insn.name) - # See the comment in process_Call. - if types.is_c_function(functiontyp) and 'nowrite' in functiontyp.flags: - llcall.set_metadata('tbaa', self.tbaa_nowrite_call) + # The !tbaa metadata is not legal to use with the invoke instruction, + # so unlike process_Call, we do not set it here. 
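# Illustrative aside, not part of the patch: the tag string assembled in
# _build_rpc above consists of one single-character code per argument (as
# emitted by ir.rpc_tag(): "b" bool, "i" int32, "I" int64, "f" float, "s" str,
# "l" list, "a" array, "n" None, ...), then ":", then the code for the return
# type.  For a hypothetical RPC with signature (x: int32, y: list(float)) -> None
# the payload tag would be roughly:
example_tag = b"i" + b"lf" + b":" + b"n"
assert example_tag == b"ilf:n"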
return llcall + def _quote_listish_to_llglobal(self, value, elt_type, path, kind_name): + llelts = [self._quote(value[i], elt_type, lambda: path() + [str(i)]) + for i in range(len(value))] + lleltsary = ll.Constant(ll.ArrayType(self.llty_of_type(elt_type), len(llelts)), + list(llelts)) + name = self.llmodule.scope.deduplicate("quoted.{}".format(kind_name)) + llglobal = ll.GlobalVariable(self.llmodule, lleltsary.type, name) + llglobal.initializer = lleltsary + llglobal.linkage = "private" + return llglobal.bitcast(lleltsary.type.element.as_pointer()) + def _quote(self, value, typ, path): value_id = id(value) if value_id in self.llobject_map: @@ -1481,7 +1526,7 @@ class LLVMIRGenerator: attrvalue = getattr(value, attr) is_class_function = (types.is_constructor(typ) and types.is_function(typ.attributes[attr]) and - not types.is_c_function(typ.attributes[attr])) + not types.is_external_function(typ.attributes[attr])) if is_class_function: attrvalue = self.embedding_map.specialize_function(typ.instance, attrvalue) if not (types.is_instance(typ) and attr in typ.constant_attributes): @@ -1530,28 +1575,39 @@ class LLVMIRGenerator: llstr = self.llstr_of_str(as_bytes) llconst = ll.Constant(llty, [llstr, ll.Constant(lli32, len(as_bytes))]) return llconst + elif builtins.is_array(typ): + assert isinstance(value, numpy.ndarray), fail_msg + typ = typ.find() + assert len(value.shape) == typ["num_dims"].find().value + flattened = value.reshape((-1,)) + lleltsptr = self._quote_listish_to_llglobal(flattened, typ["elt"], path, "array") + llshape = ll.Constant.literal_struct([ll.Constant(lli32, s) for s in value.shape]) + return ll.Constant(llty, [lleltsptr, llshape]) elif builtins.is_listish(typ): assert isinstance(value, (list, numpy.ndarray)), fail_msg elt_type = builtins.get_iterable_elt(typ) - llelts = [self._quote(value[i], elt_type, lambda: path() + [str(i)]) - for i in range(len(value))] - lleltsary = ll.Constant(ll.ArrayType(self.llty_of_type(elt_type), len(llelts)), - list(llelts)) - - name = self.llmodule.scope.deduplicate("quoted.{}".format(typ.name)) - llglobal = ll.GlobalVariable(self.llmodule, lleltsary.type, name) - llglobal.initializer = lleltsary - llglobal.linkage = "private" - - lleltsptr = llglobal.bitcast(lleltsary.type.element.as_pointer()) - llconst = ll.Constant(llty, [lleltsptr, ll.Constant(lli32, len(llelts))]) + lleltsptr = self._quote_listish_to_llglobal(value, elt_type, path, typ.find().name) + llconst = ll.Constant(llty, [lleltsptr, ll.Constant(lli32, len(value))]) return llconst - elif types.is_rpc(typ) or types.is_c_function(typ): - # RPC and C functions have no runtime representation. + elif types.is_tuple(typ): + assert isinstance(value, tuple), fail_msg + llelts = [self._quote(v, t, lambda: path() + [str(i)]) + for i, (v, t) in enumerate(zip(value, typ.elts))] + return ll.Constant(llty, llelts) + elif types.is_rpc(typ) or types.is_external_function(typ) or types.is_builtin_function(typ): + # RPC, C and builtin functions have no runtime representation. return ll.Constant(llty, ll.Undefined) elif types.is_function(typ): - return self.get_function_with_undef_env(typ.find(), - self.embedding_map.retrieve_function(value)) + try: + func = self.embedding_map.retrieve_function(value) + except KeyError: + # If a class function was embedded directly (e.g. by a `C.f(...)` call), + # but it also appears in a class hierarchy, we might need to fall back + # to the non-specialized one, since direct invocations do not cause + # monomorphization. 
+ assert isinstance(value, SpecializedFunction) + func = self.embedding_map.retrieve_function(value.host_function) + return self.get_function_with_undef_env(typ.find(), func) elif types.is_method(typ): llclosure = self._quote(value.__func__, types.get_method_function(typ), lambda: path() + ['__func__']) @@ -1643,7 +1699,7 @@ class LLVMIRGenerator: llclauseexnname = self.llconst_of_const( ir.Constant(exnname, builtins.TStr())) - llclauseexnnameptr = self.llmodule.get_global("exn.{}".format(exnname)) + llclauseexnnameptr = self.llmodule.globals.get("exn.{}".format(exnname)) if llclauseexnnameptr is None: llclauseexnnameptr = ll.GlobalVariable(self.llmodule, llclauseexnname.type, name="exn.{}".format(exnname)) diff --git a/artiq/compiler/transforms/local_demoter.py b/artiq/compiler/transforms/local_demoter.py new file mode 100644 index 000000000..4701e7a7c --- /dev/null +++ b/artiq/compiler/transforms/local_demoter.py @@ -0,0 +1,51 @@ +""" +:class:`LocalDemoter` is a constant propagation transform: +it replaces reads of any local variable with only one write +in a function without closures with the value that was written. + +:class:`LocalAccessValidator` must be run before this transform +to ensure that the transformation it performs is sound. +""" + +from collections import defaultdict +from .. import ir + +class LocalDemoter: + def process(self, functions): + for func in functions: + self.process_function(func) + + def process_function(self, func): + env_safe = {} + env_gets = defaultdict(lambda: set()) + env_sets = defaultdict(lambda: set()) + + for insn in func.instructions(): + if isinstance(insn, (ir.GetLocal, ir.SetLocal)): + if "$" in insn.var_name: + continue + + env = insn.environment() + + if env not in env_safe: + for use in env.uses: + if not isinstance(use, (ir.GetLocal, ir.SetLocal)): + env_safe[env] = False + break + else: + env_safe[env] = True + + if not env_safe[env]: + continue + + if isinstance(insn, ir.SetLocal): + env_sets[(env, insn.var_name)].add(insn) + else: + env_gets[(env, insn.var_name)].add(insn) + + for (env, var_name) in env_sets: + if len(env_sets[(env, var_name)]) == 1: + set_insn = next(iter(env_sets[(env, var_name)])) + for get_insn in env_gets[(env, var_name)]: + get_insn.replace_all_uses_with(set_insn.value()) + get_insn.erase() diff --git a/artiq/compiler/types.py b/artiq/compiler/types.py index be76adf5a..e7b68a3a4 100644 --- a/artiq/compiler/types.py +++ b/artiq/compiler/types.py @@ -73,7 +73,7 @@ class TVar(Type): # path compression iter = self while iter.__class__ == TVar: - if iter is iter.parent: + if iter is root: break else: iter, iter.parent = iter.parent, root @@ -81,6 +81,8 @@ class TVar(Type): return root def unify(self, other): + if other is self: + return other = other.find() if self.parent is self: @@ -124,6 +126,8 @@ class TMono(Type): return self def unify(self, other): + if other is self: + return if isinstance(other, TMono) and self.name == other.name: assert self.params.keys() == other.params.keys() for param in self.params: @@ -171,6 +175,8 @@ class TTuple(Type): return self def unify(self, other): + if other is self: + return if isinstance(other, TTuple) and len(self.elts) == len(other.elts): for selfelt, otherelt in zip(self.elts, other.elts): selfelt.unify(otherelt) @@ -198,8 +204,10 @@ class TTuple(Type): return hash(tuple(self.elts)) class _TPointer(TMono): - def __init__(self): - super().__init__("pointer") + def __init__(self, elt=None): + if elt is None: + elt = TMono("int", {"width": 8}) # i8* + super().__init__("pointer", 
params={"elt": elt}) class TFunction(Type): """ @@ -237,6 +245,8 @@ class TFunction(Type): return self def unify(self, other): + if other is self: + return if isinstance(other, TFunction) and \ self.args.keys() == other.args.keys() and \ self.optargs.keys() == other.optargs.keys(): @@ -273,20 +283,29 @@ class TFunction(Type): def __hash__(self): return hash((_freeze(self.args), _freeze(self.optargs), self.ret)) -class TCFunction(TFunction): +class TExternalFunction(TFunction): """ - A function type of a runtime-provided C function. + A type of an externally-provided function. - :ivar name: (str) C function name - :ivar flags: (set of str) C function flags. + This can be any function following the C ABI, such as provided by the + C/Rust runtime, or a compiler backend intrinsic. The mangled name to link + against is encoded as part of the type. + + :ivar name: (str) external symbol name. + This will be the symbol linked against (following any extra C name + mangling rules). + :ivar flags: (set of str) function flags. Flag ``nounwind`` means the function never raises an exception. Flag ``nowrite`` means the function never writes any memory that the ARTIQ Python code can observe. + :ivar broadcast_across_arrays: (bool) + If True, the function is transparently applied element-wise when called + with TArray arguments. """ attributes = OrderedDict() - def __init__(self, args, ret, name, flags={}): + def __init__(self, args, ret, name, flags=set(), broadcast_across_arrays=False): assert isinstance(flags, set) for flag in flags: assert flag in {'nounwind', 'nowrite'} @@ -294,9 +313,12 @@ class TCFunction(TFunction): self.name = name self.delay = TFixedDelay(iodelay.Const(0)) self.flags = flags + self.broadcast_across_arrays = broadcast_across_arrays def unify(self, other): - if isinstance(other, TCFunction) and \ + if other is self: + return + if isinstance(other, TExternalFunction) and \ self.name == other.name: super().unify(other) elif isinstance(other, TVar): @@ -311,22 +333,24 @@ class TRPC(Type): :ivar ret: (:class:`Type`) return type :ivar service: (int) RPC service number - :ivar async: (bool) whether the RPC blocks until return + :ivar is_async: (bool) whether the RPC blocks until return """ attributes = OrderedDict() - def __init__(self, ret, service, async=False): + def __init__(self, ret, service, is_async=False): assert isinstance(ret, Type) - self.ret, self.service, self.async = ret, service, async + self.ret, self.service, self.is_async = ret, service, is_async def find(self): return self def unify(self, other): + if other is self: + return if isinstance(other, TRPC) and \ self.service == other.service and \ - self.async == other.async: + self.is_async == other.is_async: self.ret.unify(other.ret) elif isinstance(other, TVar): other.unify(self) @@ -343,7 +367,7 @@ class TRPC(Type): def __eq__(self, other): return isinstance(other, TRPC) and \ self.service == other.service and \ - self.async == other.async + self.is_async == other.is_async def __ne__(self, other): return not (self == other) @@ -366,6 +390,8 @@ class TBuiltin(Type): return self def unify(self, other): + if other is self: + return if self != other: raise UnificationError(self, other) @@ -388,6 +414,11 @@ class TBuiltin(Type): class TBuiltinFunction(TBuiltin): """ A type of a builtin function. + + Builtin functions are treated specially throughout all stages of the + compilation process according to their name (e.g. calls may not actually + lower to a function call). 
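# --- Editor's aside (illustration only, not part of the patch) ----------------
# Quick tour of the renamed types above: TCFunction is now TExternalFunction
# (with an opt-in broadcast_across_arrays flag), and TRPC's `async` field is
# `is_async` (`async` became a reserved keyword in Python 3.7).  The "sqrt"
# symbol is only an example of an external function.
from collections import OrderedDict
from artiq.compiler import types, builtins

ext = types.TExternalFunction(OrderedDict([("x", builtins.TFloat())]),
                              builtins.TFloat(), "sqrt",
                              flags={"nounwind", "nowrite"},
                              broadcast_across_arrays=True)
print(types.is_external_function(ext, "sqrt"))   # True
print(types.is_broadcast_across_arrays(ext))     # True

rpc = types.TRPC(builtins.TNone(), service=3, is_async=True)
print(types.is_rpc(rpc), rpc.is_async)           # True True
# ------------------------------------------------------------------------------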
See :class:`TExternalFunction` for externally + defined functions that are otherwise regular. """ class TConstructor(TBuiltin): @@ -471,6 +502,8 @@ class TValue(Type): return self def unify(self, other): + if other is self: + return if isinstance(other, TVar): other.unify(self) elif self != other: @@ -561,13 +594,15 @@ def is_mono(typ, name=None, **params): if not isinstance(typ, TMono): return False - params_match = True + if name is not None and typ.name != name: + return False + for param in params: if param not in typ.params: return False - params_match = params_match and \ - typ.params[param].find() == params[param].find() - return name is None or (typ.name == name and params_match) + if typ.params[param].find() != params[param].find(): + return False + return True def is_polymorphic(typ): return typ.fold(False, lambda accum, typ: accum or is_var(typ)) @@ -589,12 +624,12 @@ def is_function(typ): def is_rpc(typ): return isinstance(typ.find(), TRPC) -def is_c_function(typ, name=None): +def is_external_function(typ, name=None): typ = typ.find() if name is None: - return isinstance(typ, TCFunction) + return isinstance(typ, TExternalFunction) else: - return isinstance(typ, TCFunction) and \ + return isinstance(typ, TExternalFunction) and \ typ.name == name def is_builtin(typ, name=None): @@ -605,6 +640,23 @@ def is_builtin(typ, name=None): return isinstance(typ, TBuiltin) and \ typ.name == name +def is_builtin_function(typ, name=None): + typ = typ.find() + if name is None: + return isinstance(typ, TBuiltinFunction) + else: + return isinstance(typ, TBuiltinFunction) and \ + typ.name == name + +def is_broadcast_across_arrays(typ): + # For now, broadcasting is only exposed to predefined external functions, and + # statically selected. Might be extended to user-defined functions if the design + # pans out. 
+ typ = typ.find() + if not isinstance(typ, TExternalFunction): + return False + return typ.broadcast_across_arrays + def is_constructor(typ, name=None): typ = typ.find() if name is not None: @@ -709,12 +761,14 @@ class TypePrinter(object): else: return "%s(%s)" % (typ.name, ", ".join( ["%s=%s" % (k, self.name(typ.params[k], depth + 1)) for k in typ.params])) + elif isinstance(typ, _TPointer): + return "{}*".format(self.name(typ["elt"], depth + 1)) elif isinstance(typ, TTuple): if len(typ.elts) == 1: return "(%s,)" % self.name(typ.elts[0], depth + 1) else: return "(%s)" % ", ".join([self.name(typ, depth + 1) for typ in typ.elts]) - elif isinstance(typ, (TFunction, TCFunction)): + elif isinstance(typ, (TFunction, TExternalFunction)): args = [] args += [ "%s:%s" % (arg, self.name(typ.args[arg], depth + 1)) for arg in typ.args] @@ -728,13 +782,13 @@ class TypePrinter(object): elif not (delay.is_fixed() and iodelay.is_zero(delay.duration)): signature += " " + self.name(delay, depth + 1) - if isinstance(typ, TCFunction): + if isinstance(typ, TExternalFunction): return "[ffi {}]{}".format(repr(typ.name), signature) elif isinstance(typ, TFunction): return signature elif isinstance(typ, TRPC): return "[rpc{} #{}](...)->{}".format(typ.service, - " async" if typ.async else "", + " async" if typ.is_async else "", self.name(typ.ret, depth + 1)) elif isinstance(typ, TBuiltinFunction): return "".format(typ.name) diff --git a/artiq/compiler/validators/constness.py b/artiq/compiler/validators/constness.py index bfe228015..fb1123c49 100644 --- a/artiq/compiler/validators/constness.py +++ b/artiq/compiler/validators/constness.py @@ -50,3 +50,9 @@ class ConstnessValidator(algorithm.Visitor): node.loc) self.engine.process(diag) return + if builtins.is_array(typ): + diag = diagnostic.Diagnostic("error", + "array attributes cannot be assigned to", + {}, node.loc) + self.engine.process(diag) + return diff --git a/artiq/compiler/validators/escape.py b/artiq/compiler/validators/escape.py index de3db9191..c6ae59704 100644 --- a/artiq/compiler/validators/escape.py +++ b/artiq/compiler/validators/escape.py @@ -51,10 +51,6 @@ class Region: (other.range.begin_pos <= self.range.begin_pos <= other.range.end_pos and \ self.range.end_pos > other.range.end_pos) - def contract(self, other): - if not self.range: - self.range = other.range - def outlives(lhs, rhs): if not isinstance(lhs, Region): # lhs lives nonlexically return True @@ -69,8 +65,11 @@ class Region: class RegionOf(algorithm.Visitor): """ - Visit an expression and return the list of regions that must - be alive for the expression to execute. + Visit an expression and return the region that must be alive for the + expression to execute. + + For expressions involving multiple regions, the shortest-lived one is + returned. 
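# --- Editor's aside (hypothetical example, not part of the patch) -------------
# Kernel code of the kind the new ConstnessValidator diagnostic above rejects:
# assigning to an attribute of an array value is now a compile-time error.
from artiq.experiment import *
from numpy import array

class BadShapeAssign(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @kernel
    def run(self):
        a = array([1, 2, 3])
        a.shape = (3, 1)   # error: "array attributes cannot be assigned to"
# ------------------------------------------------------------------------------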
""" def __init__(self, env_stack, youngest_region): @@ -100,7 +99,7 @@ class RegionOf(algorithm.Visitor): visit_BinOpT = visit_sometimes_allocating def visit_CallT(self, node): - if types.is_c_function(node.func.type, "cache_get"): + if types.is_external_function(node.func.type, "cache_get"): # The cache is borrow checked dynamically return Global() else: @@ -157,7 +156,7 @@ class RegionOf(algorithm.Visitor): visit_NameConstantT = visit_immutable visit_NumT = visit_immutable visit_EllipsisT = visit_immutable - visit_UnaryOpT = visit_immutable + visit_UnaryOpT = visit_sometimes_allocating # possibly array op visit_CompareT = visit_immutable # Value lives forever @@ -301,17 +300,20 @@ class EscapeValidator(algorithm.Visitor): def visit_assignment(self, target, value): value_region = self._region_of(value) - # If this is a variable, we might need to contract the live range. - if isinstance(value_region, Region): - for name in self._names_of(target): - region = self._region_of(name) - if isinstance(region, Region): - region.contract(value_region) - # If we assign to an attribute of a quoted value, there will be no names # in the assignment lhs. target_names = self._names_of(target) or [] + # Adopt the value region for any variables declared on the lhs. + for name in target_names: + region = self._region_of(name) + if isinstance(region, Region) and not region.present(): + # Find the name's environment to overwrite the region. + for env in self.env_stack[::-1]: + if name.id in env: + env[name.id] = value_region + break + # The assigned value should outlive the assignee target_regions = [self._region_of(name) for name in target_names] for target_region in target_regions: diff --git a/artiq/coredevice/__init__.py b/artiq/coredevice/__init__.py index 70539315d..dced6ef0a 100644 --- a/artiq/coredevice/__init__.py +++ b/artiq/coredevice/__init__.py @@ -1,9 +1,3 @@ -from artiq.coredevice import exceptions, dds, spi -from artiq.coredevice.exceptions import (RTIOUnderflow, RTIOSequenceError, RTIOOverflow) -from artiq.coredevice.dds import (PHASE_MODE_CONTINUOUS, PHASE_MODE_ABSOLUTE, - PHASE_MODE_TRACKING) +from artiq.coredevice.exceptions import (RTIOUnderflow, RTIOOverflow) -__all__ = [] -__all__ += ["RTIOUnderflow", "RTIOSequenceError", "RTIOOverflow"] -__all__ += ["PHASE_MODE_CONTINUOUS", "PHASE_MODE_ABSOLUTE", - "PHASE_MODE_TRACKING"] +__all__ = ["RTIOUnderflow", "RTIOOverflow"] diff --git a/artiq/coredevice/ad5360.py b/artiq/coredevice/ad5360.py deleted file mode 100644 index c470fbec2..000000000 --- a/artiq/coredevice/ad5360.py +++ /dev/null @@ -1,182 +0,0 @@ -""" -Driver for the AD5360 DAC on RTIO. - -Output event replacement is not supported and issuing commands at the same -time is an error. -""" - - -from artiq.language.core import (kernel, portable, delay_mu, delay) -from artiq.language.units import ns, us -from artiq.coredevice import spi - -# Designed from the data sheets and somewhat after the linux kernel -# iio driver. 
- -_AD5360_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_CS_POLARITY | - 0*spi.SPI_CLK_POLARITY | 1*spi.SPI_CLK_PHASE | - 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) - -_AD5360_CMD_DATA = 3 << 22 -_AD5360_CMD_OFFSET = 2 << 22 -_AD5360_CMD_GAIN = 1 << 22 -_AD5360_CMD_SPECIAL = 0 << 22 - - -@portable -def _AD5360_WRITE_CHANNEL(c): - return (c + 8) << 16 - -_AD5360_SPECIAL_NOP = 0 << 16 -_AD5360_SPECIAL_CONTROL = 1 << 16 -_AD5360_SPECIAL_OFS0 = 2 << 16 -_AD5360_SPECIAL_OFS1 = 3 << 16 -_AD5360_SPECIAL_READ = 5 << 16 - - -@portable -def _AD5360_READ_CHANNEL(ch): - return (ch + 8) << 7 - -_AD5360_READ_X1A = 0x000 << 7 -_AD5360_READ_X1B = 0x040 << 7 -_AD5360_READ_OFFSET = 0x080 << 7 -_AD5360_READ_GAIN = 0x0c0 << 7 -_AD5360_READ_CONTROL = 0x101 << 7 -_AD5360_READ_OFS0 = 0x102 << 7 -_AD5360_READ_OFS1 = 0x103 << 7 - - -class AD5360: - """ - Support for the Analog devices AD53[67][0123] - multi-channel Digital to Analog Converters - - :param spi_device: Name of the SPI bus this device is on. - :param ldac_device: Name of the TTL device that LDAC is connected to - (optional). Needs to be explicitly initialized to high. - :param chip_select: Value to drive on the chip select lines - during transactions. - """ - - def __init__(self, dmgr, spi_device, ldac_device=None, chip_select=1): - self.core = dmgr.get("core") - self.bus = dmgr.get(spi_device) - if ldac_device is not None: - self.ldac = dmgr.get(ldac_device) - self.chip_select = chip_select - - @kernel - def setup_bus(self, write_div=4, read_div=7): - """Configure the SPI bus and the SPI transaction parameters - for this device. This method has to be called before any other method - if the bus has been used to access a different device in the meantime. - - This method advances the timeline by the duration of two - RTIO-to-Wishbone bus transactions. - - :param write_div: Write clock divider. - :param read_div: Read clock divider. - """ - # write: 2*8ns >= 10ns = t_6 (clk falling to cs_n rising) - # read: 4*8*ns >= 25ns = t_22 (clk falling to miso valid) - self.bus.set_config_mu(_AD5360_SPI_CONFIG, write_div, read_div) - self.bus.set_xfer(self.chip_select, 24, 0) - - @kernel - def write(self, data): - """Write 24 bits of data. - - This method advances the timeline by the duration of the SPI transfer - and the required CS high time. - """ - self.bus.write(data << 8) - delay_mu(self.bus.ref_period_mu) # get to 20ns min cs high - - @kernel - def write_offsets(self, value=0x1fff): - """Write the OFS0 and OFS1 offset DACs. - - This method advances the timeline by twice the duration of - :meth:`write`. - - :param value: Value to set both offset registers to. - """ - value &= 0x3fff - self.write(_AD5360_CMD_SPECIAL | _AD5360_SPECIAL_OFS0 | value) - self.write(_AD5360_CMD_SPECIAL | _AD5360_SPECIAL_OFS1 | value) - - @kernel - def write_channel(self, channel=0, value=0, op=_AD5360_CMD_DATA): - """Write to a channel register. - - This method advances the timeline by the duration of :meth:`write`. - - :param channel: Channel number to write to. - :param value: 16 bit value to write to the register. - :param op: Operation to perform, one of :const:`_AD5360_CMD_DATA`, - :const:`_AD5360_CMD_OFFSET`, :const:`_AD5360_CMD_GAIN` - (default: :const:`_AD5360_CMD_DATA`). - """ - channel &= 0x3f - value &= 0xffff - self.write(op | _AD5360_WRITE_CHANNEL(channel) | value) - - @kernel - def read_channel_sync(self, channel=0, op=_AD5360_READ_X1A): - """Read a channel register. 
- - This method advances the timeline by the duration of :meth:`write` plus - three RTIO-to-Wishbone transactions. - - :param channel: Channel number to read from. - :param op: Operation to perform, one of :const:`_AD5360_READ_X1A`, - :const:`_AD5360_READ_X1B`, :const:`_AD5360_READ_OFFSET`, - :const:`_AD5360_READ_GAIN` (default: :const:`_AD5360_READ_X1A`). - :return: The 16 bit register value. - """ - channel &= 0x3f - self.write(_AD5360_CMD_SPECIAL | _AD5360_SPECIAL_READ | op | - _AD5360_READ_CHANNEL(channel)) - self.bus.set_xfer(self.chip_select, 0, 24) - self.write(_AD5360_CMD_SPECIAL | _AD5360_SPECIAL_NOP) - self.bus.read_async() - self.bus.set_xfer(self.chip_select, 24, 0) - return self.bus.input_async() & 0xffff - - @kernel - def load(self): - """Pulse the LDAC line. - - This method advances the timeline by two RTIO clock periods (16 ns). - """ - self.ldac.off() - # t13 = 10ns ldac pulse width low - delay_mu(2*self.bus.ref_period_mu) - self.ldac.on() - - @kernel - def set(self, values, op=_AD5360_CMD_DATA): - """Write to several channels and pulse LDAC to update the channels. - - This method does not advance the timeline. Write events are scheduled - in the past. The DACs will synchronously start changing their output - levels `now`. - - :param values: List of 16 bit values to write to the channels. - :param op: Operation to perform, one of :const:`_AD5360_CMD_DATA`, - :const:`_AD5360_CMD_OFFSET`, :const:`_AD5360_CMD_GAIN` - (default: :const:`_AD5360_CMD_DATA`). - """ - # compensate all delays that will be applied - delay_mu(-len(values)*(self.bus.xfer_period_mu + - self.bus.write_period_mu + - self.bus.ref_period_mu) - - 3*self.bus.ref_period_mu - - self.core.seconds_to_mu(1.5*us)) - for i in range(len(values)): - self.write_channel(i, values[i], op) - delay_mu(3*self.bus.ref_period_mu + # latency alignment ttl to spi - self.core.seconds_to_mu(1.5*us)) # t10 max busy low for one channel - self.load() - delay_mu(-2*self.bus.ref_period_mu) # load(), t13 diff --git a/artiq/coredevice/ad53xx.py b/artiq/coredevice/ad53xx.py new file mode 100644 index 000000000..3445555d9 --- /dev/null +++ b/artiq/coredevice/ad53xx.py @@ -0,0 +1,393 @@ +""""RTIO driver for the Analog Devices AD53[67][0123] family of multi-channel +Digital to Analog Converters. + +Output event replacement is not supported and issuing commands at the same +time is an error. +""" + +# Designed from the data sheets and somewhat after the linux kernel +# iio driver. 
+ +from numpy import int32 + +from artiq.language.core import (kernel, portable, delay_mu, delay, now_mu, + at_mu) +from artiq.language.units import ns, us +from artiq.coredevice import spi2 as spi + +SPI_AD53XX_CONFIG = (0*spi.SPI_OFFLINE | 1*spi.SPI_END | + 0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY | + 0*spi.SPI_CLK_POLARITY | 1*spi.SPI_CLK_PHASE | + 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + +AD53XX_CMD_DATA = 3 << 22 +AD53XX_CMD_OFFSET = 2 << 22 +AD53XX_CMD_GAIN = 1 << 22 +AD53XX_CMD_SPECIAL = 0 << 22 + +AD53XX_SPECIAL_NOP = 0 << 16 +AD53XX_SPECIAL_CONTROL = 1 << 16 +AD53XX_SPECIAL_OFS0 = 2 << 16 +AD53XX_SPECIAL_OFS1 = 3 << 16 +AD53XX_SPECIAL_READ = 5 << 16 +AD53XX_SPECIAL_AB0 = 6 << 16 +AD53XX_SPECIAL_AB1 = 7 << 16 +AD53XX_SPECIAL_AB2 = 8 << 16 +AD53XX_SPECIAL_AB3 = 9 << 16 +AD53XX_SPECIAL_AB = 11 << 16 + +# incorporate the channel offset (8, table 17) here +AD53XX_READ_X1A = 0x008 << 7 +AD53XX_READ_X1B = 0x048 << 7 +AD53XX_READ_OFFSET = 0x088 << 7 +AD53XX_READ_GAIN = 0x0C8 << 7 + +AD53XX_READ_CONTROL = 0x101 << 7 +AD53XX_READ_OFS0 = 0x102 << 7 +AD53XX_READ_OFS1 = 0x103 << 7 +AD53XX_READ_AB0 = 0x106 << 7 +AD53XX_READ_AB1 = 0x107 << 7 +AD53XX_READ_AB2 = 0x108 << 7 +AD53XX_READ_AB3 = 0x109 << 7 + + +@portable +def ad53xx_cmd_write_ch(channel, value, op): + """Returns the word that must be written to the DAC to set a DAC + channel register to a given value. + + :param channel: DAC channel to write to (8 bits) + :param value: 16-bit value to write to the register + :param op: The channel register to write to, one of + :const:`AD53XX_CMD_DATA`, :const:`AD53XX_CMD_OFFSET` or + :const:`AD53XX_CMD_GAIN`. + :return: The 24-bit word to be written to the DAC + """ + return op | (channel + 8) << 16 | (value & 0xffff) + + +@portable +def ad53xx_cmd_read_ch(channel, op): + """Returns the word that must be written to the DAC to read a given + DAC channel register. + + :param channel: DAC channel to read (8 bits) + :param op: The channel register to read, one of + :const:`AD53XX_READ_X1A`, :const:`AD53XX_READ_X1B`, + :const:`AD53XX_READ_OFFSET`, :const:`AD53XX_READ_GAIN` etc. + :return: The 24-bit word to be written to the DAC to initiate read + """ + return AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_READ | (op + (channel << 7)) + + +# maintain function definition for backward compatibility +@portable +def voltage_to_mu(voltage, offset_dacs=0x2000, vref=5.): + """Returns the 16-bit DAC register value required to produce a given output + voltage, assuming offset and gain errors have been trimmed out. + + The 16-bit register value may also be used with 14-bit DACs. The additional + bits are disregarded by 14-bit DACs. + + Also used to return offset register value required to produce a given + voltage when the DAC register is set to mid-scale. + An offset of V can be used to trim out a DAC offset error of -V. + + :param voltage: Voltage in SI units. + Valid voltages are: [-2*vref, + 2*vref - 1 LSB] + voltage offset. + :param offset_dacs: Register value for the two offset DACs + (default: 0x2000) + :param vref: DAC reference voltage (default: 5.) + :return: The 16-bit DAC register value + """ + code = int(round((1 << 16) * (voltage / (4. * vref)) + offset_dacs * 0x4)) + if code < 0x0 or code > 0xffff: + raise ValueError("Invalid DAC voltage!") + return code + + +class _DummyTTL: + @portable + def on(self): + pass + + @portable + def off(self): + pass + + +class AD53xx: + """Analog devices AD53[67][0123] family of multi-channel Digital to Analog + Converters. 
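# --- Editor's worked example (not part of the patch) --------------------------
# The module-level voltage_to_mu() above with its defaults (offset_dacs=0x2000,
# vref=5 V):  code = round(2**16 * v / (4*vref) + offset_dacs*4)
vref, offset_dacs = 5.0, 0x2000
for v in (0.0, 5.0, -10.0):
    code = int(round((1 << 16) * (v / (4.0 * vref)) + offset_dacs * 0x4))
    print(v, hex(code))   # 0.0 -> 0x8000 (mid-scale), 5.0 -> 0xc000, -10.0 -> 0x0
# ------------------------------------------------------------------------------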
+ + :param spi_device: SPI bus device name + :param ldac_device: LDAC RTIO TTLOut channel name (optional) + :param clr_device: CLR RTIO TTLOut channel name (optional) + :param chip_select: Value to drive on SPI chip select lines during + transactions (default: 1) + :param div_write: SPI clock divider for write operations (default: 4, + 50MHz max SPI clock with {t_high, t_low} >=8ns) + :param div_read: SPI clock divider for read operations (default: 8, not + optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO + valid) + :param vref: DAC reference voltage (default: 5.) + :param offset_dacs: Initial register value for the two offset DACs, device + dependent and must be set correctly for correct voltage to mu + conversions. Knowledge of his state is not transferred between + experiments. (default: 8192) + :param core_device: Core device name (default: "core") + """ + kernel_invariants = {"bus", "ldac", "clr", "chip_select", "div_write", + "div_read", "vref", "core"} + + def __init__(self, dmgr, spi_device, ldac_device=None, clr_device=None, + chip_select=1, div_write=4, div_read=16, vref=5., + offset_dacs=8192, core="core"): + self.bus = dmgr.get(spi_device) + self.bus.update_xfer_duration_mu(div_write, 24) + if ldac_device is None: + self.ldac = _DummyTTL() + else: + self.ldac = dmgr.get(ldac_device) + if clr_device is None: + self.clr = _DummyTTL() + else: + self.clr = dmgr.get(clr_device) + self.chip_select = chip_select + self.div_write = div_write + self.div_read = div_read + self.vref = vref + self.offset_dacs = offset_dacs + self.core = dmgr.get(core) + + @kernel + def init(self, blind=False): + """Configures the SPI bus, drives LDAC and CLR high, programmes + the offset DACs, and enables overtemperature shutdown. + + This method must be called before any other method at start-up or if + the SPI bus has been accessed by another device. + + :param blind: If ``True``, do not attempt to read back control register + or check for overtemperature. + """ + self.ldac.on() + self.clr.on() + self.bus.set_config_mu(SPI_AD53XX_CONFIG, 24, self.div_write, + self.chip_select) + self.write_offset_dacs_mu(self.offset_dacs) + if not blind: + ctrl = self.read_reg(channel=0, op=AD53XX_READ_CONTROL) + if ctrl == 0xffff: + raise ValueError("DAC not found") + if ctrl & 0b10000: + raise ValueError("DAC over temperature") + delay(25*us) + self.bus.write( # enable power and overtemperature shutdown + (AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_CONTROL | 0b0010) << 8) + if not blind: + ctrl = self.read_reg(channel=0, op=AD53XX_READ_CONTROL) + if (ctrl & 0b10111) != 0b00010: + raise ValueError("DAC CONTROL readback mismatch") + delay(15*us) + + @kernel + def read_reg(self, channel=0, op=AD53XX_READ_X1A): + """Read a DAC register. + + This method advances the timeline by the duration of two SPI transfers + plus two RTIO coarse cycles plus 270 ns and consumes all slack. + + :param channel: Channel number to read from (default: 0) + :param op: Operation to perform, one of :const:`AD53XX_READ_X1A`, + :const:`AD53XX_READ_X1B`, :const:`AD53XX_READ_OFFSET`, + :const:`AD53XX_READ_GAIN` etc. (default: :const:`AD53XX_READ_X1A`). 
+ :return: The 16 bit register value + """ + self.bus.write(ad53xx_cmd_read_ch(channel, op) << 8) + self.bus.set_config_mu(SPI_AD53XX_CONFIG | spi.SPI_INPUT, 24, + self.div_read, self.chip_select) + delay(270*ns) # t_21 min sync high in readback + self.bus.write((AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_NOP) << 8) + self.bus.set_config_mu(SPI_AD53XX_CONFIG, 24, self.div_write, + self.chip_select) + # FIXME: the int32 should not be needed to resolve unification + return self.bus.read() & int32(0xffff) + + @kernel + def write_offset_dacs_mu(self, value): + """Program the OFS0 and OFS1 offset DAC registers. + + Writes to the offset DACs take effect immediately without requiring + a LDAC. This method advances the timeline by the duration of two SPI + transfers. + + :param value: Value to set both offset DAC registers to + """ + value &= 0x3fff + self.offset_dacs = value + self.bus.write((AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_OFS0 | value) << 8) + self.bus.write((AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_OFS1 | value) << 8) + + @kernel + def write_gain_mu(self, channel, gain=0xffff): + """Program the gain register for a DAC channel. + + The DAC output is not updated until LDAC is pulsed (see :meth load:). + This method advances the timeline by the duration of one SPI transfer. + + :param gain: 16-bit gain register value (default: 0xffff) + """ + self.bus.write( + ad53xx_cmd_write_ch(channel, gain, AD53XX_CMD_GAIN) << 8) + + @kernel + def write_offset_mu(self, channel, offset=0x8000): + """Program the offset register for a DAC channel. + + The DAC output is not updated until LDAC is pulsed (see :meth load:). + This method advances the timeline by the duration of one SPI transfer. + + :param offset: 16-bit offset register value (default: 0x8000) + """ + self.bus.write( + ad53xx_cmd_write_ch(channel, offset, AD53XX_CMD_OFFSET) << 8) + + @kernel + def write_offset(self, channel, voltage): + """Program the DAC offset voltage for a channel. + + An offset of +V can be used to trim out a DAC offset error of -V. + The DAC output is not updated until LDAC is pulsed (see :meth load:). + This method advances the timeline by the duration of one SPI transfer. + + :param voltage: the offset voltage + """ + self.write_offset_mu(channel, voltage_to_mu(voltage, self.offset_dacs, + self.vref)) + + @kernel + def write_dac_mu(self, channel, value): + """Program the DAC input register for a channel. + + The DAC output is not updated until LDAC is pulsed (see :meth load:). + This method advances the timeline by the duration of one SPI transfer. + """ + self.bus.write( + ad53xx_cmd_write_ch(channel, value, AD53XX_CMD_DATA) << 8) + + @kernel + def write_dac(self, channel, voltage): + """Program the DAC output voltage for a channel. + + The DAC output is not updated until LDAC is pulsed (see :meth load:). + This method advances the timeline by the duration of one SPI transfer. + """ + self.write_dac_mu(channel, voltage_to_mu(voltage, self.offset_dacs, + self.vref)) + + @kernel + def load(self): + """Pulse the LDAC line. + + Note that there is a <= 1.5us "BUSY" period (t10) after writing to a + DAC input/gain/offset register. All DAC registers may be programmed + normally during the busy period, however LDACs during the busy period + cause the DAC output to change *after* the BUSY period has completed, + instead of the usual immediate update on LDAC behaviour. + + This method advances the timeline by two RTIO clock periods. 
+ """ + self.ldac.off() + delay_mu(2*self.bus.ref_period_mu) # t13 = 10ns ldac pulse width low + self.ldac.on() + + @kernel + def set_dac_mu(self, values, channels=list(range(40))): + """Program multiple DAC channels and pulse LDAC to update the DAC + outputs. + + This method does not advance the timeline; write events are scheduled + in the past. The DACs will synchronously start changing their output + levels `now`. + + If no LDAC device was defined, the LDAC pulse is skipped. + + See :meth load:. + + :param values: list of DAC values to program + :param channels: list of DAC channels to program. If not specified, + we program the DAC channels sequentially, starting at 0. + """ + t0 = now_mu() + + # t10: max busy period after writing to DAC registers + t_10 = self.core.seconds_to_mu(1500*ns) + # compensate all delays that will be applied + delay_mu(-t_10-len(values)*self.bus.xfer_duration_mu) + for i in range(len(values)): + self.write_dac_mu(channels[i], values[i]) + delay_mu(t_10) + self.load() + at_mu(t0) + + @kernel + def set_dac(self, voltages, channels=list(range(40))): + """Program multiple DAC channels and pulse LDAC to update the DAC + outputs. + + This method does not advance the timeline; write events are scheduled + in the past. The DACs will synchronously start changing their output + levels `now`. + + If no LDAC device was defined, the LDAC pulse is skipped. + + :param voltages: list of voltages to program the DAC channels to + :param channels: list of DAC channels to program. If not specified, + we program the DAC channels sequentially, starting at 0. + """ + values = [voltage_to_mu(voltage, self.offset_dacs, self.vref) + for voltage in voltages] + self.set_dac_mu(values, channels) + + @kernel + def calibrate(self, channel, vzs, vfs): + """ Two-point calibration of a DAC channel. + + Programs the offset and gain register to trim out DAC errors. Does not + take effect until LDAC is pulsed (see :meth load:). + + Calibration consists of measuring the DAC output voltage for a channel + with the DAC set to zero-scale (0x0000) and full-scale (0xffff). + + Note that only negative offsets and full-scale errors (DAC gain too + high) can be calibrated in this fashion. + + :param channel: The number of the calibrated channel + :params vzs: Measured voltage with the DAC set to zero-scale (0x0000) + :params vfs: Measured voltage with the DAC set to full-scale (0xffff) + """ + offset_err = voltage_to_mu(vzs, self.offset_dacs, self.vref) + gain_err = voltage_to_mu(vfs, self.offset_dacs, self.vref) - ( + offset_err + 0xffff) + + assert offset_err <= 0 + assert gain_err >= 0 + + self.core.break_realtime() + self.write_offset_mu(channel, 0x8000-offset_err) + self.write_gain_mu(channel, 0xffff-gain_err) + + @portable + def voltage_to_mu(self, voltage): + """Returns the 16-bit DAC register value required to produce a given + output voltage, assuming offset and gain errors have been trimmed out. + + The 16-bit register value may also be used with 14-bit DACs. The + additional bits are disregarded by 14-bit DACs. + + :param voltage: Voltage in SI units. + Valid voltages are: [-2*vref, + 2*vref - 1 LSB] + voltage offset. 
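# --- Editor's sketch (not part of the patch) ----------------------------------
# Minimal experiment driving an AD53xx-based DAC through the API above.  The
# device name "dac0" is a hypothetical device database entry.
from artiq.experiment import *

class SetDACExample(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("dac0")      # an AD53xx instance

    @kernel
    def run(self):
        self.core.reset()
        self.dac0.init()                 # SPI config, offset DACs, OT shutdown
        delay(1*ms)                      # slack: init() readback consumes it all
        # Program four channels and strobe LDAC once; outputs change together.
        self.dac0.set_dac([0.0, 1.25, -2.5, 5.0], channels=[0, 1, 2, 3])
# ------------------------------------------------------------------------------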
+ :return: The 16-bit DAC register value + """ + return voltage_to_mu(voltage, self.offset_dacs, self.vref) diff --git a/artiq/coredevice/ad9154_spi.py b/artiq/coredevice/ad9154_spi.py index 0ebb77034..d83a85ff6 100644 --- a/artiq/coredevice/ad9154_spi.py +++ b/artiq/coredevice/ad9154_spi.py @@ -10,9 +10,8 @@ class AD9154: self.chip_select = chip_select @kernel - def setup_bus(self, write_div=16, read_div=16): - self.bus.set_config_mu(0, write_div, read_div) - self.bus.set_xfer(self.chip_select, 24, 0) + def setup_bus(self, div=16): + self.bus.set_config_mu(0, 24, div, self.chip_select) @kernel def write(self, addr, data): diff --git a/artiq/coredevice/ad9910.py b/artiq/coredevice/ad9910.py new file mode 100644 index 000000000..c1e828cf7 --- /dev/null +++ b/artiq/coredevice/ad9910.py @@ -0,0 +1,926 @@ +from numpy import int32, int64 + +from artiq.language.core import ( + kernel, delay, portable, delay_mu, now_mu, at_mu) +from artiq.language.units import us, ms +from artiq.language.types import * + +from artiq.coredevice import spi2 as spi +from artiq.coredevice import urukul +# Work around ARTIQ-Python import machinery +urukul_sta_pll_lock = urukul.urukul_sta_pll_lock +urukul_sta_smp_err = urukul.urukul_sta_smp_err + + +__all__ = [ + "AD9910", + "PHASE_MODE_CONTINUOUS", "PHASE_MODE_ABSOLUTE", "PHASE_MODE_TRACKING", + "RAM_DEST_FTW", "RAM_DEST_POW", "RAM_DEST_ASF", "RAM_DEST_POWASF", + "RAM_MODE_DIRECTSWITCH", "RAM_MODE_RAMPUP", "RAM_MODE_BIDIR_RAMP", + "RAM_MODE_CONT_BIDIR_RAMP", "RAM_MODE_CONT_RAMPUP", +] + + +_PHASE_MODE_DEFAULT = -1 +PHASE_MODE_CONTINUOUS = 0 +PHASE_MODE_ABSOLUTE = 1 +PHASE_MODE_TRACKING = 2 + +_AD9910_REG_CFR1 = 0x00 +_AD9910_REG_CFR2 = 0x01 +_AD9910_REG_CFR3 = 0x02 +_AD9910_REG_AUX_DAC = 0x03 +_AD9910_REG_IO_UPDATE = 0x04 +_AD9910_REG_FTW = 0x07 +_AD9910_REG_POW = 0x08 +_AD9910_REG_ASF = 0x09 +_AD9910_REG_SYNC = 0x0a +_AD9910_REG_RAMP_LIMIT = 0x0b +_AD9910_REG_RAMP_STEP = 0x0c +_AD9910_REG_RAMP_RATE = 0x0d +_AD9910_REG_PROFILE0 = 0x0e +_AD9910_REG_PROFILE1 = 0x0f +_AD9910_REG_PROFILE2 = 0x10 +_AD9910_REG_PROFILE3 = 0x11 +_AD9910_REG_PROFILE4 = 0x12 +_AD9910_REG_PROFILE5 = 0x13 +_AD9910_REG_PROFILE6 = 0x14 +_AD9910_REG_PROFILE7 = 0x15 +_AD9910_REG_RAM = 0x16 + +# RAM destination +RAM_DEST_FTW = 0 +RAM_DEST_POW = 1 +RAM_DEST_ASF = 2 +RAM_DEST_POWASF = 3 + +# RAM MODES +RAM_MODE_DIRECTSWITCH = 0 +RAM_MODE_RAMPUP = 1 +RAM_MODE_BIDIR_RAMP = 2 +RAM_MODE_CONT_BIDIR_RAMP = 3 +RAM_MODE_CONT_RAMPUP = 4 + + +class SyncDataUser: + def __init__(self, core, sync_delay_seed, io_update_delay): + self.core = core + self.sync_delay_seed = sync_delay_seed + self.io_update_delay = io_update_delay + + @kernel + def init(self): + pass + + +class SyncDataEeprom: + def __init__(self, dmgr, core, eeprom_str): + self.core = core + + eeprom_device, eeprom_offset = eeprom_str.split(":") + self.eeprom_device = dmgr.get(eeprom_device) + self.eeprom_offset = int(eeprom_offset) + + self.sync_delay_seed = 0 + self.io_update_delay = 0 + + @kernel + def init(self): + word = self.eeprom_device.read_i32(self.eeprom_offset) >> 16 + sync_delay_seed = word >> 8 + if sync_delay_seed >= 0: + io_update_delay = word & 0xff + else: + io_update_delay = 0 + if io_update_delay == 0xff: # unprogrammed EEPROM + io_update_delay = 0 + # With Numpy, type(int32(-1) >> 1) == int64 + self.sync_delay_seed = int32(sync_delay_seed) + self.io_update_delay = int32(io_update_delay) + + +class AD9910: + """ + AD9910 DDS channel on Urukul. 
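# --- Editor's sketch (not part of the patch) ----------------------------------
# Fragment of a device_db entry instantiating this driver.  All names
# ("urukul0_cpld", "ttl_urukul0_sw0") and numeric values are hypothetical and
# depend on the actual Urukul wiring and clocking.
device_db["urukul0_ch0"] = {
    "type": "local",
    "module": "artiq.coredevice.ad9910",
    "class": "AD9910",
    "arguments": {
        "chip_select": 4,
        "cpld_device": "urukul0_cpld",
        "sw_device": "ttl_urukul0_sw0",
        "pll_n": 32,
        # After running the sync tuning helpers further below, their results can
        # be pinned here, or read back from EEPROM as "eeprom_device:byte_offset":
        # "sync_delay_seed": 14,
        # "io_update_delay": 3,
    },
}
# ------------------------------------------------------------------------------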
+ + This class supports a single DDS channel and exposes the DDS, + the digital step attenuator, and the RF switch. + + :param chip_select: Chip select configuration. On Urukul this is an + encoded chip select and not "one-hot": 3 to address multiple chips + (as configured through CFG_MASK_NU), 4-7 for individual channels. + :param cpld_device: Name of the Urukul CPLD this device is on. + :param sw_device: Name of the RF switch device. The RF switch is a + TTLOut channel available as the :attr:`sw` attribute of this instance. + :param pll_n: DDS PLL multiplier. The DDS sample clock is + f_ref/clk_div*pll_n where f_ref is the reference frequency and + clk_div is the reference clock divider (both set in the parent + Urukul CPLD instance). + :param pll_en: PLL enable bit, set to 0 to bypass PLL (default: 1). + Note that when bypassing the PLL the red front panel LED may remain on. + :param pll_cp: DDS PLL charge pump setting. + :param pll_vco: DDS PLL VCO range selection. + :param sync_delay_seed: SYNC_IN delay tuning starting value. + To stabilize the SYNC_IN delay tuning, run :meth:`tune_sync_delay` once + and set this to the delay tap number returned (default: -1 to signal no + synchronization and no tuning during :meth:`init`). + Can be a string of the form "eeprom_device:byte_offset" to read the value + from a I2C EEPROM; in which case, `io_update_delay` must be set to the + same string value. + :param io_update_delay: IO_UPDATE pulse alignment delay. + To align IO_UPDATE to SYNC_CLK, run :meth:`tune_io_update_delay` and + set this to the delay tap number returned. + Can be a string of the form "eeprom_device:byte_offset" to read the value + from a I2C EEPROM; in which case, `sync_delay_seed` must be set to the + same string value. + """ + kernel_invariants = {"chip_select", "cpld", "core", "bus", + "ftw_per_hz", "sysclk_per_mu"} + + def __init__(self, dmgr, chip_select, cpld_device, sw_device=None, + pll_n=40, pll_cp=7, pll_vco=5, sync_delay_seed=-1, + io_update_delay=0, pll_en=1): + self.cpld = dmgr.get(cpld_device) + self.core = self.cpld.core + self.bus = self.cpld.bus + assert 3 <= chip_select <= 7 + self.chip_select = chip_select + if sw_device: + self.sw = dmgr.get(sw_device) + self.kernel_invariants.add("sw") + clk = self.cpld.refclk/[4, 1, 2, 4][self.cpld.clk_div] + self.pll_en = pll_en + self.pll_n = pll_n + self.pll_vco = pll_vco + self.pll_cp = pll_cp + if pll_en: + sysclk = clk*pll_n + assert clk <= 60e6 + assert 12 <= pll_n <= 127 + assert 0 <= pll_vco <= 5 + vco_min, vco_max = [(370, 510), (420, 590), (500, 700), + (600, 880), (700, 950), (820, 1150)][pll_vco] + assert vco_min <= sysclk/1e6 <= vco_max + assert 0 <= pll_cp <= 7 + else: + sysclk = clk + assert sysclk <= 1e9 + self.ftw_per_hz = (1 << 32)/sysclk + self.sysclk_per_mu = int(round(sysclk*self.core.ref_period)) + self.sysclk = sysclk + + if isinstance(sync_delay_seed, str) or isinstance(io_update_delay, str): + if sync_delay_seed != io_update_delay: + raise ValueError("When using EEPROM, sync_delay_seed must be equal to io_update_delay") + self.sync_data = SyncDataEeprom(dmgr, self.core, sync_delay_seed) + else: + self.sync_data = SyncDataUser(self.core, sync_delay_seed, io_update_delay) + + self.phase_mode = PHASE_MODE_CONTINUOUS + + @kernel + def set_phase_mode(self, phase_mode): + r"""Set the default phase mode. + + for future calls to :meth:`set` and + :meth:`set_mu`. Supported phase modes are: + + * :const:`PHASE_MODE_CONTINUOUS`: the phase accumulator is unchanged + when changing frequency or phase. 
The DDS phase is the sum of the + phase accumulator and the phase offset. The only discontinuous + changes in the DDS output phase come from changes to the phase + offset. This mode is also knows as "relative phase mode". + :math:`\phi(t) = q(t^\prime) + p + (t - t^\prime) f` + + * :const:`PHASE_MODE_ABSOLUTE`: the phase accumulator is reset when + changing frequency or phase. Thus, the phase of the DDS at the + time of the change is equal to the specified phase offset. + :math:`\phi(t) = p + (t - t^\prime) f` + + * :const:`PHASE_MODE_TRACKING`: when changing frequency or phase, + the phase accumulator is cleared and the phase offset is offset + by the value the phase accumulator would have if the DDS had been + running at the specified frequency since a given fiducial + time stamp. This is functionally equivalent to + :const:`PHASE_MODE_ABSOLUTE`. The only difference is the fiducial + time stamp. This mode is also known as "coherent phase mode". + The default fiducial time stamp is 0. + :math:`\phi(t) = p + (t - T) f` + + Where: + + * :math:`\phi(t)`: the DDS output phase + * :math:`q(t) = \phi(t) - p`: DDS internal phase accumulator + * :math:`p`: phase offset + * :math:`f`: frequency + * :math:`t^\prime`: time stamp of setting :math:`p`, :math:`f` + * :math:`T`: fiducial time stamp + * :math:`t`: running time + + .. warning:: This setting may become inconsistent when used as part of + a DMA recording. When using DMA, it is recommended to specify the + phase mode explicitly when calling :meth:`set` or :meth:`set_mu`. + """ + self.phase_mode = phase_mode + + @kernel + def write16(self, addr, data): + """Write to 16 bit register. + + :param addr: Register address + :param data: Data to be written + """ + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 24, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr << 24) | (data << 8)) + + @kernel + def write32(self, addr, data): + """Write to 32 bit register. + + :param addr: Register address + :param data: Data to be written + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(addr << 24) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(data) + + @kernel + def read16(self, addr): + """Read from 16 bit register. + + :param addr: Register address + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr | 0x80) << 24) + self.bus.set_config_mu( + urukul.SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, + 16, urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + return self.bus.read() + + @kernel + def read32(self, addr): + """Read from 32 bit register. + + :param addr: Register address + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr | 0x80) << 24) + self.bus.set_config_mu( + urukul.SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, + 32, urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + return self.bus.read() + + @kernel + def read64(self, addr): + """Read from 64 bit register. 
+ + :param addr: Register address + :return: 64 bit integer register value + """ + self.bus.set_config_mu( + urukul.SPI_CONFIG, 8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr | 0x80) << 24) + self.bus.set_config_mu( + urukul.SPI_CONFIG | spi.SPI_INPUT, 32, + urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + self.bus.set_config_mu( + urukul.SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, 32, + urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + hi = self.bus.read() + lo = self.bus.read() + return (int64(hi) << 32) | lo + + @kernel + def write64(self, addr, data_high, data_low): + """Write to 64 bit register. + + :param addr: Register address + :param data_high: High (MSB) 32 bits of the data + :param data_low: Low (LSB) 32 data bits + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(addr << 24) + self.bus.set_config_mu(urukul.SPI_CONFIG, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(data_high) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(data_low) + + @kernel + def write_ram(self, data): + """Write data to RAM. + + The profile to write to and the step, start, and end address + need to be configured before and separately using + :meth:`set_profile_ram` and the parent CPLD `set_profile`. + + :param data List(int32): Data to be written to RAM. + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, urukul.SPIT_DDS_WR, + self.chip_select) + self.bus.write(_AD9910_REG_RAM << 24) + self.bus.set_config_mu(urukul.SPI_CONFIG, 32, + urukul.SPIT_DDS_WR, self.chip_select) + for i in range(len(data) - 1): + self.bus.write(data[i]) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(data[len(data) - 1]) + + @kernel + def read_ram(self, data): + """Read data from RAM. + + The profile to read from and the step, start, and end address + need to be configured before and separately using + :meth:`set_profile_ram` and the parent CPLD `set_profile`. + + :param data List(int32): List to be filled with data read from RAM. + """ + self.bus.set_config_mu(urukul.SPI_CONFIG, 8, urukul.SPIT_DDS_WR, + self.chip_select) + self.bus.write((_AD9910_REG_RAM | 0x80) << 24) + n = len(data) - 1 + if n > 0: + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_INPUT, 32, + urukul.SPIT_DDS_RD, self.chip_select) + preload = min(n, 8) + for i in range(n): + self.bus.write(0) + if i >= preload: + data[i - preload] = self.bus.read() + self.bus.set_config_mu( + urukul.SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, 32, + urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + for i in range(preload + 1): + data[(n - preload) + i] = self.bus.read() + + @kernel + def set_cfr1(self, power_down=0b0000, phase_autoclear=0, + drg_load_lrr=0, drg_autoclear=0, + internal_profile=0, ram_destination=0, ram_enable=0, + manual_osk_external=0, osk_enable=0, select_auto_osk=0): + """Set CFR1. See the AD9910 datasheet for parameter meanings. + + This method does not pulse IO_UPDATE. + + :param power_down: Power down bits. + :param phase_autoclear: Autoclear phase accumulator. + :param drg_load_lrr: Load digital ramp generator LRR. + :param drg_autoclear: Autoclear digital ramp generator. + :param internal_profile: Internal profile control. + :param ram_destination: RAM destination + (:const:`RAM_DEST_FTW`, :const:`RAM_DEST_POW`, + :const:`RAM_DEST_ASF`, :const:`RAM_DEST_POWASF`). 
+ :param ram_enable: RAM mode enable. + :param manual_osk_external: Enable OSK pin control in manual OSK mode. + :param osk_enable: Enable OSK mode. + :param select_auto_osk: Select manual or automatic OSK mode. + """ + self.write32(_AD9910_REG_CFR1, + (ram_enable << 31) | + (ram_destination << 29) | + (manual_osk_external << 23) | + (internal_profile << 17) | + (drg_load_lrr << 15) | + (drg_autoclear << 14) | + (phase_autoclear << 13) | + (osk_enable << 9) | + (select_auto_osk << 8) | + (power_down << 4) | + 2) # SDIO input only, MSB first + + @kernel + def init(self, blind=False): + """Initialize and configure the DDS. + + Sets up SPI mode, confirms chip presence, powers down unused blocks, + configures the PLL, waits for PLL lock. Uses the + IO_UPDATE signal multiple times. + + :param blind: Do not read back DDS identity and do not wait for lock. + """ + self.sync_data.init() + if self.sync_data.sync_delay_seed >= 0 and not self.cpld.sync_div: + raise ValueError("parent cpld does not drive SYNC") + if self.sync_data.sync_delay_seed >= 0: + if self.sysclk_per_mu != self.sysclk*self.core.ref_period: + raise ValueError("incorrect clock ratio for synchronization") + delay(50*ms) # slack + + # Set SPI mode + self.set_cfr1() + self.cpld.io_update.pulse(1*us) + delay(1*ms) + if not blind: + # Use the AUX DAC setting to identify and confirm presence + aux_dac = self.read32(_AD9910_REG_AUX_DAC) + if aux_dac & 0xff != 0x7f: + raise ValueError("Urukul AD9910 AUX_DAC mismatch") + delay(50*us) # slack + # Configure PLL settings and bring up PLL + # enable amplitude scale from profiles + # read effective FTW + # sync timing validation disable (enabled later) + self.write32(_AD9910_REG_CFR2, 0x01010020) + self.cpld.io_update.pulse(1*us) + cfr3 = (0x0807c000 | (self.pll_vco << 24) | + (self.pll_cp << 19) | (self.pll_en << 8) | + (self.pll_n << 1)) + self.write32(_AD9910_REG_CFR3, cfr3 | 0x400) # PFD reset + self.cpld.io_update.pulse(1*us) + if self.pll_en: + self.write32(_AD9910_REG_CFR3, cfr3) + self.cpld.io_update.pulse(1*us) + if blind: + delay(100*ms) + else: + # Wait for PLL lock, up to 100 ms + for i in range(100): + sta = self.cpld.sta_read() + lock = urukul_sta_pll_lock(sta) + delay(1*ms) + if lock & (1 << self.chip_select - 4): + break + if i >= 100 - 1: + raise ValueError("PLL lock timeout") + delay(10*us) # slack + if self.sync_data.sync_delay_seed >= 0: + self.tune_sync_delay(self.sync_data.sync_delay_seed) + delay(1*ms) + + @kernel + def power_down(self, bits=0b1111): + """Power down DDS. + + :param bits: Power down bits, see datasheet + """ + self.set_cfr1(power_down=bits) + self.cpld.io_update.pulse(1*us) + + # KLUDGE: ref_time_mu default argument is explicitly marked int64() to + # avoid silent truncation of explicitly passed timestamps. (Compiler bug?) + @kernel + def set_mu(self, ftw, pow_=0, asf=0x3fff, phase_mode=_PHASE_MODE_DEFAULT, + ref_time_mu=int64(-1), profile=0): + """Set profile 0 data in machine units. + + This uses machine units (FTW, POW, ASF). The frequency tuning word + width is 32, the phase offset word width is 16, and the amplitude + scale factor width is 12. + + After the SPI transfer, the shared IO update pin is pulsed to + activate the data. + + .. seealso: :meth:`set_phase_mode` for a definition of the different + phase modes. + + :param ftw: Frequency tuning word: 32 bit. + :param pow_: Phase tuning word: 16 bit unsigned. + :param asf: Amplitude scale factor: 14 bit unsigned. 
+ :param phase_mode: If specified, overrides the default phase mode set + by :meth:`set_phase_mode` for this call. + :param ref_time_mu: Fiducial time used to compute absolute or tracking + phase updates. In machine units as obtained by `now_mu()`. + :param profile: Profile number to set (0-7, default: 0). + :return: Resulting phase offset word after application of phase + tracking offset. When using :const:`PHASE_MODE_CONTINUOUS` in + subsequent calls, use this value as the "current" phase. + """ + if phase_mode == _PHASE_MODE_DEFAULT: + phase_mode = self.phase_mode + # Align to coarse RTIO which aligns SYNC_CLK. I.e. clear fine TSC + # This will not cause a collision or sequence error. + at_mu(now_mu() & ~7) + if phase_mode != PHASE_MODE_CONTINUOUS: + # Auto-clear phase accumulator on IO_UPDATE. + # This is active already for the next IO_UPDATE + self.set_cfr1(phase_autoclear=1) + if phase_mode == PHASE_MODE_TRACKING and ref_time_mu < 0: + # set default fiducial time stamp + ref_time_mu = 0 + if ref_time_mu >= 0: + # 32 LSB are sufficient. + # Also no need to use IO_UPDATE time as this + # is equivalent to an output pipeline latency. + dt = int32(now_mu()) - int32(ref_time_mu) + pow_ += dt*ftw*self.sysclk_per_mu >> 16 + self.write64(_AD9910_REG_PROFILE0 + profile, + (asf << 16) | (pow_ & 0xffff), ftw) + delay_mu(int64(self.sync_data.io_update_delay)) + self.cpld.io_update.pulse_mu(8) # assumes 8 mu > t_SYN_CCLK + at_mu(now_mu() & ~7) # clear fine TSC again + if phase_mode != PHASE_MODE_CONTINUOUS: + self.set_cfr1() + # future IO_UPDATE will activate + return pow_ + + @kernel + def set_profile_ram(self, start, end, step=1, profile=0, nodwell_high=0, + zero_crossing=0, mode=1): + """Set the RAM profile settings. + + :param start: Profile start address in RAM. + :param end: Profile end address in RAM (last address). + :param step: Profile time step in units of t_DDS, typically 4 ns + (default: 1). + :param profile: Profile index (0 to 7) (default: 0). + :param nodwell_high: No-dwell high bit (default: 0, + see AD9910 documentation). + :param zero_crossing: Zero crossing bit (default: 0, + see AD9910 documentation). + :param mode: Profile RAM mode (:const:`RAM_MODE_DIRECTSWITCH`, + :const:`RAM_MODE_RAMPUP`, :const:`RAM_MODE_BIDIR_RAMP`, + :const:`RAM_MODE_CONT_BIDIR_RAMP`, or + :const:`RAM_MODE_CONT_RAMPUP`, default: + :const:`RAM_MODE_RAMPUP`) + """ + hi = (step << 8) | (end >> 2) + lo = ((end << 30) | (start << 14) | (nodwell_high << 5) | + (zero_crossing << 3) | mode) + self.write64(_AD9910_REG_PROFILE0 + profile, hi, lo) + + @kernel + def set_ftw(self, ftw): + """Set the value stored to the AD9910's frequency tuning word (FTW) register. + + :param ftw: Frequency tuning word to be stored, range: 0 to 0xffffffff. + """ + self.write32(_AD9910_REG_FTW, ftw) + + @kernel + def set_asf(self, asf): + """Set the value stored to the AD9910's amplitude scale factor (ASF) register. + + :param asf: Amplitude scale factor to be stored, range: 0 to 0x3fff. + """ + self.write32(_AD9910_REG_ASF, asf << 2) + + @kernel + def set_pow(self, pow_): + """Set the value stored to the AD9910's phase offset word (POW) register. + + :param pow_: Phase offset word to be stored, range: 0 to 0xffff. + """ + self.write16(_AD9910_REG_POW, pow_) + + @portable(flags={"fast-math"}) + def frequency_to_ftw(self, frequency) -> TInt32: + """Return the 32-bit frequency tuning word corresponding to the given + frequency. 
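# --- Editor's worked example (not part of the patch) --------------------------
# The ref_time_mu phase correction applied in set_mu() above, in plain host
# Python.  With sysclk = 1 GHz and ref_period = 1 ns, sysclk_per_mu = 1; after
# dt machine units the accumulated phase is dt*ftw in units of 2**-32 turns, and
# the right shift by 16 converts it to phase-offset-word units (2**-16 turns).
sysclk_per_mu = 1
ftw = 0x1999999a                  # 100 MHz tuning word at 1 GHz sysclk
dt = 4                            # 4 mu = 4 ns since ref_time_mu
pow_corr = (dt * ftw * sysclk_per_mu) >> 16
print(hex(pow_corr & 0xffff))     # 0x6666 ~ 0.4 turns: 4 ns * 100 MHz = 0.4 cycles
# ------------------------------------------------------------------------------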
+ """ + return int32(round(self.ftw_per_hz*frequency)) + + @portable(flags={"fast-math"}) + def ftw_to_frequency(self, ftw): + """Return the frequency corresponding to the given frequency tuning + word. + """ + return ftw / self.ftw_per_hz + + @portable(flags={"fast-math"}) + def turns_to_pow(self, turns) -> TInt32: + """Return the 16-bit phase offset word corresponding to the given phase + in turns.""" + return int32(round(turns*0x10000)) & int32(0xffff) + + @portable(flags={"fast-math"}) + def pow_to_turns(self, pow_): + """Return the phase in turns corresponding to a given phase offset + word.""" + return pow_/0x10000 + + @portable(flags={"fast-math"}) + def amplitude_to_asf(self, amplitude) -> TInt32: + """Return 14-bit amplitude scale factor corresponding to given + fractional amplitude.""" + code = int32(round(amplitude * 0x3fff)) + if code < 0 or code > 0x3fff: + raise ValueError("Invalid AD9910 fractional amplitude!") + return code + + @portable(flags={"fast-math"}) + def asf_to_amplitude(self, asf): + """Return amplitude as a fraction of full scale corresponding to given + amplitude scale factor.""" + return asf / float(0x3fff) + + @portable(flags={"fast-math"}) + def frequency_to_ram(self, frequency, ram): + """Convert frequency values to RAM profile data. + + To be used with :const:`RAM_DEST_FTW`. + + :param frequency: List of frequency values in Hz. + :param ram: List to write RAM data into. + Suitable for :meth:`write_ram`. + """ + for i in range(len(ram)): + ram[i] = self.frequency_to_ftw(frequency[i]) + + @portable(flags={"fast-math"}) + def turns_to_ram(self, turns, ram): + """Convert phase values to RAM profile data. + + To be used with :const:`RAM_DEST_POW`. + + :param turns: List of phase values in turns. + :param ram: List to write RAM data into. + Suitable for :meth:`write_ram`. + """ + for i in range(len(ram)): + ram[i] = self.turns_to_pow(turns[i]) << 16 + + @portable(flags={"fast-math"}) + def amplitude_to_ram(self, amplitude, ram): + """Convert amplitude values to RAM profile data. + + To be used with :const:`RAM_DEST_ASF`. + + :param amplitude: List of amplitude values in units of full scale. + :param ram: List to write RAM data into. + Suitable for :meth:`write_ram`. + """ + for i in range(len(ram)): + ram[i] = self.amplitude_to_asf(amplitude[i]) << 18 + + @portable(flags={"fast-math"}) + def turns_amplitude_to_ram(self, turns, amplitude, ram): + """Convert phase and amplitude values to RAM profile data. + + To be used with :const:`RAM_DEST_POWASF`. + + :param turns: List of phase values in turns. + :param amplitude: List of amplitude values in units of full scale. + :param ram: List to write RAM data into. + Suitable for :meth:`write_ram`. + """ + for i in range(len(ram)): + ram[i] = ((self.turns_to_pow(turns[i]) << 16) | + self.amplitude_to_asf(amplitude[i]) << 2) + + @kernel + def set_frequency(self, frequency): + """Set the value stored to the AD9910's frequency tuning word (FTW) register. + + :param frequency: frequency to be stored, in Hz. + """ + return self.set_ftw(self.frequency_to_ftw(frequency)) + + @kernel + def set_amplitude(self, amplitude): + """Set the value stored to the AD9910's amplitude scale factor (ASF) register. + + :param amplitude: amplitude to be stored, in units of full scale. + """ + return self.set_asf(self.amplitude_to_asf(amplitude)) + + @kernel + def set_phase(self, turns): + """Set the value stored to the AD9910's phase offset word (POW) register. + + :param turns: phase offset to be stored, in turns. 
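# --- Editor's worked example (not part of the patch) --------------------------
# The machine-unit conversions above, evaluated in plain host Python for a
# hypothetical 1 GHz DDS sysclk.
sysclk = 1e9
ftw_per_hz = (1 << 32) / sysclk
print(hex(int(round(ftw_per_hz * 100e6))))       # frequency_to_ftw(100 MHz) = 0x1999999a
print(hex(int(round(0.25 * 0x10000)) & 0xffff))  # turns_to_pow(0.25) = 0x4000
print(hex(int(round(0.5 * 0x3fff))))             # amplitude_to_asf(0.5) = 0x2000
# ------------------------------------------------------------------------------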
+ """ + return self.set_pow(self.turns_to_pow(turns)) + + @kernel + def set(self, frequency, phase=0.0, amplitude=1.0, + phase_mode=_PHASE_MODE_DEFAULT, ref_time_mu=int64(-1), profile=0): + """Set profile 0 data in SI units. + + .. seealso:: :meth:`set_mu` + + :param frequency: Frequency in Hz + :param phase: Phase tuning word in turns + :param amplitude: Amplitude in units of full scale + :param phase_mode: Phase mode constant + :param ref_time_mu: Fiducial time stamp in machine units + :param profile: Profile to affect + :return: Resulting phase offset in turns + """ + return self.pow_to_turns(self.set_mu( + self.frequency_to_ftw(frequency), self.turns_to_pow(phase), + self.amplitude_to_asf(amplitude), phase_mode, ref_time_mu, + profile)) + + @kernel + def set_att_mu(self, att): + """Set digital step attenuator in machine units. + + This method will write the attenuator settings of all four channels. + + .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att_mu` + + :param att: Attenuation setting, 8 bit digital. + """ + self.cpld.set_att_mu(self.chip_select - 4, att) + + @kernel + def set_att(self, att): + """Set digital step attenuator in SI units. + + This method will write the attenuator settings of all four channels. + + .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att` + + :param att: Attenuation in dB. + """ + self.cpld.set_att(self.chip_select - 4, att) + + @kernel + def cfg_sw(self, state): + """Set CPLD CFG RF switch state. The RF switch is controlled by the + logical or of the CPLD configuration shift register + RF switch bit and the SW TTL line (if used). + + :param state: CPLD CFG RF switch bit + """ + self.cpld.cfg_sw(self.chip_select - 4, state) + + @kernel + def set_sync(self, in_delay, window): + """Set the relevant parameters in the multi device synchronization + register. See the AD9910 datasheet for details. The SYNC clock + generator preset value is set to zero, and the SYNC_OUT generator is + disabled. + + :param in_delay: SYNC_IN delay tap (0-31) in steps of ~75ps + :param window: Symmetric SYNC_IN validation window (0-15) in + steps of ~75ps for both hold and setup margin. + """ + self.write32(_AD9910_REG_SYNC, + (window << 28) | # SYNC S/H validation delay + (1 << 27) | # SYNC receiver enable + (0 << 26) | # SYNC generator disable + (0 << 25) | # SYNC generator SYS rising edge + (0 << 18) | # SYNC preset + (0 << 11) | # SYNC output delay + (in_delay << 3)) # SYNC receiver delay + + @kernel + def clear_smp_err(self): + """Clear the SMP_ERR flag and enables SMP_ERR validity monitoring. + + Violations of the SYNC_IN sample and hold margins will result in + SMP_ERR being asserted. This then also activates the red LED on + the respective Urukul channel. + + Also modifies CFR2. + """ + self.write32(_AD9910_REG_CFR2, 0x01010020) # clear SMP_ERR + self.cpld.io_update.pulse(1*us) + self.write32(_AD9910_REG_CFR2, 0x01010000) # enable SMP_ERR + self.cpld.io_update.pulse(1*us) + + @kernel + def tune_sync_delay(self, search_seed=15): + """Find a stable SYNC_IN delay. + + This method first locates a valid SYNC_IN delay at zero validation + window size (setup/hold margin) by scanning around `search_seed`. It + then looks for similar valid delays at successively larger validation + window sizes until none can be found. It then decreases the validation + window a bit to provide some slack and stability and returns the + optimal values. + + This method and :meth:`tune_io_update_delay` can be run in any order. 
+ + :param search_seed: Start value for valid SYNC_IN delay search. + Defaults to 15 (half range). + :return: Tuple of optimal delay and window size. + """ + if not self.cpld.sync_div: + raise ValueError("parent cpld does not drive SYNC") + search_span = 31 + # FIXME https://github.com/sinara-hw/Urukul/issues/16 + # should both be 2-4 once kasli sync_in jitter is identified + min_window = 0 + margin = 1 # 1*75ps setup and hold + for window in range(16): + next_seed = -1 + for in_delay in range(search_span - 2*window): + # alternate search direction around search_seed + if in_delay & 1: + in_delay = -in_delay + in_delay = search_seed + (in_delay >> 1) + if in_delay < 0 or in_delay > 31: + continue + self.set_sync(in_delay, window) + self.clear_smp_err() + # integrate SMP_ERR statistics for a few hundred cycles + delay(100*us) + err = urukul_sta_smp_err(self.cpld.sta_read()) + delay(100*us) # slack + if not (err >> (self.chip_select - 4)) & 1: + next_seed = in_delay + break + if next_seed >= 0: # valid delay found, scan next window + search_seed = next_seed + continue + elif window > min_window: + # no valid delay found here, roll back and add margin + window = max(min_window, window - 1 - margin) + self.set_sync(search_seed, window) + self.clear_smp_err() + delay(100*us) # slack + return search_seed, window + else: + break + raise ValueError("no valid window/delay") + + @kernel + def measure_io_update_alignment(self, delay_start, delay_stop): + """Use the digital ramp generator to locate the alignment between + IO_UPDATE and SYNC_CLK. + + The ramp generator is set up to a linear frequency ramp + (dFTW/t_SYNC_CLK=1) and started at a coarse RTIO time stamp plus + `delay_start` and stopped at a coarse RTIO time stamp plus + `delay_stop`. + + :param delay_start: Start IO_UPDATE delay in machine units. + :param delay_stop: Stop IO_UPDATE delay in machine units. + :return: Odd/even SYNC_CLK cycle indicator. + """ + # set up DRG + self.set_cfr1(drg_load_lrr=1, drg_autoclear=1) + # DRG -> FTW, DRG enable + self.write32(_AD9910_REG_CFR2, 0x01090000) + # no limits + self.write64(_AD9910_REG_RAMP_LIMIT, -1, 0) + # DRCTL=0, dt=1 t_SYNC_CLK + self.write32(_AD9910_REG_RAMP_RATE, 0x00010000) + # dFTW = 1, (work around negative slope) + self.write64(_AD9910_REG_RAMP_STEP, -1, 0) + # delay io_update after RTIO edge + t = now_mu() + 8 & ~7 + at_mu(t + delay_start) + # assumes a maximum t_SYNC_CLK period + self.cpld.io_update.pulse_mu(16 - delay_start) # realign + # disable DRG autoclear and LRR on io_update + self.set_cfr1() + # stop DRG + self.write64(_AD9910_REG_RAMP_STEP, 0, 0) + at_mu(t + 0x1000 + delay_stop) + self.cpld.io_update.pulse_mu(16 - delay_stop) # realign + ftw = self.read32(_AD9910_REG_FTW) # read out effective FTW + delay(100*us) # slack + # disable DRG + self.write32(_AD9910_REG_CFR2, 0x01010000) + self.cpld.io_update.pulse_mu(8) + return ftw & 1 + + @kernel + def tune_io_update_delay(self): + """Find a stable IO_UPDATE delay alignment. + + Scan through increasing IO_UPDATE delays until a delay is found that + lets IO_UPDATE be registered in the next SYNC_CLK cycle. Return a + IO_UPDATE delay that is as far away from that SYNC_CLK edge + as possible. + + This method assumes that the IO_UPDATE TTLOut device has one machine + unit resolution (SERDES). + + This method and :meth:`tune_sync_delay` can be run in any order. + + :return: Stable IO_UPDATE delay to be passed to the constructor + :class:`AD9910` via the device database. 
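# Illustrative sketch, not part of this patch: the in_delay loop in
# tune_sync_delay() above visits taps outward from search_seed, alternating
# direction.  Host-side reproduction of the visiting order for the defaults
# (search_seed=15, window=0):
search_seed, search_span, window = 15, 31, 0
order = []
for in_delay in range(search_span - 2*window):
    if in_delay & 1:
        in_delay = -in_delay
    in_delay = search_seed + (in_delay >> 1)
    if 0 <= in_delay <= 31:
        order.append(in_delay)
print(order)   # [15, 14, 16, 13, 17, 12, 18, ...]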
+ """ + period = self.sysclk_per_mu * 4 # SYNC_CLK period + repeat = 100 + for i in range(period): + t = 0 + # check whether the sync edge is strictly between i, i+2 + for j in range(repeat): + t += self.measure_io_update_alignment(i, i + 2) + if t != 0: # no certain edge + continue + # check left/right half: i,i+1 and i+1,i+2 + t1 = [0, 0] + for j in range(repeat): + t1[0] += self.measure_io_update_alignment(i, i + 1) + t1[1] += self.measure_io_update_alignment(i + 1, i + 2) + if ((t1[0] == 0 and t1[1] == 0) or + (t1[0] == repeat and t1[1] == repeat)): + # edge is not close to i + 1, can't interpret result + raise ValueError( + "no clear IO_UPDATE-SYNC_CLK alignment edge found") + else: + # the good delay is period//2 after the edge + return (i + 1 + period//2) & (period - 1) + raise ValueError("no IO_UPDATE-SYNC_CLK alignment edge found") diff --git a/artiq/coredevice/ad9912.py b/artiq/coredevice/ad9912.py index 10b360806..fa88f2003 100644 --- a/artiq/coredevice/ad9912.py +++ b/artiq/coredevice/ad9912.py @@ -1,55 +1,200 @@ -""" -Driver for the AD9912 DDS. -""" +from numpy import int32, int64 +from artiq.language.types import TInt32, TInt64 +from artiq.language.core import kernel, delay, portable +from artiq.language.units import ms, us, ns +from artiq.coredevice.ad9912_reg import * -from artiq.language.core import kernel, delay_mu -from artiq.language.units import ns, us -from artiq.coredevice import spi - - -_AD9912_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_CS_POLARITY | - 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | - 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) +from artiq.coredevice import spi2 as spi +from artiq.coredevice import urukul class AD9912: """ - Support for the Analog devices AD9912 DDS + AD9912 DDS channel on Urukul - :param spi_device: Name of the SPI bus this device is on. - :param chip_select: Value to drive on the chip select lines - during transactions. + This class supports a single DDS channel and exposes the DDS, + the digital step attenuator, and the RF switch. + + :param chip_select: Chip select configuration. On Urukul this is an + encoded chip select and not "one-hot". + :param cpld_device: Name of the Urukul CPLD this device is on. + :param sw_device: Name of the RF switch device. The RF switch is a + TTLOut channel available as the :attr:`sw` attribute of this instance. + :param pll_n: DDS PLL multiplier. The DDS sample clock is + f_ref/clk_div*pll_n where f_ref is the reference frequency and clk_div + is the reference clock divider (both set in the parent Urukul CPLD + instance). """ + kernel_invariants = {"chip_select", "cpld", "core", "bus", "ftw_per_hz"} - def __init__(self, dmgr, spi_device, chip_select): - self.core = dmgr.get("core") - self.bus = dmgr.get(spi_device) + def __init__(self, dmgr, chip_select, cpld_device, sw_device=None, + pll_n=10): + self.cpld = dmgr.get(cpld_device) + self.core = self.cpld.core + self.bus = self.cpld.bus + assert 4 <= chip_select <= 7 self.chip_select = chip_select + if sw_device: + self.sw = dmgr.get(sw_device) + self.kernel_invariants.add("sw") + self.pll_n = pll_n + sysclk = self.cpld.refclk/[1, 1, 2, 4][self.cpld.clk_div]*pll_n + assert sysclk <= 1e9 + self.ftw_per_hz = 1/sysclk*(int64(1) << 48) @kernel - def setup_bus(self, write_div=5, read_div=20): - """Configure the SPI bus and the SPI transaction parameters - for this device. This method has to be called before any other method - if the bus has been used to access a different device in the meantime. 
+ def write(self, addr, data, length): + """Variable length write to a register. + Up to 4 bytes. - This method advances the timeline by the duration of two - RTIO-to-Wishbone bus transactions. - - :param write_div: Write clock divider. - :param read_div: Read clock divider. + :param addr: Register address + :param data: Data to be written: int32 + :param length: Length in bytes (1-4) """ - # write: 5*8ns >= 40ns = t_clk (typ clk rate) - # read: 2*8*ns >= 25ns = t_dv (clk falling to miso valid) + RTT - self.bus.set_config_mu(_AD9912_SPI_CONFIG, write_div, read_div) - self.bus.set_xfer(self.chip_select, 24, 0) + assert length > 0 + assert length <= 4 + self.bus.set_config_mu(urukul.SPI_CONFIG, 16, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr | ((length - 1) << 13)) << 16) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, length*8, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(data << (32 - length*8)) @kernel - def write(self, data): - """Write 24 bits of data. + def read(self, addr, length): + """Variable length read from a register. + Up to 4 bytes. - This method advances the timeline by the duration of the SPI transfer - and the required CS high time. + :param addr: Register address + :param length: Length in bytes (1-4) + :return: Data read """ - self.bus.write(data << 8) - delay_mu(self.bus.ref_period_mu) # get to 20ns min cs high + assert length > 0 + assert length <= 4 + self.bus.set_config_mu(urukul.SPI_CONFIG, 16, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((addr | ((length - 1) << 13) | 0x8000) << 16) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END + | spi.SPI_INPUT, length*8, + urukul.SPIT_DDS_RD, self.chip_select) + self.bus.write(0) + data = self.bus.read() + if length < 4: + data &= (1 << (length*8)) - 1 + return data + + @kernel + def init(self): + """Initialize and configure the DDS. + + Sets up SPI mode, confirms chip presence, powers down unused blocks, + and configures the PLL. Does not wait for PLL lock. Uses the + IO_UPDATE signal multiple times. + """ + # SPI mode + self.write(AD9912_SER_CONF, 0x99, length=1) + self.cpld.io_update.pulse(2*us) + # Verify chip ID and presence + prodid = self.read(AD9912_PRODIDH, length=2) + if (prodid != 0x1982) and (prodid != 0x1902): + raise ValueError("Urukul AD9912 product id mismatch") + delay(50*us) + # HSTL power down, CMOS power down + self.write(AD9912_PWRCNTRL1, 0x80, length=1) + self.cpld.io_update.pulse(2*us) + self.write(AD9912_N_DIV, self.pll_n//2 - 2, length=1) + self.cpld.io_update.pulse(2*us) + # I_cp = 375 µA, VCO high range + self.write(AD9912_PLLCFG, 0b00000101, length=1) + self.cpld.io_update.pulse(2*us) + delay(1*ms) + + @kernel + def set_att_mu(self, att): + """Set digital step attenuator in machine units. + + This method will write the attenuator settings of all four channels. + + .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att_mu` + + :param att: Attenuation setting, 8 bit digital. + """ + self.cpld.set_att_mu(self.chip_select - 4, att) + + @kernel + def set_att(self, att): + """Set digital step attenuator in SI units. + + This method will write the attenuator settings of all four channels. + + .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att` + + :param att: Attenuation in dB. Higher values mean more attenuation. + """ + self.cpld.set_att(self.chip_select - 4, att) + + @kernel + def set_mu(self, ftw, pow): + """Set profile 0 data in machine units. + + After the SPI transfer, the shared IO update pin is pulsed to + activate the data. 
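# Illustrative sketch, not part of this patch: the 16-bit instruction word
# assembled by write()/read() above -- 13-bit register address, a 2-bit
# "bytes minus one" length field at bit 13, and the read flag at bit 15.
def instruction_word(addr, length, read=False):
    word = addr | ((length - 1) << 13)
    if read:
        word |= 0x8000
    return word

print(hex(instruction_word(0x01ab, 2, read=True)))   # arbitrary example address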
+ + :param ftw: Frequency tuning word: 48 bit unsigned. + :param pow: Phase tuning word: 16 bit unsigned. + """ + # streaming transfer of FTW and POW + self.bus.set_config_mu(urukul.SPI_CONFIG, 16, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((AD9912_POW1 << 16) | (3 << 29)) + self.bus.set_config_mu(urukul.SPI_CONFIG, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write((pow << 16) | (int32(ftw >> 32) & 0xffff)) + self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32, + urukul.SPIT_DDS_WR, self.chip_select) + self.bus.write(int32(ftw)) + self.cpld.io_update.pulse(10*ns) + + @portable(flags={"fast-math"}) + def frequency_to_ftw(self, frequency) -> TInt64: + """Returns the 48-bit frequency tuning word corresponding to the given + frequency. + """ + return int64(round(self.ftw_per_hz*frequency)) & ((int64(1) << 48) - 1) + + @portable(flags={"fast-math"}) + def ftw_to_frequency(self, ftw): + """Returns the frequency corresponding to the given + frequency tuning word. + """ + return ftw/self.ftw_per_hz + + @portable(flags={"fast-math"}) + def turns_to_pow(self, phase) -> TInt32: + """Returns the 16-bit phase offset word corresponding to the given + phase. + """ + return int32(round((1 << 14)*phase)) & 0xffff + + @kernel + def set(self, frequency, phase=0.0): + """Set profile 0 data in SI units. + + .. seealso:: :meth:`set_mu` + + :param ftw: Frequency in Hz + :param pow: Phase tuning word in turns + """ + self.set_mu(self.frequency_to_ftw(frequency), + self.turns_to_pow(phase)) + + @kernel + def cfg_sw(self, state): + """Set CPLD CFG RF switch state. The RF switch is controlled by the + logical or of the CPLD configuration shift register + RF switch bit and the SW TTL line (if used). + + :param state: CPLD CFG RF switch bit + """ + self.cpld.cfg_sw(self.chip_select - 4, state) diff --git a/artiq/coredevice/ad9914.py b/artiq/coredevice/ad9914.py new file mode 100644 index 000000000..ce7e3b1a2 --- /dev/null +++ b/artiq/coredevice/ad9914.py @@ -0,0 +1,342 @@ +""" +Driver for the AD9914 DDS (with parallel bus) on RTIO. +""" + + +from artiq.language.core import * +from artiq.language.types import * +from artiq.language.units import * +from artiq.coredevice.rtio import rtio_output + +from numpy import int32, int64 + + +__all__ = [ + "AD9914", + "PHASE_MODE_CONTINUOUS", "PHASE_MODE_ABSOLUTE", "PHASE_MODE_TRACKING" +] + + +_PHASE_MODE_DEFAULT = -1 +PHASE_MODE_CONTINUOUS = 0 +PHASE_MODE_ABSOLUTE = 1 +PHASE_MODE_TRACKING = 2 + +AD9914_REG_CFR1L = 0x01 +AD9914_REG_CFR1H = 0x03 +AD9914_REG_CFR2L = 0x05 +AD9914_REG_CFR2H = 0x07 +AD9914_REG_CFR3L = 0x09 +AD9914_REG_CFR3H = 0x0b +AD9914_REG_CFR4L = 0x0d +AD9914_REG_CFR4H = 0x0f +AD9914_REG_DRGFL = 0x11 +AD9914_REG_DRGFH = 0x13 +AD9914_REG_DRGBL = 0x15 +AD9914_REG_DRGBH = 0x17 +AD9914_REG_DRGAL = 0x19 +AD9914_REG_DRGAH = 0x1b +AD9914_REG_POW = 0x31 +AD9914_REG_ASF = 0x33 +AD9914_REG_USR0 = 0x6d +AD9914_FUD = 0x80 +AD9914_GPIO = 0x81 + + +class AD9914: + """Driver for one AD9914 DDS channel. + + The time cursor is not modified by any function in this class. + + Output event replacement is not supported and issuing commands at the same + time is an error. + + :param sysclk: DDS system frequency. The DDS system clock must be a + phase-locked multiple of the RTIO clock. + :param bus_channel: RTIO channel number of the DDS bus. + :param channel: channel number (on the bus) of the DDS device to control. 
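# Illustrative sketch, not part of this patch: AD9912 machine-unit conversions
# as defined above -- a 48-bit FTW (vs. 32 bits on the AD9910) and a 14-bit
# phase word.  Assumes the 1 GHz sysclk from the __init__ example.
ftw_per_hz = (1 << 48) / 1e9
ftw = round(ftw_per_hz * 200e6) & ((1 << 48) - 1)   # 200 MHz
pow_ = round((1 << 14) * 0.5) & 0xffff              # half a turn
print(hex(ftw), hex(pow_))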
+ """ + + kernel_invariants = {"core", "sysclk", "bus_channel", "channel", + "rtio_period_mu", "sysclk_per_mu", "write_duration_mu", + "dac_cal_duration_mu", "init_duration_mu", "init_sync_duration_mu", + "set_duration_mu", "set_x_duration_mu", "exit_x_duration_mu"} + + def __init__(self, dmgr, sysclk, bus_channel, channel, core_device="core"): + self.core = dmgr.get(core_device) + self.sysclk = sysclk + self.bus_channel = bus_channel + self.channel = channel + self.phase_mode = PHASE_MODE_CONTINUOUS + + self.rtio_period_mu = int64(8) + self.sysclk_per_mu = int32(self.sysclk * self.core.ref_period) + + self.write_duration_mu = 5 * self.rtio_period_mu + self.dac_cal_duration_mu = 147000 * self.rtio_period_mu + self.init_duration_mu = 13 * self.write_duration_mu + self.dac_cal_duration_mu + self.init_sync_duration_mu = 21 * self.write_duration_mu + 2 * self.dac_cal_duration_mu + self.set_duration_mu = 7 * self.write_duration_mu + self.set_x_duration_mu = 7 * self.write_duration_mu + self.exit_x_duration_mu = 3 * self.write_duration_mu + + @kernel + def write(self, addr, data): + rtio_output((self.bus_channel << 8) | addr, data) + delay_mu(self.write_duration_mu) + + @kernel + def init(self): + """Resets and initializes the DDS channel. + + This needs to be done for each DDS channel before it can be used, and + it is recommended to use the startup kernel for this purpose. + """ + delay_mu(-self.init_duration_mu) + self.write(AD9914_GPIO, (1 << self.channel) << 1); + + # Note another undocumented "feature" of the AD9914: + # Programmable modulus breaks if the digital ramp enable bit is + # not set at the same time. + self.write(AD9914_REG_CFR1H, 0x0000) # Enable cosine output + self.write(AD9914_REG_CFR2L, 0x8900) # Enable matched latency + self.write(AD9914_REG_CFR2H, 0x0089) # Enable profile mode + programmable modulus + DRG + self.write(AD9914_REG_DRGAL, 0) # Programmable modulus A = 0 + self.write(AD9914_REG_DRGAH, 0) + self.write(AD9914_REG_DRGBH, 0x8000) # Programmable modulus B == 2**31 + self.write(AD9914_REG_DRGBL, 0x0000) + self.write(AD9914_REG_ASF, 0x0fff) # Set amplitude to maximum + self.write(AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration + self.write(AD9914_FUD, 0) + delay_mu(self.dac_cal_duration_mu) + self.write(AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration + self.write(AD9914_FUD, 0) + + @kernel + def init_sync(self, sync_delay): + """Resets and initializes the DDS channel as well as configures + the AD9914 DDS for synchronisation. The synchronisation procedure + follows the steps outlined in the AN-1254 application note. + + This needs to be done for each DDS channel before it can be used, and + it is recommended to use the startup kernel for this. + + This function cannot be used in a batch; the correct way of + initializing multiple DDS channels is to call this function + sequentially with a delay between the calls. 10ms provides a good + timing margin. + + :param sync_delay: integer from 0 to 0x3f that sets the value of + SYNC_OUT (bits 3-5) and SYNC_IN (bits 0-2) delay ADJ bits. 
+ """ + delay_mu(-self.init_sync_duration_mu) + self.write(AD9914_GPIO, (1 << self.channel) << 1) + + self.write(AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration + self.write(AD9914_FUD, 0) + delay_mu(self.dac_cal_duration_mu) + self.write(AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration + self.write(AD9914_FUD, 0) + self.write(AD9914_REG_CFR2L, 0x8b00) # Enable matched latency and sync_out + self.write(AD9914_FUD, 0) + # Set cal with sync and set sync_out and sync_in delay + self.write(AD9914_REG_USR0, 0x0840 | (sync_delay & 0x3f)) + self.write(AD9914_FUD, 0) + self.write(AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration + self.write(AD9914_FUD, 0) + delay_mu(self.dac_cal_duration_mu) + self.write(AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration + self.write(AD9914_FUD, 0) + self.write(AD9914_REG_CFR1H, 0x0000) # Enable cosine output + self.write(AD9914_REG_CFR2H, 0x0089) # Enable profile mode + programmable modulus + DRG + self.write(AD9914_REG_DRGAL, 0) # Programmable modulus A = 0 + self.write(AD9914_REG_DRGAH, 0) + self.write(AD9914_REG_DRGBH, 0x8000) # Programmable modulus B == 2**31 + self.write(AD9914_REG_DRGBL, 0x0000) + self.write(AD9914_REG_ASF, 0x0fff) # Set amplitude to maximum + self.write(AD9914_FUD, 0) + + @kernel + def set_phase_mode(self, phase_mode): + """Sets the phase mode of the DDS channel. Supported phase modes are: + + * :const:`PHASE_MODE_CONTINUOUS`: the phase accumulator is unchanged when + switching frequencies. The DDS phase is the sum of the phase + accumulator and the phase offset. The only discrete jumps in the + DDS output phase come from changes to the phase offset. + + * :const:`PHASE_MODE_ABSOLUTE`: the phase accumulator is reset when + switching frequencies. Thus, the phase of the DDS at the time of + the frequency change is equal to the phase offset. + + * :const:`PHASE_MODE_TRACKING`: when switching frequencies, the phase + accumulator is set to the value it would have if the DDS had been + running at the specified frequency since the start of the + experiment. + + .. warning:: This setting may become inconsistent when used as part of + a DMA recording. When using DMA, it is recommended to specify the + phase mode explicitly when calling :meth:`set` or :meth:`set_mu`. + """ + self.phase_mode = phase_mode + + @kernel + def set_mu(self, ftw, pow=0, phase_mode=_PHASE_MODE_DEFAULT, + asf=0x0fff, ref_time_mu=-1): + """Sets the DDS channel to the specified frequency and phase. + + This uses machine units (FTW and POW). The frequency tuning word width + is 32, the phase offset word width is 16, and the amplitude scale factor + width is 12. + + The "frequency update" pulse is sent to the DDS with a fixed latency + with respect to the current position of the time cursor. + + :param ftw: frequency to generate. + :param pow: adds an offset to the phase. + :param phase_mode: if specified, overrides the default phase mode set + by :meth:`set_phase_mode` for this call. + :param ref_time_mu: reference time used to compute phase. Specifying this + makes it easier to have a well-defined phase relationship between + DDSes on the same bus that are updated at a similar time. + :return: Resulting phase offset word after application of phase + tracking offset. When using :const:`PHASE_MODE_CONTINUOUS` in + subsequent calls, use this value as the "current" phase. 
+ """ + if phase_mode == _PHASE_MODE_DEFAULT: + phase_mode = self.phase_mode + if ref_time_mu < 0: + ref_time_mu = now_mu() + delay_mu(-self.set_duration_mu) + + self.write(AD9914_GPIO, (1 << self.channel) << 1) + + self.write(AD9914_REG_DRGFL, ftw & 0xffff) + self.write(AD9914_REG_DRGFH, (ftw >> 16) & 0xffff) + + # We need the RTIO fine timestamp clock to be phase-locked + # to DDS SYSCLK, and divided by an integer self.sysclk_per_mu. + if phase_mode == PHASE_MODE_CONTINUOUS: + # Do not clear phase accumulator on FUD + # Disable autoclear phase accumulator and enables OSK. + self.write(AD9914_REG_CFR1L, 0x0108) + else: + # Clear phase accumulator on FUD + # Enable autoclear phase accumulator and enables OSK. + self.write(AD9914_REG_CFR1L, 0x2108) + fud_time = now_mu() + 2 * self.write_duration_mu + pow -= int32((ref_time_mu - fud_time) * self.sysclk_per_mu * ftw >> (32 - 16)) + if phase_mode == PHASE_MODE_TRACKING: + pow += int32(ref_time_mu * self.sysclk_per_mu * ftw >> (32 - 16)) + + self.write(AD9914_REG_POW, pow) + self.write(AD9914_REG_ASF, asf) + self.write(AD9914_FUD, 0) + return pow + + @portable(flags={"fast-math"}) + def frequency_to_ftw(self, frequency): + """Returns the 32-bit frequency tuning word corresponding to the given + frequency. + """ + return int32(round(float(int64(2)**32*frequency/self.sysclk))) + + @portable(flags={"fast-math"}) + def ftw_to_frequency(self, ftw): + """Returns the frequency corresponding to the given frequency tuning + word. + """ + return ftw*self.sysclk/int64(2)**32 + + @portable(flags={"fast-math"}) + def turns_to_pow(self, turns): + """Returns the 16-bit phase offset word corresponding to the given + phase in turns.""" + return round(float(turns*2**16)) & 0xffff + + @portable(flags={"fast-math"}) + def pow_to_turns(self, pow): + """Returns the phase in turns corresponding to the given phase offset + word.""" + return pow/2**16 + + @portable(flags={"fast-math"}) + def amplitude_to_asf(self, amplitude): + """Returns 12-bit amplitude scale factor corresponding to given + amplitude.""" + code = round(float(amplitude * 0x0fff)) + if code < 0 or code > 0xfff: + raise ValueError("Invalid AD9914 amplitude!") + return code + + @portable(flags={"fast-math"}) + def asf_to_amplitude(self, asf): + """Returns the amplitude corresponding to the given amplitude scale + factor.""" + return asf/0x0fff + + @kernel + def set(self, frequency, phase=0.0, phase_mode=_PHASE_MODE_DEFAULT, + amplitude=1.0): + """Like :meth:`set_mu`, but uses Hz and turns.""" + return self.pow_to_turns( + self.set_mu(self.frequency_to_ftw(frequency), + self.turns_to_pow(phase), phase_mode, + self.amplitude_to_asf(amplitude))) + + # Extended-resolution functions + @kernel + def set_x_mu(self, xftw, amplitude=0x0fff): + """Set the DDS frequency and amplitude with an extended-resolution + (63-bit) frequency tuning word. + + Phase control is not implemented in this mode; the phase offset + can assume any value. + + After this function has been called, exit extended-resolution mode + before calling functions that use standard-resolution mode. 
+ """ + delay_mu(-self.set_x_duration_mu) + + self.write(AD9914_GPIO, (1 << self.channel) << 1) + + self.write(AD9914_REG_DRGAL, xftw & 0xffff) + self.write(AD9914_REG_DRGAH, (xftw >> 16) & 0x7fff) + self.write(AD9914_REG_DRGFL, (xftw >> 31) & 0xffff) + self.write(AD9914_REG_DRGFH, (xftw >> 47) & 0xffff) + self.write(AD9914_REG_ASF, amplitude) + + self.write(AD9914_FUD, 0) + + @kernel + def exit_x(self): + """Exits extended-resolution mode.""" + delay_mu(-self.exit_x_duration_mu) + self.write(AD9914_GPIO, (1 << self.channel) << 1) + self.write(AD9914_REG_DRGAL, 0) + self.write(AD9914_REG_DRGAH, 0) + + @portable(flags={"fast-math"}) + def frequency_to_xftw(self, frequency): + """Returns the 63-bit frequency tuning word corresponding to the given + frequency (extended resolution mode). + """ + return int64(round(2.0*float(int64(2)**62)*frequency/self.sysclk)) & ( + (int64(1) << 63) - 1) + + @portable(flags={"fast-math"}) + def xftw_to_frequency(self, xftw): + """Returns the frequency corresponding to the given frequency tuning + word (extended resolution mode). + """ + return xftw*self.sysclk/(2.0*float(int64(2)**62)) + + @kernel + def set_x(self, frequency, amplitude=1.0): + """Like :meth:`set_x_mu`, but uses Hz and turns. + + Note that the precision of ``float`` is less than the precision + of the extended frequency tuning word. + """ + self.set_x_mu(self.frequency_to_xftw(frequency), + self.amplitude_to_asf(amplitude)) diff --git a/artiq/coredevice/adf5356.py b/artiq/coredevice/adf5356.py new file mode 100644 index 000000000..37505682f --- /dev/null +++ b/artiq/coredevice/adf5356.py @@ -0,0 +1,569 @@ +"""RTIO driver for the Analog Devices ADF[45]35[56] family of GHz PLLs +on Mirny-style prefixed SPI buses. +""" + +# https://github.com/analogdevicesinc/linux/blob/master/Documentation/devicetree/bindings/iio/frequency/adf5355.txt +# https://github.com/analogdevicesinc/linux/blob/master/drivers/iio/frequency/adf5355.c +# https://www.analog.com/media/en/technical-documentation/data-sheets/ADF5355.pdf +# https://www.analog.com/media/en/technical-documentation/data-sheets/ADF5355.pdf +# https://www.analog.com/media/en/technical-documentation/user-guides/EV-ADF5355SD1Z-UG-1087.pdf + + +from artiq.language.core import kernel, portable, delay +from artiq.language.units import us, GHz, MHz +from artiq.language.types import TInt32, TInt64 +from artiq.coredevice import spi2 as spi +from artiq.coredevice.adf5356_reg import * + +from numpy import int32, int64, floor, ceil + + +SPI_CONFIG = ( + 0 * spi.SPI_OFFLINE + | 0 * spi.SPI_END + | 0 * spi.SPI_INPUT + | 1 * spi.SPI_CS_POLARITY + | 0 * spi.SPI_CLK_POLARITY + | 0 * spi.SPI_CLK_PHASE + | 0 * spi.SPI_LSB_FIRST + | 0 * spi.SPI_HALF_DUPLEX +) + + +ADF5356_MIN_VCO_FREQ = int64(3.4 * GHz) +ADF5356_MAX_VCO_FREQ = int64(6.8 * GHz) +ADF5356_MAX_FREQ_PFD = int32(125.0 * MHz) +ADF5356_MODULUS1 = int32(1 << 24) +ADF5356_MAX_MODULUS2 = int32(1 << 28) # FIXME: ADF5356 has 28 bits MOD2 +ADF5356_MAX_R_CNT = int32(1023) + + +class ADF5356: + """Analog Devices AD[45]35[56] family of GHz PLLs. 
+ + :param cpld_device: Mirny CPLD device name + :param sw_device: Mirny RF switch device name + :param channel: Mirny RF channel index + :param ref_doubler: enable/disable reference clock doubler + :param ref_divider: enable/disable reference clock divide-by-2 + :param core_device: Core device name (default: "core") + """ + + kernel_invariants = {"cpld", "sw", "channel", "core", "sysclk"} + + def __init__( + self, + dmgr, + cpld_device, + sw_device, + channel, + ref_doubler=False, + ref_divider=False, + core="core", + ): + self.cpld = dmgr.get(cpld_device) + self.sw = dmgr.get(sw_device) + self.channel = channel + self.core = dmgr.get(core) + + self.ref_doubler = ref_doubler + self.ref_divider = ref_divider + self.sysclk = self.cpld.refclk + assert 10 <= self.sysclk / 1e6 <= 600 + + self._init_registers() + + @kernel + def init(self, blind=False): + """ + Initialize and configure the PLL. + + :param blind: Do not attempt to verify presence. + """ + if not blind: + # MUXOUT = VDD + self.write(ADF5356_REG4_MUXOUT(1) | 4) + delay(5000 * us) + if not self.read_muxout(): + raise ValueError("MUXOUT not high") + delay(1000 * us) + + # MUXOUT = DGND + self.write(ADF5356_REG4_MUXOUT(2) | 4) + delay(5000 * us) + if self.read_muxout(): + raise ValueError("MUXOUT not low") + delay(1000 * us) + + @kernel + def set_att_mu(self, att): + """Set digital step attenuator in machine units. + + :param att: Attenuation setting, 8 bit digital. + """ + self.cpld.set_att_mu(self.channel, att) + + @kernel + def write(self, data): + self.cpld.write_ext(self.channel | 4, 32, data) + + @kernel + def read_muxout(self): + """ + Read the state of the MUXOUT line. + + By default, this is configured to be the digital lock detection. + """ + return bool(self.cpld.read_reg(0) & (1 << (self.channel + 8))) + + @kernel + def set_output_power_mu(self, n): + """ + Set the power level at output A of the PLL chip in machine units. + + This driver defaults to `n = 3` at init. + + :param n: output power setting, 0, 1, 2, or 3 (see ADF5356 datasheet, fig. 44). + """ + if n not in [0, 1, 2, 3]: + raise ValueError("invalid power setting") + self.regs[6] = ADF5356_REG6_RF_OUTPUT_A_POWER_UPDATE(self.regs[6], n) + self.sync() + + @portable + def output_power_mu(self): + """ + Return the power level at output A of the PLL chip in machine units. + """ + return ADF5356_REG6_RF_OUTPUT_A_POWER_GET(self.regs[6]) + + @kernel + def enable_output(self): + """ + Enable output A of the PLL chip. This is the default after init. + """ + self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_ENABLE(1) + self.sync() + + @kernel + def disable_output(self): + """ + Disable output A of the PLL chip. + """ + self.regs[6] &= ~ADF5356_REG6_RF_OUTPUT_A_ENABLE(1) + self.sync() + + @kernel + def set_frequency(self, f): + """ + Output given frequency on output A. 
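# Illustrative sketch, not part of this patch: minimal use of the ADF5356 API
# above on Mirny.  The device names ("mirny0_cpld", "mirny0_ch0") are
# assumptions about the device database.
from artiq.experiment import EnvExperiment, kernel, delay
from artiq.language.units import ms, MHz

class MirnyTone(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("mirny0_cpld")
        self.setattr_device("mirny0_ch0")

    @kernel
    def run(self):
        self.core.reset()
        self.mirny0_cpld.init()
        self.mirny0_ch0.init()
        self.mirny0_ch0.set_frequency(2500*MHz)
        delay(5*ms)
        self.mirny0_ch0.sw.on()   # open the RF switch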
+ + :param f: 53.125 MHz <= f <= 6800 MHz + """ + freq = int64(round(f)) + + if freq > ADF5356_MAX_VCO_FREQ: + raise ValueError("Requested too high frequency") + + # select minimal output divider + rf_div_sel = 0 + while freq < ADF5356_MIN_VCO_FREQ: + freq <<= 1 + rf_div_sel += 1 + + if (1 << rf_div_sel) > 64: + raise ValueError("Requested too low frequency") + + # choose reference divider that maximizes PFD frequency + self.regs[4] = ADF5356_REG4_R_COUNTER_UPDATE( + self.regs[4], self._compute_reference_counter() + ) + f_pfd = self.f_pfd() + + # choose prescaler + if freq > int64(6e9): + self.regs[0] |= ADF5356_REG0_PRESCALER(1) # 8/9 + n_min, n_max = 75, 65535 + + # adjust reference divider to be able to match n_min constraint + while n_min * f_pfd > freq: + r = ADF5356_REG4_R_COUNTER_GET(self.regs[4]) + self.regs[4] = ADF5356_REG4_R_COUNTER_UPDATE(self.regs[4], r + 1) + f_pfd = self.f_pfd() + else: + self.regs[0] &= ~ADF5356_REG0_PRESCALER(1) # 4/5 + n_min, n_max = 23, 32767 + + # calculate PLL parameters + n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb) = calculate_pll( + freq, f_pfd + ) + + if not (n_min <= n <= n_max): + raise ValueError("Invalid INT value") + + # configure PLL + self.regs[0] = ADF5356_REG0_INT_VALUE_UPDATE(self.regs[0], n) + self.regs[1] = ADF5356_REG1_MAIN_FRAC_VALUE_UPDATE(self.regs[1], frac1) + self.regs[2] = ADF5356_REG2_AUX_FRAC_LSB_VALUE_UPDATE(self.regs[2], frac2_lsb) + self.regs[2] = ADF5356_REG2_AUX_MOD_LSB_VALUE_UPDATE(self.regs[2], mod2_lsb) + self.regs[13] = ADF5356_REG13_AUX_FRAC_MSB_VALUE_UPDATE( + self.regs[13], frac2_msb + ) + self.regs[13] = ADF5356_REG13_AUX_MOD_MSB_VALUE_UPDATE(self.regs[13], mod2_msb) + + self.regs[6] = ADF5356_REG6_RF_DIVIDER_SELECT_UPDATE(self.regs[6], rf_div_sel) + self.regs[6] = ADF5356_REG6_CP_BLEED_CURRENT_UPDATE( + self.regs[6], int32(floor(24 * f_pfd / (61.44 * MHz))) + ) + self.regs[9] = ADF5356_REG9_VCO_BAND_DIVISION_UPDATE( + self.regs[9], int32(ceil(f_pfd / 160e3)) + ) + + # commit + self.sync() + + @kernel + def sync(self): + """ + Write all registers to the device. Attempts to lock the PLL. + """ + f_pfd = self.f_pfd() + + if f_pfd <= 75.0 * MHz: + for i in range(13, 0, -1): + self.write(self.regs[i]) + delay(200 * us) + self.write(self.regs[0] | ADF5356_REG0_AUTOCAL(1)) + else: + # AUTOCAL AT HALF PFD FREQUENCY + + # calculate PLL at f_pfd/2 + n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb) = calculate_pll( + self.f_vco(), f_pfd >> 1 + ) + + self.write( + 13 + | ADF5356_REG13_AUX_FRAC_MSB_VALUE(frac2_msb) + | ADF5356_REG13_AUX_MOD_MSB_VALUE(mod2_msb) + ) + + for i in range(12, 4, -1): + self.write(self.regs[i]) + + self.write( + ADF5356_REG4_R_COUNTER_UPDATE(self.regs[4], 2 * self.ref_counter()) + ) + + self.write(self.regs[3]) + self.write( + 2 + | ADF5356_REG2_AUX_MOD_LSB_VALUE(mod2_lsb) + | ADF5356_REG2_AUX_FRAC_LSB_VALUE(frac2_lsb) + ) + self.write(1 | ADF5356_REG1_MAIN_FRAC_VALUE(frac1)) + + delay(200 * us) + self.write(ADF5356_REG0_INT_VALUE(n) | ADF5356_REG0_AUTOCAL(1)) + + # RELOCK AT WANTED PFD FREQUENCY + + for i in [4, 2, 1]: + self.write(self.regs[i]) + + # force-disable autocal + self.write(self.regs[0] & ~ADF5356_REG0_AUTOCAL(1)) + + @portable + def f_pfd(self) -> TInt64: + """ + Return the PFD frequency for the cached set of registers. 
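# Illustrative sketch, not part of this patch: the output-divider selection
# loop at the top of set_frequency() above.  The VCO must run between 3.4 and
# 6.8 GHz; lower output frequencies are reached by repeatedly doubling into
# that range and dividing the output, up to divide-by-64
# (3.4 GHz / 64 = 53.125 MHz, the minimum given in the docstring).
ADF5356_MIN_VCO_FREQ = int(3.4e9)
freq, rf_div_sel = int(100e6), 0
while freq < ADF5356_MIN_VCO_FREQ:
    freq <<= 1
    rf_div_sel += 1
print(freq, 1 << rf_div_sel)   # 6.4 GHz VCO, divide-by-64 output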
+ """ + r = ADF5356_REG4_R_COUNTER_GET(self.regs[4]) + d = ADF5356_REG4_R_DOUBLER_GET(self.regs[4]) + t = ADF5356_REG4_R_DIVIDER_GET(self.regs[4]) + return self._compute_pfd_frequency(r, d, t) + + @portable + def f_vco(self) -> TInt64: + """ + Return the VCO frequency for the cached set of registers. + """ + return int64( + self.f_pfd() + * ( + self.pll_n() + + (self.pll_frac1() + self.pll_frac2() / self.pll_mod2()) + / ADF5356_MODULUS1 + ) + ) + + @portable + def pll_n(self) -> TInt32: + """ + Return the PLL integer value (INT) for the cached set of registers. + """ + return ADF5356_REG0_INT_VALUE_GET(self.regs[0]) + + @portable + def pll_frac1(self) -> TInt32: + """ + Return the main fractional value (FRAC1) for the cached set of registers. + """ + return ADF5356_REG1_MAIN_FRAC_VALUE_GET(self.regs[1]) + + @portable + def pll_frac2(self) -> TInt32: + """ + Return the auxiliary fractional value (FRAC2) for the cached set of registers. + """ + return ( + ADF5356_REG13_AUX_FRAC_MSB_VALUE_GET(self.regs[13]) << 14 + ) | ADF5356_REG2_AUX_FRAC_LSB_VALUE_GET(self.regs[2]) + + @portable + def pll_mod2(self) -> TInt32: + """ + Return the auxiliary modulus value (MOD2) for the cached set of registers. + """ + return ( + ADF5356_REG13_AUX_MOD_MSB_VALUE_GET(self.regs[13]) << 14 + ) | ADF5356_REG2_AUX_MOD_LSB_VALUE_GET(self.regs[2]) + + @portable + def ref_counter(self) -> TInt32: + """ + Return the reference counter value (R) for the cached set of registers. + """ + return ADF5356_REG4_R_COUNTER_GET(self.regs[4]) + + @portable + def output_divider(self) -> TInt32: + """ + Return the value of the output A divider. + """ + return 1 << ADF5356_REG6_RF_DIVIDER_SELECT_GET(self.regs[6]) + + def info(self): + """ + Return a summary of high-level parameters as a dict. + """ + prescaler = ADF5356_REG0_PRESCALER_GET(self.regs[0]) + return { + # output + "f_outA": self.f_vco() / self.output_divider(), + "f_outB": self.f_vco() * 2, + "output_divider": self.output_divider(), + # PLL parameters + "f_vco": self.f_vco(), + "pll_n": self.pll_n(), + "pll_frac1": self.pll_frac1(), + "pll_frac2": self.pll_frac2(), + "pll_mod2": self.pll_mod2(), + "prescaler": "4/5" if prescaler == 0 else "8/9", + # reference / PFD + "sysclk": self.sysclk, + "ref_doubler": self.ref_doubler, + "ref_divider": self.ref_divider, + "ref_counter": self.ref_counter(), + "f_pfd": self.f_pfd(), + } + + @portable + def _init_registers(self): + """ + Initialize cached registers with sensible defaults. 
+ """ + # fill with control bits + self.regs = [int32(i) for i in range(ADF5356_NUM_REGS)] + + # REG2 + # ==== + + # avoid divide-by-zero + self.regs[2] |= ADF5356_REG2_AUX_MOD_LSB_VALUE(1) + + # REG4 + # ==== + + # single-ended reference mode is recommended + # for references up to 250 MHz, even if the signal is differential + if self.sysclk <= 250 * MHz: + self.regs[4] |= ADF5356_REG4_REF_MODE(0) + else: + self.regs[4] |= ADF5356_REG4_REF_MODE(1) + + # phase detector polarity: positive + self.regs[4] |= ADF5356_REG4_PD_POLARITY(1) + + # charge pump current: 0.94 mA + self.regs[4] |= ADF5356_REG4_CURRENT_SETTING(2) + + # MUXOUT: digital lock detect + self.regs[4] |= ADF5356_REG4_MUX_LOGIC(1) # 3v3 logic + self.regs[4] |= ADF5356_REG4_MUXOUT(6) + + # setup reference path + if self.ref_doubler: + self.regs[4] |= ADF5356_REG4_R_DOUBLER(1) + + if self.ref_divider: + self.regs[4] |= ADF5356_REG4_R_DIVIDER(1) + + r = self._compute_reference_counter() + self.regs[4] |= ADF5356_REG4_R_COUNTER(r) + + # REG5 + # ==== + + # reserved values + self.regs[5] = int32(0x800025) + + # REG6 + # ==== + + # reserved values + self.regs[6] = int32(0x14000006) + + # enable negative bleed + self.regs[6] |= ADF5356_REG6_NEGATIVE_BLEED(1) + + # charge pump bleed current + # self.regs[6] |= ADF5356_REG6_CP_BLEED_CURRENT( + # int32(floor(24 * self.f_pfd / (61.44 * MHz))) + # ) + + # direct feedback from VCO to N counter + self.regs[6] |= ADF5356_REG6_FB_SELECT(1) + + # mute until the PLL is locked + self.regs[6] |= ADF5356_REG6_MUTE_TILL_LD(1) + + # enable output A + self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_ENABLE(1) + + # set output A power to max power, is adjusted by extra attenuator + self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_POWER(3) # +5 dBm + + # REG7 + # ==== + + # reserved values + self.regs[7] = int32(0x10000007) + + # sync load-enable to reference + self.regs[7] |= ADF5356_REG7_LE_SYNC(1) + + # frac-N lock-detect precision: 12 ns + self.regs[7] |= ADF5356_REG7_FRAC_N_LD_PRECISION(3) + + # REG8 + # ==== + + # reserved values + self.regs[8] = int32(0x102D0428) + + # REG9 + # ==== + + # default timeouts (from eval software) + self.regs[9] |= ( + ADF5356_REG9_SYNTH_LOCK_TIMEOUT(13) + | ADF5356_REG9_AUTOCAL_TIMEOUT(31) + | ADF5356_REG9_TIMEOUT(0x67) + ) + + # REG10 + # ===== + + # reserved values + self.regs[10] = int32(0xC0000A) + + # ADC defaults (from eval software) + self.regs[10] |= ( + ADF5356_REG10_ADC_ENABLE(1) + | ADF5356_REG10_ADC_CLK_DIV(256) + | ADF5356_REG10_ADC_CONV(1) + ) + + # REG11 + # ===== + + # reserved values + self.regs[11] = int32(0x61200B) + + # REG12 + # ===== + + # reserved values + self.regs[12] = int32(0x15FC) + + @portable + def _compute_pfd_frequency(self, r, d, t) -> TInt64: + """ + Calculate the PFD frequency from the given reference path parameters + """ + return int64(self.sysclk * ((1 + d) / (r * (1 + t)))) + + @portable + def _compute_reference_counter(self) -> TInt32: + """ + Determine the reference counter R that maximizes the PFD frequency + """ + d = ADF5356_REG4_R_DOUBLER_GET(self.regs[4]) + t = ADF5356_REG4_R_DIVIDER_GET(self.regs[4]) + r = 1 + while self._compute_pfd_frequency(r, d, t) > ADF5356_MAX_FREQ_PFD: + r += 1 + return int32(r) + + +@portable +def gcd(a, b): + while b: + a, b = b, a % b + return a + + +@portable +def split_msb_lsb_28b(v): + return int32((v >> 14) & 0x3FFF), int32(v & 0x3FFF) + + +@portable +def calculate_pll(f_vco: TInt64, f_pfd: TInt64): + """ + Calculate fractional-N PLL parameters such that + + ``f_vco`` = ``f_pfd`` * (``n`` + (``frac1`` + 
``frac2``/``mod2``) / ``mod1``) + + where + ``mod1 = 2**24`` and ``mod2 <= 2**28`` + + :param f_vco: target VCO frequency + :param f_pfd: PFD frequency + :return: ``(n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb))`` + """ + f_pfd = int64(f_pfd) + f_vco = int64(f_vco) + + # integral part + n, r = int32(f_vco // f_pfd), f_vco % f_pfd + + # main fractional part + r *= ADF5356_MODULUS1 + frac1, frac2 = int32(r // f_pfd), r % f_pfd + + # auxiliary fractional part + mod2 = f_pfd + + while mod2 > ADF5356_MAX_MODULUS2: + mod2 >>= 1 + frac2 >>= 1 + + gcd_div = gcd(frac2, mod2) + mod2 //= gcd_div + frac2 //= gcd_div + + return n, frac1, split_msb_lsb_28b(frac2), split_msb_lsb_28b(mod2) diff --git a/artiq/coredevice/adf5356_reg.py b/artiq/coredevice/adf5356_reg.py new file mode 100644 index 000000000..a61582d1a --- /dev/null +++ b/artiq/coredevice/adf5356_reg.py @@ -0,0 +1,642 @@ +# auto-generated, do not edit +from artiq.language.core import portable +from artiq.language.types import TInt32 +from numpy import int32 + +@portable +def ADF5356_REG0_AUTOCAL_GET(reg: TInt32) -> TInt32: + return int32((reg >> 21) & 0x1) + +@portable +def ADF5356_REG0_AUTOCAL(x: TInt32) -> TInt32: + return int32((x & 0x1) << 21) + +@portable +def ADF5356_REG0_AUTOCAL_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 21)) | ((x & 0x1) << 21)) + + +@portable +def ADF5356_REG0_INT_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0xffff) + +@portable +def ADF5356_REG0_INT_VALUE(x: TInt32) -> TInt32: + return int32((x & 0xffff) << 4) + +@portable +def ADF5356_REG0_INT_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xffff << 4)) | ((x & 0xffff) << 4)) + + +@portable +def ADF5356_REG0_PRESCALER_GET(reg: TInt32) -> TInt32: + return int32((reg >> 20) & 0x1) + +@portable +def ADF5356_REG0_PRESCALER(x: TInt32) -> TInt32: + return int32((x & 0x1) << 20) + +@portable +def ADF5356_REG0_PRESCALER_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 20)) | ((x & 0x1) << 20)) + + +@portable +def ADF5356_REG1_MAIN_FRAC_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0xffffff) + +@portable +def ADF5356_REG1_MAIN_FRAC_VALUE(x: TInt32) -> TInt32: + return int32((x & 0xffffff) << 4) + +@portable +def ADF5356_REG1_MAIN_FRAC_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xffffff << 4)) | ((x & 0xffffff) << 4)) + + +@portable +def ADF5356_REG2_AUX_FRAC_LSB_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 18) & 0x3fff) + +@portable +def ADF5356_REG2_AUX_FRAC_LSB_VALUE(x: TInt32) -> TInt32: + return int32((x & 0x3fff) << 18) + +@portable +def ADF5356_REG2_AUX_FRAC_LSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3fff << 18)) | ((x & 0x3fff) << 18)) + + +@portable +def ADF5356_REG2_AUX_MOD_LSB_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x3fff) + +@portable +def ADF5356_REG2_AUX_MOD_LSB_VALUE(x: TInt32) -> TInt32: + return int32((x & 0x3fff) << 4) + +@portable +def ADF5356_REG2_AUX_MOD_LSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3fff << 4)) | ((x & 0x3fff) << 4)) + + +@portable +def ADF5356_REG3_PHASE_ADJUST_GET(reg: TInt32) -> TInt32: + return int32((reg >> 28) & 0x1) + +@portable +def ADF5356_REG3_PHASE_ADJUST(x: TInt32) -> TInt32: + return int32((x & 0x1) << 28) + +@portable +def ADF5356_REG3_PHASE_ADJUST_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 28)) | ((x & 0x1) << 28)) + + +@portable +def 
ADF5356_REG3_PHASE_RESYNC_GET(reg: TInt32) -> TInt32: + return int32((reg >> 29) & 0x1) + +@portable +def ADF5356_REG3_PHASE_RESYNC(x: TInt32) -> TInt32: + return int32((x & 0x1) << 29) + +@portable +def ADF5356_REG3_PHASE_RESYNC_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 29)) | ((x & 0x1) << 29)) + + +@portable +def ADF5356_REG3_PHASE_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0xffffff) + +@portable +def ADF5356_REG3_PHASE_VALUE(x: TInt32) -> TInt32: + return int32((x & 0xffffff) << 4) + +@portable +def ADF5356_REG3_PHASE_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xffffff << 4)) | ((x & 0xffffff) << 4)) + + +@portable +def ADF5356_REG3_SD_LOAD_RESET_GET(reg: TInt32) -> TInt32: + return int32((reg >> 30) & 0x1) + +@portable +def ADF5356_REG3_SD_LOAD_RESET(x: TInt32) -> TInt32: + return int32((x & 0x1) << 30) + +@portable +def ADF5356_REG3_SD_LOAD_RESET_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 30)) | ((x & 0x1) << 30)) + + +@portable +def ADF5356_REG4_COUNTER_RESET_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x1) + +@portable +def ADF5356_REG4_COUNTER_RESET(x: TInt32) -> TInt32: + return int32((x & 0x1) << 4) + +@portable +def ADF5356_REG4_COUNTER_RESET_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4)) + + +@portable +def ADF5356_REG4_CP_THREE_STATE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 5) & 0x1) + +@portable +def ADF5356_REG4_CP_THREE_STATE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 5) + +@portable +def ADF5356_REG4_CP_THREE_STATE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 5)) | ((x & 0x1) << 5)) + + +@portable +def ADF5356_REG4_CURRENT_SETTING_GET(reg: TInt32) -> TInt32: + return int32((reg >> 10) & 0xf) + +@portable +def ADF5356_REG4_CURRENT_SETTING(x: TInt32) -> TInt32: + return int32((x & 0xf) << 10) + +@portable +def ADF5356_REG4_CURRENT_SETTING_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xf << 10)) | ((x & 0xf) << 10)) + + +@portable +def ADF5356_REG4_DOUBLE_BUFF_GET(reg: TInt32) -> TInt32: + return int32((reg >> 14) & 0x1) + +@portable +def ADF5356_REG4_DOUBLE_BUFF(x: TInt32) -> TInt32: + return int32((x & 0x1) << 14) + +@portable +def ADF5356_REG4_DOUBLE_BUFF_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 14)) | ((x & 0x1) << 14)) + + +@portable +def ADF5356_REG4_MUX_LOGIC_GET(reg: TInt32) -> TInt32: + return int32((reg >> 8) & 0x1) + +@portable +def ADF5356_REG4_MUX_LOGIC(x: TInt32) -> TInt32: + return int32((x & 0x1) << 8) + +@portable +def ADF5356_REG4_MUX_LOGIC_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 8)) | ((x & 0x1) << 8)) + + +@portable +def ADF5356_REG4_MUXOUT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 27) & 0x7) + +@portable +def ADF5356_REG4_MUXOUT(x: TInt32) -> TInt32: + return int32((x & 0x7) << 27) + +@portable +def ADF5356_REG4_MUXOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x7 << 27)) | ((x & 0x7) << 27)) + + +@portable +def ADF5356_REG4_PD_POLARITY_GET(reg: TInt32) -> TInt32: + return int32((reg >> 7) & 0x1) + +@portable +def ADF5356_REG4_PD_POLARITY(x: TInt32) -> TInt32: + return int32((x & 0x1) << 7) + +@portable +def ADF5356_REG4_PD_POLARITY_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 7)) | ((x & 0x1) << 7)) + + +@portable +def ADF5356_REG4_POWER_DOWN_GET(reg: TInt32) -> TInt32: + return int32((reg >> 6) & 
0x1) + +@portable +def ADF5356_REG4_POWER_DOWN(x: TInt32) -> TInt32: + return int32((x & 0x1) << 6) + +@portable +def ADF5356_REG4_POWER_DOWN_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 6)) | ((x & 0x1) << 6)) + + +@portable +def ADF5356_REG4_R_COUNTER_GET(reg: TInt32) -> TInt32: + return int32((reg >> 15) & 0x3ff) + +@portable +def ADF5356_REG4_R_COUNTER(x: TInt32) -> TInt32: + return int32((x & 0x3ff) << 15) + +@portable +def ADF5356_REG4_R_COUNTER_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3ff << 15)) | ((x & 0x3ff) << 15)) + + +@portable +def ADF5356_REG4_R_DIVIDER_GET(reg: TInt32) -> TInt32: + return int32((reg >> 25) & 0x1) + +@portable +def ADF5356_REG4_R_DIVIDER(x: TInt32) -> TInt32: + return int32((x & 0x1) << 25) + +@portable +def ADF5356_REG4_R_DIVIDER_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 25)) | ((x & 0x1) << 25)) + + +@portable +def ADF5356_REG4_R_DOUBLER_GET(reg: TInt32) -> TInt32: + return int32((reg >> 26) & 0x1) + +@portable +def ADF5356_REG4_R_DOUBLER(x: TInt32) -> TInt32: + return int32((x & 0x1) << 26) + +@portable +def ADF5356_REG4_R_DOUBLER_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 26)) | ((x & 0x1) << 26)) + + +@portable +def ADF5356_REG4_REF_MODE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 9) & 0x1) + +@portable +def ADF5356_REG4_REF_MODE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 9) + +@portable +def ADF5356_REG4_REF_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 9)) | ((x & 0x1) << 9)) + + +@portable +def ADF5356_REG6_BLEED_POLARITY_GET(reg: TInt32) -> TInt32: + return int32((reg >> 31) & 0x1) + +@portable +def ADF5356_REG6_BLEED_POLARITY(x: TInt32) -> TInt32: + return int32((x & 0x1) << 31) + +@portable +def ADF5356_REG6_BLEED_POLARITY_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 31)) | ((x & 0x1) << 31)) + + +@portable +def ADF5356_REG6_CP_BLEED_CURRENT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 13) & 0xff) + +@portable +def ADF5356_REG6_CP_BLEED_CURRENT(x: TInt32) -> TInt32: + return int32((x & 0xff) << 13) + +@portable +def ADF5356_REG6_CP_BLEED_CURRENT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xff << 13)) | ((x & 0xff) << 13)) + + +@portable +def ADF5356_REG6_FB_SELECT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 24) & 0x1) + +@portable +def ADF5356_REG6_FB_SELECT(x: TInt32) -> TInt32: + return int32((x & 0x1) << 24) + +@portable +def ADF5356_REG6_FB_SELECT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 24)) | ((x & 0x1) << 24)) + + +@portable +def ADF5356_REG6_GATE_BLEED_GET(reg: TInt32) -> TInt32: + return int32((reg >> 30) & 0x1) + +@portable +def ADF5356_REG6_GATE_BLEED(x: TInt32) -> TInt32: + return int32((x & 0x1) << 30) + +@portable +def ADF5356_REG6_GATE_BLEED_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 30)) | ((x & 0x1) << 30)) + + +@portable +def ADF5356_REG6_MUTE_TILL_LD_GET(reg: TInt32) -> TInt32: + return int32((reg >> 11) & 0x1) + +@portable +def ADF5356_REG6_MUTE_TILL_LD(x: TInt32) -> TInt32: + return int32((x & 0x1) << 11) + +@portable +def ADF5356_REG6_MUTE_TILL_LD_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 11)) | ((x & 0x1) << 11)) + + +@portable +def ADF5356_REG6_NEGATIVE_BLEED_GET(reg: TInt32) -> TInt32: + return int32((reg >> 29) & 0x1) + +@portable +def ADF5356_REG6_NEGATIVE_BLEED(x: TInt32) -> TInt32: + return int32((x & 0x1) 
<< 29) + +@portable +def ADF5356_REG6_NEGATIVE_BLEED_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 29)) | ((x & 0x1) << 29)) + + +@portable +def ADF5356_REG6_RF_DIVIDER_SELECT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 21) & 0x7) + +@portable +def ADF5356_REG6_RF_DIVIDER_SELECT(x: TInt32) -> TInt32: + return int32((x & 0x7) << 21) + +@portable +def ADF5356_REG6_RF_DIVIDER_SELECT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x7 << 21)) | ((x & 0x7) << 21)) + + +@portable +def ADF5356_REG6_RF_OUTPUT_A_ENABLE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 6) & 0x1) + +@portable +def ADF5356_REG6_RF_OUTPUT_A_ENABLE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 6) + +@portable +def ADF5356_REG6_RF_OUTPUT_A_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 6)) | ((x & 0x1) << 6)) + + +@portable +def ADF5356_REG6_RF_OUTPUT_A_POWER_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x3) + +@portable +def ADF5356_REG6_RF_OUTPUT_A_POWER(x: TInt32) -> TInt32: + return int32((x & 0x3) << 4) + +@portable +def ADF5356_REG6_RF_OUTPUT_A_POWER_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3 << 4)) | ((x & 0x3) << 4)) + + +@portable +def ADF5356_REG6_RF_OUTPUT_B_ENABLE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 10) & 0x1) + +@portable +def ADF5356_REG6_RF_OUTPUT_B_ENABLE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 10) + +@portable +def ADF5356_REG6_RF_OUTPUT_B_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 10)) | ((x & 0x1) << 10)) + + +@portable +def ADF5356_REG7_FRAC_N_LD_PRECISION_GET(reg: TInt32) -> TInt32: + return int32((reg >> 5) & 0x3) + +@portable +def ADF5356_REG7_FRAC_N_LD_PRECISION(x: TInt32) -> TInt32: + return int32((x & 0x3) << 5) + +@portable +def ADF5356_REG7_FRAC_N_LD_PRECISION_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3 << 5)) | ((x & 0x3) << 5)) + + +@portable +def ADF5356_REG7_LD_CYCLE_COUNT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 8) & 0x3) + +@portable +def ADF5356_REG7_LD_CYCLE_COUNT(x: TInt32) -> TInt32: + return int32((x & 0x3) << 8) + +@portable +def ADF5356_REG7_LD_CYCLE_COUNT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3 << 8)) | ((x & 0x3) << 8)) + + +@portable +def ADF5356_REG7_LD_MODE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x1) + +@portable +def ADF5356_REG7_LD_MODE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 4) + +@portable +def ADF5356_REG7_LD_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4)) + + +@portable +def ADF5356_REG7_LE_SEL_SYNC_EDGE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 27) & 0x1) + +@portable +def ADF5356_REG7_LE_SEL_SYNC_EDGE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 27) + +@portable +def ADF5356_REG7_LE_SEL_SYNC_EDGE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 27)) | ((x & 0x1) << 27)) + + +@portable +def ADF5356_REG7_LE_SYNC_GET(reg: TInt32) -> TInt32: + return int32((reg >> 25) & 0x1) + +@portable +def ADF5356_REG7_LE_SYNC(x: TInt32) -> TInt32: + return int32((x & 0x1) << 25) + +@portable +def ADF5356_REG7_LE_SYNC_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 25)) | ((x & 0x1) << 25)) + + +@portable +def ADF5356_REG7_LOL_MODE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 7) & 0x1) + +@portable +def ADF5356_REG7_LOL_MODE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 7) + 
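# Illustrative sketch, not part of this patch: each field in this generated
# module gets the same three accessors -- pack a value into its bit position,
# extract it, and update it inside an existing register word.  Shown here with
# the RF_OUTPUT_A_POWER field (bits 5:4 of register 6) defined above.
reg6 = 0x14000006                                # reserved bits of REG6
reg6 |= (3 & 0x3) << 4                           # ..._POWER(3): +5 dBm code
print((reg6 >> 4) & 0x3)                         # ..._POWER_GET -> 3
reg6 = (reg6 & ~(0x3 << 4)) | ((1 & 0x3) << 4)   # ..._POWER_UPDATE to code 1
print(hex(reg6))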
+@portable +def ADF5356_REG7_LOL_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 7)) | ((x & 0x1) << 7)) + + +@portable +def ADF5356_REG9_AUTOCAL_TIMEOUT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 9) & 0x1f) + +@portable +def ADF5356_REG9_AUTOCAL_TIMEOUT(x: TInt32) -> TInt32: + return int32((x & 0x1f) << 9) + +@portable +def ADF5356_REG9_AUTOCAL_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1f << 9)) | ((x & 0x1f) << 9)) + + +@portable +def ADF5356_REG9_SYNTH_LOCK_TIMEOUT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x1f) + +@portable +def ADF5356_REG9_SYNTH_LOCK_TIMEOUT(x: TInt32) -> TInt32: + return int32((x & 0x1f) << 4) + +@portable +def ADF5356_REG9_SYNTH_LOCK_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1f << 4)) | ((x & 0x1f) << 4)) + + +@portable +def ADF5356_REG9_TIMEOUT_GET(reg: TInt32) -> TInt32: + return int32((reg >> 14) & 0x3ff) + +@portable +def ADF5356_REG9_TIMEOUT(x: TInt32) -> TInt32: + return int32((x & 0x3ff) << 14) + +@portable +def ADF5356_REG9_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3ff << 14)) | ((x & 0x3ff) << 14)) + + +@portable +def ADF5356_REG9_VCO_BAND_DIVISION_GET(reg: TInt32) -> TInt32: + return int32((reg >> 24) & 0xff) + +@portable +def ADF5356_REG9_VCO_BAND_DIVISION(x: TInt32) -> TInt32: + return int32((x & 0xff) << 24) + +@portable +def ADF5356_REG9_VCO_BAND_DIVISION_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xff << 24)) | ((x & 0xff) << 24)) + + +@portable +def ADF5356_REG10_ADC_CLK_DIV_GET(reg: TInt32) -> TInt32: + return int32((reg >> 6) & 0xff) + +@portable +def ADF5356_REG10_ADC_CLK_DIV(x: TInt32) -> TInt32: + return int32((x & 0xff) << 6) + +@portable +def ADF5356_REG10_ADC_CLK_DIV_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xff << 6)) | ((x & 0xff) << 6)) + + +@portable +def ADF5356_REG10_ADC_CONV_GET(reg: TInt32) -> TInt32: + return int32((reg >> 5) & 0x1) + +@portable +def ADF5356_REG10_ADC_CONV(x: TInt32) -> TInt32: + return int32((x & 0x1) << 5) + +@portable +def ADF5356_REG10_ADC_CONV_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 5)) | ((x & 0x1) << 5)) + + +@portable +def ADF5356_REG10_ADC_ENABLE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x1) + +@portable +def ADF5356_REG10_ADC_ENABLE(x: TInt32) -> TInt32: + return int32((x & 0x1) << 4) + +@portable +def ADF5356_REG10_ADC_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4)) + + +@portable +def ADF5356_REG11_VCO_BAND_HOLD_GET(reg: TInt32) -> TInt32: + return int32((reg >> 24) & 0x1) + +@portable +def ADF5356_REG11_VCO_BAND_HOLD(x: TInt32) -> TInt32: + return int32((x & 0x1) << 24) + +@portable +def ADF5356_REG11_VCO_BAND_HOLD_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x1 << 24)) | ((x & 0x1) << 24)) + + +@portable +def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 12) & 0xfffff) + +@portable +def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE(x: TInt32) -> TInt32: + return int32((x & 0xfffff) << 12) + +@portable +def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0xfffff << 12)) | ((x & 0xfffff) << 12)) + + +@portable +def ADF5356_REG13_AUX_FRAC_MSB_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 18) & 0x3fff) + +@portable +def ADF5356_REG13_AUX_FRAC_MSB_VALUE(x: TInt32) -> TInt32: + return 
int32((x & 0x3fff) << 18) + +@portable +def ADF5356_REG13_AUX_FRAC_MSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3fff << 18)) | ((x & 0x3fff) << 18)) + + +@portable +def ADF5356_REG13_AUX_MOD_MSB_VALUE_GET(reg: TInt32) -> TInt32: + return int32((reg >> 4) & 0x3fff) + +@portable +def ADF5356_REG13_AUX_MOD_MSB_VALUE(x: TInt32) -> TInt32: + return int32((x & 0x3fff) << 4) + +@portable +def ADF5356_REG13_AUX_MOD_MSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32: + return int32((reg & ~(0x3fff << 4)) | ((x & 0x3fff) << 4)) + +ADF5356_NUM_REGS = 14 diff --git a/artiq/coredevice/basemod_att.py b/artiq/coredevice/basemod_att.py new file mode 100644 index 000000000..5015324ff --- /dev/null +++ b/artiq/coredevice/basemod_att.py @@ -0,0 +1,79 @@ +from artiq.language.core import kernel, portable, delay +from artiq.language.units import us, ms +from artiq.coredevice.shiftreg import ShiftReg + + +@portable +def to_mu(att): + return round(att*2.0) ^ 0x3f + +@portable +def from_mu(att_mu): + return 0.5*(att_mu ^ 0x3f) + + +class BaseModAtt: + def __init__(self, dmgr, rst_n, clk, le, mosi, miso): + self.rst_n = dmgr.get(rst_n) + self.shift_reg = ShiftReg(dmgr, + clk=clk, ser=mosi, latch=le, ser_in=miso, n=8*4) + + @kernel + def reset(self): + # HMC's incompetence in digital design and interfaces means that + # the HMC542 needs a level low on RST_N and then a rising edge + # on Latch Enable. Their "latch" isn't a latch but a DFF. + # Of course, it also powers up with a random attenuation, and + # that cannot be fixed with simple pull-ups/pull-downs. + self.rst_n.off() + self.shift_reg.latch.off() + delay(1*us) + self.shift_reg.latch.on() + delay(1*us) + self.shift_reg.latch.off() + self.rst_n.on() + delay(1*us) + + @kernel + def set_mu(self, att0, att1, att2, att3): + """ + Sets the four attenuators on BaseMod. + The values are in half decibels, between 0 (no attenuation) + and 63 (31.5dB attenuation). + """ + word = ( + (att0 << 2) | + (att1 << 10) | + (att2 << 18) | + (att3 << 26) + ) + self.shift_reg.set(word) + + @kernel + def get_mu(self): + """ + Retrieves the current settings of the four attenuators on BaseMod. + """ + word = self.shift_reg.get() + att0 = (word >> 2) & 0x3f + att1 = (word >> 10) & 0x3f + att2 = (word >> 18) & 0x3f + att3 = (word >> 26) & 0x3f + return att0, att1, att2, att3 + + @kernel + def set(self, att0, att1, att2, att3): + """ + Sets the four attenuators on BaseMod. + The values are in decibels. + """ + self.set_mu(to_mu(att0), to_mu(att1), to_mu(att2), to_mu(att3)) + + @kernel + def get(self): + """ + Retrieves the current settings of the four attenuators on BaseMod. + The values are in decibels. 
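+        Assuming the same half-decibel encoding used by :meth:`set_mu` and
+        ``from_mu`` above, the returned values range from 0 dB to 31.5 dB
+        in 0.5 dB steps.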
+ """ + att0, att1, att2, att3 = self.get_mu() + return from_mu(att0), from_mu(att1), from_mu(att2), from_mu(att3) diff --git a/artiq/coredevice/comm_analyzer.py b/artiq/coredevice/comm_analyzer.py index d8b6e0434..dd58acb68 100644 --- a/artiq/coredevice/comm_analyzer.py +++ b/artiq/coredevice/comm_analyzer.py @@ -27,9 +27,9 @@ class ExceptionType(Enum): legacy_o_sequence_error_reset = 0b010001 legacy_o_collision_reset = 0b010010 legacy_i_overflow_reset = 0b100000 + legacy_o_sequence_error = 0b010101 o_underflow = 0b010100 - o_sequence_error = 0b010101 i_overflow = 0b100001 @@ -92,16 +92,16 @@ DecodedDump = namedtuple( def decode_dump(data): parts = struct.unpack(">IQbbb", data[:15]) (sent_bytes, total_byte_count, - overflow_occured, log_channel, dds_onehot_sel) = parts + error_occured, log_channel, dds_onehot_sel) = parts expected_len = sent_bytes + 15 if expected_len != len(data): raise ValueError("analyzer dump has incorrect length " "(got {}, expected {})".format( len(data), expected_len)) - if overflow_occured: - logger.warning("analyzer FIFO overflow occured, " - "some messages have been lost") + if error_occured: + logger.warning("error occured within the analyzer, " + "data may be corrupted") if total_byte_count > sent_bytes: logger.info("analyzer ring buffer has wrapped %d times", total_byte_count//sent_bytes) @@ -211,9 +211,8 @@ class TTLClockGenHandler: class DDSHandler: - def __init__(self, vcd_manager, dds_type, onehot_sel, sysclk): + def __init__(self, vcd_manager, onehot_sel, sysclk): self.vcd_manager = vcd_manager - self.dds_type = dds_type self.onehot_sel = onehot_sel self.sysclk = sysclk @@ -227,9 +226,8 @@ class DDSHandler: self.vcd_manager.get_channel(name + "/frequency", 64) dds_channel["vcd_phase"] = \ self.vcd_manager.get_channel(name + "/phase", 64) - if self.dds_type == "DDSChannelAD9914": - dds_channel["ftw"] = [None, None] - dds_channel["pow"] = None + dds_channel["ftw"] = [None, None] + dds_channel["pow"] = None self.dds_channels[dds_channel_nr] = dds_channel def _gpio_to_channels(self, gpio): @@ -252,9 +250,9 @@ class DDSHandler: self.selected_dds_channels = self._gpio_to_channels(message.data) for dds_channel_nr in self.selected_dds_channels: dds_channel = self.dds_channels[dds_channel_nr] - if message.address == 0x2d: + if message.address == 0x11: dds_channel["ftw"][0] = message.data - elif message.address == 0x2f: + elif message.address == 0x13: dds_channel["ftw"][1] = message.data elif message.address == 0x31: dds_channel["pow"] = message.data @@ -273,8 +271,7 @@ class DDSHandler: logger.debug("DDS write @%d 0x%04x to 0x%02x, selected channels: %s", message.timestamp, message.data, message.address, self.selected_dds_channels) - if self.dds_type == "DDSChannelAD9914": - self._decode_ad9914_write(message) + self._decode_ad9914_write(message) class WishboneHandler: @@ -344,6 +341,56 @@ class SPIMasterHandler(WishboneHandler): raise ValueError("bad address %d", address) +class SPIMaster2Handler(WishboneHandler): + def __init__(self, vcd_manager, name): + self._reads = [] + self.channels = {} + with vcd_manager.scope("spi2/{}".format(name)): + self.stb = vcd_manager.get_channel("{}/{}".format(name, "stb"), 1) + for reg_name, reg_width in [ + ("flags", 8), + ("length", 5), + ("div", 8), + ("chip_select", 8), + ("write", 32), + ("read", 32)]: + self.channels[reg_name] = vcd_manager.get_channel( + "{}/{}".format(name, reg_name), reg_width) + + def process_message(self, message): + self.stb.set_value("1") + self.stb.set_value("0") + if isinstance(message, 
OutputMessage): + data = message.data + address = message.address + if address == 1: + logger.debug("SPI config @%d data=0x%08x", + message.timestamp, data) + self.channels["chip_select"].set_value( + "{:08b}".format(data >> 24)) + self.channels["div"].set_value( + "{:08b}".format(data >> 16 & 0xff)) + self.channels["length"].set_value( + "{:08b}".format(data >> 8 & 0x1f)) + self.channels["flags"].set_value( + "{:08b}".format(data & 0xff)) + elif address == 0: + logger.debug("SPI write @%d data=0x%08x", + message.timestamp, data) + self.channels["write"].set_value("{:032b}".format(data)) + else: + raise ValueError("bad address", address) + # process untimed reads and insert them here + while (self._reads and + self._reads[0].rtio_counter < message.timestamp): + read = self._reads.pop(0) + logger.debug("SPI read @%d data=0x%08x", + read.rtio_counter, read.data) + self.channels["read"].set_value("{:032b}".format(read.data)) + elif isinstance(message, InputMessage): + self._reads.append(message) + + def _extract_log_chars(data): r = "" for i in range(4): @@ -395,16 +442,17 @@ def get_vcd_log_channels(log_channel, messages): def get_single_device_argument(devices, module, cls, argument): - ref_period = None + found = None for desc in devices.values(): if isinstance(desc, dict) and desc["type"] == "local": if (desc["module"] == module and desc["class"] in cls): - if ref_period is None: - ref_period = desc["arguments"][argument] - else: - return None # more than one device found - return ref_period + value = desc["arguments"][argument] + if found is None: + found = value + elif value != found: + return None # more than one value/device found + return found def get_ref_period(devices): @@ -413,8 +461,8 @@ def get_ref_period(devices): def get_dds_sysclk(devices): - return get_single_device_argument(devices, "artiq.coredevice.dds", - ("DDSGroupAD9914",), "sysclk") + return get_single_device_argument(devices, "artiq.coredevice.ad9914", + ("AD9914",), "sysclk") def create_channel_handlers(vcd_manager, devices, ref_period, @@ -430,23 +478,20 @@ def create_channel_handlers(vcd_manager, devices, ref_period, and desc["class"] == "TTLClockGen"): channel = desc["arguments"]["channel"] channel_handlers[channel] = TTLClockGenHandler(vcd_manager, name, ref_period) - if (desc["module"] == "artiq.coredevice.dds" - and desc["class"] in {"DDSChannelAD9914"}): + if (desc["module"] == "artiq.coredevice.ad9914" + and desc["class"] == "AD9914"): dds_bus_channel = desc["arguments"]["bus_channel"] dds_channel = desc["arguments"]["channel"] if dds_bus_channel in channel_handlers: dds_handler = channel_handlers[dds_bus_channel] - if dds_handler.dds_type != desc["class"]: - raise ValueError("All DDS channels must have the same type") else: - dds_handler = DDSHandler(vcd_manager, desc["class"], - dds_onehot_sel, dds_sysclk) + dds_handler = DDSHandler(vcd_manager, dds_onehot_sel, dds_sysclk) channel_handlers[dds_bus_channel] = dds_handler dds_handler.add_dds_channel(name, dds_channel) - if (desc["module"] == "artiq.coredevice.spi" and + if (desc["module"] == "artiq.coredevice.spi2" and desc["class"] == "SPIMaster"): channel = desc["arguments"]["channel"] - channel_handlers[channel] = SPIMasterHandler( + channel_handlers[channel] = SPIMaster2Handler( vcd_manager, name) return channel_handlers @@ -455,11 +500,13 @@ def get_message_time(message): return getattr(message, "timestamp", message.rtio_counter) -def decoded_dump_to_vcd(fileobj, devices, dump): +def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False): 
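+    # A short note on uniform_interval, inferred from the body of this
+    # function: instead of placing events at their RTIO timestamps, the VCD
+    # time axis advances by one tick per message, and two extra channels
+    # ("timestamp" and "interval") carry the raw event time in machine units
+    # and the spacing between timed events in seconds.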
vcd_manager = VCDManager(fileobj) ref_period = get_ref_period(devices) + if ref_period is not None: - vcd_manager.set_timescale_ps(ref_period*1e12) + if not uniform_interval: + vcd_manager.set_timescale_ps(ref_period*1e12) else: logger.warning("unable to determine core device ref_period") ref_period = 1e-9 # guess @@ -481,6 +528,12 @@ def decoded_dump_to_vcd(fileobj, devices, dump): vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages) channel_handlers[dump.log_channel] = LogHandler( vcd_manager, vcd_log_channels) + if uniform_interval: + # RTIO event timestamp in machine units + timestamp = vcd_manager.get_channel("timestamp", 64) + # RTIO time interval between this and the next timed event + # in SI seconds + interval = vcd_manager.get_channel("interval", 64) slack = vcd_manager.get_channel("rtio_slack", 64) vcd_manager.set_time(0) @@ -490,11 +543,18 @@ def decoded_dump_to_vcd(fileobj, devices, dump): if start_time: break - for message in messages: + t0 = 0 + for i, message in enumerate(messages): if message.channel in channel_handlers: t = get_message_time(message) - start_time if t >= 0: - vcd_manager.set_time(t) + if uniform_interval: + interval.set_value_double((t - t0)*ref_period) + vcd_manager.set_time(i) + timestamp.set_value("{:064b}".format(t)) + t0 = t + else: + vcd_manager.set_time(t) channel_handlers[message.channel].process_message(message) if isinstance(message, OutputMessage): slack.set_value_double( diff --git a/artiq/coredevice/comm_kernel.py b/artiq/coredevice/comm_kernel.py index 2731ffd4d..5387b9d10 100644 --- a/artiq/coredevice/comm_kernel.py +++ b/artiq/coredevice/comm_kernel.py @@ -1,9 +1,8 @@ import struct import logging -import socket -import sys import traceback import numpy +import socket from enum import Enum from fractions import Fraction from collections import namedtuple @@ -15,69 +14,39 @@ from artiq import __version__ as software_version logger = logging.getLogger(__name__) -class _H2DMsgType(Enum): - LOG_REQUEST = 1 - LOG_CLEAR = 2 - LOG_FILTER = 13 +class Request(Enum): + SystemInfo = 3 - SYSTEM_INFO_REQUEST = 3 - SWITCH_CLOCK = 4 + LoadKernel = 5 + RunKernel = 6 - LOAD_KERNEL = 5 - RUN_KERNEL = 6 - - RPC_REPLY = 7 - RPC_EXCEPTION = 8 - - FLASH_READ_REQUEST = 9 - FLASH_WRITE_REQUEST = 10 - FLASH_ERASE_REQUEST = 11 - FLASH_REMOVE_REQUEST = 12 - - HOTSWAP = 14 + RPCReply = 7 + RPCException = 8 -class _D2HMsgType(Enum): - LOG_REPLY = 1 +class Reply(Enum): + SystemInfo = 2 - SYSTEM_INFO_REPLY = 2 - CLOCK_SWITCH_COMPLETED = 3 - CLOCK_SWITCH_FAILED = 4 + LoadCompleted = 5 + LoadFailed = 6 - LOAD_COMPLETED = 5 - LOAD_FAILED = 6 + KernelFinished = 7 + KernelStartupFailed = 8 + KernelException = 9 - KERNEL_FINISHED = 7 - KERNEL_STARTUP_FAILED = 8 - KERNEL_EXCEPTION = 9 + RPCRequest = 10 - RPC_REQUEST = 10 - - FLASH_READ_REPLY = 11 - FLASH_OK_REPLY = 12 - FLASH_ERROR_REPLY = 13 - - WATCHDOG_EXPIRED = 14 - CLOCK_FAILURE = 15 - - HOTSWAP_IMMINENT = 16 - - -class _LogLevel(Enum): - OFF = 0 - ERROR = 1 - WARN = 2 - INFO = 3 - DEBUG = 4 - TRACE = 5 + ClockFailure = 15 class UnsupportedDevice(Exception): pass + class LoadError(Exception): pass + class RPCReturnValueError(ValueError): pass @@ -85,37 +54,109 @@ class RPCReturnValueError(ValueError): RPCKeyword = namedtuple('RPCKeyword', ['name', 'value']) -def set_keepalive(sock, after_idle, interval, max_fails): - if sys.platform.startswith("linux"): - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle) - 
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval) - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) - elif sys.platform.startswith("win") or sys.platform.startswith("cygwin"): - # setting max_fails is not supported, typically ends up being 5 or 10 - # depending on Windows version - sock.ioctl(socket.SIO_KEEPALIVE_VALS, - (1, after_idle*1000, interval*1000)) +def _receive_fraction(kernel, embedding_map): + numerator = kernel._read_int64() + denominator = kernel._read_int64() + return Fraction(numerator, denominator) + + +def _receive_list(kernel, embedding_map): + length = kernel._read_int32() + tag = chr(kernel._read_int8()) + if tag == "b": + buffer = kernel._read(length) + return list(buffer) + elif tag == "i": + buffer = kernel._read(4 * length) + return list(struct.unpack(">%sl" % length, buffer)) + elif tag == "I": + buffer = kernel._read(8 * length) + return list(struct.unpack(">%sq" % length, buffer)) + elif tag == "f": + buffer = kernel._read(8 * length) + return list(struct.unpack(">%sd" % length, buffer)) else: - logger.warning("TCP keepalive not supported on platform '%s', ignored", - sys.platform) + fn = receivers[tag] + elems = [] + for _ in range(length): + # discard tag, as our device would still send the tag for each + # non-primitive elements. + kernel._read_int8() + item = fn(kernel, embedding_map) + elems.append(item) + return elems -def initialize_connection(host, port): - sock = socket.create_connection((host, port), 5.0) - sock.settimeout(None) - set_keepalive(sock, 3, 2, 3) - logger.debug("connected to host %s on port %d", host, port) - return sock +def _receive_array(kernel, embedding_map): + num_dims = kernel._read_int8() + shape = tuple(kernel._read_int32() for _ in range(num_dims)) + tag = chr(kernel._read_int8()) + fn = receivers[tag] + length = numpy.prod(shape) + if tag == "b": + buffer = kernel._read(length) + elems = numpy.ndarray((length, ), 'B', buffer) + elif tag == "i": + buffer = kernel._read(4 * length) + elems = numpy.ndarray((length, ), '>i4', buffer) + elif tag == "I": + buffer = kernel._read(8 * length) + elems = numpy.ndarray((length, ), '>i8', buffer) + elif tag == "f": + buffer = kernel._read(8 * length) + elems = numpy.ndarray((length, ), '>d', buffer) + else: + fn = receivers[tag] + elems = [] + for _ in range(numpy.prod(shape)): + # discard the tag + kernel._read_int8() + item = fn(kernel, embedding_map) + elems.append(item) + elems = numpy.array(elems) + return elems.reshape(shape) + + +def _receive_range(kernel, embedding_map): + start = kernel._receive_rpc_value(embedding_map) + stop = kernel._receive_rpc_value(embedding_map) + step = kernel._receive_rpc_value(embedding_map) + return range(start, stop, step) + + +def _receive_keyword(kernel, embedding_map): + name = kernel._read_string() + value = kernel._receive_rpc_value(embedding_map) + return RPCKeyword(name, value) + + +receivers = { + "\x00": lambda kernel, embedding_map: kernel._rpc_sentinel, + "t": lambda kernel, embedding_map: + tuple(kernel._receive_rpc_value(embedding_map) + for _ in range(kernel._read_int8())), + "n": lambda kernel, embedding_map: None, + "b": lambda kernel, embedding_map: bool(kernel._read_int8()), + "i": lambda kernel, embedding_map: numpy.int32(kernel._read_int32()), + "I": lambda kernel, embedding_map: numpy.int64(kernel._read_int64()), + "f": lambda kernel, embedding_map: kernel._read_float64(), + "s": lambda kernel, embedding_map: kernel._read_string(), + "B": lambda kernel, embedding_map: kernel._read_bytes(), + "A": 
lambda kernel, embedding_map: kernel._read_bytes(), + "O": lambda kernel, embedding_map: + embedding_map.retrieve_object(kernel._read_int32()), + "F": _receive_fraction, + "l": _receive_list, + "a": _receive_array, + "r": _receive_range, + "k": _receive_keyword +} class CommKernelDummy: def __init__(self): pass - def switch_clock(self, external): - pass - def load(self, kernel_library): pass @@ -128,23 +169,31 @@ class CommKernelDummy: def check_system_info(self): pass - def get_log(self): - return "" - - def clear_log(self): - pass - class CommKernel: + warned_of_mismatch = False + def __init__(self, host, port=1381): self._read_type = None self.host = host self.port = port + self.read_buffer = bytearray() + self.write_buffer = bytearray() + + self.unpack_int32 = struct.Struct(">l").unpack + self.unpack_int64 = struct.Struct(">q").unpack + self.unpack_float64 = struct.Struct(">d").unpack + + self.pack_header = struct.Struct(">lB").pack + self.pack_int32 = struct.Struct(">l").pack + self.pack_int64 = struct.Struct(">q").pack + self.pack_float64 = struct.Struct(">d").pack def open(self): if hasattr(self, "socket"): return - self.socket = initialize_connection(self.host, self.port) + self.socket = socket.create_connection((self.host, self.port)) + logger.debug("connected to %s:%d", self.host, self.port) self.socket.sendall(b"ARTIQ coredev\n") def close(self): @@ -154,37 +203,39 @@ class CommKernel: del self.socket logger.debug("disconnected") - def read(self, length): - r = bytes() - while len(r) < length: - rn = self.socket.recv(min(8192, length - len(r))) - if not rn: - raise ConnectionResetError("Connection closed") - r += rn - return r - - def write(self, data): - self.socket.sendall(data) - # # Reader interface # + def _read(self, length): + # cache the reads to avoid frequent call to recv + while len(self.read_buffer) < length: + # the number is just the maximum amount + # when there is not much data, it would return earlier + diff = length - len(self.read_buffer) + flag = 0 + if diff > 8192: + flag |= socket.MSG_WAITALL + self.read_buffer += self.socket.recv(8192, flag) + result = self.read_buffer[:length] + self.read_buffer = self.read_buffer[length:] + return result + def _read_header(self): self.open() # Wait for a synchronization sequence, 5a 5a 5a 5a. sync_count = 0 while sync_count < 4: - (sync_byte, ) = struct.unpack("B", self.read(1)) + sync_byte = self._read(1)[0] if sync_byte == 0x5a: sync_count += 1 else: sync_count = 0 # Read message header. 
- (raw_type, ) = struct.unpack("B", self.read(1)) - self._read_type = _D2HMsgType(raw_type) + raw_type = self._read(1)[0] + self._read_type = Reply(raw_type) logger.debug("receiving message: type=%r", self._read_type) @@ -198,30 +249,26 @@ class CommKernel: self._read_header() self._read_expect(ty) - def _read_chunk(self, length): - return self.read(length) - def _read_int8(self): - (value, ) = struct.unpack("B", self._read_chunk(1)) - return value + return self._read(1)[0] def _read_int32(self): - (value, ) = struct.unpack(">l", self._read_chunk(4)) + (value, ) = self.unpack_int32(self._read(4)) return value def _read_int64(self): - (value, ) = struct.unpack(">q", self._read_chunk(8)) + (value, ) = self.unpack_int64(self._read(8)) return value def _read_float64(self): - (value, ) = struct.unpack(">d", self._read_chunk(8)) + (value, ) = self.unpack_float64(self._read(8)) return value def _read_bool(self): return True if self._read_int8() else False def _read_bytes(self): - return self._read_chunk(self._read_int32()) + return self._read(self._read_int32()) def _read_string(self): return self._read_bytes().decode("utf-8") @@ -230,38 +277,49 @@ class CommKernel: # Writer interface # + def _write(self, data): + self.write_buffer += data + # if the buffer is already pretty large, send it + # the block size is arbitrary, tuning it may improve performance + if len(self.write_buffer) > 4096: + self._flush() + + def _flush(self): + self.socket.sendall(self.write_buffer) + self.write_buffer.clear() + def _write_header(self, ty): self.open() logger.debug("sending message: type=%r", ty) # Write synchronization sequence and header. - self.write(struct.pack(">lB", 0x5a5a5a5a, ty.value)) + self._write(self.pack_header(0x5a5a5a5a, ty.value)) def _write_empty(self, ty): self._write_header(ty) def _write_chunk(self, chunk): - self.write(chunk) + self._write(chunk) def _write_int8(self, value): - self.write(struct.pack("B", value)) + self._write(value) def _write_int32(self, value): - self.write(struct.pack(">l", value)) + self._write(self.pack_int32(value)) def _write_int64(self, value): - self.write(struct.pack(">q", value)) + self._write(self.pack_int64(value)) def _write_float64(self, value): - self.write(struct.pack(">d", value)) + self._write(self.pack_float64(value)) def _write_bool(self, value): - self.write(struct.pack("B", value)) + self._write(b'\x01' if value else b'\x00') def _write_bytes(self, value): self._write_int32(len(value)) - self.write(value) + self._write(value) def _write_string(self, value): self._write_bytes(value.encode("utf-8")) @@ -270,126 +328,53 @@ class CommKernel: # Exported APIs # - def reset_session(self): - self.write(struct.pack(">ll", 0x5a5a5a5a, 0)) - def check_system_info(self): - self._write_empty(_H2DMsgType.SYSTEM_INFO_REQUEST) + self._write_empty(Request.SystemInfo) + self._flush() self._read_header() - self._read_expect(_D2HMsgType.SYSTEM_INFO_REPLY) - runtime_id = self._read_chunk(4) - if runtime_id != b"AROR": + self._read_expect(Reply.SystemInfo) + runtime_id = self._read(4) + if runtime_id == b"AROR": + gateware_version = self._read_string().split(";")[0] + if gateware_version != software_version and not self.warned_of_mismatch: + logger.warning("Mismatch between gateware (%s) " + "and software (%s) versions", + gateware_version, software_version) + CommKernel.warned_of_mismatch = True + + finished_cleanly = self._read_bool() + if not finished_cleanly: + logger.warning("Previous kernel did not cleanly finish") + elif runtime_id == b"ARZQ": + pass + else: raise 
UnsupportedDevice("Unsupported runtime ID: {}" .format(runtime_id)) - gateware_version = self._read_string() - if gateware_version != software_version: - logger.warning("Mismatch between gateware (%s) " - "and software (%s) versions", - gateware_version, software_version) - - finished_cleanly = self._read_bool() - if not finished_cleanly: - logger.warning("Previous kernel did not cleanly finish") - - def switch_clock(self, external): - self._write_header(_H2DMsgType.SWITCH_CLOCK) - self._write_int8(external) - - self._read_empty(_D2HMsgType.CLOCK_SWITCH_COMPLETED) - - def flash_storage_read(self, key): - self._write_header(_H2DMsgType.FLASH_READ_REQUEST) - self._write_string(key) - - self._read_header() - self._read_expect(_D2HMsgType.FLASH_READ_REPLY) - return self._read_string() - - def flash_storage_write(self, key, value): - self._write_header(_H2DMsgType.FLASH_WRITE_REQUEST) - self._write_string(key) - self._write_bytes(value) - - self._read_header() - if self._read_type == _D2HMsgType.FLASH_ERROR_REPLY: - raise IOError("Flash storage is full") - else: - self._read_expect(_D2HMsgType.FLASH_OK_REPLY) - - def flash_storage_erase(self): - self._write_empty(_H2DMsgType.FLASH_ERASE_REQUEST) - - self._read_empty(_D2HMsgType.FLASH_OK_REPLY) - - def flash_storage_remove(self, key): - self._write_header(_H2DMsgType.FLASH_REMOVE_REQUEST) - self._write_string(key) - - self._read_empty(_D2HMsgType.FLASH_OK_REPLY) - def load(self, kernel_library): - self._write_header(_H2DMsgType.LOAD_KERNEL) + self._write_header(Request.LoadKernel) self._write_bytes(kernel_library) + self._flush() self._read_header() - if self._read_type == _D2HMsgType.LOAD_FAILED: + if self._read_type == Reply.LoadFailed: raise LoadError(self._read_string()) else: - self._read_expect(_D2HMsgType.LOAD_COMPLETED) + self._read_expect(Reply.LoadCompleted) def run(self): - self._write_empty(_H2DMsgType.RUN_KERNEL) + self._write_empty(Request.RunKernel) + self._flush() logger.debug("running kernel") _rpc_sentinel = object() - # See session.c:{send,receive}_rpc_value and llvm_ir_generator.py:_rpc_tag. + # See rpc_proto.rs and compiler/ir.py:rpc_tag. 
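+    # For instance, a value tagged "l" (list) arrives as a 32-bit length
+    # followed by a single element tag; primitive element tags such as "i"
+    # (int32) are then read as one packed big-endian block, while other
+    # element types fall back to per-element dispatch through `receivers`.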
def _receive_rpc_value(self, embedding_map): tag = chr(self._read_int8()) - if tag == "\x00": - return self._rpc_sentinel - elif tag == "t": - length = self._read_int8() - return tuple(self._receive_rpc_value(embedding_map) for _ in range(length)) - elif tag == "n": - return None - elif tag == "b": - return bool(self._read_int8()) - elif tag == "i": - return numpy.int32(self._read_int32()) - elif tag == "I": - return numpy.int64(self._read_int64()) - elif tag == "f": - return self._read_float64() - elif tag == "F": - numerator = self._read_int64() - denominator = self._read_int64() - return Fraction(numerator, denominator) - elif tag == "s": - return self._read_string() - elif tag == "B": - return self._read_bytes() - elif tag == "A": - return self._read_bytes() - elif tag == "l": - length = self._read_int32() - return [self._receive_rpc_value(embedding_map) for _ in range(length)] - elif tag == "a": - length = self._read_int32() - return numpy.array([self._receive_rpc_value(embedding_map) for _ in range(length)]) - elif tag == "r": - start = self._receive_rpc_value(embedding_map) - stop = self._receive_rpc_value(embedding_map) - step = self._receive_rpc_value(embedding_map) - return range(start, stop, step) - elif tag == "k": - name = self._read_string() - value = self._receive_rpc_value(embedding_map) - return RPCKeyword(name, value) - elif tag == "O": - return embedding_map.retrieve_object(self._read_int32()) + if tag in receivers: + return receivers.get(tag)(self, embedding_map) else: raise IOError("Unknown RPC value tag: {}".format(repr(tag))) @@ -405,7 +390,7 @@ class CommKernel: args.append(value) def _skip_rpc_value(self, tags): - tag = tags.pop(0) + tag = chr(tags.pop(0)) if tag == "t": length = tags.pop(0) for _ in range(length): @@ -439,7 +424,7 @@ class CommKernel: elif tag == "b": check(isinstance(value, bool), lambda: "bool") - self._write_int8(value) + self._write_bool(value) elif tag == "i": check(isinstance(value, (int, numpy.int32)) and (-2**31 < value < 2**31-1), @@ -456,8 +441,8 @@ class CommKernel: self._write_float64(value) elif tag == "F": check(isinstance(value, Fraction) and - (-2**63 < value.numerator < 2**63-1) and - (-2**63 < value.denominator < 2**63-1), + (-2**63 < value.numerator < 2**63-1) and + (-2**63 < value.denominator < 2**63-1), lambda: "64-bit Fraction") self._write_int64(value.numerator) self._write_int64(value.denominator) @@ -477,9 +462,44 @@ class CommKernel: check(isinstance(value, list), lambda: "list") self._write_int32(len(value)) - for elt in value: - tags_copy = bytearray(tags) - self._send_rpc_value(tags_copy, elt, root, function) + tag_element = chr(tags[0]) + if tag_element == "b": + self._write(bytes(value)) + elif tag_element == "i": + self._write(struct.pack(">%sl" % len(value), *value)) + elif tag_element == "I": + self._write(struct.pack(">%sq" % len(value), *value)) + elif tag_element == "f": + self._write(struct.pack(">%sd" % len(value), *value)) + else: + for elt in value: + tags_copy = bytearray(tags) + self._send_rpc_value(tags_copy, elt, root, function) + self._skip_rpc_value(tags) + elif tag == "a": + check(isinstance(value, numpy.ndarray), + lambda: "numpy.ndarray") + num_dims = tags.pop(0) + check(num_dims == len(value.shape), + lambda: "{}-dimensional numpy.ndarray".format(num_dims)) + for s in value.shape: + self._write_int32(s) + tag_element = chr(tags[0]) + if tag_element == "b": + self._write(value.reshape((-1,), order="C").tobytes()) + elif tag_element == "i": + array = value.reshape((-1,), order="C").astype('>i4') + 
self._write(array.tobytes()) + elif tag_element == "I": + array = value.reshape((-1,), order="C").astype('>i8') + self._write(array.tobytes()) + elif tag_element == "f": + array = value.reshape((-1,), order="C").astype('>d') + self._write(array.tobytes()) + else: + for elt in value.reshape((-1,), order="C"): + tags_copy = bytearray(tags) + self._send_rpc_value(tags_copy, elt, root, function) self._skip_rpc_value(tags) elif tag == "r": check(isinstance(value, range), @@ -501,35 +521,39 @@ class CommKernel: return msg def _serve_rpc(self, embedding_map): - async = self._read_bool() - service_id = self._read_int32() + is_async = self._read_bool() + service_id = self._read_int32() args, kwargs = self._receive_rpc_args(embedding_map) - return_tags = self._read_bytes() + return_tags = self._read_bytes() - if service_id is 0: - service = lambda obj, attr, value: setattr(obj, attr, value) + if service_id == 0: + def service(obj, attr, value): return setattr(obj, attr, value) else: - service = embedding_map.retrieve_object(service_id) + service = embedding_map.retrieve_object(service_id) logger.debug("rpc service: [%d]%r%s %r %r -> %s", service_id, service, - (" (async)" if async else ""), args, kwargs, return_tags) + (" (async)" if is_async else ""), args, kwargs, return_tags) - if async: + if is_async: service(*args, **kwargs) return try: result = service(*args, **kwargs) - logger.debug("rpc service: %d %r %r = %r", service_id, args, kwargs, result) + logger.debug("rpc service: %d %r %r = %r", + service_id, args, kwargs, result) - self._write_header(_H2DMsgType.RPC_REPLY) + self._write_header(Request.RPCReply) self._write_bytes(return_tags) - self._send_rpc_value(bytearray(return_tags), result, result, service) + self._send_rpc_value(bytearray(return_tags), + result, result, service) + self._flush() except RPCReturnValueError as exn: raise except Exception as exn: - logger.debug("rpc service: %d %r %r ! %r", service_id, args, kwargs, exn) + logger.debug("rpc service: %d %r %r ! 
%r", + service_id, args, kwargs, exn) - self._write_header(_H2DMsgType.RPC_EXCEPTION) + self._write_header(Request.RPCException) if hasattr(exn, "artiq_core_exception"): exn = exn.artiq_core_exception @@ -545,7 +569,7 @@ class CommKernel: self._write_string(function) else: exn_type = type(exn) - if exn_type in (ZeroDivisionError, ValueError, IndexError) or \ + if exn_type in (ZeroDivisionError, ValueError, IndexError, RuntimeError) or \ hasattr(exn, "artiq_builtin"): self._write_string("0:{}".format(exn_type.__name__)) else: @@ -566,23 +590,24 @@ class CommKernel: assert False self._write_string(filename) self._write_int32(line) - self._write_int32(-1) # column not known + self._write_int32(-1) # column not known self._write_string(function) + self._flush() def _serve_exception(self, embedding_map, symbolizer, demangler): - name = self._read_string() - message = self._read_string() - params = [self._read_int64() for _ in range(3)] + name = self._read_string() + message = self._read_string() + params = [self._read_int64() for _ in range(3)] - filename = self._read_string() - line = self._read_int32() - column = self._read_int32() - function = self._read_string() + filename = self._read_string() + line = self._read_int32() + column = self._read_int32() + function = self._read_string() backtrace = [self._read_int32() for _ in range(self._read_int32())] traceback = list(reversed(symbolizer(backtrace))) + \ - [(filename, line, column, *demangler([function]), None)] + [(filename, line, column, *demangler([function]), None)] core_exn = exceptions.CoreException(name, message, params, traceback) if core_exn.id == 0: @@ -597,14 +622,12 @@ class CommKernel: def serve(self, embedding_map, symbolizer, demangler): while True: self._read_header() - if self._read_type == _D2HMsgType.RPC_REQUEST: + if self._read_type == Reply.RPCRequest: self._serve_rpc(embedding_map) - elif self._read_type == _D2HMsgType.KERNEL_EXCEPTION: + elif self._read_type == Reply.KernelException: self._serve_exception(embedding_map, symbolizer, demangler) - elif self._read_type == _D2HMsgType.WATCHDOG_EXPIRED: - raise exceptions.WatchdogExpired - elif self._read_type == _D2HMsgType.CLOCK_FAILURE: + elif self._read_type == Reply.ClockFailure: raise exceptions.ClockFailure else: - self._read_expect(_D2HMsgType.KERNEL_FINISHED) + self._read_expect(Reply.KernelFinished) return diff --git a/artiq/coredevice/comm_mgmt.py b/artiq/coredevice/comm_mgmt.py index 9a551a378..6468499e0 100644 --- a/artiq/coredevice/comm_mgmt.py +++ b/artiq/coredevice/comm_mgmt.py @@ -14,15 +14,32 @@ class Request(Enum): SetLogFilter = 3 SetUartLogFilter = 6 + ConfigRead = 12 + ConfigWrite = 13 + ConfigRemove = 14 + ConfigErase = 15 + + StartProfiler = 9 + StopProfiler = 10 + GetProfile = 11 + Hotswap = 4 Reboot = 5 + DebugAllocator = 8 + class Reply(Enum): Success = 1 + Error = 6 + Unavailable = 4 LogContent = 2 + ConfigData = 7 + + Profile = 5 + RebootImminent = 3 @@ -35,13 +52,6 @@ class LogLevel(Enum): TRACE = 5 -def initialize_connection(host, port): - sock = socket.create_connection((host, port), 5.0) - sock.settimeout(None) - logger.debug("connected to host %s on port %d", host, port) - return sock - - class CommMgmt: def __init__(self, host, port=1380): self.host = host @@ -50,7 +60,8 @@ class CommMgmt: def open(self): if hasattr(self, "socket"): return - self.socket = initialize_connection(self.host, self.port) + self.socket = socket.create_connection((self.host, self.port)) + logger.debug("connected to %s:%d", self.host, self.port) 
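+        # the banner below identifies this connection to the device as a
+        # management session (the kernel connection sends its own banner)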
self.socket.sendall(b"ARTIQ management\n") def close(self): @@ -81,6 +92,9 @@ class CommMgmt: self._write_int32(len(value)) self._write(value) + def _write_string(self, value): + self._write_bytes(value.encode("utf-8")) + def _read(self, length): r = bytes() while len(r) < length: @@ -143,6 +157,66 @@ class CommMgmt: self._write_int8(getattr(LogLevel, level).value) self._read_expect(Reply.Success) + def config_read(self, key): + self._write_header(Request.ConfigRead) + self._write_string(key) + self._read_expect(Reply.ConfigData) + return self._read_string() + + def config_write(self, key, value): + self._write_header(Request.ConfigWrite) + self._write_string(key) + self._write_bytes(value) + ty = self._read_header() + if ty == Reply.Error: + raise IOError("Flash storage is full") + elif ty != Reply.Success: + raise IOError("Incorrect reply from device: {} (expected {})". + format(ty, Reply.Success)) + + def config_remove(self, key): + self._write_header(Request.ConfigRemove) + self._write_string(key) + self._read_expect(Reply.Success) + + def config_erase(self): + self._write_header(Request.ConfigErase) + self._read_expect(Reply.Success) + + def start_profiler(self, interval, edges_size, hits_size): + self._write_header(Request.StartProfiler) + self._write_int32(interval) + self._write_int32(edges_size) + self._write_int32(hits_size) + self._read_expect(Reply.Success) + + def stop_profiler(self): + self._write_header(Request.StopProfiler) + self._read_expect(Reply.Success) + + def stop_profiler(self): + self._write_header(Request.StopProfiler) + self._read_expect(Reply.Success) + + def get_profile(self): + self._write_header(Request.GetProfile) + self._read_expect(Reply.Profile) + + hits = {} + for _ in range(self._read_int32()): + addr = self._read_int32() + count = self._read_int32() + hits[addr] = count + + edges = {} + for _ in range(self._read_int32()): + caller = self._read_int32() + callee = self._read_int32() + count = self._read_int32() + edges[(caller, callee)] = count + + return hits, edges + def hotswap(self, firmware): self._write_header(Request.Hotswap) self._write_bytes(firmware) @@ -151,3 +225,6 @@ class CommMgmt: def reboot(self): self._write_header(Request.Reboot) self._read_expect(Reply.RebootImminent) + + def debug_allocator(self): + self._write_header(Request.DebugAllocator) diff --git a/artiq/coredevice/comm_moninj.py b/artiq/coredevice/comm_moninj.py index 25fd11ba0..08cbcc779 100644 --- a/artiq/coredevice/comm_moninj.py +++ b/artiq/coredevice/comm_moninj.py @@ -51,10 +51,14 @@ class CommMonInj: del self._reader del self._writer - def monitor(self, enable, channel, probe): + def monitor_probe(self, enable, channel, probe): packet = struct.pack(">bblb", 0, enable, channel, probe) self._writer.write(packet) + def monitor_injection(self, enable, channel, overrd): + packet = struct.pack(">bblb", 3, enable, channel, overrd) + self._writer.write(packet) + def inject(self, channel, override, value): packet = struct.pack(">blbb", 1, channel, override, value) self._writer.write(packet) diff --git a/artiq/coredevice/core.py b/artiq/coredevice/core.py index 8f96bb7c9..d150df596 100644 --- a/artiq/coredevice/core.py +++ b/artiq/coredevice/core.py @@ -11,7 +11,7 @@ from artiq.language.units import * from artiq.compiler.module import Module from artiq.compiler.embedding import Stitcher -from artiq.compiler.targets import OR1KTarget +from artiq.compiler.targets import OR1KTarget, CortexA9Target from artiq.coredevice.comm_kernel import CommKernel, CommKernelDummy # Import for side 
effects (creating the exception classes). @@ -43,6 +43,10 @@ class CompileError(Exception): def rtio_init() -> TNone: raise NotImplementedError("syscall not simulated") +@syscall(flags={"nounwind", "nowrite"}) +def rtio_get_destination_status(linkno: TInt32) -> TBool: + raise NotImplementedError("syscall not simulated") + @syscall(flags={"nounwind", "nowrite"}) def rtio_get_counter() -> TInt64: raise NotImplementedError("syscall not simulated") @@ -58,8 +62,6 @@ class Core: clocked at 125MHz and a SERDES multiplication factor of 8, the reference period is 1ns. The time machine unit is equal to this period. - :param external_clock: whether the core device should switch to its - external RTIO clock input instead of using its internal oscillator. :param ref_multiplier: ratio between the RTIO fine timestamp frequency and the RTIO coarse timestamp frequency (e.g. SERDES multiplication factor). @@ -67,14 +69,17 @@ class Core: kernel_invariants = { "core", "ref_period", "coarse_ref_period", "ref_multiplier", - "external_clock", } - def __init__(self, dmgr, host, ref_period, external_clock=False, - ref_multiplier=8): + def __init__(self, dmgr, host, ref_period, ref_multiplier=8, target="or1k"): self.ref_period = ref_period - self.external_clock = external_clock self.ref_multiplier = ref_multiplier + if target == "or1k": + self.target_cls = OR1KTarget + elif target == "cortexa9": + self.target_cls = CortexA9Target + else: + raise ValueError("Unsupported target") self.coarse_ref_period = ref_period*ref_multiplier if host is None: self.comm = CommKernelDummy() @@ -102,7 +107,7 @@ class Core: module = Module(stitcher, ref_period=self.ref_period, attribute_writeback=attribute_writeback) - target = OR1KTarget() + target = self.target_cls() library = target.compile_and_link([module]) stripped_library = target.strip(library) @@ -125,7 +130,6 @@ class Core: if self.first_run: self.comm.check_system_info() - self.comm.switch_clock(self.external_clock) self.first_run = False self.comm.load(kernel_library) @@ -136,7 +140,7 @@ class Core: @portable def seconds_to_mu(self, seconds): - """Converts seconds to the corresponding number of machine units + """Convert seconds to the corresponding number of machine units (RTIO cycles). :param seconds: time (in seconds) to convert. @@ -145,7 +149,7 @@ class Core: @portable def mu_to_seconds(self, mu): - """Converts machine units (RTIO cycles) to seconds. + """Convert machine units (RTIO cycles) to seconds. :param mu: cycle count to convert. """ @@ -153,8 +157,35 @@ class Core: @kernel def get_rtio_counter_mu(self): + """Retrieve the current value of the hardware RTIO timeline counter. + + As the timing of kernel code executed on the CPU is inherently + non-deterministic, the return value is by necessity only a lower bound + for the actual value of the hardware register at the instant when + execution resumes in the caller. + + For a more detailed description of these concepts, see :doc:`/rtio`. + """ return rtio_get_counter() + @kernel + def wait_until_mu(self, cursor_mu): + """Block execution until the hardware RTIO counter reaches the given + value (see :meth:`get_rtio_counter_mu`). + + If the hardware counter has already passed the given time, the function + returns immediately. + """ + while self.get_rtio_counter_mu() < cursor_mu: + pass + + @kernel + def get_rtio_destination_status(self, destination): + """Returns whether the specified RTIO destination is up. 
+ This is particularly useful in startup kernels to delay + startup until certain DRTIO destinations are up.""" + return rtio_get_destination_status(destination) + @kernel def reset(self): """Clear RTIO FIFOs, release RTIO PHY reset, and set the time cursor diff --git a/artiq/coredevice/dac34h84.py b/artiq/coredevice/dac34h84.py new file mode 100644 index 000000000..155096a1e --- /dev/null +++ b/artiq/coredevice/dac34h84.py @@ -0,0 +1,276 @@ +class DAC34H84: + """DAC34H84 settings and register map. + + For possible values, documentation, and explanation, see the DAC datasheet + at https://www.ti.com/lit/pdf/slas751 + """ + qmc_corr_ena = 0 # msb ab + qmc_offset_ena = 0 # msb ab + invsinc_ena = 0 # msb ab + interpolation = 1 # 2x + fifo_ena = 1 + alarm_out_ena = 1 + alarm_out_pol = 1 + clkdiv_sync_ena = 1 + + iotest_ena = 0 + cnt64_ena = 0 + oddeven_parity = 0 # even + single_parity_ena = 1 + dual_parity_ena = 0 + rev_interface = 0 + dac_complement = 0b0000 # msb A + alarm_fifo = 0b111 # msb 2-away + + dacclkgone_ena = 1 + dataclkgone_ena = 1 + collisiongone_ena = 1 + sif4_ena = 1 + mixer_ena = 0 + mixer_gain = 1 + nco_ena = 0 + revbus = 0 + twos = 1 + + coarse_dac = 9 # 18.75 mA, 0-15 + sif_txenable = 0 + + mask_alarm_from_zerochk = 0 + mask_alarm_fifo_collision = 0 + mask_alarm_fifo_1away = 0 + mask_alarm_fifo_2away = 0 + mask_alarm_dacclk_gone = 0 + mask_alarm_dataclk_gone = 0 + mask_alarm_output_gone = 0 + mask_alarm_from_iotest = 0 + mask_alarm_from_pll = 0 + mask_alarm_parity = 0b0000 # msb a + + qmc_offseta = 0 # 12b + fifo_offset = 2 # 0-7 + qmc_offsetb = 0 # 12b + + qmc_offsetc = 0 # 12b + + qmc_offsetd = 0 # 12b + + qmc_gaina = 0 # 11b + + cmix_fs8 = 0 + cmix_fs4 = 0 + cmix_fs2 = 0 + cmix_nfs4 = 0 + qmc_gainb = 0 # 11b + + qmc_gainc = 0 # 11b + + output_delayab = 0b00 + output_delaycd = 0b00 + qmc_gaind = 0 # 11b + + qmc_phaseab = 0 # 12b + + qmc_phasecd = 0 # 12b + + phase_offsetab = 0 # 16b + phase_offsetcd = 0 # 16b + phase_addab_lsb = 0 # 16b + phase_addab_msb = 0 # 16b + phase_addcd_lsb = 0 # 16b + phase_addcd_msb = 0 # 16b + + pll_reset = 0 + pll_ndivsync_ena = 1 + pll_ena = 1 + pll_cp = 0b01 # single charge pump + pll_p = 0b100 # p=4 + + pll_m2 = 1 # x2 + pll_m = 8 # m = 8 + pll_n = 0b0001 # n = 2 + pll_vcotune = 0b01 + + pll_vco = 0x3f # 4 GHz + bias_sleep = 0 + tsense_sleep = 0 + pll_sleep = 0 + clkrecv_sleep = 0 + dac_sleep = 0b0000 # msb a + + extref_ena = 0 + fuse_sleep = 1 + atest = 0b00000 # atest mode + + syncsel_qmcoffsetab = 0b1001 # sif_sync and register write + syncsel_qmcoffsetcd = 0b1001 # sif_sync and register write + syncsel_qmccorrab = 0b1001 # sif_sync and register write + syncsel_qmccorrcd = 0b1001 # sif_sync and register write + + syncsel_mixerab = 0b1001 # sif_sync and register write + syncsel_mixercd = 0b1001 # sif_sync and register write + syncsel_nco = 0b1000 # sif_sync + syncsel_fifo_input = 0b10 # external lvds istr + sif_sync = 1 + + syncsel_fifoin = 0b0010 # istr + syncsel_fifoout = 0b0100 # ostr + clkdiv_sync_sel = 0 # ostr + + path_a_sel = 0 + path_b_sel = 1 + path_c_sel = 2 + path_d_sel = 3 + # swap dac pairs (CDAB) for layout + # swap I-Q dacs for spectral inversion + dac_a_sel = 3 + dac_b_sel = 2 + dac_c_sel = 1 + dac_d_sel = 0 + + dac_sleep_en = 0b1111 # msb a + clkrecv_sleep_en = 1 + pll_sleep_en = 1 + lvds_data_sleep_en = 1 + lvds_control_sleep_en = 1 + temp_sense_sleep_en = 1 + bias_sleep_en = 1 + + data_dly = 2 + clk_dly = 0 + + ostrtodig_sel = 0 + ramp_ena = 0 + sifdac_ena = 0 + + grp_delaya = 0x00 + grp_delayb = 0x00 + + grp_delayc = 
0x00 + grp_delayd = 0x00 + + sifdac = 0 + + def __init__(self, updates=None): + if updates is None: + return + for key, value in updates.items(): + if not hasattr(self, key): + raise KeyError("invalid setting", key) + setattr(self, key, value) + + def get_mmap(self): + mmap = [] + mmap.append( + (0x00 << 16) | + (self.qmc_offset_ena << 14) | (self.qmc_corr_ena << 12) | + (self.interpolation << 8) | (self.fifo_ena << 7) | + (self.alarm_out_ena << 4) | (self.alarm_out_pol << 3) | + (self.clkdiv_sync_ena << 2) | (self.invsinc_ena << 0)) + mmap.append( + (0x01 << 16) | + (self.iotest_ena << 15) | (self.cnt64_ena << 12) | + (self.oddeven_parity << 11) | (self.single_parity_ena << 10) | + (self.dual_parity_ena << 9) | (self.rev_interface << 8) | + (self.dac_complement << 4) | (self.alarm_fifo << 1)) + mmap.append( + (0x02 << 16) | + (self.dacclkgone_ena << 14) | (self.dataclkgone_ena << 13) | + (self.collisiongone_ena << 12) | (self.sif4_ena << 7) | + (self.mixer_ena << 6) | (self.mixer_gain << 5) | + (self.nco_ena << 4) | (self.revbus << 3) | (self.twos << 1)) + mmap.append((0x03 << 16) | (self.coarse_dac << 12) | (self.sif_txenable << 0)) + mmap.append( + (0x07 << 16) | + (self.mask_alarm_from_zerochk << 15) | (1 << 14) | + (self.mask_alarm_fifo_collision << 13) | + (self.mask_alarm_fifo_1away << 12) | + (self.mask_alarm_fifo_2away << 11) | + (self.mask_alarm_dacclk_gone << 10) | + (self.mask_alarm_dataclk_gone << 9) | + (self.mask_alarm_output_gone << 8) | + (self.mask_alarm_from_iotest << 7) | (1 << 6) | + (self.mask_alarm_from_pll << 5) | (self.mask_alarm_parity << 1)) + mmap.append( + (0x08 << 16) | (self.qmc_offseta << 0)) + mmap.append( + (0x09 << 16) | (self.fifo_offset << 13) | (self.qmc_offsetb << 0)) + mmap.append((0x0a << 16) | (self.qmc_offsetc << 0)) + mmap.append((0x0b << 16) | (self.qmc_offsetd << 0)) + mmap.append((0x0c << 16) | (self.qmc_gaina << 0)) + mmap.append( + (0x0d << 16) | + (self.cmix_fs8 << 15) | (self.cmix_fs4 << 14) | + (self.cmix_fs2 << 12) | (self.cmix_nfs4 << 11) | + (self.qmc_gainb << 0)) + mmap.append((0x0e << 16) | (self.qmc_gainc << 0)) + mmap.append( + (0x0f << 16) | + (self.output_delayab << 14) | (self.output_delaycd << 12) | + (self.qmc_gaind << 0)) + mmap.append((0x10 << 16) | (self.qmc_phaseab << 0)) + mmap.append((0x11 << 16) | (self.qmc_phasecd << 0)) + mmap.append((0x12 << 16) | (self.phase_offsetab << 0)) + mmap.append((0x13 << 16) | (self.phase_offsetcd << 0)) + mmap.append((0x14 << 16) | (self.phase_addab_lsb << 0)) + mmap.append((0x15 << 16) | (self.phase_addab_msb << 0)) + mmap.append((0x16 << 16) | (self.phase_addcd_lsb << 0)) + mmap.append((0x17 << 16) | (self.phase_addcd_msb << 0)) + mmap.append( + (0x18 << 16) | + (0b001 << 13) | (self.pll_reset << 12) | + (self.pll_ndivsync_ena << 11) | (self.pll_ena << 10) | + (self.pll_cp << 6) | (self.pll_p << 3)) + mmap.append( + (0x19 << 16) | + (self.pll_m2 << 15) | (self.pll_m << 8) | (self.pll_n << 4) | + (self.pll_vcotune << 2)) + mmap.append( + (0x1a << 16) | + (self.pll_vco << 10) | (self.bias_sleep << 7) | + (self.tsense_sleep << 6) | + (self.pll_sleep << 5) | (self.clkrecv_sleep << 4) | + (self.dac_sleep << 0)) + mmap.append( + (0x1b << 16) | + (self.extref_ena << 15) | (self.fuse_sleep << 11) | + (self.atest << 0)) + mmap.append( + (0x1e << 16) | + (self.syncsel_qmcoffsetab << 12) | + (self.syncsel_qmcoffsetcd << 8) | + (self.syncsel_qmccorrab << 4) | + (self.syncsel_qmccorrcd << 0)) + mmap.append( + (0x1f << 16) | + (self.syncsel_mixerab << 12) | (self.syncsel_mixercd << 8) | + 
(self.syncsel_nco << 4) | (self.syncsel_fifo_input << 2) | + (self.sif_sync << 1)) + mmap.append( + (0x20 << 16) | + (self.syncsel_fifoin << 12) | (self.syncsel_fifoout << 8) | + (self.clkdiv_sync_sel << 0)) + mmap.append( + (0x22 << 16) | + (self.path_a_sel << 14) | (self.path_b_sel << 12) | + (self.path_c_sel << 10) | (self.path_d_sel << 8) | + (self.dac_a_sel << 6) | (self.dac_b_sel << 4) | + (self.dac_c_sel << 2) | (self.dac_d_sel << 0)) + mmap.append( + (0x23 << 16) | + (self.dac_sleep_en << 12) | (self.clkrecv_sleep_en << 11) | + (self.pll_sleep_en << 10) | (self.lvds_data_sleep_en << 9) | + (self.lvds_control_sleep_en << 8) | + (self.temp_sense_sleep_en << 7) | (1 << 6) | + (self.bias_sleep_en << 5) | (0x1f << 0)) + mmap.append( + (0x24 << 16) | (self.data_dly << 13) | (self.clk_dly << 10)) + mmap.append( + (0x2d << 16) | + (self.ostrtodig_sel << 14) | (self.ramp_ena << 13) | + (0x002 << 1) | (self.sifdac_ena << 0)) + mmap.append( + (0x2e << 16) | (self.grp_delaya << 8) | (self.grp_delayb << 0)) + mmap.append( + (0x2f << 16) | (self.grp_delayc << 8) | (self.grp_delayd << 0)) + mmap.append((0x30 << 16) | self.sifdac) + return mmap diff --git a/artiq/coredevice/dds.py b/artiq/coredevice/dds.py deleted file mode 100644 index 5c7293edf..000000000 --- a/artiq/coredevice/dds.py +++ /dev/null @@ -1,401 +0,0 @@ -""" -Drivers for direct digital synthesis (DDS) chips on RTIO. - -Output event replacement is not supported and issuing commands at the same -time is an error. -""" - - -from artiq.language.core import * -from artiq.language.types import * -from artiq.language.units import * -from artiq.coredevice.rtio import rtio_output -from artiq.coredevice.exceptions import DDSError - -from numpy import int32, int64 - - -_PHASE_MODE_DEFAULT = -1 -PHASE_MODE_CONTINUOUS = 0 -PHASE_MODE_ABSOLUTE = 1 -PHASE_MODE_TRACKING = 2 - - -class DDSParams: - def __init__(self): - self.bus_channel = 0 - self.channel = 0 - self.ftw = 0 - self.pow = 0 - self.phase_mode = 0 - self.amplitude = 0 - - -class BatchContextManager: - kernel_invariants = {"core", "core_dds", "params"} - - def __init__(self, core_dds): - self.core_dds = core_dds - self.core = self.core_dds.core - self.active = False - self.params = [DDSParams() for _ in range(16)] - self.count = 0 - self.ref_time = int64(0) - - @kernel - def __enter__(self): - """Starts a DDS command batch. All DDS commands are buffered - after this call, until ``batch_exit`` is called. - - The time of execution of the DDS commands is the time cursor position - when the batch is entered.""" - if self.active: - raise DDSError("DDS batch entered twice") - - self.active = True - self.count = 0 - self.ref_time = now_mu() - - @kernel - def append(self, bus_channel, channel, ftw, pow, phase_mode, amplitude): - if self.count == len(self.params): - raise DDSError("Too many commands in DDS batch") - - params = self.params[self.count] - params.bus_channel = bus_channel - params.channel = channel - params.ftw = ftw - params.pow = pow - params.phase_mode = phase_mode - params.amplitude = amplitude - self.count += 1 - - @kernel - def __exit__(self, type, value, traceback): - """Ends a DDS command batch. 
All buffered DDS commands are issued - on the bus.""" - if not self.active: - raise DDSError("DDS batch exited twice") - - self.active = False - at_mu(self.ref_time - self.core_dds.batch_duration_mu()) - for i in range(self.count): - param = self.params[i] - self.core_dds.program(self.ref_time, - param.bus_channel, param.channel, param.ftw, - param.pow, param.phase_mode, param.amplitude) - - -class DDSGroup: - """Core device Direct Digital Synthesis (DDS) driver. - - Gives access to the DDS functionality of the core device. - - :param sysclk: DDS system frequency. The DDS system clock must be a - phase-locked multiple of the RTIO clock. - """ - - kernel_invariants = {"core", "sysclk", "batch"} - - def __init__(self, dmgr, sysclk, core_device="core"): - self.core = dmgr.get(core_device) - self.sysclk = sysclk - self.batch = BatchContextManager(self) - - @kernel - def batch_duration_mu(self): - raise NotImplementedError - - @kernel - def init(self, bus_channel, channel): - raise NotImplementedError - - @kernel - def program(self, ref_time, bus_channel, channel, ftw, pow, phase_mode, amplitude): - raise NotImplementedError - - @kernel - def set(self, bus_channel, channel, ftw, pow, phase_mode, amplitude): - if self.batch.active: - self.batch.append(bus_channel, channel, ftw, pow, phase_mode, amplitude) - else: - ref_time = now_mu() - at_mu(ref_time - self.program_duration_mu) - self.program(ref_time, - bus_channel, channel, ftw, pow, phase_mode, amplitude) - - @portable(flags={"fast-math"}) - def frequency_to_ftw(self, frequency): - """Returns the frequency tuning word corresponding to the given - frequency. - """ - return round(float(int64(2)**32*frequency/self.sysclk)) - - @portable(flags={"fast-math"}) - def ftw_to_frequency(self, ftw): - """Returns the frequency corresponding to the given frequency tuning - word. - """ - return ftw*self.sysclk/int64(2)**32 - - @portable(flags={"fast-math"}) - def turns_to_pow(self, turns): - """Returns the phase offset word corresponding to the given phase - in turns.""" - return round(float(turns*2**self.pow_width)) - - @portable(flags={"fast-math"}) - def pow_to_turns(self, pow): - """Returns the phase in turns corresponding to the given phase offset - word.""" - return pow/2**self.pow_width - - @portable(flags={"fast-math"}) - def amplitude_to_asf(self, amplitude): - """Returns amplitude scale factor corresponding to given amplitude.""" - return round(float(amplitude*0x0fff)) - - @portable(flags={"fast-math"}) - def asf_to_amplitude(self, asf): - """Returns the amplitude corresponding to the given amplitude scale - factor.""" - return asf/0x0fff - - -class DDSChannel: - """Core device Direct Digital Synthesis (DDS) channel driver. - - Controls one DDS channel managed directly by the core device's runtime. - - This class should not be used directly, instead, use the chip-specific - drivers such as ``DDSChannelAD9914``. - - The time cursor is not modified by any function in this class. - - :param bus: name of the DDS bus device that this DDS is connected to. - :param channel: channel number of the DDS device to control. - """ - - kernel_invariants = { - "core", "core_dds", "bus_channel", "channel", - } - - def __init__(self, dmgr, bus_channel, channel, core_dds_device="core_dds"): - self.core_dds = dmgr.get(core_dds_device) - self.core = self.core_dds.core - self.bus_channel = bus_channel - self.channel = channel - self.phase_mode = PHASE_MODE_CONTINUOUS - - @kernel - def init(self): - """Resets and initializes the DDS channel. 
- - This needs to be done for each DDS channel before it can be used, and - it is recommended to use the startup kernel for this. - - This function cannot be used in a batch; the correct way of - initializing multiple DDS channels is to call this function - sequentially with a delay between the calls. 2ms provides a good - timing margin.""" - self.core_dds.init(self.bus_channel, self.channel) - - @kernel - def set_phase_mode(self, phase_mode): - """Sets the phase mode of the DDS channel. Supported phase modes are: - - * ``PHASE_MODE_CONTINUOUS``: the phase accumulator is unchanged when - switching frequencies. The DDS phase is the sum of the phase - accumulator and the phase offset. The only discrete jumps in the - DDS output phase come from changes to the phase offset. - - * ``PHASE_MODE_ABSOLUTE``: the phase accumulator is reset when - switching frequencies. Thus, the phase of the DDS at the time of - the frequency change is equal to the phase offset. - - * ``PHASE_MODE_TRACKING``: when switching frequencies, the phase - accumulator is set to the value it would have if the DDS had been - running at the specified frequency since the start of the - experiment. - """ - self.phase_mode = phase_mode - - @kernel - def set_mu(self, frequency, phase=0, phase_mode=_PHASE_MODE_DEFAULT, - amplitude=0x0fff): - """Sets the DDS channel to the specified frequency and phase. - - This uses machine units (FTW and POW). The frequency tuning word width - is 32, whereas the phase offset word width depends on the type of DDS - chip and can be retrieved via the ``pow_width`` attribute. The amplitude - width is 12. - - The "frequency update" pulse is sent to the DDS with a fixed latency - with respect to the current position of the time cursor. - - :param frequency: frequency to generate. - :param phase: adds an offset, in turns, to the phase. - :param phase_mode: if specified, overrides the default phase mode set - by ``set_phase_mode`` for this call. - """ - if phase_mode == _PHASE_MODE_DEFAULT: - phase_mode = self.phase_mode - self.core_dds.set(self.bus_channel, self.channel, frequency, phase, phase_mode, amplitude) - - @kernel - def set(self, frequency, phase=0.0, phase_mode=_PHASE_MODE_DEFAULT, - amplitude=1.0): - """Like ``set_mu``, but uses Hz and turns.""" - self.set_mu(self.core_dds.frequency_to_ftw(frequency), - self.core_dds.turns_to_pow(phase), phase_mode, - self.core_dds.amplitude_to_asf(amplitude)) - - -AD9914_REG_CFR1L = 0x01 -AD9914_REG_CFR1H = 0x03 -AD9914_REG_CFR2L = 0x05 -AD9914_REG_CFR2H = 0x07 -AD9914_REG_CFR3L = 0x09 -AD9914_REG_CFR3H = 0x0b -AD9914_REG_CFR4L = 0x0d -AD9914_REG_CFR4H = 0x0f -AD9914_REG_FTWL = 0x2d -AD9914_REG_FTWH = 0x2f -AD9914_REG_POW = 0x31 -AD9914_REG_ASF = 0x33 -AD9914_REG_USR0 = 0x6d -AD9914_FUD = 0x80 -AD9914_GPIO = 0x81 - - -class DDSGroupAD9914(DDSGroup): - """Driver for AD9914 DDS chips. 
See ``DDSGroup`` for a description - of the functionality.""" - kernel_invariants = DDSGroup.kernel_invariants.union({ - "pow_width", "rtio_period_mu", "sysclk_per_mu", "write_duration_mu", "dac_cal_duration_mu", - "init_duration_mu", "init_sync_duration_mu", "program_duration_mu", - "first_dds_bus_channel", "dds_channel_count", "continuous_phase_comp" - }) - - pow_width = 16 - - def __init__(self, *args, first_dds_bus_channel, dds_bus_count, dds_channel_count, **kwargs): - super().__init__(*args, **kwargs) - - self.first_dds_bus_channel = first_dds_bus_channel - self.dds_bus_count = dds_bus_count - self.dds_channel_count = dds_channel_count - - self.rtio_period_mu = int64(8) - self.sysclk_per_mu = int32(self.sysclk * self.core.ref_period) - - self.write_duration_mu = 5 * self.rtio_period_mu - self.dac_cal_duration_mu = 147000 * self.rtio_period_mu - self.init_duration_mu = 8 * self.write_duration_mu + self.dac_cal_duration_mu - self.init_sync_duration_mu = 16 * self.write_duration_mu + 2 * self.dac_cal_duration_mu - self.program_duration_mu = 6 * self.write_duration_mu - - self.continuous_phase_comp = [0] * (self.dds_bus_count * self.dds_channel_count) - - @kernel - def batch_duration_mu(self): - return self.batch.count * (self.program_duration_mu + - self.write_duration_mu) # + FUD time - - @kernel - def write(self, bus_channel, addr, data): - rtio_output(now_mu(), bus_channel, addr, data) - delay_mu(self.write_duration_mu) - - @kernel - def init(self, bus_channel, channel): - delay_mu(-self.init_duration_mu) - self.write(bus_channel, AD9914_GPIO, (1 << channel) << 1); - - self.write(bus_channel, AD9914_REG_CFR1H, 0x0000) # Enable cosine output - self.write(bus_channel, AD9914_REG_CFR2L, 0x8900) # Enable matched latency - self.write(bus_channel, AD9914_REG_CFR2H, 0x0080) # Enable profile mode - self.write(bus_channel, AD9914_REG_ASF, 0x0fff) # Set amplitude to maximum - self.write(bus_channel, AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - delay_mu(self.dac_cal_duration_mu) - self.write(bus_channel, AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - - @kernel - def init_sync(self, bus_channel, channel, sync_delay): - delay_mu(-self.init_sync_duration_mu) - self.write(bus_channel, AD9914_GPIO, (1 << channel) << 1) - - self.write(bus_channel, AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - delay_mu(self.dac_cal_duration_mu) - self.write(bus_channel, AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - self.write(bus_channel, AD9914_REG_CFR2L, 0x8b00) # Enable matched latency and sync_out - self.write(bus_channel, AD9914_FUD, 0) - # Set cal with sync and set sync_out and sync_in delay - self.write(bus_channel, AD9914_REG_USR0, 0x0840 | (sync_delay & 0x3f)) - self.write(bus_channel, AD9914_FUD, 0) - self.write(bus_channel, AD9914_REG_CFR4H, 0x0105) # Enable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - delay_mu(self.dac_cal_duration_mu) - self.write(bus_channel, AD9914_REG_CFR4H, 0x0005) # Disable DAC calibration - self.write(bus_channel, AD9914_FUD, 0) - self.write(bus_channel, AD9914_REG_CFR1H, 0x0000) # Enable cosine output - self.write(bus_channel, AD9914_REG_CFR2H, 0x0080) # Enable profile mode - self.write(bus_channel, AD9914_REG_ASF, 0x0fff) # Set amplitude to maximum - self.write(bus_channel, AD9914_FUD, 0) - - @kernel - def program(self, ref_time, bus_channel, channel, ftw, pow, phase_mode, 
amplitude): - self.write(bus_channel, AD9914_GPIO, (1 << channel) << 1) - - self.write(bus_channel, AD9914_REG_FTWL, ftw & 0xffff) - self.write(bus_channel, AD9914_REG_FTWH, (ftw >> 16) & 0xffff) - - # We need the RTIO fine timestamp clock to be phase-locked - # to DDS SYSCLK, and divided by an integer self.sysclk_per_mu. - dds_bus_index = bus_channel - self.first_dds_bus_channel - phase_comp_index = dds_bus_index * self.dds_channel_count + channel - if phase_mode == PHASE_MODE_CONTINUOUS: - # Do not clear phase accumulator on FUD - # Disable autoclear phase accumulator and enables OSK. - self.write(bus_channel, AD9914_REG_CFR1L, 0x0108) - pow += self.continuous_phase_comp[phase_comp_index] - else: - # Clear phase accumulator on FUD - # Enable autoclear phase accumulator and enables OSK. - self.write(bus_channel, AD9914_REG_CFR1L, 0x2108) - fud_time = now_mu() + 2 * self.write_duration_mu - pow -= int32((ref_time - fud_time) * self.sysclk_per_mu * ftw >> (32 - self.pow_width)) - if phase_mode == PHASE_MODE_TRACKING: - pow += int32(ref_time * self.sysclk_per_mu * ftw >> (32 - self.pow_width)) - self.continuous_phase_comp[phase_comp_index] = pow - - self.write(bus_channel, AD9914_REG_POW, pow) - self.write(bus_channel, AD9914_REG_ASF, amplitude) - self.write(bus_channel, AD9914_FUD, 0) - - -class DDSChannelAD9914(DDSChannel): - """Driver for AD9914 DDS chips. See ``DDSChannel`` for a description - of the functionality.""" - @kernel - def init_sync(self, sync_delay=0): - """Resets and initializes the DDS channel as well as configures - the AD9914 DDS for synchronisation. The synchronisation procedure - follows the steps outlined in the AN-1254 application note. - - This needs to be done for each DDS channel before it can be used, and - it is recommended to use the startup kernel for this. - - This function cannot be used in a batch; the correct way of - initializing multiple DDS channels is to call this function - sequentially with a delay between the calls. 10ms provides a good - timing margin. - - :param sync_delay: integer from 0 to 0x3f that sets the value of - SYNC_OUT (bits 3-5) and SYNC_IN (bits 0-2) delay ADJ bits. - """ - self.core_dds.init_sync(self.bus_channel, self.channel, sync_delay) diff --git a/artiq/coredevice/dma.py b/artiq/coredevice/dma.py index eafe98e6c..261a6bcfe 100644 --- a/artiq/coredevice/dma.py +++ b/artiq/coredevice/dma.py @@ -34,7 +34,7 @@ def dma_playback(timestamp: TInt64, ptr: TInt32) -> TNone: class DMARecordContextManager: - """Context manager returned by ``CoreDMA.record()``. + """Context manager returned by :meth:`CoreDMA.record()`. Upon entering, starts recording a DMA trace. All RTIO operations are redirected to a newly created DMA buffer after this call, and ``now`` diff --git a/artiq/coredevice/drtio_dbg.py b/artiq/coredevice/drtio_dbg.py deleted file mode 100644 index 4c92f0926..000000000 --- a/artiq/coredevice/drtio_dbg.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -DRTIO debugging functions. - -Those syscalls are intended for ARTIQ developers only. 
-""" - -from artiq.language.core import syscall -from artiq.language.types import TTuple, TInt32, TInt64, TNone - - -@syscall(flags={"nounwind", "nowrite"}) -def drtio_get_channel_state(channel: TInt32) -> TTuple([TInt32, TInt64]): - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def drtio_reset_channel_state(channel: TInt32) -> TNone: - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def drtio_get_fifo_space(channel: TInt32) -> TNone: - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def drtio_get_packet_counts(linkno: TInt32) -> TTuple([TInt32, TInt32]): - raise NotImplementedError("syscall not simulated") - -@syscall(flags={"nounwind", "nowrite"}) -def drtio_get_fifo_space_req_count(linkno: TInt32) -> TInt32: - raise NotImplementedError("syscall not simulated") diff --git a/artiq/coredevice/edge_counter.py b/artiq/coredevice/edge_counter.py new file mode 100644 index 000000000..e7782064e --- /dev/null +++ b/artiq/coredevice/edge_counter.py @@ -0,0 +1,236 @@ +"""Driver for RTIO-enabled TTL edge counter. + +Like for the TTL input PHYs, sensitivity can be configured over RTIO +(``gate_rising()``, etc.). In contrast to the former, however, the count is +accumulated in gateware, and only a single input event is generated at the end +of each gate period:: + + with parallel: + doppler_cool() + self.pmt_counter.gate_rising(1 * ms) + + with parallel: + readout() + self.pmt_counter.gate_rising(100 * us) + + print("Doppler cooling counts:", self.pmt_counter.fetch_count()) + print("Readout counts:", self.pmt_counter.fetch_count()) + +For applications where the timestamps of the individual input events are not +required, this has two advantages over ``TTLInOut.count()`` beyond raw +throughput. First, it is easy to count events during multiple separate periods +without blocking to read back counts in between, as illustrated in the above +example. Secondly, as each count total only takes up a single input event, it +is much easier to acquire counts on several channels in parallel without +risking input FIFO overflows:: + + # Using the TTLInOut driver, pmt_1 input events are only processed + # after pmt_0 is done counting. To avoid RTIOOverflows, a round-robin + # scheme would have to be implemented manually. + + with parallel: + self.pmt_0.gate_rising(10 * ms) + self.pmt_1.gate_rising(10 * ms) + + counts_0 = self.pmt_0.count(now_mu()) # blocks + counts_1 = self.pmt_1.count(now_mu()) + + # + + # Using gateware counters, only a single input event each is + # generated, greatly reducing the load on the input FIFOs: + + with parallel: + self.pmt_0_counter.gate_rising(10 * ms) + self.pmt_1_counter.gate_rising(10 * ms) + + counts_0 = self.pmt_0_counter.fetch_count() # blocks + counts_1 = self.pmt_1_counter.fetch_count() + +See :mod:`artiq.gateware.rtio.phy.edge_counter` and +:meth:`artiq.gateware.eem.DIO.add_std` for the gateware components. 
+""" + +from artiq.language.core import * +from artiq.language.types import * +from artiq.coredevice.rtio import (rtio_output, rtio_input_data, + rtio_input_timestamped_data) +from numpy import int32, int64 + +CONFIG_COUNT_RISING = 0b0001 +CONFIG_COUNT_FALLING = 0b0010 +CONFIG_SEND_COUNT_EVENT = 0b0100 +CONFIG_RESET_TO_ZERO = 0b1000 + + +class CounterOverflow(Exception): + """Raised when an edge counter value is read which indicates that the + counter might have overflowed.""" + pass + + +class EdgeCounter: + """RTIO TTL edge counter driver driver. + + Like for regular TTL inputs, timeline periods where the counter is + sensitive to a chosen set of input transitions can be specified. Unlike the + former, however, the specified edges do not create individual input events; + rather, the total count can be requested as a single input event from the + core (typically at the end of the gate window). + + :param channel: The RTIO channel of the gateware phy. + :param gateware_width: The width of the gateware counter register, in + bits. This is only used for overflow handling; to change the size, + the gateware needs to be rebuilt. + """ + + kernel_invariants = {"core", "channel", "counter_max"} + + def __init__(self, dmgr, channel, gateware_width=31, core_device="core"): + self.core = dmgr.get(core_device) + self.channel = channel + self.counter_max = (1 << (gateware_width - 1)) - 1 + + @kernel + def gate_rising(self, duration): + """Count rising edges for the given duration and request the total at + the end. + + The counter is reset at the beginning of the gate period. Use + :meth:`set_config` directly for more detailed control. + + :param duration: The duration for which the gate is to stay open. + + :return: The timestamp at the end of the gate period, in machine units. + """ + return self.gate_rising_mu(self.core.seconds_to_mu(duration)) + + @kernel + def gate_falling(self, duration): + """Count falling edges for the given duration and request the total at + the end. + + The counter is reset at the beginning of the gate period. Use + :meth:`set_config` directly for more detailed control. + + :param duration: The duration for which the gate is to stay open. + + :return: The timestamp at the end of the gate period, in machine units. + """ + return self.gate_falling_mu(self.core.seconds_to_mu(duration)) + + @kernel + def gate_both(self, duration): + """Count both rising and falling edges for the given duration, and + request the total at the end. + + The counter is reset at the beginning of the gate period. Use + :meth:`set_config` directly for more detailed control. + + :param duration: The duration for which the gate is to stay open. + + :return: The timestamp at the end of the gate period, in machine units. 
+ """ + return self.gate_both_mu(self.core.seconds_to_mu(duration)) + + @kernel + def gate_rising_mu(self, duration_mu): + """See :meth:`gate_rising`.""" + return self._gate_mu( + duration_mu, count_rising=True, count_falling=False) + + @kernel + def gate_falling_mu(self, duration_mu): + """See :meth:`gate_falling`.""" + return self._gate_mu( + duration_mu, count_rising=False, count_falling=True) + + @kernel + def gate_both_mu(self, duration_mu): + """See :meth:`gate_both`.""" + return self._gate_mu( + duration_mu, count_rising=True, count_falling=True) + + @kernel + def _gate_mu(self, duration_mu, count_rising, count_falling): + self.set_config( + count_rising=count_rising, + count_falling=count_falling, + send_count_event=False, + reset_to_zero=True) + delay_mu(duration_mu) + self.set_config( + count_rising=False, + count_falling=False, + send_count_event=True, + reset_to_zero=False) + return now_mu() + + @kernel + def set_config(self, count_rising: TBool, count_falling: TBool, + send_count_event: TBool, reset_to_zero: TBool): + """Emit an RTIO event at the current timeline position to set the + gateware configuration. + + For most use cases, the `gate_*` wrappers will be more convenient. + + :param count_rising: Whether to count rising signal edges. + :param count_falling: Whether to count falling signal edges. + :param send_count_event: If `True`, an input event with the current + counter value is generated on the next clock cycle (once). + :param reset_to_zero: If `True`, the counter value is reset to zero on + the next clock cycle (once). + """ + config = int32(0) + if count_rising: + config |= CONFIG_COUNT_RISING + if count_falling: + config |= CONFIG_COUNT_FALLING + if send_count_event: + config |= CONFIG_SEND_COUNT_EVENT + if reset_to_zero: + config |= CONFIG_RESET_TO_ZERO + rtio_output(self.channel << 8, config) + + @kernel + def fetch_count(self) -> TInt32: + """Wait for and return count total from previously requested input + event. + + It is valid to trigger multiple gate periods without immediately + reading back the count total; the results will be returned in order on + subsequent fetch calls. + + This function blocks until a result becomes available. + """ + count = rtio_input_data(self.channel) + if count == self.counter_max: + raise CounterOverflow( + "Input edge counter overflow on RTIO channel {0}", + int64(self.channel)) + return count + + @kernel + def fetch_timestamped_count( + self, timeout_mu=int64(-1)) -> TTuple([TInt64, TInt32]): + """Wait for and return the timestamp and count total of a previously + requested input event. + + It is valid to trigger multiple gate periods without immediately + reading back the count total; the results will be returned in order on + subsequent fetch calls. + + This function blocks until a result becomes available or the given + timeout elapses. + + :return: A tuple of timestamp (-1 if timeout elapsed) and counter + value. (The timestamp is that of the requested input event – + typically the gate closing time – and not that of any input edges.)
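The timeout semantics above lend themselves to a simple polling pattern. The following is a hedged sketch only; the `pmt_counter` device name, the gate length and the 10 ms slack are illustrative assumptions and not part of this patch:

```python
from artiq.experiment import *


class CountWithDeadline(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("pmt_counter")  # EdgeCounter instance; name assumed

    @kernel
    def run(self):
        self.core.reset()
        gate_end_mu = self.pmt_counter.gate_rising(1*ms)
        # Wait for the count event, but give up 10 ms after the gate closes
        # instead of blocking forever.
        timestamp, counts = self.pmt_counter.fetch_timestamped_count(
            timeout_mu=gate_end_mu + self.core.seconds_to_mu(10*ms))
        if timestamp < 0:
            print("no count event received before the deadline")
        else:
            print(counts)
```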
+ """ + timestamp, count = rtio_input_timestamped_data(timeout_mu, + self.channel) + if count == self.counter_max: + raise CounterOverflow( + "Input edge counter overflow on RTIO channel {0}", + int64(self.channel)) + return timestamp, count diff --git a/artiq/coredevice/exceptions.py b/artiq/coredevice/exceptions.py index 44d0af86e..cfa2ce85a 100644 --- a/artiq/coredevice/exceptions.py +++ b/artiq/coredevice/exceptions.py @@ -10,6 +10,8 @@ from artiq.coredevice.runtime import source_loader ZeroDivisionError = builtins.ZeroDivisionError ValueError = builtins.ValueError IndexError = builtins.IndexError +RuntimeError = builtins.RuntimeError +AssertionError = builtins.AssertionError class CoreException: @@ -71,20 +73,13 @@ class CacheError(Exception): class RTIOUnderflow(Exception): - """Raised when the CPU fails to submit a RTIO event early enough - (with respect to the event's timestamp). + """Raised when the CPU or DMA core fails to submit a RTIO event early + enough (with respect to the event's timestamp). The offending event is discarded and the RTIO core keeps operating. """ artiq_builtin = True -class RTIOSequenceError(Exception): - """Raised when an event is submitted on a given channel with a timestamp - not larger than the previous one. - - The offending event is discarded and the RTIO core keeps operating. - """ - artiq_builtin = True class RTIOOverflow(Exception): """Raised when at least one event could not be registered into the RTIO @@ -96,26 +91,28 @@ class RTIOOverflow(Exception): """ artiq_builtin = True + +class RTIODestinationUnreachable(Exception): + """Raised when an RTIO operation could not be completed due to a DRTIO link + being down. + """ + artiq_builtin = True + + class DMAError(Exception): """Raised when performing an invalid DMA operation.""" artiq_builtin = True -class DDSError(Exception): - """Raised when attempting to start a DDS batch while already in a batch, - when too many commands are batched, and when DDS channel settings are - incorrect. - """ - -class WatchdogExpired(Exception): - """Raised when a watchdog expires.""" class ClockFailure(Exception): """Raised when RTIO PLL has lost lock.""" + class I2CError(Exception): """Raised when a I2C transaction fails.""" pass + class SPIError(Exception): """Raised when a SPI transaction fails.""" pass diff --git a/artiq/coredevice/fastino.py b/artiq/coredevice/fastino.py new file mode 100644 index 000000000..73fcfdf38 --- /dev/null +++ b/artiq/coredevice/fastino.py @@ -0,0 +1,192 @@ +"""RTIO driver for the Fastino 32-channel, 16-bit, 2.5 MS/s per channel +streaming DAC. +""" + +from artiq.language.core import kernel, portable, delay +from artiq.coredevice.rtio import (rtio_output, rtio_output_wide, + rtio_input_data) +from artiq.language.units import us +from artiq.language.types import TInt32, TList +from numpy import int32 + + +class Fastino: + """Fastino 32-channel, 16-bit, 2.5 MS/s per channel streaming DAC + + The RTIO PHY supports staging DAC data before transmitting them by writing + to the DAC RTIO addresses. If a channel is not "held" by setting its bit + using :meth:`set_hold`, the next frame will contain the update. For the + DACs held, the update is triggered explicitly by setting the corresponding + bit using :meth:`set_update`. Update is self-clearing. This enables atomic + DAC updates synchronized to a frame edge. + + The `log2_width=0` RTIO layout uses one DAC channel per RTIO address and a + dense RTIO address space. The RTIO words are narrow (32 bit) and + few-channel updates are efficient.
There is the least amount of DAC state + tracking in kernels, at the cost of more DMA and RTIO data. + The setting here and in the RTIO PHY (gateware) must match. + + Other `log2_width` (up to `log2_width=5`) settings pack multiple + (in powers of two) DAC channels into one group and into one RTIO write. + The RTIO data width increases accordingly. The `log2_width` + LSBs of the RTIO address for a DAC channel write must be zero and the + address space is sparse. For `log2_width=5` the RTIO data is 512 bit wide. + + If `log2_width` is zero, the :meth:`set_dac`/:meth:`set_dac_mu` interface + must be used. If non-zero, the :meth:`set_group`/:meth:`set_group_mu` + interface must be used. + + :param channel: RTIO channel number + :param core_device: Core device name (default: "core") + :param log2_width: Width of DAC channel group (logarithm base 2). + Value must match the corresponding value in the RTIO PHY (gateware). + """ + kernel_invariants = {"core", "channel", "width"} + + def __init__(self, dmgr, channel, core_device="core", log2_width=0): + self.channel = channel << 8 + self.core = dmgr.get(core_device) + self.width = 1 << log2_width + + @kernel + def init(self): + """Initialize the device. + + This clears reset, unsets DAC_CLR, enables AFE_PWR, + clears error counters, then enables error counting + """ + self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=1) + delay(1*us) + self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=0) + delay(1*us) + + @kernel + def write(self, addr, data): + """Write data to a Fastino register. + + :param addr: Address to write to. + :param data: Data to write. + """ + rtio_output(self.channel | addr, data) + + @kernel + def read(self, addr): + """Read from Fastino register. + + TODO: untested + + :param addr: Address to read from. + :return: The data read. + """ + rtio_output(self.channel | addr | 0x80) + return rtio_input_data(self.channel >> 8) + + @kernel + def set_dac_mu(self, dac, data): + """Write DAC data in machine units. + + :param dac: DAC channel to write to (0-31). + :param data: DAC word to write, 16 bit unsigned integer, in machine + units. + """ + self.write(dac, data) + + @kernel + def set_group_mu(self, dac: TInt32, data: TList(TInt32)): + """Write a group of DAC channels in machine units. + + :param dac: First channel in DAC channel group (0-31). The `log2_width` + LSBs must be zero. + :param data: List of DAC data pairs (2x16 bit unsigned) to write, + in machine units. Data exceeding group size is ignored. + If the list length is less than group size, the remaining + DAC channels within the group are cleared to 0 (machine units). + """ + if dac & (self.width - 1): + raise ValueError("Group index LSBs must be zero") + rtio_output_wide(self.channel | dac, data) + + @portable + def voltage_to_mu(self, voltage): + """Convert SI Volts to DAC machine units. + + :param voltage: Voltage in SI Volts. + :return: DAC data word in machine units, 16 bit integer. + """ + data = int(round((0x8000/10.)*voltage)) + 0x8000 + if data < 0 or data > 0xffff: + raise ValueError("DAC voltage out of bounds") + return data + + @portable + def voltage_group_to_mu(self, voltage, data): + """Convert SI Volts to packed DAC channel group machine units. + + :param voltage: List of SI Volt voltages. + :param data: List of DAC channel data pairs to write to. + Half the length of `voltage`. 
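To make the grouped interface concrete, here is a hedged sketch of driving a four-channel group; the device name `fastino` and a `log2_width=2` setting in the device database are assumptions for illustration:

```python
from artiq.experiment import *


class FastinoGroupDemo(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("fastino")  # Fastino with log2_width=2 assumed

    @kernel
    def run(self):
        self.core.reset()
        self.fastino.init()
        delay(1*ms)
        # With log2_width=2 a group spans 4 channels and the first channel
        # index must be a multiple of 4; one RTIO write updates all of them.
        self.fastino.set_group(0, [0.0, 1.25, -2.5, 5.0])
```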
+ """ + for i in range(len(voltage)): + v = self.voltage_to_mu(voltage[i]) + if i & 1: + v = data[i // 2] | (v << 16) + data[i // 2] = v + + @kernel + def set_dac(self, dac, voltage): + """Set DAC data to given voltage. + + :param dac: DAC channel (0-31). + :param voltage: Desired output voltage. + """ + self.write(dac, self.voltage_to_mu(voltage)) + + @kernel + def set_group(self, dac, voltage): + """Set DAC group data to given voltage. + + :param dac: DAC channel (0-31). + :param voltage: Desired output voltage. + """ + data = [int32(0)] * (len(voltage) // 2) + self.voltage_group_to_mu(voltage, data) + self.set_group_mu(dac, data) + + @kernel + def update(self, update): + """Schedule channels for update. + + :param update: Bit mask of channels to update (32 bit). + """ + self.write(0x20, update) + + @kernel + def set_hold(self, hold): + """Set channels to manual update. + + :param hold: Bit mask of channels to hold (32 bit). + """ + self.write(0x21, hold) + + @kernel + def set_cfg(self, reset=0, afe_power_down=0, dac_clr=0, clr_err=0): + """Set configuration bits. + + :param reset: Reset SPI PLL and SPI clock domain. + :param afe_power_down: Disable AFE power. + :param dac_clr: Assert all 32 DAC_CLR signals setting all DACs to + mid-scale (0 V). + :param clr_err: Clear error counters and PLL reset indicator. + This clears the sticky red error LED. Must be cleared to enable + error counting. + """ + self.write(0x22, (reset << 0) | (afe_power_down << 1) | + (dac_clr << 2) | (clr_err << 3)) + + @kernel + def set_leds(self, leds): + """Set the green user-defined LEDs + + :param leds: LED status, 8 bit integer each bit corresponding to one + green LED. + """ + self.write(0x23, leds) diff --git a/artiq/coredevice/fmcdio_vhdci_eem.py b/artiq/coredevice/fmcdio_vhdci_eem.py new file mode 100644 index 000000000..bddb55812 --- /dev/null +++ b/artiq/coredevice/fmcdio_vhdci_eem.py @@ -0,0 +1,51 @@ +# Definitions for using the "FMC DIO 32ch LVDS a" card with the VHDCI-EEM breakout v1.1 + +eem_fmc_connections = { + 0: [0, 8, 2, 3, 4, 5, 6, 7], + 1: [1, 9, 10, 11, 12, 13, 14, 15], + 2: [17, 16, 24, 19, 20, 21, 22, 23], + 3: [18, 25, 26, 27, 28, 29, 30, 31], +} + + +def shiftreg_bits(eem, out_pins): + """ + Returns the bits that have to be set in the FMC card direction + shift register for the given EEM. + + Takes a set of pin numbers (0-7) at the EEM. Return values + of this function for different EEMs should be ORed together. 
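As a hedged illustration of the ORing described above, a complete direction word for a hypothetical crate could be built from the pin sets defined later in this module; the EEM assignment below is an assumption, the actual one depends on the cabling:

```python
from artiq.coredevice import fmcdio_vhdci_eem as fmcdio

# Hypothetical assignment: DIO banks on EEM0/EEM1, Urukul on EEM2,
# Urukul AUX on EEM3.
shiftreg_word = (
    fmcdio.shiftreg_bits(0, fmcdio.dio_bank0_out_pins) |
    fmcdio.shiftreg_bits(1, fmcdio.dio_bank1_out_pins) |
    fmcdio.shiftreg_bits(2, fmcdio.urukul_out_pins) |
    fmcdio.shiftreg_bits(3, fmcdio.urukul_aux_out_pins)
)
```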
+ """ + r = 0 + for i in range(8): + if i not in out_pins: + lvds_line = eem_fmc_connections[eem][i] + # lines are swapped in pairs to ease PCB routing + # at the shift register + shift = lvds_line ^ 1 + r |= 1 << shift + return r + + +dio_bank0_out_pins = set(range(4)) +dio_bank1_out_pins = set(range(4, 8)) +urukul_out_pins = { + 0, # clk + 1, # mosi + 3, 4, 5, # cs_n + 6, # io_update + 7, # dds_reset +} +urukul_aux_out_pins = { + 4, # sw0 + 5, # sw1 + 6, # sw2 + 7, # sw3 +} +zotino_out_pins = { + 0, # clk + 1, # mosi + 3, 4, # cs_n + 5, # ldac_n + 7, # clr_n +} diff --git a/artiq/coredevice/grabber.py b/artiq/coredevice/grabber.py new file mode 100644 index 000000000..6f29debe3 --- /dev/null +++ b/artiq/coredevice/grabber.py @@ -0,0 +1,105 @@ +from numpy import int32, int64 + +from artiq.language.core import * +from artiq.language.types import * +from artiq.coredevice.rtio import rtio_output, rtio_input_data + + +class OutOfSyncException(Exception): + """Raised when an incorrect number of ROI engine outputs has been + retrieved from the RTIO input FIFO.""" + pass + + +class Grabber: + """Driver for the Grabber camera interface.""" + kernel_invariants = {"core", "channel_base", "sentinel"} + + def __init__(self, dmgr, channel_base, res_width=12, count_shift=0, + core_device="core"): + self.core = dmgr.get(core_device) + self.channel_base = channel_base + + count_width = min(31, 2*res_width + 16 - count_shift) + # This value is inserted by the gateware to mark the start of a series of + # ROI engine outputs for one video frame. + self.sentinel = int32(int64(2**count_width)) + + @kernel + def setup_roi(self, n, x0, y0, x1, y1): + """ + Defines the coordinates of a ROI. + + The coordinates are set around the current position of the RTIO time + cursor. + + The user must keep the ROI engine disabled for the duration of more + than one video frame after calling this function, as the output + generated for that video frame is undefined. + + Advances the timeline by 4 coarse RTIO cycles. + """ + c = int64(self.core.ref_multiplier) + rtio_output((self.channel_base << 8) | (4*n+0), x0) + delay_mu(c) + rtio_output((self.channel_base << 8) | (4*n+1), y0) + delay_mu(c) + rtio_output((self.channel_base << 8) | (4*n+2), x1) + delay_mu(c) + rtio_output((self.channel_base << 8) | (4*n+3), y1) + delay_mu(c) + + @kernel + def gate_roi(self, mask): + """ + Defines which ROI engines produce input events. + + At the end of each video frame, the output from each ROI engine that + has been enabled by the mask is enqueued into the RTIO input FIFO. + + This function sets the mask at the current position of the RTIO time + cursor. + + Setting the mask using this function is atomic; in other words, + if the system is in the middle of processing a frame and the mask + is changed, the processing will complete using the value of the mask + that it started with. + + :param mask: bitmask enabling or disabling each ROI engine. + """ + rtio_output((self.channel_base + 1) << 8, mask) + + @kernel + def gate_roi_pulse(self, mask, dt): + """Sets a temporary mask for the specified duration (in seconds), before + disabling all ROI engines.""" + self.gate_roi(mask) + delay(dt) + self.gate_roi(0) + + @kernel + def input_mu(self, data): + """ + Retrieves the accumulated values for one frame from the ROI engines. + Blocks until values are available. + + The input list must be a list of integers of the same length as there + are enabled ROI engines. 
This method replaces the elements of the + input list with the outputs of the enabled ROI engines, sorted by + number. + + If the number of elements in the list does not match the number of + ROI engines that produced output, an exception will be raised during + this call or the next. + """ + channel = self.channel_base + 1 + + sentinel = rtio_input_data(channel) + if sentinel != self.sentinel: + raise OutOfSyncException + + for i in range(len(data)): + roi_output = rtio_input_data(channel) + if roi_output == self.sentinel: + raise OutOfSyncException + data[i] = roi_output diff --git a/artiq/coredevice/i2c.py b/artiq/coredevice/i2c.py index 92d66b8dd..61be474e4 100644 --- a/artiq/coredevice/i2c.py +++ b/artiq/coredevice/i2c.py @@ -33,6 +33,110 @@ def i2c_read(busno: TInt32, ack: TBool) -> TInt32: raise NotImplementedError("syscall not simulated") + +@kernel +def i2c_poll(busno, busaddr): + """Poll I2C device at address. + + :param busno: I2C bus number + :param busaddr: 8 bit I2C device address (LSB=0) + :returns: True if the poll was ACKed + """ + i2c_start(busno) + ack = i2c_write(busno, busaddr) + i2c_stop(busno) + return ack + + +@kernel +def i2c_write_byte(busno, busaddr, data, ack=True): + """Write one byte to a device. + + :param busno: I2C bus number + :param busaddr: 8 bit I2C device address (LSB=0) + :param data: Data byte to be written + :param ack: Expect I2C ACK of the data byte. If `False`, the byte may + be NACKed. + """ + i2c_start(busno) + try: + if not i2c_write(busno, busaddr): + raise I2CError("failed to ack bus address") + if not i2c_write(busno, data) and ack: + raise I2CError("failed to ack write data") + finally: + i2c_stop(busno) + + +@kernel +def i2c_read_byte(busno, busaddr): + """Read one byte from a device. + + :param busno: I2C bus number + :param busaddr: 8 bit I2C device address (LSB=0) + :returns: Byte read + """ + i2c_start(busno) + data = 0 + try: + if not i2c_write(busno, busaddr | 1): + raise I2CError("failed to ack bus read address") + data = i2c_read(busno, ack=False) + finally: + i2c_stop(busno) + return data + + +@kernel +def i2c_write_many(busno, busaddr, addr, data, ack_last=True): + """Transfer multiple bytes to a device. + + :param busno: I2C bus number + :param busaddr: 8 bit I2C device address (LSB=0) + :param addr: 8 bit data address + :param data: Data bytes to be written + :param ack_last: Expect I2C ACK of the last byte written. If `False`, + the last byte may be NACKed (e.g. EEPROM full page writes). + """ + n = len(data) + i2c_start(busno) + try: + if not i2c_write(busno, busaddr): + raise I2CError("failed to ack bus address") + if not i2c_write(busno, addr): + raise I2CError("failed to ack data address") + for i in range(n): + if not i2c_write(busno, data[i]) and ( + i < n - 1 or ack_last): + raise I2CError("failed to ack write data") + finally: + i2c_stop(busno) + + +@kernel +def i2c_read_many(busno, busaddr, addr, data): + """Transfer multiple bytes from a device. + + :param busno: I2C bus number + :param busaddr: 8 bit I2C device address (LSB=0) + :param addr: 8 bit data address + :param data: List of integers to be filled with the data read. + One entry per byte.
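A minimal hedged sketch of how these helpers compose in a kernel; the bus number, device address and data address below are placeholders, not values from this patch:

```python
from artiq.experiment import *
from artiq.coredevice.i2c import i2c_poll, i2c_read_many


class I2CReadDemo(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @kernel
    def run(self):
        self.core.reset()
        if not i2c_poll(0, 0xa0):        # bus 0, 8-bit address 0xa0 (placeholders)
            print("device did not ACK")
        else:
            data = [0] * 8
            i2c_read_many(0, 0xa0, 0x00, data)  # read 8 bytes from data address 0x00
            print(data)
```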
+ """ + m = len(data) + i2c_start(busno) + try: + if not i2c_write(busno, busaddr): + raise I2CError("failed to ack bus address") + if not i2c_write(busno, addr): + raise I2CError("failed to ack data address") + i2c_restart(busno) + if not i2c_write(busno, busaddr | 1): + raise I2CError("failed to ack bus read address") + for i in range(m): + data[i] = i2c_read(busno, ack=i < m - 1) + finally: + i2c_stop(busno) + + class PCA9548: """Driver for the PCA9548 I2C bus switch. @@ -48,34 +152,24 @@ class PCA9548: self.address = address @kernel - def set(self, channel): - """Select one channel. + def select(self, mask): + """Enable/disable channels. - Selecting multiple channels at the same time is not supported by this - driver. + :param mask: Bit mask of enabled channels + """ + i2c_write_byte(self.busno, self.address, mask) + + @kernel + def set(self, channel): + """Enable one channel. :param channel: channel number (0-7) """ - i2c_start(self.busno) - try: - if not i2c_write(self.busno, self.address): - raise I2CError("PCA9548 failed to ack address") - if not i2c_write(self.busno, 1 << channel): - raise I2CError("PCA9548 failed to ack control word") - finally: - i2c_stop(self.busno) + self.select(1 << channel) @kernel def readback(self): - i2c_start(self.busno) - r = 0 - try: - if not i2c_write(self.busno, self.address | 1): - raise I2CError("PCA9548 failed to ack address") - r = i2c_read(self.busno, False) - finally: - i2c_stop(self.busno) - return r + return i2c_read_byte(self.busno, self.address) class TCA6424A: @@ -92,19 +186,9 @@ class TCA6424A: self.address = address @kernel - def _write24(self, command, value): - i2c_start(self.busno) - try: - if not i2c_write(self.busno, self.address): - raise I2CError("TCA6424A failed to ack address") - if not i2c_write(self.busno, command): - raise I2CError("TCA6424A failed to ack command") - for i in range(3): - if not i2c_write(self.busno, value >> 16): - raise I2CError("TCA6424A failed to ack data") - value <<= 8 - finally: - i2c_stop(self.busno) + def _write24(self, addr, value): + i2c_write_many(self.busno, self.address, addr, + [value >> 16, value >> 8, value]) @kernel def set(self, outputs): diff --git a/artiq/coredevice/kasli_i2c.py b/artiq/coredevice/kasli_i2c.py new file mode 100644 index 000000000..11c69172f --- /dev/null +++ b/artiq/coredevice/kasli_i2c.py @@ -0,0 +1,73 @@ +from numpy import int32 + +from artiq.experiment import * +from artiq.coredevice.i2c import i2c_write_many, i2c_read_many, i2c_poll + + +port_mapping = { + "EEM0": 7, + "EEM1": 5, + "EEM2": 4, + "EEM3": 3, + "EEM4": 2, + "EEM5": 1, + "EEM6": 0, + "EEM7": 6, + "EEM8": 12, + "EEM9": 13, + "EEM10": 15, + "EEM11": 14, + "SFP0": 8, + "SFP1": 9, + "SFP2": 10, + "LOC0": 11, +} + + +class KasliEEPROM: + def __init__(self, dmgr, port, busno=0, + core_device="core", sw0_device="i2c_switch0", sw1_device="i2c_switch1"): + self.core = dmgr.get(core_device) + self.sw0 = dmgr.get(sw0_device) + self.sw1 = dmgr.get(sw1_device) + self.busno = busno + self.port = port_mapping[port] + self.address = 0xa0 # i2c 8 bit + + @kernel + def select(self): + mask = 1 << self.port + self.sw0.select(mask) + self.sw1.select(mask >> 8) + + @kernel + def deselect(self): + self.sw0.select(0) + self.sw1.select(0) + + @kernel + def write_i32(self, addr, value): + self.select() + try: + data = [0]*4 + for i in range(4): + data[i] = (value >> 24) & 0xff + value <<= 8 + i2c_write_many(self.busno, self.address, addr, data) + i2c_poll(self.busno, self.address) + finally: + self.deselect() + + @kernel + def 
read_i32(self, addr): + self.select() + try: + data = [0]*4 + i2c_read_many(self.busno, self.address, addr, data) + value = int32(0) + for i in range(4): + value <<= 8 + value |= data[i] + finally: + self.deselect() + return value diff --git a/artiq/coredevice/mirny.py b/artiq/coredevice/mirny.py new file mode 100644 index 000000000..9fdc18c0c --- /dev/null +++ b/artiq/coredevice/mirny.py @@ -0,0 +1,110 @@ +"""RTIO driver for Mirny (4 channel GHz PLLs) +""" + +from artiq.language.core import kernel, delay +from artiq.language.units import us + +from numpy import int32 + +from artiq.coredevice import spi2 as spi + + +SPI_CONFIG = ( + 0 * spi.SPI_OFFLINE + | 0 * spi.SPI_END + | 0 * spi.SPI_INPUT + | 1 * spi.SPI_CS_POLARITY + | 0 * spi.SPI_CLK_POLARITY + | 0 * spi.SPI_CLK_PHASE + | 0 * spi.SPI_LSB_FIRST + | 0 * spi.SPI_HALF_DUPLEX +) + +# SPI clock write and read dividers +SPIT_WR = 4 +SPIT_RD = 16 + +SPI_CS = 1 + +WE = 1 << 24 + + +class Mirny: + """ + Mirny PLL-based RF generator. + + :param spi_device: SPI bus device + :param refclk: Reference clock (SMA, MMCX or on-board 100 MHz oscillator) + frequency in Hz + :param clk_sel: Reference clock selection. + valid options are: 0 - internal 100MHz XO; 1 - front-panel SMA; 2 - + internal MMCX + :param core_device: Core device name (default: "core") + """ + + kernel_invariants = {"bus", "core"} + + def __init__(self, dmgr, spi_device, refclk=100e6, clk_sel=0, core_device="core"): + self.core = dmgr.get(core_device) + self.bus = dmgr.get(spi_device) + + self.refclk = refclk + assert 10 <= self.refclk / 1e6 <= 600, "Invalid refclk" + + self.clk_sel = clk_sel & 0b11 + assert 0 <= self.clk_sel <= 3, "Invalid clk_sel" + + # TODO: support clk_div on v1.0 boards + + @kernel + def read_reg(self, addr): + """Read a register""" + self.bus.set_config_mu( + SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, 24, SPIT_RD, SPI_CS + ) + self.bus.write((addr << 25)) + return self.bus.read() & int32(0xFFFF) + + @kernel + def write_reg(self, addr, data): + """Write a register""" + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 24, SPIT_WR, SPI_CS) + self.bus.write((addr << 25) | WE | ((data & 0xFFFF) << 8)) + + @kernel + def init(self, blind=False): + """ + Initialize and detect Mirny. + + :param blind: Do not attempt to verify presence and compatibility. + """ + if not blind: + reg0 = self.read_reg(0) + if reg0 & 0b11 != 0b11: + raise ValueError("Mirny HW_REV mismatch") + if (reg0 >> 2) & 0b11 != 0b00: + raise ValueError("Mirny PROTO_REV mismatch") + delay(100 * us) # slack + + # select clock source + self.write_reg(1, (self.clk_sel << 4)) + delay(1000 * us) + + @kernel + def set_att_mu(self, channel, att): + """Set digital step attenuator in machine units. + + :param att: Attenuation setting, 8 bit digital. 
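A hedged usage sketch; the device name `mirny0` is an assumption, and the attenuator value is a raw device-specific code rather than a calibrated attenuation:

```python
from artiq.experiment import *


class MirnySetup(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("mirny0")  # Mirny driver; name assumed

    @kernel
    def run(self):
        self.core.reset()
        self.mirny0.init()               # checks HW/PROTO revision, selects clock source
        delay(1*ms)
        self.mirny0.set_att_mu(0, 0x00)  # channel 0; raw 8-bit attenuator code
```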
+ """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 16, SPIT_WR, SPI_CS) + self.bus.write(((channel | 8) << 25) | (att << 16)) + + @kernel + def write_ext(self, addr, length, data): + """Perform SPI write to a prefixed address""" + self.bus.set_config_mu(SPI_CONFIG, 8, SPIT_WR, SPI_CS) + self.bus.write(addr << 25) + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, length, SPIT_WR, SPI_CS) + if length < 32: + data <<= 32 - length + self.bus.write(data) diff --git a/artiq/coredevice/novogorny.py b/artiq/coredevice/novogorny.py new file mode 100644 index 000000000..49bb9a22c --- /dev/null +++ b/artiq/coredevice/novogorny.py @@ -0,0 +1,174 @@ +from artiq.language.core import kernel, delay, portable, now_mu, at_mu +from artiq.language.units import ns + +from artiq.coredevice import spi2 as spi + + +SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END | + 0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY | + 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | + 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + + +SPI_CS_ADC = 1 +SPI_CS_SR = 2 + + +@portable +def adc_ctrl(channel=1, softspan=0b111, valid=1): + """Build an LTC2335-16 control word""" + return (valid << 7) | (channel << 3) | softspan + + +@portable +def adc_softspan(data): + """Return the softspan configuration index from a result packet""" + return data & 0x7 + + +@portable +def adc_channel(data): + """Return the channel index from a result packet""" + return (data >> 3) & 0x7 + + +@portable +def adc_data(data): + """Return the ADC value from a result packet""" + return (data >> 8) & 0xffff + + +@portable +def adc_value(data, v_ref=5.): + """Convert an ADC result packet to SI units (Volt)""" + softspan = adc_softspan(data) + data = adc_data(data) + g = 625 + if softspan & 4: + g *= 2 + if softspan & 2: + h = 1 << 15 + else: + h = 1 << 16 + data = -(data & h) + (data & ~h) + if softspan & 1: + h *= 500 + else: + h *= 512 + v_per_lsb = v_ref*g/h + return data*v_per_lsb + + +class Novogorny: + """Novogorny ADC. + + Controls the LTC2335-16 8 channel ADC with SPI interface and + the switchable gain instrumentation amplifiers using a shift + register. + + :param spi_device: SPI bus device name + :param cnv_device: CNV RTIO TTLOut channel name + :param div: SPI clock divider (default: 8) + :param gains: Initial value for PGIA gains shift register + (default: 0x0000). Knowledge of this state is not transferred + between experiments. + :param core_device: Core device name + """ + kernel_invariants = {"bus", "core", "cnv", "div", "v_ref"} + + def __init__(self, dmgr, spi_device, cnv_device, div=8, gains=0x0000, + core_device="core"): + self.bus = dmgr.get(spi_device) + self.core = dmgr.get(core_device) + self.cnv = dmgr.get(cnv_device) + self.div = div + self.gains = gains + self.v_ref = 5. # 5 Volt reference + + @kernel + def set_gain_mu(self, channel, gain): + """Set instrumentation amplifier gain of a channel. + + The four gain settings (0, 1, 2, 3) correspond to gains of + (1, 10, 100, 1000) respectively. + + :param channel: Channel index + :param gain: Gain setting + """ + gains = self.gains + gains &= ~(0b11 << (channel*2)) + gains |= gain << (channel*2) + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, + 16, self.div, SPI_CS_SR) + self.bus.write(gains << 16) + self.gains = gains + + @kernel + def configure(self, data): + """Set up the ADC sequencer. + + :param data: List of 8 bit control words to write into the sequencer + table.
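For instance, a hedged sketch of programming the sequencer to cycle through the first four channels; the `novogorny` device name is an assumption and the softspan is left at its default:

```python
from artiq.experiment import *
from artiq.coredevice.novogorny import adc_ctrl


class NovogornySequence(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("novogorny")  # Novogorny driver; name assumed

    @kernel
    def run(self):
        self.core.reset()
        # One 8-bit control word per sequencer slot, channels 0 to 3.
        self.novogorny.configure([adc_ctrl(channel=0), adc_ctrl(channel=1),
                                  adc_ctrl(channel=2), adc_ctrl(channel=3)])
```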
+ """ + if len(data) > 1: + self.bus.set_config_mu(SPI_CONFIG, + 8, self.div, SPI_CS_ADC) + for i in range(len(data) - 1): + self.bus.write(data[i] << 24) + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, + 8, self.div, SPI_CS_ADC) + self.bus.write(data[len(data) - 1] << 24) + + @kernel + def sample_mu(self, next_ctrl=0): + """Acquire a sample. + + Perform a conversion and transfer the sample. + + :param next_ctrl: ADC control word for the next sample + :return: The ADC result packet (machine units) + """ + self.cnv.pulse(40*ns) # t_CNVH + delay(560*ns) # t_CONV max + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, + 24, self.div, SPI_CS_ADC) + self.bus.write(next_ctrl << 24) + return self.bus.read() + + @kernel + def sample(self, next_ctrl=0): + """Acquire a sample. + + .. seealso:: :meth:`sample_mu` + + :param next_ctrl: ADC control word for the next sample + :return: The ADC result packet (Volt) + """ + return adc_value(self.sample_mu(), self.v_ref) + + @kernel + def burst_mu(self, data, dt_mu, ctrl=0): + """Acquire a burst of samples. + + If the burst is too long and the sample rate too high, there will be + RTIO input overflows. + + High sample rates lead to gain errors since the impedance between the + instrumentation amplifier and the ADC is high. + + :param data: List of data values to write result packets into. + In machine units. + :param dt_mu: Sample interval in machine units. + :param ctrl: ADC control word to write during each result packet + transfer. + """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, + 24, self.div, SPI_CS_ADC) + for i in range(len(data)): + t0 = now_mu() + self.cnv.pulse(40*ns) # t_CNVH + delay(560*ns) # t_CONV max + self.bus.write(ctrl << 24) + at_mu(t0 + dt_mu) + for i in range(len(data)): + data[i] = self.bus.read() diff --git a/artiq/coredevice/pcf8574a.py b/artiq/coredevice/pcf8574a.py new file mode 100644 index 000000000..ed11cc311 --- /dev/null +++ b/artiq/coredevice/pcf8574a.py @@ -0,0 +1,47 @@ +from artiq.experiment import kernel +from artiq.coredevice.i2c import ( + i2c_start, i2c_write, i2c_read, i2c_stop, I2CError) + + +class PCF8574A: + """Driver for the PCF8574 I2C remote 8-bit I/O expander. + + I2C transactions are not real-time, and are performed by the CPU without + involving RTIO. + """ + def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"): + self.core = dmgr.get(core_device) + self.busno = busno + self.address = address + + @kernel + def set(self, data): + """Drive data on the quasi-bidirectional pins. + + :param data: Pin data. High bits are weakly driven high + (and thus inputs), low bits are strongly driven low. + """ + i2c_start(self.busno) + try: + if not i2c_write(self.busno, self.address): + raise I2CError("PCF8574A failed to ack address") + if not i2c_write(self.busno, data): + raise I2CError("PCF8574A failed to ack data") + finally: + i2c_stop(self.busno) + + @kernel + def get(self): + """Retrieve quasi-bidirectional pin input data. + + :return: Pin data + """ + i2c_start(self.busno) + ret = 0 + try: + if not i2c_write(self.busno, self.address | 1): + raise I2CError("PCF8574A failed to ack address") + ret = i2c_read(self.busno, False) + finally: + i2c_stop(self.busno) + return ret diff --git a/artiq/coredevice/pcu.py b/artiq/coredevice/pcu.py index f9b77f553..6dd1ff150 100644 --- a/artiq/coredevice/pcu.py +++ b/artiq/coredevice/pcu.py @@ -41,7 +41,7 @@ class CorePCU: """ Configure and clear the kernel CPU performance counters.
- The eight counters are configures to count the folloging events: + The eight counters are configured to count the following events: * Load or store * Instruction fetch * Data cache miss diff --git a/artiq/coredevice/phaser.py b/artiq/coredevice/phaser.py new file mode 100644 index 000000000..ab00d7f1a --- /dev/null +++ b/artiq/coredevice/phaser.py @@ -0,0 +1,945 @@ +from artiq.language.core import kernel, delay_mu, delay +from artiq.coredevice.rtio import rtio_output, rtio_input_data +from artiq.language.units import us, ns, ms, MHz, dB +from artiq.language.types import TInt32 +from artiq.coredevice.dac34h84 import DAC34H84 +from artiq.coredevice.trf372017 import TRF372017 + + +PHASER_BOARD_ID = 19 +PHASER_ADDR_BOARD_ID = 0x00 +PHASER_ADDR_HW_REV = 0x01 +PHASER_ADDR_GW_REV = 0x02 +PHASER_ADDR_CFG = 0x03 +PHASER_ADDR_STA = 0x04 +PHASER_ADDR_CRC_ERR = 0x05 +PHASER_ADDR_LED = 0x06 +PHASER_ADDR_FAN = 0x07 +PHASER_ADDR_DUC_STB = 0x08 +PHASER_ADDR_ADC_CFG = 0x09 +PHASER_ADDR_SPI_CFG = 0x0a +PHASER_ADDR_SPI_DIVLEN = 0x0b +PHASER_ADDR_SPI_SEL = 0x0c +PHASER_ADDR_SPI_DATW = 0x0d +PHASER_ADDR_SPI_DATR = 0x0e +PHASER_ADDR_SYNC_DLY = 0x0f + +PHASER_ADDR_DUC0_CFG = 0x10 +# PHASER_ADDR_DUC0_RESERVED0 = 0x11 +PHASER_ADDR_DUC0_F = 0x12 +PHASER_ADDR_DUC0_P = 0x16 +PHASER_ADDR_DAC0_DATA = 0x18 +PHASER_ADDR_DAC0_TEST = 0x1c + +PHASER_ADDR_DUC1_CFG = 0x20 +# PHASER_ADDR_DUC1_RESERVED0 = 0x21 +PHASER_ADDR_DUC1_F = 0x22 +PHASER_ADDR_DUC1_P = 0x26 +PHASER_ADDR_DAC1_DATA = 0x28 +PHASER_ADDR_DAC1_TEST = 0x2c + +PHASER_SEL_DAC = 1 << 0 +PHASER_SEL_TRF0 = 1 << 1 +PHASER_SEL_TRF1 = 1 << 2 +PHASER_SEL_ATT0 = 1 << 3 +PHASER_SEL_ATT1 = 1 << 4 + +PHASER_STA_DAC_ALARM = 1 << 0 +PHASER_STA_TRF0_LD = 1 << 1 +PHASER_STA_TRF1_LD = 1 << 2 +PHASER_STA_TERM0 = 1 << 3 +PHASER_STA_TERM1 = 1 << 4 +PHASER_STA_SPI_IDLE = 1 << 5 + +PHASER_DAC_SEL_DUC = 0 +PHASER_DAC_SEL_TEST = 1 + +PHASER_HW_REV_VARIANT = 1 << 4 + + +class Phaser: + """Phaser 4-channel, 16-bit, 1 GS/s DAC coredevice driver. + + Phaser contains a 4 channel, 1 GS/s DAC chip with integrated upconversion, + quadrature modulation compensation and interpolation features. + + The coredevice produces 2 IQ (in-phase and quadrature) data streams with 25 + MS/s and 14 bit per quadrature. Each data stream supports 5 independent + numerically controlled IQ oscillators (NCOs, DDSs with 32 bit frequency, 16 + bit phase, 15 bit amplitude, and phase accumulator clear functionality) + added together. See :class:`PhaserChannel` and :class:`PhaserOscillator`. + + Together with a data clock, framing marker, a checksum and metadata for + register access the streams are sent in groups of 8 samples over 1.5 Gb/s + FastLink via a single EEM connector from coredevice to Phaser. + + On Phaser in the FPGA the data streams are buffered and interpolated + from 25 MS/s to 500 MS/s 16 bit followed by a 500 MS/s digital upconverter + with adjustable frequency and phase. The interpolation passband is 20 MHz + wide, passband ripple is less than 1e-3 amplitude, stopband attenuation + is better than 75 dB at offsets > 15 MHz and better than 90 dB at offsets + > 30 MHz. + + The four 16 bit 500 MS/s DAC data streams are sent via a 32 bit parallel + LVDS bus operating at 1 Gb/s per pin pair and processed in the DAC (Texas + Instruments DAC34H84). On the DAC 2x interpolation, sinx/x compensation, + quadrature modulator compensation, fine and coarse mixing as well as group + delay capabilities are available. 
+ + The latency/group delay from the RTIO events setting + :class:`PhaserOscillator` or :class:`PhaserChannel` DUC parameters all the + way to the DAC outputs is deterministic. This enables deterministic + absolute phase with respect to other RTIO input and output events. + + The four analog DAC outputs are passed through anti-aliasing filters. + + In the baseband variant, the even/in-phase DAC channels feed 31.5 dB range + attenuators and are available on the front panel. The odd outputs are + available at MMCX connectors on board. + + In the upconverter variant, each IQ output pair feeds one quadrature + upconverter (Texas Instruments TRF372017) with integrated PLL/VCO. This + digitally configured analog quadrature upconverter supports offset tuning + for carrier and sideband suppression. The output from the upconverter + passes through the 31.5 dB range step attenuator and is available at the + front panel. + + The DAC, the analog quadrature upconverters and the attenuators are + configured through a shared SPI bus that is accessed and controlled via + FPGA registers. + + .. note:: Various register settings of the DAC and the quadrature + upconverters are available to be modified through the `dac`, `trf0`, + `trf1` dictionaries. These can be set through the device database + (`device_db.py`). The settings are frozen during instantiation of the + class and applied during `init()`. See the :class:`DAC34H84` and + :class:`TRF372017` source for details. + + .. note:: To establish deterministic latency between RTIO time base and DAC + output, the DAC FIFO read pointer value (`fifo_offset`) must be + fixed. If `tune_fifo_offset=True` (the default) a value with maximum + margin is determined automatically by `dac_tune_fifo_offset` each time + `init()` is called. This value should be used for the `fifo_offset` key + of the `dac` settings of Phaser in `device_db.py` and automatic + tuning should be disabled by `tune_fifo_offset=False`. + + :param channel: Base RTIO channel number + :param core_device: Core device name (default: "core") + :param miso_delay: Fastlink MISO signal delay to account for cable + and buffer round trip. Tuning this might be automated later. + :param tune_fifo_offset: Tune the DAC FIFO read pointer offset + (default=True) + :param clk_sel: Select the external SMA clock input (1 or 0) + :param sync_dly: SYNC delay with respect to ISTR. + :param dac: DAC34H84 DAC settings as a dictionary. + :param trf0: Channel 0 TRF372017 quadrature upconverter settings as a + dictionary. + :param trf1: Channel 1 TRF372017 quadrature upconverter settings as a + dictionary. + + Attributes: + + * :attr:`channel`: List of two :class:`PhaserChannel` + To access oscillators, digital upconverters, PLL/VCO analog + quadrature upconverters and attenuators. 
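The note about freezing `fifo_offset` translates to a device database entry along these lines. This is a hedged sketch only: the device name, the RTIO channel base and the commented-out values are placeholders, not values from this patch:

```python
# device_db.py fragment (illustrative)
device_db["phaser0"] = {
    "type": "local",
    "module": "artiq.coredevice.phaser",
    "class": "Phaser",
    "arguments": {
        "channel_base": 0x010000,   # placeholder RTIO channel base
        "miso_delay": 1,
        # After a first init() run has reported the auto-tuned value,
        # freeze it here and disable tuning:
        # "tune_fifo_offset": False,
        # "dac": {"fifo_offset": 2},
    }
}
```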
+ """ + kernel_invariants = {"core", "channel_base", "t_frame", "miso_delay", + "dac_mmap"} + + def __init__(self, dmgr, channel_base, miso_delay=1, tune_fifo_offset=True, + clk_sel=0, sync_dly=0, dac=None, trf0=None, trf1=None, + core_device="core"): + self.channel_base = channel_base + self.core = dmgr.get(core_device) + # TODO: auto-align miso-delay in phy + self.miso_delay = miso_delay + # frame duration in mu (10 words, 8 clock cycles each 4 ns) + # self.core.seconds_to_mu(10*8*4*ns) # unfortunately this returns 319 + assert self.core.ref_period == 1*ns + self.t_frame = 10*8*4 + self.clk_sel = clk_sel + self.tune_fifo_offset = tune_fifo_offset + self.sync_dly = sync_dly + + self.dac_mmap = DAC34H84(dac).get_mmap() + + self.channel = [PhaserChannel(self, ch, trf) + for ch, trf in enumerate([trf0, trf1])] + + @kernel + def init(self, debug=False): + """Initialize the board. + + Verifies board and chip presence, resets components, performs + communication and configuration tests and establishes initial + conditions. + """ + board_id = self.read8(PHASER_ADDR_BOARD_ID) + if board_id != PHASER_BOARD_ID: + raise ValueError("invalid board id") + delay(.1*ms) # slack + + hw_rev = self.read8(PHASER_ADDR_HW_REV) + delay(.1*ms) # slack + is_baseband = hw_rev & PHASER_HW_REV_VARIANT + + gw_rev = self.read8(PHASER_ADDR_GW_REV) + delay(.1*ms) # slack + + # allow a few errors during startup and alignment since boot + if self.get_crc_err() > 20: + raise ValueError("large number of frame CRC errors") + delay(.1*ms) # slack + + # reset + self.set_cfg(dac_resetb=0, dac_sleep=1, dac_txena=0, + trf0_ps=1, trf1_ps=1, + att0_rstn=0, att1_rstn=0) + self.set_leds(0x00) + self.set_fan_mu(0) + # bring dac out of reset, keep tx off + self.set_cfg(clk_sel=self.clk_sel, dac_txena=0, + trf0_ps=1, trf1_ps=1, + att0_rstn=0, att1_rstn=0) + delay(.1*ms) # slack + + # crossing dac_clk (reference) edges with sync_dly + # changes the optimal fifo_offset by 4 + self.set_sync_dly(self.sync_dly) + + # 4 wire SPI, sif4_enable + self.dac_write(0x02, 0x0080) + if self.dac_read(0x7f) != 0x5409: + raise ValueError("DAC version readback invalid") + delay(.1*ms) + if self.dac_read(0x00) != 0x049c: + raise ValueError("DAC config0 reset readback invalid") + delay(.1*ms) + + t = self.get_dac_temperature() + delay(.1*ms) + if t < 10 or t > 90: + raise ValueError("DAC temperature out of bounds") + + for data in self.dac_mmap: + self.dac_write(data >> 16, data) + delay(40*us) + + # pll_ndivsync_ena disable + config18 = self.dac_read(0x18) + delay(.1*ms) + self.dac_write(0x18, config18 & ~0x0800) + + patterns = [ + [0xf05a, 0x05af, 0x5af0, 0xaf05], # test channel/iq/byte/nibble + [0x7a7a, 0xb6b6, 0xeaea, 0x4545], # datasheet pattern a + [0x1a1a, 0x1616, 0xaaaa, 0xc6c6], # datasheet pattern b + ] + # A data delay of 2*50 ps heuristically and reproducibly matches + # FPGA+board+DAC skews. There is plenty of margin (>= 250 ps + # either side) and no need to tune at runtime. + # Parity provides another level of safety. 
+ for i in range(len(patterns)): + delay(.5*ms) + errors = self.dac_iotest(patterns[i]) + if errors: + raise ValueError("DAC iotest failure") + + delay(2*ms) # let it settle + lvolt = self.dac_read(0x18) & 7 + delay(.1*ms) + if lvolt < 2 or lvolt > 5: + raise ValueError("DAC PLL lock failed, check clocking") + + if self.tune_fifo_offset: + fifo_offset = self.dac_tune_fifo_offset() + if debug: + print(fifo_offset) + self.core.break_realtime() + + # self.dac_write(0x20, 0x0000) # stop fifo sync + # alarm = self.get_sta() & 1 + # delay(.1*ms) + self.clear_dac_alarms() + delay(2*ms) # let it run a bit + alarms = self.get_dac_alarms() + delay(.1*ms) # slack + if alarms & ~0x0040: # ignore PLL alarms (see DS) + if debug: + print(alarms) + self.core.break_realtime() + # ignore alarms + else: + raise ValueError("DAC alarm") + + # power up trfs, release att reset + self.set_cfg(clk_sel=self.clk_sel, dac_txena=0) + + for ch in range(2): + channel = self.channel[ch] + # test attenuator write and readback + channel.set_att_mu(0x5a) + if channel.get_att_mu() != 0x5a: + raise ValueError("attenuator test failed") + delay(.1*ms) + channel.set_att_mu(0x00) # minimum attenuation + + # test oscillators and DUC + for i in range(len(channel.oscillator)): + oscillator = channel.oscillator[i] + asf = 0 + if i == 0: + asf = 0x7fff + # 6pi/4 phase + oscillator.set_amplitude_phase_mu(asf=asf, pow=0xc000, clr=1) + delay(1*us) + # 3pi/4 + channel.set_duc_phase_mu(0x6000) + channel.set_duc_cfg(select=0, clr=1) + self.duc_stb() + delay(.1*ms) # settle link, pipeline and impulse response + data = channel.get_dac_data() + delay(.1*ms) + sqrt2 = 0x5a81 # 0x7fff/sqrt(2) + data_i = data & 0xffff + data_q = (data >> 16) & 0xffff + # allow ripple + if (data_i < sqrt2 - 30 or data_i > sqrt2 or + abs(data_i - data_q) > 2): + raise ValueError("DUC+oscillator phase/amplitude test failed") + + if is_baseband: + continue + + if channel.trf_read(0) & 0x7f != 0x68: + raise ValueError("TRF identification failed") + delay(.1*ms) + + delay(.2*ms) + for data in channel.trf_mmap: + channel.trf_write(data) + + delay(2*ms) # lock + if not (self.get_sta() & (PHASER_STA_TRF0_LD << ch)): + raise ValueError("TRF lock failure") + delay(.1*ms) + if channel.trf_read(0) & 0x1000: + raise ValueError("TRF R_SAT_ERR") + delay(.1*ms) + + # enable dac tx + self.set_cfg(clk_sel=self.clk_sel) + + @kernel + def write8(self, addr, data): + """Write data to FPGA register. + + :param addr: Address to write to (7 bit) + :param data: Data to write (8 bit) + """ + rtio_output((self.channel_base << 8) | (addr & 0x7f) | 0x80, data) + delay_mu(int64(self.t_frame)) + + @kernel + def read8(self, addr) -> TInt32: + """Read from FPGA register. + + :param addr: Address to read from (7 bit) + :return: Data read (8 bit) + """ + rtio_output((self.channel_base << 8) | (addr & 0x7f), 0) + response = rtio_input_data(self.channel_base) + return response >> self.miso_delay + + @kernel + def write32(self, addr, data: TInt32): + """Write 32 bit to a sequence of FPGA registers.""" + for offset in range(4): + byte = data >> 24 + self.write8(addr + offset, byte) + data <<= 8 + + @kernel + def read32(self, addr) -> TInt32: + """Read 32 bit from a sequence of FPGA registers.""" + data = 0 + for offset in range(4): + data <<= 8 + data |= self.read8(addr + offset) + delay(20*us) # slack + return data + + @kernel + def set_leds(self, leds): + """Set the front panel LEDs. 
+
+        :param leds: LED settings (6 bit)
+        """
+        self.write8(PHASER_ADDR_LED, leds)
+
+    @kernel
+    def set_fan_mu(self, pwm):
+        """Set the fan duty cycle.
+
+        :param pwm: Duty cycle in machine units (8 bit)
+        """
+        self.write8(PHASER_ADDR_FAN, pwm)
+
+    @kernel
+    def set_fan(self, duty):
+        """Set the fan duty cycle.
+
+        :param duty: Duty cycle (0. to 1.)
+        """
+        pwm = int32(round(duty*255.))
+        if pwm < 0 or pwm > 255:
+            raise ValueError("duty cycle out of bounds")
+        self.set_fan_mu(pwm)
+
+    @kernel
+    def set_cfg(self, clk_sel=0, dac_resetb=1, dac_sleep=0, dac_txena=1,
+                trf0_ps=0, trf1_ps=0, att0_rstn=1, att1_rstn=1):
+        """Set the configuration register.
+
+        Each flag is a single bit (0 or 1).
+
+        :param clk_sel: Select the external SMA clock input
+        :param dac_resetb: Active low DAC reset pin
+        :param dac_sleep: DAC sleep pin
+        :param dac_txena: Enable DAC transmission pin
+        :param trf0_ps: Quadrature upconverter 0 power save
+        :param trf1_ps: Quadrature upconverter 1 power save
+        :param att0_rstn: Active low attenuator 0 reset
+        :param att1_rstn: Active low attenuator 1 reset
+        """
+        self.write8(PHASER_ADDR_CFG,
+                    ((clk_sel & 1) << 0) | ((dac_resetb & 1) << 1) |
+                    ((dac_sleep & 1) << 2) | ((dac_txena & 1) << 3) |
+                    ((trf0_ps & 1) << 4) | ((trf1_ps & 1) << 5) |
+                    ((att0_rstn & 1) << 6) | ((att1_rstn & 1) << 7))
+
+    @kernel
+    def get_sta(self):
+        """Get the status register value.
+
+        Bit flags are:
+
+        * :const:`PHASER_STA_DAC_ALARM`: DAC alarm pin
+        * :const:`PHASER_STA_TRF0_LD`: Quadrature upconverter 0 lock detect
+        * :const:`PHASER_STA_TRF1_LD`: Quadrature upconverter 1 lock detect
+        * :const:`PHASER_STA_TERM0`: ADC channel 0 termination indicator
+        * :const:`PHASER_STA_TERM1`: ADC channel 1 termination indicator
+        * :const:`PHASER_STA_SPI_IDLE`: SPI machine is idle and data registers
+          can be read/written
+
+        :return: Status register
+        """
+        return self.read8(PHASER_ADDR_STA)
+
+    @kernel
+    def get_crc_err(self):
+        """Get the frame CRC error counter.
+
+        :return: The number of frames with CRC mismatches since the reset of
+            the device. Overflows at 256.
+        """
+        return self.read8(PHASER_ADDR_CRC_ERR)
+
+    @kernel
+    def set_sync_dly(self, dly):
+        """Set SYNC delay.
+
+        :param dly: DAC SYNC delay setting (0 to 7)
+        """
+        if dly < 0 or dly > 7:
+            raise ValueError("SYNC delay out of bounds")
+        self.write8(PHASER_ADDR_SYNC_DLY, dly)
+
+    @kernel
+    def duc_stb(self):
+        """Strobe the DUC configuration register update.
+
+        Transfer staging to active registers.
+        This affects both DUC channels.
+ """ + self.write8(PHASER_ADDR_DUC_STB, 0) + + @kernel + def spi_cfg(self, select, div, end, clk_phase=0, clk_polarity=0, + half_duplex=0, lsb_first=0, offline=0, length=8): + """Set the SPI machine configuration + + :param select: Chip selects to assert (DAC, TRF0, TRF1, ATT0, ATT1) + :param div: SPI clock divider relative to 250 MHz fabric clock + :param end: Whether to end the SPI transaction and deassert chip select + :param clk_phase: SPI clock phase (sample on first or second edge) + :param clk_polarity: SPI clock polarity (idle low or high) + :param half_duplex: Read MISO data from MOSI wire + :param lsb_first: Transfer the least significant bit first + :param offline: Put the SPI interfaces offline and don't drive voltages + :param length: SPI transfer length (1 to 8 bits) + """ + if div < 2 or div > 257: + raise ValueError("divider out of bounds") + if length < 1 or length > 8: + raise ValueError("length out of bounds") + self.write8(PHASER_ADDR_SPI_SEL, select) + self.write8(PHASER_ADDR_SPI_DIVLEN, (div - 2 >> 3) | (length - 1 << 5)) + self.write8(PHASER_ADDR_SPI_CFG, + ((offline & 1) << 0) | ((end & 1) << 1) | + ((clk_phase & 1) << 2) | ((clk_polarity & 1) << 3) | + ((half_duplex & 1) << 4) | ((lsb_first & 1) << 5)) + + @kernel + def spi_write(self, data): + """Write 8 bits into the SPI data register and start/continue the + transaction.""" + self.write8(PHASER_ADDR_SPI_DATW, data) + + @kernel + def spi_read(self): + """Read from the SPI input data register.""" + return self.read8(PHASER_ADDR_SPI_DATR) + + @kernel + def dac_write(self, addr, data): + """Write 16 bit to a DAC register. + + :param addr: Register address + :param data: Register data to write + """ + div = 34 # 100 ns min period + t_xfer = self.core.seconds_to_mu((8 + 1)*div*4*ns) + self.spi_cfg(select=PHASER_SEL_DAC, div=div, end=0) + self.spi_write(addr) + delay_mu(t_xfer) + self.spi_write(data >> 8) + delay_mu(t_xfer) + self.spi_cfg(select=PHASER_SEL_DAC, div=div, end=1) + self.spi_write(data) + delay_mu(t_xfer) + + @kernel + def dac_read(self, addr, div=34) -> TInt32: + """Read from a DAC register. + + :param addr: Register address to read from + :param div: SPI clock divider. Needs to be at least 250 (1 µs SPI + clock) to read the temperature register. + """ + t_xfer = self.core.seconds_to_mu((8 + 1)*div*4*ns) + self.spi_cfg(select=PHASER_SEL_DAC, div=div, end=0) + self.spi_write(addr | 0x80) + delay_mu(t_xfer) + self.spi_write(0) + delay_mu(t_xfer) + data = self.spi_read() << 8 + delay(20*us) # slack + self.spi_cfg(select=PHASER_SEL_DAC, div=div, end=1) + self.spi_write(0) + delay_mu(t_xfer) + data |= self.spi_read() + return data + + @kernel + def get_dac_temperature(self) -> TInt32: + """Read the DAC die temperature. + + :return: DAC temperature in degree Celsius + """ + return self.dac_read(0x06, div=257) >> 8 + + @kernel + def get_dac_alarms(self): + """Read the DAC alarm flags. + + :return: DAC alarm flags (see datasheet for bit meaning) + """ + return self.dac_read(0x05) + + @kernel + def clear_dac_alarms(self): + """Clear DAC alarm flags.""" + self.dac_write(0x05, 0x0000) + + @kernel + def dac_iotest(self, pattern) -> TInt32: + """Performs a DAC IO test according to the datasheet. 
+
+        :param pattern: List of four int32 containing the pattern
+        :return: Bit error mask (16 bits)
+        """
+        if len(pattern) != 4:
+            raise ValueError("pattern length out of bounds")
+        for addr in range(len(pattern)):
+            self.dac_write(0x25 + addr, pattern[addr])
+            # repeat the pattern twice
+            self.dac_write(0x29 + addr, pattern[addr])
+        delay(.1*ms)
+        for ch in range(2):
+            channel = self.channel[ch]
+            channel.set_duc_cfg(select=1)  # test
+            # dac test data is i msb, q lsb
+            data = pattern[2*ch] | (pattern[2*ch + 1] << 16)
+            channel.set_dac_test(data)
+            if channel.get_dac_data() != data:
+                raise ValueError("DAC test data readback failed")
+            delay(.1*ms)
+        cfg = self.dac_read(0x01)
+        delay(.1*ms)
+        self.dac_write(0x01, cfg | 0x8000)  # iotest_ena
+        self.dac_write(0x04, 0x0000)  # clear iotest_result
+        delay(.2*ms)  # let it rip
+        # no need to go through the alarm register,
+        # just read the error mask
+        # self.clear_dac_alarms()
+        alarms = self.get_dac_alarms()
+        delay(.1*ms)  # slack
+        if alarms & 0x0080:  # alarm_from_iotest
+            errors = self.dac_read(0x04)
+            delay(.1*ms)  # slack
+        else:
+            errors = 0
+        self.dac_write(0x01, cfg)  # clear config
+        self.dac_write(0x04, 0x0000)  # clear iotest_result
+        return errors
+
+    @kernel
+    def dac_tune_fifo_offset(self):
+        """Scan through `fifo_offset` and configure midpoint setting.
+
+        :return: Optimal `fifo_offset` setting with maximum margin to write
+            pointer.
+        """
+        # expect two or three error free offsets:
+        #
+        # read offset   01234567
+        # write pointer    w
+        # distance      32101234
+        # error free    x     xx
+        config9 = self.dac_read(0x09)
+        delay(.1*ms)
+        good = 0
+        for o in range(8):
+            # set new fifo_offset
+            self.dac_write(0x09, (config9 & 0x1fff) | (o << 13))
+            self.clear_dac_alarms()
+            delay(.1*ms)  # run
+            alarms = self.get_dac_alarms()
+            delay(.1*ms)  # slack
+            if (alarms >> 11) & 0x7 == 0:  # no fifo alarms
+                good |= 1 << o
+        # if there are good offsets across the wrap-around,
+        # offset them for the mean computation
+        if good & 0x81 == 0x81:
+            good = ((good << 4) & 0xf0) | (good >> 4)
+            offset = 4
+        else:
+            offset = 0
+        # calculate mean
+        sum = 0
+        count = 0
+        for o in range(8):
+            if good & (1 << o):
+                sum += o
+                count += 1
+        best = ((sum // count) + offset) % 8
+        self.dac_write(0x09, (config9 & 0x1fff) | (best << 13))
+        return best
+
+
+class PhaserChannel:
+    """Phaser channel IQ pair.
+
+    A Phaser channel contains:
+
+    * multiple oscillators (in the coredevice phy),
+    * an interpolation chain and digital upconverter (DUC) on Phaser,
+    * several channel-specific settings in the DAC:
+      * quadrature modulation compensation QMC
+      * numerically controlled oscillator NCO or coarse mixer CMIX,
+    * the analog quadrature upconverter (in the Phaser-Upconverter hardware
+      variant), and
+    * a digitally controlled step attenuator.
+
+    Attributes:
+
+    * :attr:`oscillator`: List of five :class:`PhaserOscillator`.
+
+    .. note:: The amplitude sum of the oscillators must be less than one to
+        avoid clipping or overflow. If any of the DDS or DUC frequencies are
+        non-zero, it is not sufficient to ensure that the sum in each
+        quadrature is within range.
+
+    .. note:: The interpolation filter on Phaser has an intrinsic sinc-like
+        overshoot in its step response. That overshoot is a direct consequence
+        of its near-brick-wall frequency response. For large and wide-band
+        changes in oscillator parameters, the overshoot can lead to clipping
+        or overflow after the interpolation. Either band-limit any changes
+        in the oscillator parameters or back off the amplitude sufficiently.
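+
+    A minimal usage sketch (the device name ``phaser0`` and the chosen
+    frequencies and amplitudes are assumptions, and sufficient timeline
+    slack is presumed)::
+
+        ch = self.phaser0.channel[0]
+        ch.set_duc_frequency(100*MHz)
+        ch.set_duc_cfg()
+        self.phaser0.duc_stb()
+        delay(.1*ms)
+        # keep the amplitude sum of the oscillators below full scale
+        ch.oscillator[0].set_frequency(1*MHz)
+        delay(.1*ms)
+        ch.oscillator[0].set_amplitude_phase(.4)
+        delay(.1*ms)
+        ch.oscillator[1].set_frequency(-2*MHz)
+        delay(.1*ms)
+        ch.oscillator[1].set_amplitude_phase(.4)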
+    """
+    kernel_invariants = {"index", "phaser", "trf_mmap"}
+
+    def __init__(self, phaser, index, trf):
+        self.phaser = phaser
+        self.index = index
+        self.trf_mmap = TRF372017(trf).get_mmap()
+        self.oscillator = [PhaserOscillator(self, osc) for osc in range(5)]
+
+    @kernel
+    def get_dac_data(self) -> TInt32:
+        """Get a sample of the current DAC data.
+
+        The data is split across multiple registers and thus the data
+        is only valid if constant.
+
+        :return: DAC data as 32 bit IQ. I/DACA/DACC in the 16 LSB,
+            Q/DACB/DACD in the 16 MSB
+        """
+        return self.phaser.read32(PHASER_ADDR_DAC0_DATA + (self.index << 4))
+
+    @kernel
+    def set_dac_test(self, data: TInt32):
+        """Set the DAC test data.
+
+        :param data: 32 bit IQ test data, I/DACA/DACC in the 16 LSB,
+            Q/DACB/DACD in the 16 MSB
+        """
+        self.phaser.write32(PHASER_ADDR_DAC0_TEST + (self.index << 4), data)
+
+    @kernel
+    def set_duc_cfg(self, clr=0, clr_once=0, select=0):
+        """Set the digital upconverter (DUC) and interpolator configuration.
+
+        :param clr: Keep the phase accumulator cleared (persistent)
+        :param clr_once: Clear the phase accumulator for one cycle
+        :param select: Select the data to send to the DAC (0: DUC data, 1: test
+            data, other values: reserved)
+        """
+        self.phaser.write8(PHASER_ADDR_DUC0_CFG + (self.index << 4),
+                           ((clr & 1) << 0) | ((clr_once & 1) << 1) |
+                           ((select & 3) << 2))
+
+    @kernel
+    def set_duc_frequency_mu(self, ftw):
+        """Set the DUC frequency.
+
+        :param ftw: DUC frequency tuning word (32 bit)
+        """
+        self.phaser.write32(PHASER_ADDR_DUC0_F + (self.index << 4), ftw)
+
+    @kernel
+    def set_duc_frequency(self, frequency):
+        """Set the DUC frequency in SI units.
+
+        :param frequency: DUC frequency in Hz (passband from -200 MHz to
+            200 MHz, wrapping around at +- 250 MHz)
+        """
+        ftw = int32(round(frequency*((1 << 30)/(125*MHz))))
+        self.set_duc_frequency_mu(ftw)
+
+    @kernel
+    def set_duc_phase_mu(self, pow):
+        """Set the DUC phase offset.
+
+        :param pow: DUC phase offset word (16 bit)
+        """
+        addr = PHASER_ADDR_DUC0_P + (self.index << 4)
+        self.phaser.write8(addr, pow >> 8)
+        self.phaser.write8(addr + 1, pow)
+
+    @kernel
+    def set_duc_phase(self, phase):
+        """Set the DUC phase in SI units.
+
+        :param phase: DUC phase in turns
+        """
+        pow = int32(round(phase*(1 << 16)))
+        self.set_duc_phase_mu(pow)
+
+    @kernel
+    def set_nco_frequency_mu(self, ftw):
+        """Set the NCO frequency.
+
+        :param ftw: NCO frequency tuning word (32 bit)
+        """
+        self.phaser.dac_write(0x15 + (self.index << 1), ftw >> 16)
+        self.phaser.dac_write(0x14 + (self.index << 1), ftw)
+
+    @kernel
+    def set_nco_frequency(self, frequency):
+        """Set the NCO frequency in SI units.
+
+        :param frequency: NCO frequency in Hz (passband from -400 MHz
+            to 400 MHz, wrapping around at +- 500 MHz)
+        """
+        ftw = int32(round(frequency*((1 << 30)/(250*MHz))))
+        self.set_nco_frequency_mu(ftw)
+
+    @kernel
+    def set_nco_phase_mu(self, pow):
+        """Set the NCO phase offset.
+
+        :param pow: NCO phase offset word (16 bit)
+        """
+        self.phaser.dac_write(0x12 + self.index, pow)
+
+    @kernel
+    def set_nco_phase(self, phase):
+        """Set the NCO phase in SI units.
+
+        :param phase: NCO phase in turns
+        """
+        pow = int32(round(phase*(1 << 16)))
+        self.set_nco_phase_mu(pow)
+
+    @kernel
+    def set_att_mu(self, data):
+        """Set channel attenuation.
+ + :param data: Attenuator data in machine units (8 bit) + """ + div = 34 # 30 ns min period + t_xfer = self.phaser.core.seconds_to_mu((8 + 1)*div*4*ns) + self.phaser.spi_cfg(select=PHASER_SEL_ATT0 << self.index, div=div, + end=1) + self.phaser.spi_write(data) + delay_mu(t_xfer) + + @kernel + def set_att(self, att): + """Set channel attenuation in SI units. + + :param att: Attenuation in dB + """ + # 2 lsb are inactive, resulting in 8 LSB per dB + data = 0xff - int32(round(att*8)) + if data < 0 or data > 0xff: + raise ValueError("attenuation out of bounds") + self.set_att_mu(data) + + @kernel + def get_att_mu(self) -> TInt32: + """Read current attenuation. + + The current attenuation value is read without side effects. + + :return: Current attenuation in machine units + """ + div = 34 + t_xfer = self.phaser.core.seconds_to_mu((8 + 1)*div*4*ns) + self.phaser.spi_cfg(select=PHASER_SEL_ATT0 << self.index, div=div, + end=0) + self.phaser.spi_write(0) + delay_mu(t_xfer) + data = self.phaser.spi_read() + delay(20*us) # slack + self.phaser.spi_cfg(select=PHASER_SEL_ATT0 << self.index, div=div, + end=1) + self.phaser.spi_write(data) + delay_mu(t_xfer) + return data + + @kernel + def trf_write(self, data, readback=False): + """Write 32 bits to quadrature upconverter register. + + :param data: Register data (32 bit) containing encoded address + :param readback: Whether to return the read back MISO data + """ + div = 34 # 50 ns min period + t_xfer = self.phaser.core.seconds_to_mu((8 + 1)*div*4*ns) + read = 0 + end = 0 + clk_phase = 0 + if readback: + clk_phase = 1 + for i in range(4): + if i == 0 or i == 3: + if i == 3: + end = 1 + self.phaser.spi_cfg(select=PHASER_SEL_TRF0 << self.index, + div=div, lsb_first=1, clk_phase=clk_phase, + end=end) + self.phaser.spi_write(data) + data >>= 8 + delay_mu(t_xfer) + if readback: + read >>= 8 + read |= self.phaser.spi_read() << 24 + delay(20*us) # slack + return read + + @kernel + def trf_read(self, addr, cnt_mux_sel=0) -> TInt32: + """Quadrature upconverter register read. + + :param addr: Register address to read (0 to 7) + :param cnt_mux_sel: Report VCO counter min or max frequency + :return: Register data (32 bit) + """ + self.trf_write(0x80000008 | (addr << 28) | (cnt_mux_sel << 27)) + # single clk pulse with ~LE to start readback + self.phaser.spi_cfg(select=0, div=34, end=1, length=1) + self.phaser.spi_write(0) + delay((1 + 1)*34*4*ns) + return self.trf_write(0x00000008 | (cnt_mux_sel << 27), + readback=True) + + +class PhaserOscillator: + """Phaser IQ channel oscillator (NCO/DDS). + + .. note:: Latencies between oscillators within a channel and between + oscillator paramters (amplitude and phase/frequency) are deterministic + (with respect to the 25 MS/s sample clock) but not matched. + """ + kernel_invariants = {"channel", "base_addr"} + + def __init__(self, channel, index): + self.channel = channel + self.base_addr = ((self.channel.phaser.channel_base + 1 + + 2*self.channel.index) << 8) | index + + @kernel + def set_frequency_mu(self, ftw): + """Set Phaser MultiDDS frequency tuning word. + + :param ftw: Frequency tuning word (32 bit) + """ + rtio_output(self.base_addr, ftw) + + @kernel + def set_frequency(self, frequency): + """Set Phaser MultiDDS frequency. 
+ + :param frequency: Frequency in Hz (passband from -10 MHz to 10 MHz, + wrapping around at +- 12.5 MHz) + """ + ftw = int32(round(frequency*((1 << 30)/(6.25*MHz)))) + self.set_frequency_mu(ftw) + + @kernel + def set_amplitude_phase_mu(self, asf=0x7fff, pow=0, clr=0): + """Set Phaser MultiDDS amplitude, phase offset and accumulator clear. + + :param asf: Amplitude (15 bit) + :param pow: Phase offset word (16 bit) + :param clr: Clear the phase accumulator (persistent) + """ + data = (asf & 0x7fff) | ((clr & 1) << 15) | (pow << 16) + rtio_output(self.base_addr | (1 << 8), data) + + @kernel + def set_amplitude_phase(self, amplitude, phase=0., clr=0): + """Set Phaser MultiDDS amplitude and phase. + + :param amplitude: Amplitude in units of full scale + :param phase: Phase in turns + :param clr: Clear the phase accumulator (persistent) + """ + asf = int32(round(amplitude*0x7fff)) + if asf < 0 or asf > 0x7fff: + raise ValueError("amplitude out of bounds") + pow = int32(round(phase*(1 << 16))) + self.set_amplitude_phase_mu(asf, pow, clr) diff --git a/artiq/coredevice/profiler.py b/artiq/coredevice/profiler.py new file mode 100644 index 000000000..5ed431915 --- /dev/null +++ b/artiq/coredevice/profiler.py @@ -0,0 +1,92 @@ +from collections import defaultdict +import subprocess + + +class Symbolizer: + def __init__(self, binary, triple, demangle=True): + cmdline = [ + triple + "-addr2line", "--exe=" + binary, + "--addresses", "--functions", "--inlines" + ] + if demangle: + cmdline.append("--demangle=rust") + self._addr2line = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, + universal_newlines=True) + + def symbolize(self, addr): + self._addr2line.stdin.write("0x{:08x}\n0\n".format(addr)) + self._addr2line.stdin.flush() + self._addr2line.stdout.readline() # 0x[addr] + + result = [] + while True: + function = self._addr2line.stdout.readline().rstrip() + + # check for end marker + if function == "0x00000000": # 0x00000000 + self._addr2line.stdout.readline() # ?? 
+ self._addr2line.stdout.readline() # ??:0 + return result + + file, line = self._addr2line.stdout.readline().rstrip().split(":") + + result.append((function, file, line, addr)) + + +class CallgrindWriter: + def __init__(self, output, binary, triple, compression=True, demangle=True): + self._output = output + self._binary = binary + self._current = defaultdict(lambda: None) + self._ids = defaultdict(lambda: {}) + self._compression = compression + self._symbolizer = Symbolizer(binary, triple, demangle=demangle) + + def _write(self, fmt, *args, **kwargs): + self._output.write(fmt.format(*args, **kwargs)) + self._output.write("\n") + + def _spec(self, spec, value): + if self._current[spec] == value: + return + self._current[spec] = value + + if not self._compression or value == "??": + self._write("{}={}", spec, value) + return + + spec_ids = self._ids[spec] + if value in spec_ids: + self._write("{}=({})", spec, spec_ids[value]) + else: + spec_ids[value] = len(spec_ids) + 1 + self._write("{}=({}) {}", spec, spec_ids[value], value) + + def header(self): + self._write("# callgrind format") + self._write("version: 1") + self._write("creator: ARTIQ") + self._write("positions: instr line") + self._write("events: Hits") + self._write("") + self._spec("ob", self._binary) + self._spec("cob", self._binary) + + def hit(self, addr, count): + for function, file, line, addr in self._symbolizer.symbolize(addr): + self._spec("fl", file) + self._spec("fn", function) + self._write("0x{:08x} {} {}", addr, line, count) + + def edge(self, caller, callee, count): + edges = self._symbolizer.symbolize(callee) + self._symbolizer.symbolize(caller) + for (callee, caller) in zip(edges, edges[1:]): + function, file, line, addr = callee + self._spec("cfl", file) + self._spec("cfn", function) + self._write("calls={} 0x{:08x} {}", count, addr, line) + + function, file, line, addr = caller + self._spec("fl", file) + self._spec("fn", function) + self._write("0x{:08x} {} {}", addr, line, count) diff --git a/artiq/coredevice/rtio.py b/artiq/coredevice/rtio.py index 471a48c31..7b5ab38a2 100644 --- a/artiq/coredevice/rtio.py +++ b/artiq/coredevice/rtio.py @@ -1,16 +1,14 @@ from artiq.language.core import syscall -from artiq.language.types import TInt64, TInt32, TNone, TList +from artiq.language.types import TInt32, TInt64, TList, TNone, TTuple @syscall(flags={"nowrite"}) -def rtio_output(time_mu: TInt64, channel: TInt32, addr: TInt32, data: TInt32 - ) -> TNone: +def rtio_output(target: TInt32, data: TInt32) -> TNone: raise NotImplementedError("syscall not simulated") @syscall(flags={"nowrite"}) -def rtio_output_wide(time_mu: TInt64, channel: TInt32, addr: TInt32, - data: TList(TInt32)) -> TNone: +def rtio_output_wide(target: TInt32, data: TList(TInt32)) -> TNone: raise NotImplementedError("syscall not simulated") @@ -22,3 +20,12 @@ def rtio_input_timestamp(timeout_mu: TInt64, channel: TInt32) -> TInt64: @syscall(flags={"nowrite"}) def rtio_input_data(channel: TInt32) -> TInt32: raise NotImplementedError("syscall not simulated") + + +@syscall(flags={"nowrite"}) +def rtio_input_timestamped_data(timeout_mu: TInt64, + channel: TInt32) -> TTuple([TInt64, TInt32]): + """Wait for an input event up to timeout_mu on the given channel, and + return a tuple of timestamp and attached data, or (-1, 0) if the timeout is + reached.""" + raise NotImplementedError("syscall not simulated") diff --git a/artiq/coredevice/sampler.py b/artiq/coredevice/sampler.py new file mode 100644 index 000000000..8679ad93c --- /dev/null +++ 
b/artiq/coredevice/sampler.py @@ -0,0 +1,147 @@ +from artiq.language.core import kernel, delay, portable +from artiq.language.units import ns + +from artiq.coredevice import spi2 as spi + + +SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END | + 0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY | + 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | + 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + + +SPI_CS_ADC = 0 # no CS, SPI_END does not matter, framing is done with CNV +SPI_CS_PGIA = 1 # separate SPI bus, CS used as RCLK + + +@portable +def adc_mu_to_volt(data, gain=0): + """Convert ADC data in machine units to Volts. + + :param data: 16 bit signed ADC word + :param gain: PGIA gain setting (0: 1, ..., 3: 1000) + :return: Voltage in Volts + """ + if gain == 0: + volt_per_lsb = 20./(1 << 16) + elif gain == 1: + volt_per_lsb = 2./(1 << 16) + elif gain == 2: + volt_per_lsb = .2/(1 << 16) + elif gain == 3: + volt_per_lsb = .02/(1 << 16) + else: + raise ValueError("invalid gain") + return data*volt_per_lsb + + +class Sampler: + """Sampler ADC. + + Controls the LTC2320-16 8 channel 16 bit ADC with SPI interface and + the switchable gain instrumentation amplifiers. + + :param spi_adc_device: ADC SPI bus device name + :param spi_pgia_device: PGIA SPI bus device name + :param cnv_device: CNV RTIO TTLOut channel name + :param div: SPI clock divider (default: 8) + :param gains: Initial value for PGIA gains shift register + (default: 0x0000). Knowledge of this state is not transferred + between experiments. + :param core_device: Core device name + """ + kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div"} + + def __init__(self, dmgr, spi_adc_device, spi_pgia_device, cnv_device, + div=8, gains=0x0000, core_device="core"): + self.bus_adc = dmgr.get(spi_adc_device) + self.bus_adc.update_xfer_duration_mu(div, 32) + self.bus_pgia = dmgr.get(spi_pgia_device) + self.bus_pgia.update_xfer_duration_mu(div, 16) + self.core = dmgr.get(core_device) + self.cnv = dmgr.get(cnv_device) + self.div = div + self.gains = gains + + @kernel + def init(self): + """Initialize the device. + + Sets up SPI channels. + """ + self.bus_adc.set_config_mu(SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, + 32, self.div, SPI_CS_ADC) + self.bus_pgia.set_config_mu(SPI_CONFIG | spi.SPI_END, + 16, self.div, SPI_CS_PGIA) + + @kernel + def set_gain_mu(self, channel, gain): + """Set instrumentation amplifier gain of a channel. + + The four gain settings (0, 1, 2, 3) corresponds to gains of + (1, 10, 100, 1000) respectively. + + :param channel: Channel index + :param gain: Gain setting + """ + gains = self.gains + gains &= ~(0b11 << (channel*2)) + gains |= gain << (channel*2) + self.bus_pgia.write(gains << 16) + self.gains = gains + + @kernel + def get_gains_mu(self): + """Read the PGIA gain settings of all channels. + + :return: The PGIA gain settings in machine units. + """ + self.bus_pgia.set_config_mu(SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, + 16, self.div, SPI_CS_PGIA) + self.bus_pgia.write(self.gains << 16) + self.bus_pgia.set_config_mu(SPI_CONFIG | spi.SPI_END, + 16, self.div, SPI_CS_PGIA) + self.gains = self.bus_pgia.read() & 0xffff + return self.gains + + @kernel + def sample_mu(self, data): + """Acquire a set of samples. + + Perform a conversion and transfer the samples. + + This assumes that the input FIFO of the ADC SPI RTIO channel is deep + enough to buffer the samples (half the length of `data` deep). + If it is not, there will be RTIO input overflows. + + :param data: List of data samples to fill. Must have even length. 
+ Samples are always read from the last channel (channel 7) down. + The `data` list will always be filled with the last item + holding to the sample from channel 7. + """ + self.cnv.pulse(30*ns) # t_CNVH + delay(450*ns) # t_CONV + mask = 1 << 15 + for i in range(len(data)//2): + self.bus_adc.write(0) + for i in range(len(data) - 1, -1, -2): + val = self.bus_adc.read() + data[i] = val >> 16 + val &= 0xffff + data[i - 1] = -(val & mask) + (val & ~mask) + + @kernel + def sample(self, data): + """Acquire a set of samples. + + .. seealso:: :meth:`sample_mu` + + :param data: List of floating point data samples to fill. + """ + n = len(data) + adc_data = [0]*n + self.sample_mu(adc_data) + for i in range(n): + channel = i + 8 - len(data) + gain = (self.gains >> (channel*2)) & 0b11 + data[i] = adc_mu_to_volt(adc_data[i], gain) diff --git a/artiq/coredevice/sawg.py b/artiq/coredevice/sawg.py index cc071ffa0..0a5905fa7 100644 --- a/artiq/coredevice/sawg.py +++ b/artiq/coredevice/sawg.py @@ -10,7 +10,7 @@ Output event replacement is supported except on the configuration channel. from artiq.language.types import TInt32, TFloat from numpy import int32, int64 -from artiq.language.core import kernel, now_mu +from artiq.language.core import kernel from artiq.coredevice.spline import Spline from artiq.coredevice.rtio import rtio_output @@ -69,7 +69,7 @@ class Config: ``t_sawg_spline/t_rtio_coarse = div + 1``. Default: ``0``. :param n: Current value of the counter. Default: ``0``. """ - rtio_output(now_mu(), self.channel, _SAWG_DIV, div | (n << 16)) + rtio_output((self.channel << 8) | _SAWG_DIV, div | (n << 16)) delay_mu(self._rtio_interval) @kernel @@ -108,7 +108,7 @@ class Config: :param clr2: Auto-clear phase accumulator of the ``phase2``/ ``frequency2`` DDS. Default: ``True`` """ - rtio_output(now_mu(), self.channel, _SAWG_CLR, clr0 | + rtio_output((self.channel << 8) | _SAWG_CLR, clr0 | (clr1 << 1) | (clr2 << 2)) delay_mu(self._rtio_interval) @@ -135,7 +135,7 @@ class Config: DUC-DDS data of this SAWG's *buddy* channel to *this* DAC channel. Default: ``0``. """ - rtio_output(now_mu(), self.channel, _SAWG_IQ_EN, i_enable | + rtio_output((self.channel << 8) | _SAWG_IQ_EN, i_enable | (q_enable << 1)) delay_mu(self._rtio_interval) @@ -151,25 +151,25 @@ class Config: .. seealso:: :meth:`set_duc_max` """ - rtio_output(now_mu(), self.channel, _SAWG_DUC_MAX, limit) + rtio_output((self.channel << 8) | _SAWG_DUC_MAX, limit) delay_mu(self._rtio_interval) @kernel def set_duc_min_mu(self, limit: TInt32): """.. seealso:: :meth:`set_duc_max_mu`""" - rtio_output(now_mu(), self.channel, _SAWG_DUC_MIN, limit) + rtio_output((self.channel << 8) | _SAWG_DUC_MIN, limit) delay_mu(self._rtio_interval) @kernel def set_out_max_mu(self, limit: TInt32): """.. seealso:: :meth:`set_duc_max_mu`""" - rtio_output(now_mu(), self.channel, _SAWG_OUT_MAX, limit) + rtio_output((self.channel << 8) | _SAWG_OUT_MAX, limit) delay_mu(self._rtio_interval) @kernel def set_out_min_mu(self, limit: TInt32): """.. seealso:: :meth:`set_duc_max_mu`""" - rtio_output(now_mu(), self.channel, _SAWG_OUT_MIN, limit) + rtio_output((self.channel << 8) | _SAWG_OUT_MIN, limit) delay_mu(self._rtio_interval) @kernel @@ -342,7 +342,7 @@ class SAWG: settings. This method advances the timeline by the time required to perform all - seven writes to the configuration channel. + 7 writes to the configuration channel, plus 9 coarse RTIO cycles. """ self.config.set_div(0, 0) self.config.set_clr(1, 1, 1) @@ -352,11 +352,21 @@ class SAWG: self.config.set_out_min(-1.) 
self.config.set_out_max(1.) self.frequency0.set_mu(0) + coarse_cycle = int64(self.core.ref_multiplier) + delay_mu(coarse_cycle) self.frequency1.set_mu(0) + delay_mu(coarse_cycle) self.frequency2.set_mu(0) + delay_mu(coarse_cycle) self.phase0.set_mu(0) + delay_mu(coarse_cycle) self.phase1.set_mu(0) + delay_mu(coarse_cycle) self.phase2.set_mu(0) + delay_mu(coarse_cycle) self.amplitude1.set_mu(0) + delay_mu(coarse_cycle) self.amplitude2.set_mu(0) + delay_mu(coarse_cycle) self.offset.set_mu(0) + delay_mu(coarse_cycle) diff --git a/artiq/coredevice/shiftreg.py b/artiq/coredevice/shiftreg.py index a71d6e217..79000eba3 100644 --- a/artiq/coredevice/shiftreg.py +++ b/artiq/coredevice/shiftreg.py @@ -6,13 +6,15 @@ class ShiftReg: """Driver for shift registers/latch combos connected to TTLs""" kernel_invariants = {"dt", "n"} - def __init__(self, dmgr, clk, ser, latch, n=32, dt=10*us): + def __init__(self, dmgr, clk, ser, latch, n=32, dt=10*us, ser_in=None): self.core = dmgr.get("core") self.clk = dmgr.get(clk) self.ser = dmgr.get(ser) self.latch = dmgr.get(latch) self.n = n self.dt = dt + if ser_in is not None: + self.ser_in = dmgr.get(ser_in) @kernel def set(self, data): @@ -34,3 +36,19 @@ class ShiftReg: delay(self.dt) self.latch.off() delay(self.dt) + + @kernel + def get(self): + delay(-2*(self.n + 1)*self.dt) + data = 0 + for i in range(self.n): + data <<= 1 + self.ser_in.sample_input() + if self.ser_in.sample_get(): + data |= 1 + delay(self.dt) + self.clk.on() + delay(self.dt) + self.clk.off() + delay(self.dt) + return data diff --git a/artiq/coredevice/spi.py b/artiq/coredevice/spi.py deleted file mode 100644 index 22859a962..000000000 --- a/artiq/coredevice/spi.py +++ /dev/null @@ -1,346 +0,0 @@ -""" -Driver for generic SPI on RTIO. - -Output event replacement is not supported and issuing commands at the same -time is an error. -""" - - -import numpy - -from artiq.language.core import syscall, kernel, portable, now_mu, delay_mu -from artiq.language.types import TInt32, TNone -from artiq.language.units import MHz -from artiq.coredevice.rtio import rtio_output, rtio_input_data - - -__all__ = [ - "SPI_DATA_ADDR", "SPI_XFER_ADDR", "SPI_CONFIG_ADDR", - "SPI_OFFLINE", "SPI_ACTIVE", "SPI_PENDING", - "SPI_CS_POLARITY", "SPI_CLK_POLARITY", "SPI_CLK_PHASE", - "SPI_LSB_FIRST", "SPI_HALF_DUPLEX", - "SPIMaster", "NRTSPIMaster" -] - - -SPI_DATA_ADDR, SPI_XFER_ADDR, SPI_CONFIG_ADDR = range(3) -( - SPI_OFFLINE, - SPI_ACTIVE, - SPI_PENDING, - SPI_CS_POLARITY, - SPI_CLK_POLARITY, - SPI_CLK_PHASE, - SPI_LSB_FIRST, - SPI_HALF_DUPLEX, -) = (1 << i for i in range(8)) - -SPI_RT2WB_READ = 1 << 2 - - -class SPIMaster: - """Core device Serial Peripheral Interface (SPI) bus master. - Owns one SPI bus. - - **Transfer Sequence**: - - * If desired, write the ``config`` register (:meth:`set_config`) - to configure and activate the core. - * If desired, write the ``xfer`` register (:meth:`set_xfer`) - to set ``cs_n``, ``write_length``, and ``read_length``. - * :meth:`write` to the ``data`` register (also for transfers with - zero bits to be written). Writing starts the transfer. - * If desired, :meth:`read_sync` (or :meth:`read_async` followed by a - :meth:`input_async` later) the ``data`` register corresponding to - the last completed transfer. - * If desired, :meth:`set_xfer` for the next transfer. - * If desired, :meth:`write` ``data`` queuing the next - (possibly chained) transfer. 
- - **Notes**: - - * In order to chain a transfer onto an in-flight transfer without - deasserting ``cs`` in between, the second :meth:`write` needs to - happen strictly later than ``2*ref_period_mu`` (two coarse RTIO - cycles) but strictly earlier than ``xfer_period_mu + write_period_mu`` - after the first. Note that :meth:`write` already applies a delay of - ``xfer_period_mu + write_period_mu``. - * A full transfer takes ``write_period_mu + xfer_period_mu``. - * Chained transfers can happen every ``xfer_period_mu``. - * Read data is available every ``xfer_period_mu`` starting - a bit after xfer_period_mu (depending on ``clk_phase``). - * As a consequence, in order to chain transfers together, new data must - be written before the pending transfer's read data becomes available. - - :param channel: RTIO channel number of the SPI bus to control. - """ - - kernel_invariants = {"core", "ref_period_mu", "channel"} - - def __init__(self, dmgr, channel, core_device="core"): - self.core = dmgr.get(core_device) - self.ref_period_mu = self.core.seconds_to_mu( - self.core.coarse_ref_period) - assert self.ref_period_mu == self.core.ref_multiplier - self.channel = channel - self.write_period_mu = numpy.int64(0) - self.read_period_mu = numpy.int64(0) - self.xfer_period_mu = numpy.int64(0) - - @portable - def frequency_to_div(self, f): - return int(1/(f*self.core.mu_to_seconds(self.ref_period_mu))) + 1 - - @kernel - def set_config(self, flags=0, write_freq=20*MHz, read_freq=20*MHz): - """Set the configuration register. - - * If ``config.cs_polarity`` == 0 (``cs`` active low, the default), - "``cs_n`` all deasserted" means "all ``cs_n`` bits high". - * ``cs_n`` is not mandatory in the pads supplied to the gateware core. - Framing and chip selection can also be handled independently - through other means, e.g. ``TTLOut``. - * If there is a ``miso`` wire in the pads supplied in the gateware, - input and output may be two signals ("4-wire SPI"), - otherwise ``mosi`` must be used for both output and input - ("3-wire SPI") and ``config.half_duplex`` must to be set - when reading data is desired or when the slave drives the - ``mosi`` signal at any point. - * The first bit output on ``mosi`` is always the MSB/LSB (depending - on ``config.lsb_first``) of the ``data`` register, independent of - ``xfer.write_length``. The last bit input from ``miso`` always ends - up in the LSB/MSB (respectively) of the ``data`` register, - independent of ``xfer.read_length``. - * Writes to the ``config`` register take effect immediately. - - **Configuration flags**: - - * :const:`SPI_OFFLINE`: all pins high-z (reset=1) - * :const:`SPI_ACTIVE`: transfer in progress (read-only) - * :const:`SPI_PENDING`: transfer pending in intermediate buffer - (read-only) - * :const:`SPI_CS_POLARITY`: active level of ``cs_n`` (reset=0) - * :const:`SPI_CLK_POLARITY`: idle level of ``clk`` (reset=0) - * :const:`SPI_CLK_PHASE`: first edge after ``cs`` assertion to sample - data on (reset=0). 
In Motorola/Freescale SPI language - (:const:`SPI_CLK_POLARITY`, :const:`SPI_CLK_PHASE`) == (CPOL, CPHA): - - - (0, 0): idle low, output on falling, input on rising - - (0, 1): idle low, output on rising, input on falling - - (1, 0): idle high, output on rising, input on falling - - (1, 1): idle high, output on falling, input on rising - * :const:`SPI_LSB_FIRST`: LSB is the first bit on the wire (reset=0) - * :const:`SPI_HALF_DUPLEX`: 3-wire SPI, in/out on ``mosi`` (reset=0) - - This method advances the timeline by the duration of the - RTIO-to-Wishbone bus transaction (three RTIO clock cycles). - - :param flags: A bit map of `SPI_*` flags. - :param write_freq: Desired SPI clock frequency during write bits. - :param read_freq: Desired SPI clock frequency during read bits. - """ - self.set_config_mu(flags, self.frequency_to_div(write_freq), - self.frequency_to_div(read_freq)) - - @kernel - def set_config_mu(self, flags=0, write_div=6, read_div=6): - """Set the ``config`` register (in SPI bus machine units). - - .. seealso:: :meth:`set_config` - - :param write_div: Counter load value to divide the RTIO - clock by to generate the SPI write clk. (minimum=2, reset=2) - ``f_rtio_clk/f_spi_write == write_div``. If ``write_div`` is odd, - the setup phase of the SPI clock is biased to longer lengths - by one RTIO clock cycle. - :param read_div: Ditto for the read clock. - """ - if write_div > 257 or write_div < 2 or read_div > 257 or read_div < 2: - raise ValueError('Divider values out of range') - rtio_output(now_mu(), self.channel, SPI_CONFIG_ADDR, flags | - ((write_div - 2) << 16) | ((read_div - 2) << 24)) - self.write_period_mu = int(write_div*self.ref_period_mu) - self.read_period_mu = int(read_div*self.ref_period_mu) - delay_mu(3*self.ref_period_mu) - - @kernel - def set_xfer(self, chip_select=0, write_length=0, read_length=0): - """Set the ``xfer`` register. - - * Every transfer consists of a write of ``write_length`` bits - immediately followed by a read of ``read_length`` bits. - * ``cs_n`` is asserted at the beginning and deasserted at the end - of the transfer if there is no other transfer pending. - * ``cs_n`` handling is agnostic to whether it is one-hot or decoded - somewhere downstream. If it is decoded, "``cs_n`` all deasserted" - should be handled accordingly (no slave selected). - If it is one-hot, asserting multiple slaves should only be attempted - if ``miso`` is either not connected between slaves, or open - collector, or correctly multiplexed externally. - * For 4-wire SPI only the sum of ``read_length`` and ``write_length`` - matters. The behavior is the same (except for clock speeds) no matter - how the total transfer length is divided between the two. For - 3-wire SPI, the direction of ``mosi`` is switched from output to - input after ``write_length`` bits. - * Data output on ``mosi`` in 4-wire SPI during the read cycles is what - is found in the data register at the time. - Data in the ``data`` register outside the least/most (depending - on ``config.lsb_first``) significant ``read_length`` bits is what is - seen on ``miso`` (or ``mosi`` if ``config.half_duplex``) - during the write cycles. - * Writes to ``xfer`` are synchronized to the start of the next - (possibly chained) transfer. - - This method advances the timeline by the duration of the - RTIO-to-Wishbone bus transaction (three RTIO clock cycles). - - :param chip_select: Bit mask of chip selects to assert. Or number of - the chip select to assert if ``cs`` is decoded downstream. 
- (reset=0) - :param write_length: Number of bits to write during the next transfer. - (reset=0) - :param read_length: Number of bits to read during the next transfer. - (reset=0) - """ - rtio_output(now_mu(), self.channel, SPI_XFER_ADDR, - chip_select | (write_length << 16) | (read_length << 24)) - self.xfer_period_mu = int(write_length*self.write_period_mu + - read_length*self.read_period_mu) - delay_mu(3*self.ref_period_mu) - - @kernel - def write(self, data=0): - """Write data to data register. - - * The ``data`` register and the shift register are 32 bits wide. - If there are no writes to the register, ``miso`` data reappears on - ``mosi`` after 32 cycles. - * A wishbone data register write is acknowledged when the - transfer has been written to the intermediate buffer. - It will be started when there are no other transactions being - executed, either beginning a new SPI transfer of chained - to an in-flight transfer. - * Writes take three ``ref_period`` cycles unless another - chained transfer is pending and the transfer being - executed is not complete. - * The SPI ``data`` register is double-buffered: Once a transfer has - started, new write data can be written, queuing a new transfer. - Transfers submitted this way are chained and executed without - deasserting ``cs`` in between. Once a transfer completes, - the previous transfer's read data is available in the - ``data`` register. - * For bit alignment and bit ordering see :meth:`set_config`. - - This method advances the timeline by the duration of the SPI transfer. - If a transfer is to be chained, the timeline needs to be rewound. - """ - rtio_output(now_mu(), self.channel, SPI_DATA_ADDR, data) - delay_mu(self.xfer_period_mu + self.write_period_mu) - - @kernel - def read_async(self): - """Trigger an asynchronous read from the ``data`` register. - - For bit alignment and bit ordering see :meth:`set_config`. - - Reads always finish in two cycles. - - Every data register read triggered by a :meth:`read_async` - must be matched by a :meth:`input_async` to retrieve the data. - - This method advances the timeline by the duration of the - RTIO-to-Wishbone bus transaction (three RTIO clock cycles). - """ - rtio_output(now_mu(), self.channel, SPI_DATA_ADDR | SPI_RT2WB_READ, 0) - delay_mu(3*self.ref_period_mu) - - @kernel - def input_async(self): - """Retrieves data read asynchronously from the ``data`` register. - - :meth:`input_async` must match a preeeding :meth:`read_async`. - """ - return rtio_input_data(self.channel) - - @kernel - def read_sync(self): - """Read the ``data`` register synchronously. - - This is a shortcut for :meth:`read_async` followed by - :meth:`input_async`. 
- """ - self.read_async() - return self.input_async() - - @kernel - def _get_xfer_sync(self): - rtio_output(now_mu(), self.channel, SPI_XFER_ADDR | SPI_RT2WB_READ, 0) - return rtio_input_data(self.channel) - - @kernel - def _get_config_sync(self): - rtio_output(now_mu(), self.channel, SPI_CONFIG_ADDR | SPI_RT2WB_READ, - 0) - return rtio_input_data(self.channel) - - -@syscall(flags={"nounwind", "nowrite"}) -def spi_set_config(busno: TInt32, flags: TInt32, write_div: TInt32, read_div: TInt32) -> TNone: - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def spi_set_xfer(busno: TInt32, chip_select: TInt32, write_length: TInt32, read_length: TInt32) -> TNone: - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def spi_write(busno: TInt32, data: TInt32) -> TNone: - raise NotImplementedError("syscall not simulated") - - -@syscall(flags={"nounwind", "nowrite"}) -def spi_read(busno: TInt32) -> TInt32: - raise NotImplementedError("syscall not simulated") - - -class NRTSPIMaster: - """Core device non-realtime Serial Peripheral Interface (SPI) bus master. - Owns one non-realtime SPI bus. - - With this driver, SPI transactions and are performed by the CPU without - involving RTIO. - - Realtime and non-realtime buses are separate and defined at bitstream - compilation time. - - See :class:`SPIMaster` for a description of the methods. - """ - def __init__(self, dmgr, busno=0, core_device="core"): - self.core = dmgr.get(core_device) - self.busno = busno - - @kernel - def set_config_mu(self, flags=0, write_div=6, read_div=6): - """Set the ``config`` register. - - Note that the non-realtime SPI cores are usually clocked by the system - clock and not the RTIO clock. In many cases, the SPI configuration is - already set by the firmware and you do not need to call this method. - - The offline bit cannot be set using this method. - The SPI bus is briefly taken offline when this method is called. - """ - spi_set_config(self.busno, flags, write_div, read_div) - - @kernel - def set_xfer(self, chip_select=0, write_length=0, read_length=0): - spi_set_xfer(self.busno, chip_select, write_length, read_length) - - @kernel - def write(self, data=0): - spi_write(self.busno, data) - - @kernel - def read(self): - return spi_read(self.busno) diff --git a/artiq/coredevice/spi2.py b/artiq/coredevice/spi2.py new file mode 100644 index 000000000..aa1045973 --- /dev/null +++ b/artiq/coredevice/spi2.py @@ -0,0 +1,288 @@ +""" +Driver for generic SPI on RTIO. + +This ARTIQ coredevice driver corresponds to the "new" MiSoC SPI core (v2). + +Output event replacement is not supported and issuing commands at the same +time is an error. +""" + +from artiq.language.core import syscall, kernel, portable, delay_mu +from artiq.language.types import TInt32, TNone +from artiq.coredevice.rtio import rtio_output, rtio_input_data + + +__all__ = [ + "SPI_DATA_ADDR", "SPI_CONFIG_ADDR", + "SPI_OFFLINE", "SPI_END", "SPI_INPUT", + "SPI_CS_POLARITY", "SPI_CLK_POLARITY", "SPI_CLK_PHASE", + "SPI_LSB_FIRST", "SPI_HALF_DUPLEX", + "SPIMaster", "NRTSPIMaster" +] + +SPI_DATA_ADDR = 0 +SPI_CONFIG_ADDR = 1 + +SPI_OFFLINE = 0x01 +SPI_END = 0x02 +SPI_INPUT = 0x04 +SPI_CS_POLARITY = 0x08 +SPI_CLK_POLARITY = 0x10 +SPI_CLK_PHASE = 0x20 +SPI_LSB_FIRST = 0x40 +SPI_HALF_DUPLEX = 0x80 + + +class SPIMaster: + """Core device Serial Peripheral Interface (SPI) bus master. + + Owns one SPI bus. + + This ARTIQ coredevice driver corresponds to the "new" MiSoC SPI core (v2). 
+ + **Transfer Sequence**: + + * If necessary, set the ``config`` register (:meth:`set_config` and + :meth:`set_config_mu`) to activate and configure the core and to set + various transfer parameters like transfer length, clock divider, + and chip selects. + * :meth:`write` to the ``data`` register. Writing starts the transfer. + * If the transfer included submitting the SPI input data as an RTIO input + event (``SPI_INPUT`` set), then :meth:`read` the ``data``. + * If ``SPI_END`` was not set, repeat the transfer sequence. + + A **transaction** consists of one or more **transfers**. The chip select + pattern is asserted for the entire length of the transaction. All but the + last transfer are submitted with ``SPI_END`` cleared in the configuration + register. + + :param channel: RTIO channel number of the SPI bus to control. + :param div: Initial CLK divider, see also: :meth:`update_xfer_duration_mu` + :param length: Initial transfer length, see also: + :meth:`update_xfer_duration_mu` + :param core_device: Core device name + """ + kernel_invariants = {"core", "ref_period_mu", "channel"} + + def __init__(self, dmgr, channel, div=0, length=0, core_device="core"): + self.core = dmgr.get(core_device) + self.ref_period_mu = self.core.seconds_to_mu( + self.core.coarse_ref_period) + assert self.ref_period_mu == self.core.ref_multiplier + self.channel = channel + self.update_xfer_duration_mu(div, length) + + @portable + def frequency_to_div(self, f): + """Convert a SPI clock frequency to the closest SPI clock divider.""" + return int(round(1/(f*self.core.mu_to_seconds(self.ref_period_mu)))) + + @kernel + def set_config(self, flags, length, freq, cs): + """Set the configuration register. + + * If ``SPI_CS_POLARITY`` is cleared (``cs`` active low, the default), + "``cs`` all deasserted" means "all ``cs_n`` bits high". + * ``cs_n`` is not mandatory in the pads supplied to the gateware core. + Framing and chip selection can also be handled independently + through other means, e.g. ``TTLOut``. + * If there is a ``miso`` wire in the pads supplied in the gateware, + input and output may be two signals ("4-wire SPI"), + otherwise ``mosi`` must be used for both output and input + ("3-wire SPI") and ``SPI_HALF_DUPLEX`` must to be set + when reading data or when the slave drives the + ``mosi`` signal at any point. + * The first bit output on ``mosi`` is always the MSB/LSB (depending + on ``SPI_LSB_FIRST``) of the ``data`` written, independent of + the ``length`` of the transfer. The last bit input from ``miso`` + always ends up in the LSB/MSB (respectively) of the ``data`` read, + independent of the ``length`` of the transfer. + * ``cs`` is asserted at the beginning and deasserted at the end + of the transaction. + * ``cs`` handling is agnostic to whether it is one-hot or decoded + somewhere downstream. If it is decoded, "``cs`` all deasserted" + should be handled accordingly (no slave selected). + If it is one-hot, asserting multiple slaves should only be attempted + if ``miso`` is either not connected between slaves, or open + collector, or correctly multiplexed externally. + * Changes to the configuration register take effect on the start of the + next transfer with the exception of ``SPI_OFFLINE`` which takes + effect immediately. + * The SPI core can only be written to when it is idle or waiting + for the next transfer data. Writing (:meth:`set_config`, + :meth:`set_config_mu` or :meth:`write`) + when the core is busy will result in an RTIO busy error being logged. 
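+
+        For illustration, a sketch of a two-transfer transaction (24 written
+        bits followed by an 8 bit read; the ``self.bus`` attribute name, the
+        divider of 8 and chip select 1 are assumptions, not defaults)::
+
+            # first transfer: keep the transaction open, no input event
+            self.bus.set_config_mu(0, 24, 8, 1)
+            self.bus.write(0x123456 << 8)
+            # last transfer: end the transaction and submit the read data
+            self.bus.set_config_mu(SPI_INPUT | SPI_END, 8, 8, 1)
+            self.bus.write(0)
+            data = self.bus.read() & 0xff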
+ + This method advances the timeline by one coarse RTIO clock cycle. + + **Configuration flags**: + + * :const:`SPI_OFFLINE`: all pins high-z (reset=1) + * :const:`SPI_END`: transfer in progress (reset=1) + * :const:`SPI_INPUT`: submit SPI read data as RTIO input event when + transfer is complete (reset=0) + * :const:`SPI_CS_POLARITY`: active level of ``cs_n`` (reset=0) + * :const:`SPI_CLK_POLARITY`: idle level of ``clk`` (reset=0) + * :const:`SPI_CLK_PHASE`: first edge after ``cs`` assertion to sample + data on (reset=0). In Motorola/Freescale SPI language + (:const:`SPI_CLK_POLARITY`, :const:`SPI_CLK_PHASE`) == (CPOL, CPHA): + + - (0, 0): idle low, output on falling, input on rising + - (0, 1): idle low, output on rising, input on falling + - (1, 0): idle high, output on rising, input on falling + - (1, 1): idle high, output on falling, input on rising + * :const:`SPI_LSB_FIRST`: LSB is the first bit on the wire (reset=0) + * :const:`SPI_HALF_DUPLEX`: 3-wire SPI, in/out on ``mosi`` (reset=0) + + :param flags: A bit map of `SPI_*` flags. + :param length: Number of bits to write during the next transfer. + (reset=1) + :param freq: Desired SPI clock frequency. (reset=f_rtio/2) + :param cs: Bit pattern of chip selects to assert. + Or number of the chip select to assert if ``cs`` is decoded + downstream. (reset=0) + """ + self.set_config_mu(flags, length, self.frequency_to_div(freq), cs) + + @kernel + def set_config_mu(self, flags, length, div, cs): + """Set the ``config`` register (in SPI bus machine units). + + .. seealso:: :meth:`set_config` + + :param flags: A bit map of `SPI_*` flags. + :param length: Number of bits to write during the next transfer. + (reset=1) + :param div: Counter load value to divide the RTIO + clock by to generate the SPI clock. (minimum=2, reset=2) + ``f_rtio_clk/f_spi == div``. If ``div`` is odd, + the setup phase of the SPI clock is one coarse RTIO clock cycle + longer than the hold phase. + :param cs: Bit pattern of chip selects to assert. + Or number of the chip select to assert if ``cs`` is decoded + downstream. (reset=0) + """ + if length > 32 or length < 1: + raise ValueError("Invalid SPI transfer length") + if div > 257 or div < 2: + raise ValueError("Invalid SPI clock divider") + rtio_output((self.channel << 8) | SPI_CONFIG_ADDR, flags | + ((length - 1) << 8) | ((div - 2) << 16) | (cs << 24)) + self.update_xfer_duration_mu(div, length) + delay_mu(self.ref_period_mu) + + @portable + def update_xfer_duration_mu(self, div, length): + """Calculate and set the transfer duration. + + This method updates the SPI transfer duration which is used + in :meth:`write` to advance the timeline. + + Use this method (and avoid having to call :meth:`set_config_mu`) + when the divider and transfer length have been configured + (using :meth:`set_config` or :meth:`set_config_mu`) by previous + experiments and are known. + + This method is portable and can also be called from e.g. + :meth:`__init__`. + + .. warning:: If this method is called while recording a DMA + sequence, the playback of the sequence will not update the + driver state. + When required, update the driver state manually (by calling + this method) after playing back a DMA sequence. + + :param div: SPI clock divider (see: :meth:`set_config_mu`) + :param length: SPI transfer length (see: :meth:`set_config_mu`) + """ + self.xfer_duration_mu = ((length + 1)*div + 1)*self.ref_period_mu + + @kernel + def write(self, data): + """Write SPI data to shift register register and start transfer. 
+ + * The ``data`` register and the shift register are 32 bits wide. + * Data writes take one ``ref_period`` cycle. + * A transaction consisting of a single transfer (``SPI_END``) takes + :attr:`xfer_duration_mu` ``=(n + 1)*div`` cycles RTIO time where + ``n`` is the number of bits and ``div`` is the SPI clock divider. + * Transfers in a multi-transfer transaction take up to one SPI clock + cycle less time depending on multiple parameters. Advanced users may + rewind the timeline appropriately to achieve faster multi-transfer + transactions. + * The SPI core will be busy for the duration of the SPI transfer. + * For bit alignment and bit ordering see :meth:`set_config`. + * The SPI core can only be written to when it is idle or waiting + for the next transfer data. Writing (:meth:`set_config`, + :meth:`set_config_mu` or :meth:`write`) + when the core is busy will result in an RTIO busy error being logged. + + This method advances the timeline by the duration of one + single-transfer SPI transaction (:attr:`xfer_duration_mu`). + + :param data: SPI output data to be written. + """ + rtio_output((self.channel << 8) | SPI_DATA_ADDR, data) + delay_mu(self.xfer_duration_mu) + + @kernel + def read(self): + """Read SPI data submitted by the SPI core. + + For bit alignment and bit ordering see :meth:`set_config`. + + This method does not alter the timeline. + + :return: SPI input data. + """ + return rtio_input_data(self.channel) + + +@syscall(flags={"nounwind", "nowrite"}) +def spi_set_config(busno: TInt32, flags: TInt32, length: TInt32, div: TInt32, cs: TInt32) -> TNone: + raise NotImplementedError("syscall not simulated") + + +@syscall(flags={"nounwind", "nowrite"}) +def spi_write(busno: TInt32, data: TInt32) -> TNone: + raise NotImplementedError("syscall not simulated") + + +@syscall(flags={"nounwind", "nowrite"}) +def spi_read(busno: TInt32) -> TInt32: + raise NotImplementedError("syscall not simulated") + + +class NRTSPIMaster: + """Core device non-realtime Serial Peripheral Interface (SPI) bus master. + Owns one non-realtime SPI bus. + + With this driver, SPI transactions and are performed by the CPU without + involving RTIO. + + Realtime and non-realtime buses are separate and defined at bitstream + compilation time. + + See :class:`SPIMaster` for a description of the methods. + """ + def __init__(self, dmgr, busno=0, core_device="core"): + self.core = dmgr.get(core_device) + self.busno = busno + + @kernel + def set_config_mu(self, flags=0, length=8, div=6, cs=1): + """Set the ``config`` register. + + Note that the non-realtime SPI cores are usually clocked by the system + clock and not the RTIO clock. In many cases, the SPI configuration is + already set by the firmware and you do not need to call this method. + """ + spi_set_config(self.busno, flags, length, div, cs) + + @kernel + def write(self, data=0): + spi_write(self.busno, data) + + @kernel + def read(self): + return spi_read(self.busno) diff --git a/artiq/coredevice/spline.py b/artiq/coredevice/spline.py index 3aeedf57a..9f8310d1e 100644 --- a/artiq/coredevice/spline.py +++ b/artiq/coredevice/spline.py @@ -1,5 +1,5 @@ from numpy import int32, int64 -from artiq.language.core import kernel, now_mu, portable, delay +from artiq.language.core import kernel, portable, delay from artiq.coredevice.rtio import rtio_output, rtio_output_wide from artiq.language.types import TInt32, TInt64, TFloat @@ -65,7 +65,7 @@ class Spline: :param value: Spline value in integer machine units. 
""" - rtio_output(now_mu(), self.channel, 0, value) + rtio_output(self.channel << 8, value) @kernel(flags={"fast-math"}) def set(self, value: TFloat): @@ -76,9 +76,9 @@ class Spline: if self.width > 32: l = [int32(0)] * 2 self.pack_coeff_mu([self.to_mu64(value)], l) - rtio_output_wide(now_mu(), self.channel, 0, l) + rtio_output_wide(self.channel << 8, l) else: - rtio_output(now_mu(), self.channel, 0, self.to_mu(value)) + rtio_output(self.channel << 8, self.to_mu(value)) @kernel def set_coeff_mu(self, value): # TList(TInt32) @@ -86,7 +86,7 @@ class Spline: :param value: Spline packed raw values. """ - rtio_output_wide(now_mu(), self.channel, 0, value) + rtio_output_wide(self.channel << 8, value) @portable(flags={"fast-math"}) def pack_coeff_mu(self, coeff, packed): # TList(TInt64), TList(TInt32) diff --git a/artiq/coredevice/suservo.py b/artiq/coredevice/suservo.py new file mode 100644 index 000000000..932adf35b --- /dev/null +++ b/artiq/coredevice/suservo.py @@ -0,0 +1,564 @@ +from artiq.language.core import kernel, delay, delay_mu, portable +from artiq.language.units import us, ns +from artiq.coredevice.rtio import rtio_output, rtio_input_data +from artiq.coredevice import spi2 as spi +from artiq.coredevice import urukul, sampler + + +COEFF_WIDTH = 18 +Y_FULL_SCALE_MU = (1 << (COEFF_WIDTH - 1)) - 1 +COEFF_DEPTH = 10 + 1 +WE = 1 << COEFF_DEPTH + 1 +STATE_SEL = 1 << COEFF_DEPTH +CONFIG_SEL = 1 << COEFF_DEPTH - 1 +CONFIG_ADDR = CONFIG_SEL | STATE_SEL +T_CYCLE = (2*(8 + 64) + 2)*8*ns # Must match gateware Servo.t_cycle. +COEFF_SHIFT = 11 + + +@portable +def y_mu_to_full_scale(y): + """Convert servo Y data from machine units to units of full scale.""" + return y / Y_FULL_SCALE_MU + + +@portable +def adc_mu_to_volts(x, gain): + """Convert servo ADC data from machine units to Volt.""" + val = (x >> 1) & 0xffff + mask = 1 << 15 + val = -(val & mask) + (val & ~mask) + return sampler.adc_mu_to_volt(val, gain) + + +class SUServo: + """Sampler-Urukul Servo parent and configuration device. + + Sampler-Urukul Servo is a integrated device controlling one + 8-channel ADC (Sampler) and two 4-channel DDS (Urukuls) with a DSP engine + connecting the ADC data and the DDS output amplitudes to enable + feedback. SU Servo can for example be used to implement intensity + stabilization of laser beams with an amplifier and AOM driven by Urukul + and a photodetector connected to Sampler. + + Additionally SU Servo supports multiple preconfigured profiles per channel + and features like automatic integrator hold. + + Notes: + + * See the SU Servo variant of the Kasli target for an example of how to + connect the gateware and the devices. Sampler and each Urukul need + two EEM connections. + * Ensure that both Urukuls are AD9910 variants and have the on-board + dip switches set to 1100 (first two on, last two off). + * Refer to the Sampler and Urukul documentation and the SU Servo + example device database for runtime configuration of the devices + (PLLs, gains, clock routing etc.) + + :param channel: RTIO channel number + :param pgia_device: Name of the Sampler PGIA gain setting SPI bus + :param cpld0_device: Name of the first Urukul CPLD SPI bus + :param cpld1_device: Name of the second Urukul CPLD SPI bus + :param dds0_device: Name of the AD9910 device for the DDS on the first + Urukul + :param dds1_device: Name of the AD9910 device for the DDS on the second + Urukul + :param gains: Initial value for PGIA gains shift register + (default: 0x0000). Knowledge of this state is not transferred + between experiments. 
+ :param core_device: Core device name + """ + kernel_invariants = {"channel", "core", "pgia", "cpld0", "cpld1", + "dds0", "dds1", "ref_period_mu"} + + def __init__(self, dmgr, channel, pgia_device, + cpld0_device, cpld1_device, + dds0_device, dds1_device, + gains=0x0000, core_device="core"): + + self.core = dmgr.get(core_device) + self.pgia = dmgr.get(pgia_device) + self.pgia.update_xfer_duration_mu(div=4, length=16) + self.dds0 = dmgr.get(dds0_device) + self.dds1 = dmgr.get(dds1_device) + self.cpld0 = dmgr.get(cpld0_device) + self.cpld1 = dmgr.get(cpld1_device) + self.channel = channel + self.gains = gains + self.ref_period_mu = self.core.seconds_to_mu( + self.core.coarse_ref_period) + assert self.ref_period_mu == self.core.ref_multiplier + + @kernel + def init(self): + """Initialize the servo, Sampler and both Urukuls. + + Leaves the servo disabled (see :meth:`set_config`), resets and + configures all DDS. + + Urukul initialization is performed blindly as there is no readback from + the DDS or the CPLDs. + + This method does not alter the profile configuration memory + or the channel controls. + """ + self.set_config(enable=0) + delay(3*us) # pipeline flush + + self.pgia.set_config_mu( + sampler.SPI_CONFIG | spi.SPI_END, + 16, 4, sampler.SPI_CS_PGIA) + + self.cpld0.init(blind=True) + cfg0 = self.cpld0.cfg_reg + self.cpld0.cfg_write(cfg0 | (0xf << urukul.CFG_MASK_NU)) + self.dds0.init(blind=True) + self.cpld0.cfg_write(cfg0) + + self.cpld1.init(blind=True) + cfg1 = self.cpld1.cfg_reg + self.cpld1.cfg_write(cfg1 | (0xf << urukul.CFG_MASK_NU)) + self.dds1.init(blind=True) + self.cpld1.cfg_write(cfg1) + + @kernel + def write(self, addr, value): + """Write to servo memory. + + This method advances the timeline by one coarse RTIO cycle. + + :param addr: Memory location address. + :param value: Data to be written. + """ + addr |= WE + value &= (1 << COEFF_WIDTH) - 1 + value |= (addr >> 8) << COEFF_WIDTH + addr = addr & 0xff + rtio_output((self.channel << 8) | addr, value) + delay_mu(self.ref_period_mu) + + @kernel + def read(self, addr): + """Read from servo memory. + + This method does not advance the timeline but consumes all slack. + + :param addr: Memory location address. + """ + value = (addr >> 8) << COEFF_WIDTH + addr = addr & 0xff + rtio_output((self.channel << 8) | addr, value) + return rtio_input_data(self.channel) + + @kernel + def set_config(self, enable): + """Set SU Servo configuration. + + This method advances the timeline by one servo memory access. + It does not support RTIO event replacement. + + :param enable (int): Enable servo operation. Enabling starts servo + iterations beginning with the ADC sampling stage. The first DDS + update will happen about two servo cycles (~2.3 µs) after enabling + the servo. The delay is deterministic. + This also provides a mean for synchronization of servo updates to + other RTIO activity. + Disabling takes up to two servo cycles (~2.3 µs) to clear the + processing pipeline. + """ + self.write(CONFIG_ADDR, enable) + + @kernel + def get_status(self): + """Get current SU Servo status. + + This method does not advance the timeline but consumes all slack. + + The ``done`` bit indicates that a SU Servo cycle has completed. + It is pulsed for one RTIO cycle every SU Servo cycle and asserted + continuously when the servo is not ``enabled`` and the pipeline has + drained (the last DDS update is done). + + This method returns and clears the clip indicator for all channels. 
+ An asserted clip indicator corresponds to the servo having encountered + an input signal on an active channel that would have resulted in the + IIR state exceeding the output range. + + :return: Status. Bit 0: enabled, bit 1: done, + bits 8-15: channel clip indicators. + """ + return self.read(CONFIG_ADDR) + + @kernel + def get_adc_mu(self, adc): + """Get the latest ADC reading (IIR filter input X0) in machine units. + + This method does not advance the timeline but consumes all slack. + + If reading servo state through this method collides with the servo + writing that same data, the data can become invalid. To ensure + consistent and valid data, stop the servo before using this method. + + :param adc: ADC channel number (0-7) + :return: 17 bit signed X0 + """ + # State memory entries are 25 bits. Due to the pre-adder dynamic + # range, X0/X1/OFFSET are only 24 bits. Finally, the RTIO interface + # only returns the 18 MSBs (the width of the coefficient memory). + return self.read(STATE_SEL | (adc << 1) | (1 << 8)) + + @kernel + def set_pgia_mu(self, channel, gain): + """Set instrumentation amplifier gain of an ADC channel. + + The four gain settings (0, 1, 2, 3) correspond to gains of + (1, 10, 100, 1000) respectively. + + :param channel: Channel index + :param gain: Gain setting + """ + gains = self.gains + gains &= ~(0b11 << (channel*2)) + gains |= gain << (channel*2) + self.pgia.write(gains << 16) + self.gains = gains + + @kernel + def get_adc(self, channel): + """Get the latest ADC reading (IIR filter input X0). + + This method does not advance the timeline but consumes all slack. + + If reading servo state through this method collides with the servo + writing that same data, the data can become invalid. To ensure + consistent and valid data, stop the servo before using this method. + + The PGIA gain setting must be known prior to using this method, either + by setting the gain (:meth:`set_pgia_mu`) or by supplying it + (:attr:`gains` or via the constructor/device database). + + :param channel: ADC channel number (0-7) + :return: ADC voltage + """ + val = self.get_adc_mu(channel) + gain = (self.gains >> (channel*2)) & 0b11 + return adc_mu_to_volts(val, gain) + + +class Channel: + """Sampler-Urukul Servo channel + + :param channel: RTIO channel number + :param servo_device: Name of the parent SUServo device + """ + kernel_invariants = {"channel", "core", "servo", "servo_channel"} + + def __init__(self, dmgr, channel, servo_device): + self.servo = dmgr.get(servo_device) + self.core = self.servo.core + self.channel = channel + # FIXME: this assumes the mem channel is right after the control + # channels + self.servo_channel = self.channel + 8 - self.servo.channel + + @kernel + def set(self, en_out, en_iir=0, profile=0): + """Operate channel. + + This method does not advance the timeline. Output RF switch setting + takes effect immediately and is independent of any other activity + (profile settings, other channels). The RF switch behaves like + :class:`artiq.coredevice.ttl.TTLOut`. RTIO event replacement is + supported. IIR updates take place once the RF switch has been enabled + for the configured delay and the profile setting has been stable. + Profile changes take between one and two servo cycles to reach the DDS.
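A sketch of the ADC read-back pattern described above: the servo is stopped before its state is inspected so that the read cannot collide with a servo write. The device name ``suservo0`` is hypothetical, the gain and delay values are illustrative, and initialization (:meth:`init`) is assumed to have run earlier::

    @kernel
    def read_adc_once(self):
        self.core.break_realtime()
        self.suservo0.set_pgia_mu(0, 0)       # ADC channel 0, PGIA gain 1
        delay(10*us)
        self.suservo0.set_config(enable=1)    # run a few servo iterations
        delay(50*us)
        self.suservo0.set_config(enable=0)    # stop before reading back
        delay(10*us)                          # ~2 servo cycles to drain the pipeline
        voltage = self.suservo0.get_adc(0)    # converted with the stored gain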
+ + :param en_out: RF switch enable + :param en_iir: IIR updates enable + :param profile: Active profile (0-31) + """ + rtio_output(self.channel << 8, + en_out | (en_iir << 1) | (profile << 2)) + + @kernel + def set_dds_mu(self, profile, ftw, offs, pow_=0): + """Set profile DDS coefficients in machine units. + + .. seealso:: :meth:`set_amplitude` + + :param profile: Profile number (0-31) + :param ftw: Frequency tuning word (32 bit unsigned) + :param offs: IIR offset (17 bit signed) + :param pow_: Phase offset word (16 bit) + """ + base = (self.servo_channel << 8) | (profile << 3) + self.servo.write(base + 0, ftw >> 16) + self.servo.write(base + 6, (ftw & 0xffff)) + self.set_dds_offset_mu(profile, offs) + self.servo.write(base + 2, pow_) + + @kernel + def set_dds(self, profile, frequency, offset, phase=0.): + """Set profile DDS coefficients. + + This method advances the timeline by four servo memory accesses. + Profile parameter changes are not synchronized. Activate a different + profile or stop the servo to ensure synchronous changes. + + :param profile: Profile number (0-31) + :param frequency: DDS frequency in Hz + :param offset: IIR offset (negative setpoint) in units of full scale, + see :meth:`dds_offset_to_mu` + :param phase: DDS phase in turns + """ + if self.servo_channel < 4: + dds = self.servo.dds0 + else: + dds = self.servo.dds1 + ftw = dds.frequency_to_ftw(frequency) + pow_ = dds.turns_to_pow(phase) + offs = self.dds_offset_to_mu(offset) + self.set_dds_mu(profile, ftw, offs, pow_) + + @kernel + def set_dds_offset_mu(self, profile, offs): + """Set only IIR offset in DDS coefficient profile. + + See :meth:`set_dds_mu` for setting the complete DDS profile. + + :param profile: Profile number (0-31) + :param offs: IIR offset (17 bit signed) + """ + base = (self.servo_channel << 8) | (profile << 3) + self.servo.write(base + 4, offs) + + @kernel + def set_dds_offset(self, profile, offset): + """Set only IIR offset in DDS coefficient profile. + + See :meth:`set_dds` for setting the complete DDS profile. + + :param profile: Profile number (0-31) + :param offset: IIR offset (negative setpoint) in units of full scale + """ + self.set_dds_offset_mu(profile, self.dds_offset_to_mu(offset)) + + @portable + def dds_offset_to_mu(self, offset): + """Convert IIR offset (negative setpoint) from units of full scale to + machine units (see :meth:`set_dds_mu`, :meth:`set_dds_offset_mu`). + + For positive ADC voltages as setpoints, this should be negative. Due to + rounding and representation as two's complement, ``offset=1`` can not + be represented while ``offset=-1`` can. + """ + return int(round(offset * (1 << COEFF_WIDTH - 1))) + + @kernel + def set_iir_mu(self, profile, adc, a1, b0, b1, dly=0): + """Set profile IIR coefficients in machine units. + + The recurrence relation is (all data signed and MSB aligned): + + .. math:: + a_0 y_n = a_1 y_{n - 1} + b_0 (x_n + o)/2 + b_1 (x_{n - 1} + o)/2 + + Where: + + * :math:`y_n` and :math:`y_{n-1}` are the current and previous + filter outputs, clipped to :math:`[0, 1[`. + * :math:`x_n` and :math:`x_{n-1}` are the current and previous + filter inputs in :math:`[-1, 1[`. + * :math:`o` is the offset + * :math:`a_0` is the normalization factor :math:`2^{11}` + * :math:`a_1` is the feedback gain + * :math:`b_0` and :math:`b_1` are the feedforward gains for the two + delays + + .. 
seealso:: :meth:`set_iir` + + :param profile: Profile number (0-31) + :param adc: ADC channel to take IIR input from (0-7) + :param a1: 18 bit signed A1 coefficient (Y1 coefficient, + feedback, integrator gain) + :param b0: 18 bit signed B0 coefficient (recent, + X0 coefficient, feed forward, proportional gain) + :param b1: 18 bit signed B1 coefficient (old, + X1 coefficient, feed forward, proportional gain) + :param dly: IIR update suppression time. In units of IIR cycles + (~1.2 µs, 0-255). + """ + base = (self.servo_channel << 8) | (profile << 3) + self.servo.write(base + 3, adc | (dly << 8)) + self.servo.write(base + 1, b1) + self.servo.write(base + 5, a1) + self.servo.write(base + 7, b0) + + @kernel + def set_iir(self, profile, adc, kp, ki=0., g=0., delay=0.): + """Set profile IIR coefficients. + + This method advances the timeline by four servo memory accesses. + Profile parameter changes are not synchronized. Activate a different + profile or stop the servo to ensure synchronous changes. + + Gains are given in units of output full per scale per input full scale. + + The transfer function is (up to time discretization and + coefficient quantization errors): + + .. math:: + H(s) = k_p + \\frac{k_i}{s + \\frac{k_i}{g}} + + Where: + * :math:`s = \\sigma + i\\omega` is the complex frequency + * :math:`k_p` is the proportional gain + * :math:`k_i` is the integrator gain + * :math:`g` is the integrator gain limit + + :param profile: Profile number (0-31) + :param adc: ADC channel to take IIR input from (0-7) + :param kp: Proportional gain (1). This is usually negative (closed + loop, positive ADC voltage, positive setpoint). When 0, this + implements a pure I controller. + :param ki: Integrator gain (rad/s). When 0 (the default) + this implements a pure P controller. Same sign as ``kp``. + :param g: Integrator gain limit (1). When 0 (the default) the + integrator gain limit is infinite. Same sign as ``ki``. + :param delay: Delay (in seconds, 0-300 µs) before allowing IIR updates + after invoking :meth:`set`. This is rounded to the nearest number + of servo cycles (~1.2 µs). Since the RF switch (:meth:`set`) can be + opened at any time relative to the servo cycle, the first DDS + update that carries updated IIR data will occur approximately + between ``delay + 1 cycle`` and ``delay + 2 cycles`` after + :meth:`set`. + """ + B_NORM = 1 << COEFF_SHIFT + 1 + A_NORM = 1 << COEFF_SHIFT + COEFF_MAX = 1 << COEFF_WIDTH - 1 + + kp *= B_NORM + if ki == 0.: + # pure P + a1 = 0 + b1 = 0 + b0 = int(round(kp)) + else: + # I or PI + ki *= B_NORM*T_CYCLE/2. + if g == 0.: + c = 1. + a1 = A_NORM + else: + c = 1./(1. + ki/(g*B_NORM)) + a1 = int(round((2.*c - 1.)*A_NORM)) + b0 = int(round(kp + ki*c)) + b1 = int(round(kp + (ki - 2.*kp)*c)) + if b1 == -b0: + raise ValueError("low integrator gain and/or gain limit") + + if (b0 >= COEFF_MAX or b0 < -COEFF_MAX or + b1 >= COEFF_MAX or b1 < -COEFF_MAX): + raise ValueError("high gains") + + dly = int(round(delay/T_CYCLE)) + self.set_iir_mu(profile, adc, a1, b0, b1, dly) + + @kernel + def get_profile_mu(self, profile, data): + """Retrieve profile data. + + Profile data is returned in the ``data`` argument in machine units + packed as: ``[ftw >> 16, b1, pow, adc | (delay << 8), offset, a1, + ftw & 0xffff, b0]``. + + .. seealso:: The individual fields are described in + :meth:`set_iir_mu` and :meth:`set_dds_mu`. + + This method advances the timeline by 32 µs and consumes all slack. 
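To illustrate how these pieces combine, a proportional-integral profile could be configured along the following lines. The device names ``suservo0`` and ``suservo0_ch0`` are hypothetical and all numbers are purely illustrative; with the PGIA at unit gain the Sampler full scale is ±10 V, so a +0.5 V setpoint corresponds to an IIR offset of -0.05::

    @kernel
    def setup_loop(self):
        self.core.break_realtime()
        # Profile 0 drives the DDS at 80 MHz towards a +0.5 V setpoint on ADC 0.
        self.suservo0_ch0.set_dds(profile=0, frequency=80*MHz, offset=-0.05)
        # Closed loop: negative proportional and integrator gains,
        # integrator gain limit of -10.
        self.suservo0_ch0.set_iir(profile=0, adc=0, kp=-0.1, ki=-1000., g=-10.)
        # Enable the RF switch and IIR updates on profile 0, then start the servo.
        self.suservo0_ch0.set(en_out=1, en_iir=1, profile=0)
        self.suservo0.set_config(enable=1)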
+ + :param profile: Profile number (0-31) + :param data: List of 8 integers to write the profile data into + """ + base = (self.servo_channel << 8) | (profile << 3) + for i in range(len(data)): + data[i] = self.servo.read(base + i) + delay(4*us) + + @kernel + def get_y_mu(self, profile): + """Get a profile's IIR state (filter output, Y0) in machine units. + + The IIR state is also known as the "integrator", or the DDS amplitude + scale factor. It is 17 bits wide and unsigned. + + This method does not advance the timeline but consumes all slack. + + If reading servo state through this method collides with the servo + writing that same data, the data can become invalid. To ensure + consistent and valid data, stop the servo before using this method. + + :param profile: Profile number (0-31) + :return: 17 bit unsigned Y0 + """ + return self.servo.read(STATE_SEL | (self.servo_channel << 5) | profile) + + @kernel + def get_y(self, profile): + """Get a profile's IIR state (filter output, Y0). + + The IIR state is also known as the "integrator", or the DDS amplitude + scale factor. It is 17 bits wide and unsigned. + + This method does not advance the timeline but consumes all slack. + + If reading servo state through this method collides with the servo + writing that same data, the data can become invalid. To ensure + consistent and valid data, stop the servo before using this method. + + :param profile: Profile number (0-31) + :return: IIR filter output in Y0 units of full scale + """ + return y_mu_to_full_scale(self.get_y_mu(profile)) + + @kernel + def set_y_mu(self, profile, y): + """Set a profile's IIR state (filter output, Y0) in machine units. + + The IIR state is also known as the "integrator", or the DDS amplitude + scale factor. It is 17 bits wide and unsigned. + + This method must not be used when the servo could be writing to the + same location. Either deactivate the profile, or deactivate IIR + updates, or disable servo iterations. + + This method advances the timeline by one servo memory access. + + :param profile: Profile number (0-31) + :param y: 17 bit unsigned Y0 + """ + # State memory is 25 bits wide and signed. + # Reads interact with the 18 MSBs (coefficient memory width) + self.servo.write(STATE_SEL | (self.servo_channel << 5) | profile, y) + + @kernel + def set_y(self, profile, y): + """Set a profile's IIR state (filter output, Y0). + + The IIR state is also known as the "integrator", or the DDS amplitude + scale factor. It is 17 bits wide and unsigned. + + This method must not be used when the servo could be writing to the + same location. Either deactivate the profile, or deactivate IIR + updates, or disable servo iterations. + + This method advances the timeline by one servo memory access. + + :param profile: Profile number (0-31) + :param y: IIR state in units of full scale + """ + y_mu = int(round(y * Y_FULL_SCALE_MU)) + if y_mu < 0 or y_mu > (1 << 17) - 1: + raise ValueError("Invalid SUServo y-value!") + self.set_y_mu(profile, y_mu) + return y_mu diff --git a/artiq/coredevice/trf372017.py b/artiq/coredevice/trf372017.py new file mode 100644 index 000000000..40957db86 --- /dev/null +++ b/artiq/coredevice/trf372017.py @@ -0,0 +1,133 @@ +class TRF372017: + """TRF372017 settings and register map. + + For possible values, documentation, and explanation, see the datasheet.
+ https://www.ti.com/lit/gpn/trf372017 + """ + rdiv = 21 # 13b + ref_inv = 0 + neg_vco = 1 + icp = 0 # 1.94 mA, 5b + icp_double = 0 + cal_clk_sel = 12 # /16, 4b + + ndiv = 420 # 16b + pll_div_sel = 0 # /1, 2b + prsc_sel = 1 # 8/9 + vco_sel = 2 # 2b + vcosel_mode = 0 + cal_acc = 0b00 # 2b + en_cal = 1 + + nfrac = 0 # 25b + + pwd_pll = 0 + pwd_cp = 0 + pwd_vco = 0 + pwd_vcomux = 0 + pwd_div124 = 0 + pwd_presc = 0 + pwd_out_buff = 1 + pwd_lo_div = 1 + pwd_tx_div = 0 + pwd_bb_vcm = 0 + pwd_dc_off = 0 + en_extvco = 0 + en_isource = 0 + ld_ana_prec = 0 # 2b + cp_tristate = 0 # 2b + speedup = 0 + ld_dig_prec = 1 + en_dith = 1 + mod_ord = 2 # 3rd order, 2b + dith_sel = 0 + del_sd_clk = 2 # 2b + en_frac = 0 + + vcobias_rtrim = 4 # 3b + pllbias_rtrim = 2 # 2b + vco_bias = 8 # 460 µA, 4b + vcobuf_bias = 2 # 2b + vcomux_bias = 3 # 2b + bufout_bias = 0 # 300 µA, 2b + vco_cal_ib = 0 # PTAT + vco_cal_ref = 2 # 1.04 V, 2b + vco_ampl_ctrl = 3 # 2b + vco_vb_ctrl = 0 # 1.2 V, 2b + en_ld_isource = 0 + + ioff = 0x80 # 8b + qoff = 0x80 # 8b + vref_sel = 4 # 0.85 V, 3b + tx_div_sel = 1 # div2, 2b + lo_div_sel = 3 # div8, 2b + tx_div_bias = 1 # 37.5 µA, 2b + lo_div_bias = 2 # 50 µA, 2b + + vco_trim = 0x20 # 6b + vco_test_mode = 0 + cal_bypass = 0 + mux_ctrl = 1 # lock detect, 3b + isource_sink = 0 + isource_trim = 4 # 3b + pd_tc = 0 # 2b + ib_vcm_sel = 0 # ptat + dcoffset_i = 2 # 150 µA, 2b + vco_bias_sel = 1 # spi + + def __init__(self, updates=None): + if updates is None: + return + for key, value in updates.items(): + if not hasattr(self, key): + raise KeyError("invalid setting", key) + setattr(self, key, value) + + def get_mmap(self): + mmap = [] + mmap.append( + 0x9 | + (self.rdiv << 5) | (self.ref_inv << 19) | (self.neg_vco << 20) | + (self.icp << 21) | (self.icp_double << 26) | + (self.cal_clk_sel << 27)) + mmap.append( + 0xa | + (self.ndiv << 5) | (self.pll_div_sel << 21) | (self.prsc_sel << 23) | + (self.vco_sel << 26) | (self.vcosel_mode << 28) | + (self.cal_acc << 29) | (self.en_cal << 31)) + mmap.append(0xb | (self.nfrac << 5)) + mmap.append( + 0xc | + (self.pwd_pll << 5) | (self.pwd_cp << 6) | (self.pwd_vco << 7) | + (self.pwd_vcomux << 8) | (self.pwd_div124 << 9) | + (self.pwd_presc << 10) | (self.pwd_out_buff << 12) | + (self.pwd_lo_div << 13) | (self.pwd_tx_div << 14) | + (self.pwd_bb_vcm << 15) | (self.pwd_dc_off << 16) | + (self.en_extvco << 17) | (self.en_isource << 18) | + (self.ld_ana_prec << 19) | (self.cp_tristate << 21) | + (self.speedup << 23) | (self.ld_dig_prec << 24) | + (self.en_dith << 25) | (self.mod_ord << 26) | + (self.dith_sel << 28) | (self.del_sd_clk << 29) | + (self.en_frac << 31)) + mmap.append( + 0xd | + (self.vcobias_rtrim << 5) | (self.pllbias_rtrim << 8) | + (self.vco_bias << 10) | (self.vcobuf_bias << 14) | + (self.vcomux_bias << 16) | (self.bufout_bias << 18) | + (1 << 21) | (self.vco_cal_ib << 22) | (self.vco_cal_ref << 23) | + (self.vco_ampl_ctrl << 26) | (self.vco_vb_ctrl << 28) | + (self.en_ld_isource << 31)) + mmap.append( + 0xe | + (self.ioff << 5) | (self.qoff << 13) | (self.vref_sel << 21) | + (self.tx_div_sel << 24) | (self.lo_div_sel << 26) | + (self.tx_div_bias << 28) | (self.lo_div_bias << 30)) + mmap.append( + 0xf | + (self.vco_trim << 7) | (self.vco_test_mode << 14) | + (self.cal_bypass << 15) | (self.mux_ctrl << 16) | + (self.isource_sink << 19) | (self.isource_trim << 20) | + (self.pd_tc << 23) | (self.ib_vcm_sel << 25) | + (1 << 28) | (self.dcoffset_i << 29) | + (self.vco_bias_sel << 31)) + return mmap diff --git a/artiq/coredevice/ttl.py 
b/artiq/coredevice/ttl.py index 76b443eb7..2bc40ed58 100644 --- a/artiq/coredevice/ttl.py +++ b/artiq/coredevice/ttl.py @@ -2,8 +2,8 @@ Drivers for TTL signals on RTIO. TTL channels (including the clock generator) all support output event -replacement. For example, pulses of "zero" length (e.g. ``on()`` -immediately followed by ``off()``, without a delay) are suppressed. +replacement. For example, pulses of "zero" length (e.g. :meth:`TTLInOut.on` +immediately followed by :meth:`TTLInOut.off`, without a delay) are suppressed. """ import numpy @@ -29,14 +29,12 @@ class TTLOut: :param channel: channel number """ - kernel_invariants = {"core", "channel"} + kernel_invariants = {"core", "channel", "target_o"} def __init__(self, dmgr, channel, core_device="core"): self.core = dmgr.get(core_device) self.channel = channel - - # in RTIO cycles - self.o_previous_timestamp = numpy.int64(0) + self.target_o = channel << 8 @kernel def output(self): @@ -44,19 +42,11 @@ class TTLOut: @kernel def set_o(self, o): - rtio_output(now_mu(), self.channel, 0, 1 if o else 0) - self.o_previous_timestamp = now_mu() - - @kernel - def sync(self): - """Busy-wait until all programmed level switches have been - effected.""" - while self.core.get_rtio_counter_mu() < self.o_previous_timestamp: - pass + rtio_output(self.target_o, 1 if o else 0) @kernel def on(self): - """Sets the output to a logic high state at the current position + """Set the output to a logic high state at the current position of the time cursor. The time cursor is not modified by this function.""" @@ -107,8 +97,8 @@ class TTLInOut: This should be used with bidirectional channels. Note that the channel is in input mode by default. If you need to drive a - signal, you must call ``output``. If the channel is in output mode most of - the time in your setup, it is a good idea to call ``output`` in the + signal, you must call :meth:`output`. If the channel is in output mode most of + the time in your setup, it is a good idea to call :meth:`output` in the startup kernel. There are three input APIs: gating, sampling and watching. When one @@ -117,20 +107,30 @@ class TTLInOut: :param channel: channel number """ - kernel_invariants = {"core", "channel"} + kernel_invariants = {"core", "channel", "gate_latency_mu", + "target_o", "target_oe", "target_sens", "target_sample"} - def __init__(self, dmgr, channel, core_device="core"): + def __init__(self, dmgr, channel, gate_latency_mu=None, + core_device="core"): self.core = dmgr.get(core_device) self.channel = channel - # in RTIO cycles - self.o_previous_timestamp = numpy.int64(0) - self.i_previous_timestamp = numpy.int64(0) - self.queued_samples = 0 + # With TTLs inputs, the gate control is connected to a high-latency + # path through SED. When looking at the RTIO counter to determine if + # the gate has closed, we need to take this latency into account. + # See: https://github.com/m-labs/artiq/issues/1137 + if gate_latency_mu is None: + gate_latency_mu = 13*self.core.ref_multiplier + self.gate_latency_mu = gate_latency_mu + + self.target_o = (channel << 8) + 0 + self.target_oe = (channel << 8) + 1 + self.target_sens = (channel << 8) + 2 + self.target_sample = (channel << 8) + 3 @kernel def set_oe(self, oe): - rtio_output(now_mu(), self.channel, 1, 1 if oe else 0) + rtio_output(self.target_oe, 1 if oe else 0) @kernel def output(self): @@ -138,7 +138,11 @@ class TTLInOut: cursor. There must be a delay of at least one RTIO clock cycle before any - other command can be issued.""" + other command can be issued. 
+ + This method only configures the direction at the FPGA. When using + buffered I/O interfaces, such as the Sinara TTL cards, the buffer + direction must be configured separately in the hardware.""" self.set_oe(True) @kernel @@ -147,20 +151,16 @@ class TTLInOut: cursor. There must be a delay of at least one RTIO clock cycle before any - other command can be issued.""" + other command can be issued. + + This method only configures the direction at the FPGA. When using + buffered I/O interfaces, such as the Sinara TTL cards, the buffer + direction must be configured separately in the hardware.""" self.set_oe(False) @kernel def set_o(self, o): - rtio_output(now_mu(), self.channel, 0, 1 if o else 0) - self.o_previous_timestamp = now_mu() - - @kernel - def sync(self): - """Busy-wait until all programmed level switches have been - effected.""" - while self.core.get_rtio_counter_mu() < self.o_previous_timestamp: - pass + rtio_output(self.target_o, 1 if o else 0) @kernel def on(self): @@ -184,7 +184,7 @@ class TTLInOut: @kernel def pulse_mu(self, duration): - """Pulses the output high for the specified duration + """Pulse the output high for the specified duration (in machine units). The time cursor is advanced by the specified duration.""" @@ -194,7 +194,7 @@ class TTLInOut: @kernel def pulse(self, duration): - """Pulses the output high for the specified duration + """Pulse the output high for the specified duration (in seconds). The time cursor is advanced by the specified duration.""" @@ -205,89 +205,172 @@ class TTLInOut: # Input API: gating @kernel def _set_sensitivity(self, value): - rtio_output(now_mu(), self.channel, 2, value) - self.i_previous_timestamp = now_mu() + rtio_output(self.target_sens, value) @kernel def gate_rising_mu(self, duration): """Register rising edge events for the specified duration (in machine units). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. + """ self._set_sensitivity(1) delay_mu(duration) self._set_sensitivity(0) + return now_mu() @kernel def gate_falling_mu(self, duration): """Register falling edge events for the specified duration (in machine units). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. + """ self._set_sensitivity(2) delay_mu(duration) self._set_sensitivity(0) + return now_mu() @kernel def gate_both_mu(self, duration): """Register both rising and falling edge events for the specified duration (in machine units). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. + """ self._set_sensitivity(3) delay_mu(duration) self._set_sensitivity(0) + return now_mu() @kernel def gate_rising(self, duration): """Register rising edge events for the specified duration (in seconds). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. 
+ """ self._set_sensitivity(1) delay(duration) self._set_sensitivity(0) + return now_mu() @kernel def gate_falling(self, duration): """Register falling edge events for the specified duration (in seconds). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. + + """ self._set_sensitivity(2) delay(duration) self._set_sensitivity(0) + return now_mu() @kernel def gate_both(self, duration): """Register both rising and falling edge events for the specified duration (in seconds). - The time cursor is advanced by the specified duration.""" + The time cursor is advanced by the specified duration. + + :return: The timeline cursor at the end of the gate window, for + convenience when used with :meth:`count`/:meth:`timestamp_mu`. + """ self._set_sensitivity(3) delay(duration) self._set_sensitivity(0) + return now_mu() @kernel - def count(self): - """Poll the RTIO input during all the previously programmed gate - openings, and returns the number of registered events. + def count(self, up_to_timestamp_mu): + """Consume RTIO input events until the hardware timestamp counter has + reached the specified timestamp and return the number of observed + events. - This function does not interact with the time cursor.""" + This function does not interact with the timeline cursor. + + See the ``gate_*()`` family of methods to select the input transitions + that generate events, and :meth:`timestamp_mu` to obtain the timestamp + of the first event rather than an accumulated count. + + :param up_to_timestamp_mu: The timestamp up to which execution is + blocked, that is, up to which input events are guaranteed to be + taken into account. (Events with later timestamps might still be + registered if they are already available.) + + :return: The number of events before the timeout elapsed (0 if none + observed). + + Examples: + To count events on channel ``ttl_input``, up to the current timeline + position:: + + ttl_input.count(now_mu()) + + If other events are scheduled between the end of the input gate + period and when the number of events is counted, using ``now_mu()`` + as timeout consumes an unnecessary amount of timeline slack. In + such cases, it can be beneficial to pass a more precise timestamp, + for example:: + + gate_end_mu = ttl_input.gate_rising(100 * us) + + # Schedule a long pulse sequence, represented here by a delay. + delay(10 * ms) + + # Get number of rising edges. This will block until the end of + # the gate window, but does not wait for the long pulse sequence + # afterwards, thus (likely) completing with a large amount of + # slack left. + num_rising_edges = ttl_input.count(gate_end_mu) + + The ``gate_*()`` family of methods return the cursor at the end + of the window, allowing this to be expressed in a compact fashion:: + + ttl_input.count(ttl_input.gate_rising(100 * us)) + """ count = 0 - while rtio_input_timestamp(self.i_previous_timestamp, self.channel) >= 0: + while rtio_input_timestamp(up_to_timestamp_mu + self.gate_latency_mu, self.channel) >= 0: count += 1 return count @kernel - def timestamp_mu(self): - """Poll the RTIO input and returns an event timestamp (in machine - units), according to the gating. 
+ def timestamp_mu(self, up_to_timestamp_mu): + """Return the timestamp of the next RTIO input event, or -1 if the + hardware timestamp counter reaches the given value before an event is + received. - If the gate is permanently closed, returns a negative value. + This function does not interact with the timeline cursor. - This function does not interact with the time cursor.""" - return rtio_input_timestamp(self.i_previous_timestamp, self.channel) + See the ``gate_*()`` family of methods to select the input transitions + that generate events, and :meth:`count` for usage examples. + + :param up_to_timestamp_mu: The timestamp up to which execution is + blocked, that is, up to which input events are guaranteed to be + taken into account. (Events with later timestamps might still be + registered if they are already available.) + + :return: The timestamp (in machine units) of the first event received; + -1 on timeout. + """ + return rtio_input_timestamp(up_to_timestamp_mu + self.gate_latency_mu, self.channel) # Input API: sampling @kernel @@ -296,15 +379,15 @@ class TTLInOut: position of the time cursor. The time cursor is not modified by this function.""" - rtio_output(now_mu(), self.channel, 3, 0) + rtio_output(self.target_sample, 0) @kernel def sample_get(self): """Returns the value of a sample previously obtained with - ``sample_input``. + :meth:`sample_input`. Multiple samples may be queued (using multiple calls to - ``sample_input``) into the RTIO FIFOs and subsequently read out using + :meth:`sample_input`) into the RTIO FIFOs and subsequently read out using multiple calls to this function. This function does not interact with the time cursor.""" @@ -324,22 +407,22 @@ class TTLInOut: @kernel def watch_stay_on(self): """Checks that the input is at a high level at the position - of the time cursor and keep checking until ``watch_done`` + of the time cursor and keep checking until :meth:`watch_done` is called. Returns ``True`` if the input is high. A call to this function - must always be followed by an eventual call to ``watch_done`` + must always be followed by an eventual call to :meth:`watch_done` (use e.g. a try/finally construct to ensure this). The time cursor is not modified by this function. """ - rtio_output(now_mu(), self.channel, 3, 2) # gate falling + rtio_output(self.target_sample, 2) # gate falling return rtio_input_data(self.channel) == 1 @kernel def watch_stay_off(self): - """Like ``watch_stay_on``, but for low levels.""" - rtio_output(now_mu(), self.channel, 3, 1) # gate rising + """Like :meth:`watch_stay_on`, but for low levels.""" + rtio_output(self.target_sample, 1) # gate rising return rtio_input_data(self.channel) == 0 @kernel @@ -352,10 +435,10 @@ class TTLInOut: The time cursor is not modified by this function. This function always makes the slack negative. """ - rtio_output(now_mu(), self.channel, 2, 0) + rtio_output(self.target_sens, 0) success = True try: - while rtio_input_timestamp(now_mu(), self.channel) != -1: + while rtio_input_timestamp(now_mu() + self.gate_latency_mu, self.channel) != -1: success = False except RTIOOverflow: success = False @@ -371,16 +454,16 @@ class TTLClockGen: The time cursor is not modified by any function in this class. 
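The watch API described above is typically wrapped in a ``try``/``finally`` block so that :meth:`watch_done` is always issued. A minimal sketch, assuming a bidirectional channel named ``ttl0`` (hypothetical) that has already been switched to input mode, and assuming :meth:`watch_done` returns ``True`` when no prohibited transition was seen::

    @kernel
    def level_stayed_high(self) -> TBool:
        self.core.break_realtime()
        ok = self.ttl0.watch_stay_on()   # True if the input is currently high
        try:
            delay(100*us)                # window during which the level is watched
        finally:
            ok = self.ttl0.watch_done() and ok
        return ok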
:param channel: channel number + :param acc_width: accumulator width in bits """ - kernel_invariants = {"core", "channel", "acc_width"} + kernel_invariants = {"core", "channel", "target", "acc_width"} - def __init__(self, dmgr, channel, core_device="core"): + def __init__(self, dmgr, channel, acc_width=24, core_device="core"): self.core = dmgr.get(core_device) self.channel = channel + self.target = channel << 8 - # in RTIO cycles - self.previous_timestamp = numpy.int64(0) - self.acc_width = numpy.int64(24) + self.acc_width = numpy.int64(acc_width) @portable def frequency_to_ftw(self, frequency): @@ -414,22 +497,14 @@ class TTLClockGen: Due to the way the clock generator operates, frequency tuning words that are not powers of two cause jitter of one RTIO clock cycle at the output.""" - rtio_output(now_mu(), self.channel, 0, frequency) - self.previous_timestamp = now_mu() + rtio_output(self.target, frequency) @kernel def set(self, frequency): - """Like ``set_mu``, but using Hz.""" + """Like :meth:`set_mu`, but using Hz.""" self.set_mu(self.frequency_to_ftw(frequency)) @kernel def stop(self): """Stop the toggling of the clock and set the output level to 0.""" self.set_mu(0) - - @kernel - def sync(self): - """Busy-wait until all programmed frequency switches and stops have - been effected.""" - while self.core.get_rtio_counter_mu() < self.o_previous_timestamp: - pass diff --git a/artiq/coredevice/urukul.py b/artiq/coredevice/urukul.py new file mode 100644 index 000000000..cbc3edbfc --- /dev/null +++ b/artiq/coredevice/urukul.py @@ -0,0 +1,378 @@ +from artiq.language.core import kernel, delay, portable, at_mu, now_mu +from artiq.language.units import us, ms + +from numpy import int32, int64 + +from artiq.coredevice import spi2 as spi + + +SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END | + 0*spi.SPI_INPUT | 1*spi.SPI_CS_POLARITY | + 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | + 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + +# SPI clock write and read dividers +SPIT_CFG_WR = 2 +SPIT_CFG_RD = 16 +# 30 MHz fmax, 20 ns setup, 40 ns shift to latch (limiting) +SPIT_ATT_WR = 6 +SPIT_ATT_RD = 16 +SPIT_DDS_WR = 2 +SPIT_DDS_RD = 16 + +# CFG configuration register bit offsets +CFG_RF_SW = 0 +CFG_LED = 4 +CFG_PROFILE = 8 +CFG_IO_UPDATE = 12 +CFG_MASK_NU = 13 +CFG_CLK_SEL0 = 17 +CFG_CLK_SEL1 = 21 +CFG_SYNC_SEL = 18 +CFG_RST = 19 +CFG_IO_RST = 20 +CFG_CLK_DIV = 22 + +# STA status register bit offsets +STA_RF_SW = 0 +STA_SMP_ERR = 4 +STA_PLL_LOCK = 8 +STA_IFC_MODE = 12 +STA_PROTO_REV = 16 + +# supported hardware and CPLD code version +STA_PROTO_REV_MATCH = 0x08 + +# chip select (decoded) +CS_CFG = 1 +CS_ATT = 2 +CS_DDS_MULTI = 3 +CS_DDS_CH0 = 4 +CS_DDS_CH1 = 5 +CS_DDS_CH2 = 6 +CS_DDS_CH3 = 7 + + +@portable +def urukul_cfg(rf_sw, led, profile, io_update, mask_nu, + clk_sel, sync_sel, rst, io_rst, clk_div): + """Build Urukul CPLD configuration register""" + return ((rf_sw << CFG_RF_SW) | + (led << CFG_LED) | + (profile << CFG_PROFILE) | + (io_update << CFG_IO_UPDATE) | + (mask_nu << CFG_MASK_NU) | + ((clk_sel & 0x01) << CFG_CLK_SEL0) | + ((clk_sel & 0x02) << (CFG_CLK_SEL1 - 1)) | + (sync_sel << CFG_SYNC_SEL) | + (rst << CFG_RST) | + (io_rst << CFG_IO_RST) | + (clk_div << CFG_CLK_DIV)) + + +@portable +def urukul_sta_rf_sw(sta): + """Return the RF switch status from Urukul status register value.""" + return (sta >> STA_RF_SW) & 0xf + + +@portable +def urukul_sta_smp_err(sta): + """Return the SMP_ERR status from Urukul status register value.""" + return (sta >> STA_SMP_ERR) & 0xf + + +@portable +def 
urukul_sta_pll_lock(sta): + """Return the PLL_LOCK status from Urukul status register value.""" + return (sta >> STA_PLL_LOCK) & 0xf + + +@portable +def urukul_sta_ifc_mode(sta): + """Return the IFC_MODE status from Urukul status register value.""" + return (sta >> STA_IFC_MODE) & 0xf + + +@portable +def urukul_sta_proto_rev(sta): + """Return the PROTO_REV value from Urukul status register value.""" + return (sta >> STA_PROTO_REV) & 0x7f + + +class _RegIOUpdate: + def __init__(self, cpld): + self.cpld = cpld + + @kernel + def pulse(self, t): + cfg = self.cpld.cfg_reg + self.cpld.cfg_write(cfg | (1 << CFG_IO_UPDATE)) + delay(t) + self.cpld.cfg_write(cfg) + + +class _DummySync: + def __init__(self, cpld): + self.cpld = cpld + + @kernel + def set_mu(self, ftw): + pass + + +class CPLD: + """Urukul CPLD SPI router and configuration interface. + + :param spi_device: SPI bus device name + :param io_update_device: IO update RTIO TTLOut channel name + :param dds_reset_device: DDS reset RTIO TTLOut channel name + :param sync_device: AD9910 SYNC_IN RTIO TTLClockGen channel name + :param refclk: Reference clock (SMA, MMCX or on-board 100 MHz oscillator) + frequency in Hz + :param clk_sel: Reference clock selection. For hardware revision >= 1.3 + valid options are: 0 - internal 100MHz XO; 1 - front-panel SMA; 2 + internal MMCX. For hardware revision <= v1.2 valid options are: 0 - + either XO or MMCX dependent on component population; 1 SMA. Unsupported + clocking options are silently ignored. + :param clk_div: Reference clock divider. Valid options are 0: variant + dependent default (divide-by-4 for AD9910 and divide-by-1 for AD9912); + 1: divide-by-1; 2: divide-by-2; 3: divide-by-4. + On Urukul boards with CPLD gateware before v1.3.1 only the default + (0, i.e. variant dependent divider) is valid. + :param sync_sel: SYNC (multi-chip synchronisation) signal source selection. + 0 corresponds to SYNC_IN being supplied by the FPGA via the EEM + connector. 1 corresponds to SYNC_OUT from DDS0 being distributed to the + other chips. + :param rf_sw: Initial CPLD RF switch register setting (default: 0x0). + Knowledge of this state is not transferred between experiments. + :param att: Initial attenuator setting shift register (default: + 0x00000000). See also :meth:`get_att_mu` which retrieves the hardware + state without side effects. Knowledge of this state is not transferred + between experiments. + :param sync_div: SYNC_IN generator divider. The ratio between the coarse + RTIO frequency and the SYNC_IN generator frequency (default: 2 if + `sync_device` was specified). + :param core_device: Core device name + + If the clocking is incorrect (for example, setting ``clk_sel`` to the + front panel SMA with no clock connected), then the ``init()`` method of + the DDS channels can fail with the error message ``PLL lock timeout``. 
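A typical bring-up sequence for the CPLD described above might look like the following sketch. The device name ``urukul0_cpld`` is a hypothetical device database entry; the switch pattern is illustrative::

    # module-level import for the status helper
    from artiq.coredevice.urukul import urukul_sta_rf_sw

    @kernel
    def init_urukul(self):
        self.core.reset()
        self.urukul0_cpld.init()                 # resets DDS I/O, verifies PROTO_REV
        self.urukul0_cpld.cfg_switches(0b0000)   # all four RF switches off
        delay(10*us)
        # Read the status register and extract the RF switch state.
        sw = urukul_sta_rf_sw(self.urukul0_cpld.sta_read())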
+ """ + kernel_invariants = {"refclk", "bus", "core", "io_update", "clk_div"} + + def __init__(self, dmgr, spi_device, io_update_device=None, + dds_reset_device=None, sync_device=None, + sync_sel=0, clk_sel=0, clk_div=0, rf_sw=0, + refclk=125e6, att=0x00000000, sync_div=None, + core_device="core"): + + self.core = dmgr.get(core_device) + self.refclk = refclk + assert 0 <= clk_div <= 3 + self.clk_div = clk_div + + self.bus = dmgr.get(spi_device) + if io_update_device is not None: + self.io_update = dmgr.get(io_update_device) + else: + self.io_update = _RegIOUpdate(self) + if dds_reset_device is not None: + self.dds_reset = dmgr.get(dds_reset_device) + if sync_device is not None: + self.sync = dmgr.get(sync_device) + if sync_div is None: + sync_div = 2 + else: + self.sync = _DummySync(self) + assert sync_div is None + sync_div = 0 + + self.cfg_reg = urukul_cfg(rf_sw=rf_sw, led=0, profile=0, + io_update=0, mask_nu=0, clk_sel=clk_sel, + sync_sel=sync_sel, + rst=0, io_rst=0, clk_div=clk_div) + self.att_reg = int32(int64(att)) + self.sync_div = sync_div + + @kernel + def cfg_write(self, cfg): + """Write to the configuration register. + + See :func:`urukul_cfg` for possible flags. + + :param data: 24 bit data to be written. Will be stored at + :attr:`cfg_reg`. + """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 24, + SPIT_CFG_WR, CS_CFG) + self.bus.write(cfg << 8) + self.cfg_reg = cfg + + @kernel + def sta_read(self): + """Read the status register. + + Use any of the following functions to extract values: + + * :func:`urukul_sta_rf_sw` + * :func:`urukul_sta_smp_err` + * :func:`urukul_sta_pll_lock` + * :func:`urukul_sta_ifc_mode` + * :func:`urukul_sta_proto_rev` + + :return: The status register value. + """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, 24, + SPIT_CFG_RD, CS_CFG) + self.bus.write(self.cfg_reg << 8) + return self.bus.read() + + @kernel + def init(self, blind=False): + """Initialize and detect Urukul. + + Resets the DDS I/O interface and verifies correct CPLD gateware + version. + Does not pulse the DDS MASTER_RESET as that confuses the AD9910. + + :param blind: Do not attempt to verify presence and compatibility. + """ + cfg = self.cfg_reg + # Don't pulse MASTER_RESET (m-labs/artiq#940) + self.cfg_reg = cfg | (0 << CFG_RST) | (1 << CFG_IO_RST) + if blind: + self.cfg_write(self.cfg_reg) + else: + proto_rev = urukul_sta_proto_rev(self.sta_read()) + if proto_rev != STA_PROTO_REV_MATCH: + raise ValueError("Urukul proto_rev mismatch") + delay(100*us) # reset, slack + self.cfg_write(cfg) + if self.sync_div: + at_mu(now_mu() & ~0xf) # align to RTIO/2 + self.set_sync_div(self.sync_div) # 125 MHz/2 = 1 GHz/16 + delay(1*ms) # DDS wake up + + @kernel + def io_rst(self): + """Pulse IO_RST""" + self.cfg_write(self.cfg_reg | (1 << CFG_IO_RST)) + self.cfg_write(self.cfg_reg & ~(1 << CFG_IO_RST)) + + @kernel + def cfg_sw(self, channel, on): + """Configure the RF switches through the configuration register. + + These values are logically OR-ed with the LVDS lines on EEM1. + + :param channel: Channel index (0-3) + :param on: Switch value + """ + c = self.cfg_reg + if on: + c |= 1 << channel + else: + c &= ~(1 << channel) + self.cfg_write(c) + + @kernel + def cfg_switches(self, state): + """Configure all four RF switches through the configuration register. + + :param state: RF switch state as a 4 bit integer. + """ + self.cfg_write((self.cfg_reg & ~0xf) | state) + + @kernel + def set_att_mu(self, channel, att): + """Set digital step attenuator in machine units. 
+ + This method will also write the attenuator settings of the three other channels. Use + :meth:`get_att_mu` to retrieve the hardware state set in previous experiments. + + :param channel: Attenuator channel (0-3). + :param att: 8-bit digital attenuation setting: + 255 minimum attenuation, 0 maximum attenuation (31.5 dB) + """ + a = self.att_reg & ~(0xff << (channel * 8)) + a |= att << (channel * 8) + self.set_all_att_mu(a) + + @kernel + def set_all_att_mu(self, att_reg): + """Set all four digital step attenuators (in machine units). + + .. seealso:: :meth:`set_att_mu` + + :param att_reg: Attenuator setting string (32 bit) + """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 32, + SPIT_ATT_WR, CS_ATT) + self.bus.write(att_reg) + self.att_reg = att_reg + + @kernel + def set_att(self, channel, att): + """Set digital step attenuator in SI units. + + This method will write the attenuator settings of all four channels. + + .. seealso:: :meth:`set_att_mu` + + :param channel: Attenuator channel (0-3). + :param att: Attenuation setting in dB. Higher value is more + attenuation. Minimum attenuation is 0*dB, maximum attenuation is + 31.5*dB. + """ + code = 255 - int32(round(att*8)) + if code < 0 or code > 255: + raise ValueError("Invalid urukul.CPLD attenuation!") + self.set_att_mu(channel, code) + + @kernel + def get_att_mu(self): + """Return the digital step attenuator settings in machine units. + + The result is stored and will be used in future calls of :meth:`set_att_mu`. + + :return: 32 bit attenuator settings + """ + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_INPUT, 32, + SPIT_ATT_RD, CS_ATT) + self.bus.write(0) # shift in zeros, shift out current value + self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 32, + SPIT_ATT_WR, CS_ATT) + delay(10*us) + self.att_reg = self.bus.read() + self.bus.write(self.att_reg) # shift in current value again and latch + return self.att_reg + + @kernel + def set_sync_div(self, div): + """Set the SYNC_IN AD9910 pulse generator frequency + and align it to the current RTIO timestamp. + + The SYNC_IN signal is derived from the coarse RTIO clock + and the divider must be a power of two. + Configure ``sync_sel == 0``. + + :param div: SYNC_IN frequency divider. Must be a power of two. + Minimum division ratio is 2. Maximum division ratio is 16. + """ + ftw_max = 1 << 4 + ftw = ftw_max//div + assert ftw*div == ftw_max + self.sync.set_mu(ftw) + + @kernel + def set_profile(self, profile): + """Set the PROFILE pins. + + The PROFILE pins are common to all four DDS channels. + + :param profile: PROFILE pins in numeric representation (0-7). + """ + cfg = self.cfg_reg & ~(7 << CFG_PROFILE) + cfg |= (profile & 7) << CFG_PROFILE + self.cfg_write(cfg) diff --git a/artiq/coredevice/zotino.py b/artiq/coredevice/zotino.py new file mode 100644 index 000000000..e56842be5 --- /dev/null +++ b/artiq/coredevice/zotino.py @@ -0,0 +1,53 @@ +"""RTIO driver for the Zotino 32-channel, 16-bit 1MSPS DAC. + +Output event replacement is not supported and issuing commands at the same +time is an error. +""" + +from artiq.language.core import kernel +from artiq.coredevice import spi2 as spi +from artiq.coredevice.ad53xx import SPI_AD53XX_CONFIG, AD53xx + +_SPI_SR_CONFIG = (0*spi.SPI_OFFLINE | 1*spi.SPI_END | + 0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY | + 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | + 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + +_SPI_CS_DAC = 1 +_SPI_CS_SR = 2 + + +class Zotino(AD53xx): + """ Zotino 32-channel, 16-bit 1MSPS DAC. 
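Because a single attenuator write always carries all four channels, the shift register is usually read back first so that the other channels keep their hardware values, as sketched below (``urukul0_cpld`` is again a hypothetical device name and the 20 dB figure is illustrative)::

    @kernel
    def trim_attenuation(self):
        self.core.break_realtime()
        # Load the current hardware state into att_reg so that set_att()
        # does not overwrite the other three channels with stale defaults.
        self.urukul0_cpld.get_att_mu()
        delay(10*us)
        self.urukul0_cpld.set_att(1, 20.)    # 20 dB on channel 1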
+ + Controls the AD5372 DAC and the 8 user LEDs via a shared SPI interface. + + :param spi_device: SPI bus device name + :param ldac_device: LDAC RTIO TTLOut channel name. + :param clr_device: CLR RTIO TTLOut channel name. + :param div_write: SPI clock divider for write operations (default: 4, + 50MHz max SPI clock) + :param div_read: SPI clock divider for read operations (default: 8, not + optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO + valid) + :param vref: DAC reference voltage (default: 5.) + :param core_device: Core device name (default: "core") + """ + + def __init__(self, dmgr, spi_device, ldac_device=None, clr_device=None, + div_write=4, div_read=8, vref=5., core="core"): + AD53xx.__init__(self, dmgr=dmgr, spi_device=spi_device, + ldac_device=ldac_device, clr_device=clr_device, + chip_select=_SPI_CS_DAC, div_write=div_write, + div_read=div_read, core=core) + + @kernel + def set_leds(self, leds): + """ Sets the states of the 8 user LEDs. + + :param leds: 8-bit word with LED state + """ + self.bus.set_config_mu(_SPI_SR_CONFIG, 8, self.div_write, _SPI_CS_SR) + self.bus.write(leds << 24) + self.bus.set_config_mu(SPI_AD53XX_CONFIG, 24, self.div_write, + self.chip_select) diff --git a/artiq/dashboard/applets_ccb.py b/artiq/dashboard/applets_ccb.py index 1337a5363..73e2f23fb 100644 --- a/artiq/dashboard/applets_ccb.py +++ b/artiq/dashboard/applets_ccb.py @@ -1,3 +1,4 @@ +import asyncio import logging from PyQt5 import QtCore, QtWidgets @@ -149,15 +150,16 @@ class AppletsCCBDock(applets.AppletsDock): corresponds to a single group. If ``group`` is ``None`` or an empty list, it corresponds to the root. - ``command`` gives the command line used to run the applet, as if it - was started from a shell. The dashboard substitutes variables such as - ``$python`` that gives the complete file name of the Python - interpreter running the dashboard. + ``command`` gives the command line used to run the applet, as if it was + started from a shell. The dashboard substitutes variables such as + ``$python`` that gives the complete file name of the Python interpreter + running the dashboard. If the name already exists (after following any specified groups), the command or code of the existing applet with that name is replaced, and - the applet is shown at its previous position. If not, a new applet - entry is created and the applet is shown at any position on the screen. + the applet is restarted and shown at its previous position. If not, a + new applet entry is created and the applet is shown at any position on + the screen. If the group(s) do not exist, they are created. 
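Returning to the Zotino driver above, the user LEDs can be driven from a kernel once the DAC has been initialized. A short sketch, where ``zotino0`` is a hypothetical device name and ``init()`` is assumed to be the initialization method inherited from ``AD53xx``::

    @kernel
    def light_leds(self):
        self.core.break_realtime()
        self.zotino0.init()               # inherited AD53xx initialization
        delay(200*us)
        self.zotino0.set_leds(0b10101010)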
@@ -181,9 +183,17 @@ class AppletsCCBDock(applets.AppletsDock): else: spec = {"ty": "code", "code": code, "command": command} if applet is None: + logger.debug("Applet %s does not exist: creating", name) applet = self.new(name=name, spec=spec, parent=parent) else: - self.set_spec(applet, spec) + if spec != self.get_spec(applet): + logger.debug("Applet %s already exists: updating existing spec", name) + self.set_spec(applet, spec) + if applet.applet_dock: + asyncio.ensure_future(applet.applet_dock.restart()) + else: + logger.debug("Applet %s already exists and no update required", name) + if ccbp == "enable": applet.setCheckState(0, QtCore.Qt.Checked) diff --git a/artiq/dashboard/experiments.py b/artiq/dashboard/experiments.py index 78f6a15b0..6095a3466 100644 --- a/artiq/dashboard/experiments.py +++ b/artiq/dashboard/experiments.py @@ -7,9 +7,11 @@ from collections import OrderedDict from PyQt5 import QtCore, QtGui, QtWidgets import h5py -from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name +from sipyco import pyon + from artiq.gui.entries import procdesc_to_entry, ScanEntry -from artiq.protocols import pyon +from artiq.gui.fuzzy_select import FuzzySelectWidget +from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name logger = logging.getLogger(__name__) @@ -162,14 +164,14 @@ class _ArgumentEditor(QtWidgets.QTreeWidget): async def _recompute_argument(self, name): try: - arginfo = await self.manager.compute_arginfo(self.expurl) + expdesc = await self.manager.compute_expdesc(self.expurl) except: logger.error("Could not recompute argument '%s' of '%s'", name, self.expurl, exc_info=True) return argument = self.manager.get_submission_arguments(self.expurl)[name] - procdesc = arginfo[name][0] + procdesc = expdesc["arginfo"][name][0] state = procdesc_to_entry(procdesc).default_state(procdesc) argument["desc"] = procdesc argument["state"] = state @@ -272,7 +274,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): scheduling["due_date"] = due_date datetime_en.stateChanged.connect(update_datetime_en) - pipeline_name = QtWidgets.QLineEdit() + self.pipeline_name = QtWidgets.QLineEdit() + pipeline_name = self.pipeline_name self.layout.addWidget(QtWidgets.QLabel("Pipeline:"), 1, 2) self.layout.addWidget(pipeline_name, 1, 3) @@ -280,9 +283,10 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): def update_pipeline_name(text): scheduling["pipeline_name"] = text - pipeline_name.textEdited.connect(update_pipeline_name) + pipeline_name.textChanged.connect(update_pipeline_name) - priority = QtWidgets.QSpinBox() + self.priority = QtWidgets.QSpinBox() + priority = self.priority priority.setRange(-99, 99) self.layout.addWidget(QtWidgets.QLabel("Priority:"), 2, 0) self.layout.addWidget(priority, 2, 1) @@ -293,7 +297,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): scheduling["priority"] = value priority.valueChanged.connect(update_priority) - flush = QtWidgets.QCheckBox("Flush") + self.flush = QtWidgets.QCheckBox("Flush") + flush = self.flush flush.setToolTip("Flush the pipeline (of current- and higher-priority " "experiments) before starting the experiment") self.layout.addWidget(flush, 2, 2, 1, 2) @@ -386,11 +391,12 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): async def _recompute_arguments_task(self, overrides=dict()): try: - arginfo = await self.manager.compute_arginfo(self.expurl) + expdesc = await self.manager.compute_expdesc(self.expurl) except: - logger.error("Could not recompute arguments of '%s'", + logger.error("Could not recompute experiment 
description of '%s'", self.expurl, exc_info=True) return + arginfo = expdesc["arginfo"] for k, v in overrides.items(): # Some values (e.g. scans) may have multiple defaults in a list if ("default" in arginfo[k][0] @@ -407,6 +413,28 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): self.argeditor.restore_state(argeditor_state) self.layout.addWidget(self.argeditor, 0, 0, 1, 5) + def contextMenuEvent(self, event): + menu = QtWidgets.QMenu(self) + reset_sched = menu.addAction("Reset scheduler settings") + action = menu.exec_(self.mapToGlobal(event.pos())) + if action == reset_sched: + asyncio.ensure_future(self._recompute_sched_options_task()) + + async def _recompute_sched_options_task(self): + try: + expdesc = await self.manager.compute_expdesc(self.expurl) + except: + logger.error("Could not recompute experiment description of '%s'", + self.expurl, exc_info=True) + return + sched_defaults = expdesc["scheduler_defaults"] + + scheduling = self.manager.get_submission_scheduling(self.expurl) + scheduling.update(sched_defaults) + self.priority.setValue(scheduling["priority"]) + self.pipeline_name.setText(scheduling["pipeline_name"]) + self.flush.setChecked(scheduling["flush"]) + def _load_hdf5_clicked(self): asyncio.ensure_future(self._load_hdf5_task()) @@ -461,6 +489,60 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow): self.hdf5_load_directory = state["hdf5_load_directory"] +class _QuickOpenDialog(QtWidgets.QDialog): + """Modal dialog for opening/submitting experiments from a + FuzzySelectWidget.""" + closed = QtCore.pyqtSignal() + + def __init__(self, manager): + super().__init__(manager.main_window) + self.setModal(True) + + self.manager = manager + + self.setWindowTitle("Quick open...") + + layout = QtWidgets.QGridLayout(self) + layout.setSpacing(0) + layout.setContentsMargins(0, 0, 0, 0) + self.setLayout(layout) + + # Find matching experiment names. Open experiments are preferred to + # matches from the repository to ease quick window switching. + open_exps = list(self.manager.open_experiments.keys()) + repo_exps = set("repo:" + k + for k in self.manager.explist.keys()) - set(open_exps) + choices = [(o, 100) for o in open_exps] + [(r, 0) for r in repo_exps] + + self.select_widget = FuzzySelectWidget(choices) + layout.addWidget(self.select_widget) + self.select_widget.aborted.connect(self.close) + self.select_widget.finished.connect(self._open_experiment) + + font_metrics = QtGui.QFontMetrics(self.select_widget.line_edit.font()) + self.select_widget.setMinimumWidth(font_metrics.averageCharWidth() * 70) + + def done(self, r): + if self.select_widget: + self.select_widget.abort() + self.closed.emit() + QtWidgets.QDialog.done(self, r) + + def _open_experiment(self, exp_name, modifiers): + if modifiers & QtCore.Qt.ControlModifier: + try: + self.manager.submit(exp_name) + except: + # Not all open_experiments necessarily still exist in the explist + # (e.g. if the repository has been re-scanned since). 
+ logger.warning("failed to submit experiment '%s'", + exp_name, + exc_info=True) + else: + self.manager.open_experiment(exp_name) + self.close() + + class ExperimentManager: def __init__(self, main_window, explist_sub, schedule_sub, @@ -481,6 +563,13 @@ class ExperimentManager: self.open_experiments = dict() + self.is_quick_open_shown = False + quick_open_shortcut = QtWidgets.QShortcut( + QtCore.Qt.CTRL + QtCore.Qt.Key_P, + main_window) + quick_open_shortcut.setContext(QtCore.Qt.ApplicationShortcut) + quick_open_shortcut.activated.connect(self.show_quick_open) + def set_explist_model(self, model): self.explist = model.backing_store @@ -508,6 +597,8 @@ class ExperimentManager: "due_date": None, "flush": False } + if expurl[:5] == "repo:": + scheduling.update(self.explist[expurl[5:]]["scheduler_defaults"]) self.submission_scheduling[expurl] = scheduling return scheduling @@ -640,7 +731,7 @@ class ExperimentManager: rids.append(rid) asyncio.ensure_future(self._request_term_multiple(rids)) - async def compute_arginfo(self, expurl): + async def compute_expdesc(self, expurl): file, class_name, use_repository = self.resolve_expurl(expurl) if use_repository: revision = self.get_submission_options(expurl)["repo_rev"] @@ -648,7 +739,7 @@ class ExperimentManager: revision = None description = await self.experiment_db_ctl.examine( file, use_repository, revision) - return description[class_name]["arginfo"] + return description[class_name] async def open_file(self, file): description = await self.experiment_db_ctl.examine(file, False) @@ -679,3 +770,14 @@ class ExperimentManager: self.submission_arguments = state["arguments"] for expurl in state["open_docks"]: self.open_experiment(expurl) + + def show_quick_open(self): + if self.is_quick_open_shown: + return + + self.is_quick_open_shown = True + dialog = _QuickOpenDialog(self) + def closed(): + self.is_quick_open_shown = False + dialog.closed.connect(closed) + dialog.show() diff --git a/artiq/dashboard/moninj.py b/artiq/dashboard/moninj.py index e34679c34..0233fd8d7 100644 --- a/artiq/dashboard/moninj.py +++ b/artiq/dashboard/moninj.py @@ -4,7 +4,8 @@ from collections import namedtuple from PyQt5 import QtCore, QtWidgets, QtGui -from artiq.protocols.sync_struct import Subscriber +from sipyco.sync_struct import Subscriber + from artiq.coredevice.comm_moninj import * from artiq.gui.tools import LayoutWidget from artiq.gui.flowlayout import FlowLayout @@ -74,6 +75,7 @@ class _TTLWidget(QtWidgets.QFrame): self.cur_level = False self.cur_oe = False self.cur_override = False + self.cur_override_level = False self.refresh_display() def enterEvent(self, event): @@ -106,7 +108,9 @@ class _TTLWidget(QtWidgets.QFrame): self.set_mode(self.channel, "0") def refresh_display(self): - value_s = "1" if self.cur_level else "0" + level = self.cur_override_level if self.cur_override else self.cur_level + value_s = "1" if level else "0" + if self.cur_override: value_s = "" + value_s + "" color = " color=\"red\"" @@ -215,17 +219,15 @@ def setup_from_ddb(ddb): force_out = v["class"] == "TTLOut" widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k)) description.add(widget) - elif (v["module"] == "artiq.coredevice.dds" - and v["class"] == "DDSGroupAD9914"): - dds_sysclk = v["arguments"]["sysclk"] - elif (v["module"] == "artiq.coredevice.dds" - and v["class"] == "DDSChannelAD9914"): + elif (v["module"] == "artiq.coredevice.ad9914" + and v["class"] == "AD9914"): bus_channel = v["arguments"]["bus_channel"] channel = v["arguments"]["channel"] + dds_sysclk = 
v["arguments"]["sysclk"] widget = _WidgetDesc(k, comment, _DDSWidget, (bus_channel, channel, k)) description.add(widget) - elif (v["module"] == "artiq.coredevice.ad5360" - and v["class"] == "AD5360"): + elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53XX") + or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")): spi_device = v["arguments"]["spi_device"] spi_device = ddb[spi_device] while isinstance(spi_device, str): @@ -242,7 +244,7 @@ def setup_from_ddb(ddb): class _DeviceManager: def __init__(self): self.core_addr = None - self.new_core_addr = asyncio.Event() + self.reconnect_core = asyncio.Event() self.core_connection = None self.core_connector_task = asyncio.ensure_future(self.core_connector()) @@ -267,7 +269,7 @@ class _DeviceManager: if core_addr != self.core_addr: self.core_addr = core_addr - self.new_core_addr.set() + self.reconnect_core.set() self.dds_sysclk = dds_sysclk @@ -341,18 +343,20 @@ class _DeviceManager: def setup_ttl_monitoring(self, enable, channel): if self.core_connection is not None: - self.core_connection.monitor(enable, channel, TTLProbe.level.value) - self.core_connection.monitor(enable, channel, TTLProbe.oe.value) + self.core_connection.monitor_probe(enable, channel, TTLProbe.level.value) + self.core_connection.monitor_probe(enable, channel, TTLProbe.oe.value) + self.core_connection.monitor_injection(enable, channel, TTLOverride.en.value) + self.core_connection.monitor_injection(enable, channel, TTLOverride.level.value) if enable: self.core_connection.get_injection_status(channel, TTLOverride.en.value) def setup_dds_monitoring(self, enable, bus_channel, channel): if self.core_connection is not None: - self.core_connection.monitor(enable, bus_channel, channel) + self.core_connection.monitor_probe(enable, bus_channel, channel) def setup_dac_monitoring(self, enable, spi_channel, channel): if self.core_connection is not None: - self.core_connection.monitor(enable, spi_channel, channel) + self.core_connection.monitor_probe(enable, spi_channel, channel) def monitor_cb(self, channel, probe, value): if channel in self.ttl_widgets: @@ -373,21 +377,35 @@ class _DeviceManager: def injection_status_cb(self, channel, override, value): if channel in self.ttl_widgets: - self.ttl_widgets[channel].cur_override = bool(value) + widget = self.ttl_widgets[channel] + if override == TTLOverride.en.value: + widget.cur_override = bool(value) + if override == TTLOverride.level.value: + widget.cur_override_level = bool(value) + widget.refresh_display() + + def disconnect_cb(self): + logger.error("lost connection to core device moninj") + self.reconnect_core.set() async def core_connector(self): while True: - await self.new_core_addr.wait() - self.new_core_addr.clear() + await self.reconnect_core.wait() + self.reconnect_core.clear() if self.core_connection is not None: await self.core_connection.close() self.core_connection = None new_core_connection = CommMonInj(self.monitor_cb, self.injection_status_cb, - lambda: logger.error("lost connection to core device moninj")) + self.disconnect_cb) try: await new_core_connection.connect(self.core_addr, 1383) + except asyncio.CancelledError: + logger.info("cancelled connection to core device moninj") + break except: logger.error("failed to connect to core device moninj", exc_info=True) + await asyncio.sleep(10.) 
+ self.reconnect_core.set() else: self.core_connection = new_core_connection for ttl_channel in self.ttl_widgets.keys(): diff --git a/artiq/devices/ctlmgr.py b/artiq/devices/ctlmgr.py deleted file mode 100644 index 69dd1b4db..000000000 --- a/artiq/devices/ctlmgr.py +++ /dev/null @@ -1,258 +0,0 @@ -import asyncio -import logging -import subprocess -import shlex -import socket -import os - -from artiq.protocols.sync_struct import Subscriber -from artiq.protocols.pc_rpc import AsyncioClient -from artiq.protocols.logging import LogParser -from artiq.tools import Condition, TaskObject - - -logger = logging.getLogger(__name__) - - -class Controller: - def __init__(self, name, ddb_entry): - self.name = name - self.command = ddb_entry["command"] - self.retry_timer = ddb_entry.get("retry_timer", 5) - self.retry_timer_backoff = ddb_entry.get("retry_timer_backoff", 1.1) - - self.host = ddb_entry["host"] - self.port = ddb_entry["port"] - self.ping_timer = ddb_entry.get("ping_timer", 30) - self.ping_timeout = ddb_entry.get("ping_timeout", 30) - self.term_timeout = ddb_entry.get("term_timeout", 30) - - self.retry_timer_cur = self.retry_timer - self.retry_now = Condition() - self.process = None - self.launch_task = asyncio.ensure_future(self.launcher()) - - async def end(self): - self.launch_task.cancel() - await asyncio.wait_for(self.launch_task, None) - - async def call(self, method, *args, **kwargs): - remote = AsyncioClient() - await remote.connect_rpc(self.host, self.port, None) - try: - targets, _ = remote.get_rpc_id() - await remote.select_rpc_target(targets[0]) - r = await getattr(remote, method)(*args, **kwargs) - finally: - remote.close_rpc() - return r - - async def _ping(self): - try: - ok = await asyncio.wait_for(self.call("ping"), - self.ping_timeout) - if ok: - self.retry_timer_cur = self.retry_timer - return ok - except: - return False - - async def _wait_and_ping(self): - while True: - try: - await asyncio.wait_for(self.process.wait(), - self.ping_timer) - except asyncio.TimeoutError: - logger.debug("pinging controller %s", self.name) - ok = await self._ping() - if not ok: - logger.warning("Controller %s ping failed", self.name) - await self._terminate() - return - else: - break - - def _get_log_source(self): - return "controller({})".format(self.name) - - async def launcher(self): - try: - while True: - logger.info("Starting controller %s with command: %s", - self.name, self.command) - try: - env = os.environ.copy() - env["PYTHONUNBUFFERED"] = "1" - self.process = await asyncio.create_subprocess_exec( - *shlex.split(self.command), - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - env=env, start_new_session=True) - asyncio.ensure_future( - LogParser(self._get_log_source).stream_task( - self.process.stdout)) - asyncio.ensure_future( - LogParser(self._get_log_source).stream_task( - self.process.stderr)) - await self._wait_and_ping() - except FileNotFoundError: - logger.warning("Controller %s failed to start", self.name) - else: - logger.warning("Controller %s exited", self.name) - logger.warning("Restarting in %.1f seconds", - self.retry_timer_cur) - try: - await asyncio.wait_for(self.retry_now.wait(), - self.retry_timer_cur) - except asyncio.TimeoutError: - pass - self.retry_timer_cur *= self.retry_timer_backoff - except asyncio.CancelledError: - await self._terminate() - - async def _terminate(self): - if self.process is None or self.process.returncode is not None: - logger.info("Controller %s already terminated", self.name) - return - logger.debug("Terminating controller %s", 
self.name) - try: - await asyncio.wait_for(self.call("terminate"), self.term_timeout) - await asyncio.wait_for(self.process.wait(), self.term_timeout) - logger.info("Controller %s terminated", self.name) - return - except: - logger.warning("Controller %s did not exit on request, " - "ending the process", self.name) - if os.name != "nt": - try: - self.process.terminate() - except ProcessLookupError: - pass - try: - await asyncio.wait_for(self.process.wait(), self.term_timeout) - logger.info("Controller process %s terminated", self.name) - return - except asyncio.TimeoutError: - logger.warning("Controller process %s did not terminate, " - "killing", self.name) - try: - self.process.kill() - except ProcessLookupError: - pass - try: - await asyncio.wait_for(self.process.wait(), self.term_timeout) - logger.info("Controller process %s killed", self.name) - return - except asyncio.TimeoutError: - logger.warning("Controller process %s failed to die", self.name) - - -def get_ip_addresses(host): - try: - addrinfo = socket.getaddrinfo(host, None) - except: - return set() - return {info[4][0] for info in addrinfo} - - -class Controllers: - def __init__(self): - self.host_filter = None - self.active_or_queued = set() - self.queue = asyncio.Queue() - self.active = dict() - self.process_task = asyncio.ensure_future(self._process()) - - async def _process(self): - while True: - action, param = await self.queue.get() - if action == "set": - k, ddb_entry = param - if k in self.active: - await self.active[k].end() - self.active[k] = Controller(k, ddb_entry) - elif action == "del": - await self.active[param].end() - del self.active[param] - self.queue.task_done() - if action not in ("set", "del"): - raise ValueError - - def __setitem__(self, k, v): - if (isinstance(v, dict) and v["type"] == "controller" and - self.host_filter in get_ip_addresses(v["host"])): - v["command"] = v["command"].format(name=k, - bind=self.host_filter, - port=v["port"]) - self.queue.put_nowait(("set", (k, v))) - self.active_or_queued.add(k) - - def __delitem__(self, k): - if k in self.active_or_queued: - self.queue.put_nowait(("del", k)) - self.active_or_queued.remove(k) - - def delete_all(self): - for name in set(self.active_or_queued): - del self[name] - - async def shutdown(self): - self.process_task.cancel() - for c in self.active.values(): - await c.end() - - -class ControllerDB: - def __init__(self): - self.current_controllers = Controllers() - - def set_host_filter(self, host_filter): - self.current_controllers.host_filter = host_filter - - def sync_struct_init(self, init): - if self.current_controllers is not None: - self.current_controllers.delete_all() - for k, v in init.items(): - self.current_controllers[k] = v - return self.current_controllers - - -class ControllerManager(TaskObject): - def __init__(self, server, port, retry_master): - self.server = server - self.port = port - self.retry_master = retry_master - self.controller_db = ControllerDB() - - async def _do(self): - try: - subscriber = Subscriber("devices", - self.controller_db.sync_struct_init) - while True: - try: - def set_host_filter(): - s = subscriber.writer.get_extra_info("socket") - localhost = s.getsockname()[0] - self.controller_db.set_host_filter(localhost) - await subscriber.connect(self.server, self.port, - set_host_filter) - try: - await asyncio.wait_for(subscriber.receive_task, None) - finally: - await subscriber.close() - except (ConnectionAbortedError, ConnectionError, - ConnectionRefusedError, ConnectionResetError) as e: - 
logger.warning("Connection to master failed (%s: %s)", - e.__class__.__name__, str(e)) - else: - logger.warning("Connection to master lost") - logger.warning("Retrying in %.1f seconds", self.retry_master) - await asyncio.sleep(self.retry_master) - except asyncio.CancelledError: - pass - finally: - await self.controller_db.current_controllers.shutdown() - - def retry_now(self, k): - """If a controller is disabled and pending retry, perform that retry - now.""" - self.controller_db.current_controllers.active[k].retry_now.notify() diff --git a/artiq/devices/korad_ka3005p/driver.py b/artiq/devices/korad_ka3005p/driver.py deleted file mode 100644 index 85bba0504..000000000 --- a/artiq/devices/korad_ka3005p/driver.py +++ /dev/null @@ -1,152 +0,0 @@ -# Written by Joe Britton, 2016 - -import logging -import asyncio -import asyncserial - -logger = logging.getLogger(__name__) - - -class UnexpectedResponse(Exception): - pass - - -class KoradKA3005P: - """The Korad KA3005P is a 1-channel programmable power supply - (0-30V/0-5A) with both USB/serial and RS232 connectivity. - - All amplitudes are in volts. - All currents are in amperes. - """ - - # Serial interface gleaned from the following. - # https://github.com/starforgelabs/py-korad-serial - # https://sigrok.org/wiki/Korad_KAxxxxP_series - - def __init__(self, serial_dev): - if serial_dev is None: - self.simulation = True - else: - self.simulation = False - self.port = asyncserial.AsyncSerial(serial_dev, baudrate=9600) - - def close(self): - """Close the serial port.""" - if not self.simulation: - self.port.close() - - async def _ser_read(self, fixed_length=None): - """ strings returned by firmware are zero-terminated or fixed length - """ - r = "" - if self.simulation: - logger.info("simulation _ser_read()") - else: - c = (await self.port.read(1)).decode() - r = c - while len(c) > 0 and ord(c) != 0 and not len(r) == fixed_length: - c = (await self.port.read(1)).decode().rstrip('\0') - r += c - logger.debug("_read %s: ", r) - return r - - async def _ser_write(self, cmd): - if self.simulation: - logger.info("simulation _ser_write(\"%s\")", cmd) - else: - logger.debug("_ser_write(\"%s\")", cmd) - await asyncio.sleep(0.1) - await self.port.write(cmd.encode("ascii")) - - async def setup(self): - """Configure in known state.""" - await self.set_output(False) - await self.set_v(0) - await self.set_ovp(False) - await self.set_i(0) - await self.set_ocp(False) - - async def get_id(self): - """Request identification from device. - """ - if self.simulation: - return "KORADKA3005PV2.0" - await self._ser_write("*IDN?") - return await self._ser_read() - - async def set_output(self, b): - """Enable/disable the power output. 
- """ - if b: - await self._ser_write("OUT1") - else: - await self._ser_write("OUT0") - - async def set_v(self, v): - """Set the maximum output voltage.""" - await self._ser_write("VSET1:{0:05.2f}".format(v)) - - async def get_v(self): - """Request the voltage as set by the user.""" - await self._ser_write("VSET1?") - return float(await self._ser_read(fixed_length=5)) - - async def measure_v(self): - """Request the actual voltage output.""" - await self._ser_write("VOUT1?") - return float(await self._ser_read(fixed_length=5)) - - async def set_ovp(self, b): - """Enable/disable the "Over Voltage Protection", the PS will switch off the - output when the voltage rises above the actual level.""" - if b: - await self._ser_write("OVP1") - else: - await self._ser_write("OVP0") - - async def set_i(self, v): - """Set the maximum output current.""" - await self._ser_write("ISET1:{0:05.3f}".format(v)) - - async def get_i(self): - """Request the current as set by the user. """ - # Expected behavior of ISET1? is to return 5 bytes. - # However, if *IDN? has been previously called, ISET1? replies - # with a sixth byte 'K' which should be discarded. For consistency, - # always call *IDN? before calling ISET1?. - self.get_id() - await self._ser_write("ISET1?") - r = (await self._ser_read(fixed_length=6)).rstrip('K') - return float(r) - - async def measure_i(self): - """Request the actual output current.""" - await self._ser_write("IOUT1?") - r = await self._ser_read(fixed_length=5) - if r[0] == "K": - r = r[1:-1] - return float(r) - - async def set_ocp(self, b): - """Enable/disable the "Over Current Protection", the PS will switch off - the output when the current rises above the actual level.""" - if b: - await self._ser_write("OCP1") - else: - await self._ser_write("OCP0") - - async def ping(self): - """Check if device is responding.""" - if self.simulation: - return True - try: - id = await self.get_id() - except asyncio.CancelledError: - raise - except: - return False - if id == "KORADKA3005PV2.0": - logger.debug("ping successful") - return True - else: - return False diff --git a/artiq/devices/lda/driver.py b/artiq/devices/lda/driver.py deleted file mode 100644 index f53f31864..000000000 --- a/artiq/devices/lda/driver.py +++ /dev/null @@ -1,229 +0,0 @@ -import logging -import ctypes -import struct - -from artiq.language.units import dB - - -logger = logging.getLogger("lda") - - -class HidError(Exception): - pass - - -class Ldasim: - """Lab Brick Digital Attenuator simulation driver.""" - - def __init__(self): - self._attenuation = None - self._att_max = 63*dB - self._att_step_size = 0.25*dB - - def get_att_max(self): - return self._att_max - - def get_att_step_size(self): - return self._att_step_size - - def close(self): - """Close the device.""" - pass - - def get_attenuation(self): - """Reads last attenuation value set to the simulated device. - - :return: Returns the attenuation value in dB, or None if it was - never set. - :rtype: float - """ - - return self._attenuation - - def set_attenuation(self, attenuation): - """Stores the new attenuation value. - - :param attenuation: The attenuation value in dB. - """ - - step = self.get_att_step_size() - att = round(attenuation/step)*step - - if att > self.get_att_max(): - raise ValueError("Cannot set attenuation {} > {}" - .format(att, self.get_att_max())) - elif att < 0*dB: - raise ValueError("Cannot set attenuation {} < 0".format(att)) - else: - att = round(att*4)/4. 
* dB - self._attenuation = att - - def ping(self): - return True - - -class Lda: - """Lab Brick Digital Attenuator driver. - - This driver depends on the hidapi library. - - On Linux you should install hidapi-libusb shared library in a directory - listed in your LD_LIBRARY_PATH or in the conventional places (/usr/lib, - /lib, /usr/local/lib). This can be done either from hidapi sources - or by installing the libhidapi-libusb0 binary package on Debian-like OS. - - On Windows you should put hidapi.dll shared library in the - artiq\\\\devices\\\\lda folder. - - """ - _vendor_id = 0x041f - _product_ids = { - "LDA-102": 0x1207, - "LDA-602": 0x1208, - "LDA-302P-1": 0x120E, - } - _att_max = { - "LDA-102": 63*dB, - "LDA-602": 63*dB, - "LDA-302P-1": 63*dB - } - _att_step_size = { - "LDA-102": 0.5*dB, - "LDA-602": 0.5*dB, - "LDA-302P-1": 1.0*dB - } - - def __init__(self, serial=None, product="LDA-102"): - """ - :param serial: The serial number. - :param product: The product identifier string: LDA-102, LDA-602. - """ - - from artiq.devices.lda.hidapi import hidapi - self.hidapi = hidapi - self.product = product - self.serial = serial - - if self.serial is None: - self.serial = next(self.enumerate(self.product)) - self._dev = self.hidapi.hid_open(self._vendor_id, - self._product_ids[self.product], - self.serial) - if not self._dev: - raise IOError("Device not found") - - def close(self): - """Close the device.""" - self.hidapi.hid_close(self._dev) - - def get_att_step_size(self): - return self._att_step_size[self.product] - - def get_att_max(self): - return self._att_max[self.product] - - @classmethod - def enumerate(cls, product): - from artiq.devices.lda.hidapi import hidapi - devs = hidapi.hid_enumerate(cls._vendor_id, - cls._product_ids[product]) - try: - dev = devs - while dev: - yield dev[0].serial - dev = dev[0].next - finally: - hidapi.hid_free_enumeration(devs) - - def _check_error(self, ret): - if ret < 0: - err = self.hidapi.hid_error(self._dev) - raise HidError("{}: {}".format(ret, err)) - return ret - - def write(self, command, length, data=bytes()): - """Writes a command to the Lab Brick device. - - :param command: command ID. - :param length: number of meaningful bytes in the data array. - :param data: a byte array containing the payload of the command. - """ - - # 0 is report id/padding - buf = struct.pack("BBB6s", 0, command, length, data) - res = self._check_error(self.hidapi.hid_write(self._dev, buf, - len(buf))) - if res != len(buf): - raise IOError - - def set(self, command, data): - """Sends a SET command to the Lab Brick device. - - :param command: command ID, must have most significant bit set. - :param data: payload of the command. - """ - - if not data: - raise ValueError("Data is empty") - if not (command & 0x80): - raise ValueError("Set commands must have most significant bit set") - self.write(command, len(data), data) - - def get(self, command, length, timeout=1000): - """Sends a GET command to read back some value of the Lab Brick device. - - :param command: Command ID, most significant bit must be cleared. - :param length: Length of the command, "count" in the datasheet. - :param timeout: Timeout of the HID read in ms. - :return: Returns the value read from the device. 
- :rtype: bytes - """ - - if command & 0x80: - raise ValueError("Get commands must not have most significant bit" - " set") - status = None - self.write(command, length) - buf = ctypes.create_string_buffer(8) - while status != command: - res = self._check_error(self.hidapi.hid_read_timeout(self._dev, - buf, len(buf), timeout)) - if res != len(buf): - raise IOError - status, length, data = struct.unpack("BB6s", buf.raw) - data = data[:length] - logger.info("%s %s %r", command, length, data) - return data - - def get_attenuation(self): - """Reads attenuation value from Lab Brick device. - - :return: Returns the attenuation value in dB. - :rtype: float - """ - - return (ord(self.get(0x0d, 1))/4.) * dB - - def set_attenuation(self, attenuation): - """Sets attenuation value of the Lab Brick device. - - :param attenuation: Attenuation value in dB. - """ - - step = self.get_att_step_size() - att = round(attenuation/step)*step - - if att > self.get_att_max(): - raise ValueError("Cannot set attenuation {} > {}" - .format(att, self.get_att_max())) - elif att < 0*dB: - raise ValueError("Cannot set attenuation {} < 0".format(att)) - else: - self.set(0x8d, bytes([int(round(att*4))])) - - def ping(self): - try: - self.get_attenuation() - except: - return False - return True diff --git a/artiq/devices/lda/hidapi.py b/artiq/devices/lda/hidapi.py deleted file mode 100644 index 663a0a044..000000000 --- a/artiq/devices/lda/hidapi.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import atexit -import ctypes -import ctypes.util - -if "." not in os.environ["PATH"].split(";"): - os.environ["PATH"] += ";." -dir = os.path.split(__file__)[0] -if dir not in os.environ["PATH"].split(";"): - os.environ["PATH"] += ";{}".format(dir) - -for n in "hidapi-libusb hidapi-hidraw hidapi".split(): - path = ctypes.util.find_library(n) - if path: - break -if not path: - raise ImportError("no hidapi library found") -hidapi = ctypes.CDLL(path) - - -class HidDeviceInfo(ctypes.Structure): - pass - - -HidDeviceInfo._fields_ = [ - ("path", ctypes.c_char_p), - ("vendor_id", ctypes.c_ushort), - ("product_id", ctypes.c_ushort), - ("serial", ctypes.c_wchar_p), - ("release", ctypes.c_ushort), - ("manufacturer", ctypes.c_wchar_p), - ("product", ctypes.c_wchar_p), - ("usage_page", ctypes.c_ushort), - ("usage", ctypes.c_ushort), - ("interface", ctypes.c_int), - ("next", ctypes.POINTER(HidDeviceInfo)), -] - - -hidapi.hid_enumerate.argtypes = [ctypes.c_ushort, ctypes.c_ushort] -hidapi.hid_enumerate.restype = ctypes.POINTER(HidDeviceInfo) -hidapi.hid_free_enumeration.argtypes = [ctypes.POINTER(HidDeviceInfo)] -hidapi.hid_open.argtypes = [ctypes.c_ushort, ctypes.c_ushort, - ctypes.c_wchar_p] -hidapi.hid_open.restype = ctypes.c_void_p -hidapi.hid_close.argtypes = [ctypes.c_void_p] -hidapi.hid_read_timeout.argtypes = [ctypes.c_void_p, ctypes.c_char_p, - ctypes.c_size_t, ctypes.c_int] -hidapi.hid_read.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t] -hidapi.hid_write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t] -hidapi.hid_send_feature_report.argtypes = [ctypes.c_void_p, ctypes.c_char_p, - ctypes.c_size_t] -hidapi.hid_get_feature_report.argtypes = [ctypes.c_void_p, ctypes.c_char_p, - ctypes.c_size_t] -hidapi.hid_error.argtypes = [ctypes.c_void_p] -hidapi.hid_error.restype = ctypes.c_wchar_p - -atexit.register(hidapi.hid_exit) diff --git a/artiq/devices/novatech409b/driver.py b/artiq/devices/novatech409b/driver.py deleted file mode 100644 index 4a734544b..000000000 --- a/artiq/devices/novatech409b/driver.py +++ /dev/null 
@@ -1,201 +0,0 @@ -# Written by Joe Britton, 2015 - -import math -import logging -import asyncio - -import asyncserial - - -logger = logging.getLogger(__name__) - - -class UnexpectedResponse(Exception): - pass - - -class Novatech409B: - """Driver for Novatech 409B 4-channel DDS. - - All output channels are in range [0, 1, 2, 3]. - All frequencies are in Hz. - All phases are in turns. - All amplitudes are in volts. - """ - - error_codes = { - "?0": "Unrecognized Command", - "?1": "Bad Frequency", - "?2": "Bad AM Command", - "?3": "Input line too long", - "?4": "Bad Phase", - "?5": "Bad Time", - "?6": "Bad Mode", - "?7": "Bad Amp", - "?8": "Bad Constant", - "?f": "Bad Byte" - } - - def __init__(self, serial_dev): - if serial_dev is None: - self.simulation = True - else: - self.simulation = False - self.port = asyncserial.AsyncSerial( - serial_dev, - baudrate=19200, - bytesize=8, - parity="N", - stopbits=1, - xonxoff=0) - - def close(self): - """Close the serial port.""" - if not self.simulation: - self.port.close() - - async def _ser_readline(self): - c = await self.port.read(1) - r = c - while c != b"\n": - c = await self.port.read(1) - r += c - return r - - async def _ser_send(self, cmd, get_response=True): - """Send a string to the serial port.""" - - # Low-level routine for sending serial commands to device. It sends - # strings and listens for a response terminated by a carriage return. - # example: - # ser_send("F0 1.0") # sets the freq of channel 0 to 1.0 MHz - - if self.simulation: - logger.info("simulation _ser_send(\"%s\")", cmd) - else: - logger.debug("_ser_send(\"%s\")", cmd) - self.port.ser.reset_input_buffer() - await self.port.write((cmd + "\r\n").encode()) - if get_response: - result = (await self._ser_readline()).rstrip().decode() - logger.debug("got response from device: %s", result) - if result != "OK": - errstr = self.error_codes.get(result, "Unrecognized reply") - s = "Erroneous reply from device: {ec}, {ecs}".format( - ec=result, ecs=errstr) - raise ValueError(s) - else: - pass - - async def reset(self): - """Hardware reset of 409B.""" - await self._ser_send("R", get_response=False) - await asyncio.sleep(1) - await self.setup() - - async def setup(self): - """Initial setup of 409B.""" - - # Setup the Novatech 409B with the following defaults: - # * command echo off ("E d") - # * external clock ("") 10 MHz sinusoid -1 to +7 dBm - - await self._ser_send("E d", get_response=False) - await self.set_phase_continuous(True) - await self.set_simultaneous_update(False) - - async def save_state_to_eeprom(self): - """Save current state to EEPROM.""" - await self._ser_send("S") - - async def set_phase_continuous(self, is_continuous): - """Toggle phase continuous mode. - - Sends the "M n" command. This turns off the automatic - clearing of the phase register. In this mode, the phase - register is left intact when a command is performed. - Use this mode if you want frequency changes to remain - phase synchronous, with no phase discontinuities. - - :param is_continuous: True or False - """ - if is_continuous: - await self._ser_send("M n") - else: - await self._ser_send("M a") - - async def set_simultaneous_update(self, simultaneous): - """Set simultaneous update mode. - - Sends the "I m" command. In this mode an update - pulse will not be sent to the DDS chip until - an "I p" command is sent. This is useful when it is - important to change all the outputs to new values - simultaneously. 
- """ - if simultaneous: - await self._ser_send("I m") - else: - await self._ser_send("I a") - - async def do_simultaneous_update(self): - """Apply update in simultaneous update mode.""" - await self._ser_send("I p") - - async def set_freq(self, ch_no, freq): - """Set frequency of one channel.""" - # Novatech expects MHz - await self._ser_send("F{:d} {:f}".format(ch_no, freq/1e6)) - - async def set_phase(self, ch_no, phase): - """Set phase of one channel.""" - # phase word is required by device - # N is an integer from 0 to 16383. Phase is set to - # N*360/16384 deg; in ARTIQ represent phase in cycles [0, 1] - phase_word = round(phase*16383) - cmd = "P{:d} {:d}".format(ch_no, phase_word) - await self._ser_send(cmd) - - async def set_gain(self, ch_no, volts): - """Set amplitude of one channel.""" - - # due to error in Novatech it doesn't generate an error for - # dac_value>1024, so need to trap. - dac_value = int(math.floor(volts/0.51*1024)) - if dac_value < 0 or dac_value > 1023: - s = "Amplitude out of range {v}".format(v=volts) - raise ValueError(s) - - s = "V{:d} {:d}".format(ch_no, dac_value) - await self._ser_send(s) - - async def get_status(self): - if self.simulation: - return ["00989680 2000 01F5 0000 00000000 00000000 000301", - "00989680 2000 01F5 0000 00000000 00000000 000301", - "00989680 2000 01F5 0000 00000000 00000000 000301", - "00989680 2000 01F5 0000 00000000 00000000 000301", - "80 BC0000 0000 0102 21"] - else: - self.port.ser.reset_input_buffer() - result = [] - await self.port.write(("QUE" + "\r\n").encode()) - for i in range(5): - m = (await self._ser_readline()).rstrip().decode() - result.append(m) - logger.debug("got device status: %s", result) - return result - - async def ping(self): - try: - stat = await self.get_status() - except asyncio.CancelledError: - raise - except: - return False - # check that version number matches is "21" - if stat[4][20:] == "21": - logger.debug("ping successful") - return True - else: - return False diff --git a/artiq/devices/thorlabs_tcube/__init__.py b/artiq/devices/thorlabs_tcube/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/artiq/devices/thorlabs_tcube/driver.py b/artiq/devices/thorlabs_tcube/driver.py deleted file mode 100644 index 545887e18..000000000 --- a/artiq/devices/thorlabs_tcube/driver.py +++ /dev/null @@ -1,1476 +0,0 @@ -from enum import Enum -import logging -import struct as st -import asyncio - -import asyncserial - - -logger = logging.getLogger(__name__) - - -class MGMSG(Enum): - HW_DISCONNECT = 0x0002 - HW_REQ_INFO = 0x0005 - HW_GET_INFO = 0x0006 - HW_START_UPDATEMSGS = 0x0011 - HW_STOP_UPDATEMSGS = 0x0012 - HUB_REQ_BAYUSED = 0x0065 - HUB_GET_BAYUSED = 0x0066 - HW_RESPONSE = 0x0080 - HW_RICHRESPONSE = 0x0081 - MOD_SET_CHANENABLESTATE = 0x0210 - MOD_REQ_CHANENABLESTATE = 0x0211 - MOD_GET_CHANENABLESTATE = 0x0212 - MOD_IDENTIFY = 0x0223 - MOT_SET_ENCCOUNTER = 0x0409 - MOT_REQ_ENCCOUNTER = 0x040A - MOT_GET_ENCCOUNTER = 0x040B - MOT_SET_POSCOUNTER = 0x0410 - MOT_REQ_POSCOUNTER = 0x0411 - MOT_GET_POSCOUNTER = 0x0412 - MOT_SET_VELPARAMS = 0x0413 - MOT_REQ_VELPARAMS = 0x0414 - MOT_GET_VELPARAMS = 0x0415 - MOT_SET_JOGPARAMS = 0x0416 - MOT_REQ_JOGPARAMS = 0x0417 - MOT_GET_JOGPARAMS = 0x0418 - MOT_SET_LIMSWITCHPARAMS = 0x0423 - MOT_REQ_LIMSWITCHPARAMS = 0x0424 - MOT_GET_LIMSWITCHPARAMS = 0x0425 - MOT_REQ_STATUSBITS = 0x0429 - MOT_GET_STATUSBITS = 0x042A - MOT_SET_GENMOVEPARAMS = 0x043A - MOT_REQ_GENMOVEPARAMS = 0x043B - MOT_GET_GENMOVEPARAMS = 0x043C - MOT_SET_HOMEPARAMS = 0x0440 - 
MOT_REQ_HOMEPARAMS = 0x0441 - MOT_GET_HOMEPARAMS = 0x0442 - MOT_MOVE_HOME = 0x0443 - MOT_MOVE_HOMED = 0x0444 - MOT_SET_MOVERELPARAMS = 0x0445 - MOT_REQ_MOVERELPARAMS = 0x0446 - MOT_GET_MOVERELPARAMS = 0x0447 - MOT_MOVE_RELATIVE = 0x0448 - MOT_SET_MOVEABSPARAMS = 0x0450 - MOT_REQ_MOVEABSPARAMS = 0x0451 - MOT_GET_MOVEABSPARAMS = 0x0452 - MOT_MOVE_ABSOLUTE = 0x0453 - MOT_MOVE_VELOCITY = 0x0457 - MOT_MOVE_COMPLETED = 0x0464 - MOT_MOVE_STOP = 0x0465 - MOT_MOVE_STOPPED = 0x0466 - MOT_MOVE_JOG = 0x046A - MOT_SUSPEND_ENDOFMOVEMSGS = 0x046B - MOT_RESUME_ENDOFMOVEMSGS = 0x046C - MOT_REQ_DCSTATUSUPDATE = 0x0490 - MOT_GET_DCSTATUSUPDATE = 0x0491 - MOT_ACK_DCSTATUSUPDATE = 0x0492 - MOT_SET_DCPIDPARAMS = 0x04A0 - MOT_REQ_DCPIDPARAMS = 0x04A1 - MOT_GET_DCPIDPARAMS = 0x04A2 - MOT_SET_POTPARAMS = 0x04B0 - MOT_REQ_POTPARAMS = 0x04B1 - MOT_GET_POTPARAMS = 0x04B2 - MOT_SET_AVMODES = 0x04B3 - MOT_REQ_AVMODES = 0x04B4 - MOT_GET_AVMODES = 0x04B5 - MOT_SET_BUTTONPARAMS = 0x04B6 - MOT_REQ_BUTTONPARAMS = 0x04B7 - MOT_GET_BUTTONPARAMS = 0x04B8 - MOT_SET_EEPROMPARAMS = 0x04B9 - PZ_SET_POSCONTROLMODE = 0x0640 - PZ_REQ_POSCONTROLMODE = 0x0641 - PZ_GET_POSCONTROLMODE = 0x0642 - PZ_SET_OUTPUTVOLTS = 0x0643 - PZ_REQ_OUTPUTVOLTS = 0x0644 - PZ_GET_OUTPUTVOLTS = 0x0645 - PZ_SET_OUTPUTPOS = 0x0646 - PZ_REQ_OUTPUTPOS = 0x0647 - PZ_GET_OUTPUTPOS = 0x0648 - PZ_SET_INPUTVOLTSSRC = 0x0652 - PZ_REQ_INPUTVOLTSSRC = 0x0653 - PZ_GET_INPUTVOLTSSRC = 0x0654 - PZ_SET_PICONSTS = 0x0655 - PZ_REQ_PICONSTS = 0x0656 - PZ_GET_PICONSTS = 0x0657 - PZ_GET_PZSTATUSUPDATE = 0x0661 - PZ_SET_OUTPUTLUT = 0x0700 - PZ_REQ_OUTPUTLUT = 0x0701 - PZ_GET_OUTPUTLUT = 0x0702 - PZ_SET_OUTPUTLUTPARAMS = 0x0703 - PZ_REQ_OUTPUTLUTPARAMS = 0x0704 - PZ_GET_OUTPUTLUTPARAMS = 0x0705 - PZ_START_LUTOUTPUT = 0x0706 - PZ_STOP_LUTOUTPUT = 0x0707 - PZ_SET_EEPROMPARAMS = 0x07D0 - PZ_SET_TPZ_DISPSETTINGS = 0x07D1 - PZ_REQ_TPZ_DISPSETTINGS = 0x07D2 - PZ_GET_TPZ_DISPSETTINGS = 0x07D3 - PZ_SET_TPZ_IOSETTINGS = 0x07D4 - PZ_REQ_TPZ_IOSETTINGS = 0x07D5 - PZ_GET_TPZ_IOSETTINGS = 0x07D6 - - -class Direction: - def __init__(self, direction): - if direction not in (1, 2): - raise ValueError("Direction must be either 1 or 2") - self.direction = direction - - def __str__(self): - if self.direction == 1: - return "forward" - else: - return "backward" - - -class MsgError(Exception): - pass - - -class Message: - def __init__(self, id, param1=0, param2=0, dest=0x50, src=0x01, - data=None): - if data is not None: - dest |= 0x80 - self.id = id - self.param1 = param1 - self.param2 = param2 - self.dest = dest - self.src = src - self.data = data - - def __str__(self): - return ("".format( - self.id, self.param1, self.param2, - self.dest, self.src)) - - @staticmethod - def unpack(data): - id, param1, param2, dest, src = st.unpack("` method - between the three values 75 V, 100 V and 150 V. - """ - if voltage < 0 or voltage > self.voltage_limit: - raise ValueError("Voltage must be in range [0;{}]" - .format(self.voltage_limit)) - volt = int(voltage*32767/self.voltage_limit) - payload = st.pack("` method. - - 0x01 External Signal: Unit sums the differential signal on the rear - panel EXT IN(+) and EXT IN(-) connectors with the voltage set - using the set_outputvolts method. - - 0x02 Potentiometer: The HV amp output is controlled by a - potentiometer input (either on the control panel, or connected - to the rear panel User I/O D-type connector) summed with the - voltage set using the set_outputvolts method. 
- - The values can be bitwise or'ed to sum the software source with - either or both of the other source options. - """ - payload = st.pack("` method - docstring for meaning of bits. - :rtype: int - """ - get_msg = await self.send_request(MGMSG.PZ_REQ_INPUTVOLTSSRC, - [MGMSG.PZ_GET_INPUTVOLTSSRC], 1) - return st.unpack("` - function), then only the first cyclelength values need to be set. In - this manner, any arbitrary voltage waveform can be programmed into the - LUT. Note. The LUT values are output by the system at a maximum - bandwidth of 7 KHz, e.g. 500 LUT values will take approximately 71 ms - to be clocked out. - - :param lut_index: The position in the array of the value to be set (0 - to 512 for TPZ). - :param output: The voltage value to be set. Values are in the range - [0; voltage_limit]. Voltage_limit being set with the - :py:meth:`set_tpz_io_settings - ` method. - """ - volt = round(output*32767/self.voltage_limit) - payload = st.pack("` for the - meaning of those parameters. - :rtype: a 2 elements tuple (int, int) - """ - get_msg = await self.send_request(MGMSG.PZ_REQ_TPZ_IOSETTINGS, - [MGMSG.PZ_GET_TPZ_IOSETTINGS], 1) - voltage_limit, hub_analog_input = st.unpack("` for a - description of each tuple element meaning. - :rtype: An 8 int tuple - """ - get_msg = await self.send_request(MGMSG.MOT_REQ_POTPARAMS, - [MGMSG.MOT_GET_POTPARAMS], 1) - return st.unpack("` - method. - :rtype: A 2 int tuple. - """ - get_msg = await self.send_request(MGMSG.MOT_REQ_LIMSWITCHPARAMS, - [MGMSG.MOT_GET_LIMSWITCHPARAMS], 1) - return st.unpack("` - command. - """ - await self.send_request(MGMSG.MOT_MOVE_RELATIVE, - [MGMSG.MOT_MOVE_COMPLETED, MGMSG.MOT_MOVE_STOPPED], 1) - - async def move_relative(self, relative_distance): - """Start a relative move - - :param relative_distance: The distance to move in position encoder - counts. - """ - payload = st.pack("` - command. - """ - await self.send_request(MGMSG.MOT_MOVE_ABSOLUTE, - [MGMSG.MOT_MOVE_COMPLETED, MGMSG.MOT_MOVE_STOPPED], - param1=1) - - async def move_absolute(self, absolute_distance): - """Start an absolute move. - - :param absolute_distance: The distance to move. This is a signed - integer that specifies the absolute distance in position encoder - counts. - """ - payload = st.pack("` - command until a :py:meth:`move_stop() - ` command (either - StopImmediate or StopProfiled) is called, or a limit switch is reached. - - :param direction: The direction to jog: 1 to move forward, 2 to move - backward. - """ - await self.send(Message(MGMSG.MOT_MOVE_VELOCITY, param1=1, param2=direction)) - - async def move_stop(self, stop_mode): - """Stop any type of motor move. - - Stops any of those motor move: relative, absolute, homing or move at - velocity. - - :param stop_mode: The stop mode defines either an immediate (abrupt) - or profiled stop. Set this byte to 1 to stop immediately, or to 2 - to stop in a controlled (profiled) manner. - """ - if await self.is_moving(): - await self.send_request(MGMSG.MOT_MOVE_STOP, - [MGMSG.MOT_MOVE_STOPPED, - MGMSG.MOT_MOVE_COMPLETED], - 1, stop_mode) - - async def set_dc_pid_parameters(self, proportional, integral, differential, - integral_limit, filter_control=0x0F): - """Set the position control loop parameters. - - :param proportional: The proportional gain, values in range [0; 32767]. - :param integral: The integral gain, values in range [0; 32767]. - :param differential: The differential gain, values in range [0; 32767]. 
- :param integral_limit: The integral limit parameter is used to cap the - value of the integrator to prevent runaway of the integral sum at - the output. Values are in range [0; 32767]. If set to 0, then - integration term in the PID loop is ignored. - :param filter_control: Identifies which of the above are applied by - setting the corresponding bit to 1. By default, all parameters are - applied, and this parameter is set to 0x0F (1111). - """ - payload = st.pack("` for - precise description. - :rtype: A 5 int tuple. - """ - get_msg = await self.send_request(MGMSG.MOT_REQ_DCPIDPARAMS, - [MGMSG.MOT_GET_DCPIDPARAMS], 1) - return st.unpack("` method. - If set to 2, each button can be programmed with a differente - position value such that the controller will move the motor to that - position when the specific button is pressed. - :param position1: The position (in encoder counts) to which the motor - will move when the top button is pressed. - :param position2: The position (in encoder counts) to which the motor - will move when the bottom button is pressed. - """ - payload = st.pack("` for - description. - :rtype: A 3 int tuple - """ - get_msg = await self.send_request(MGMSG.MOT_REQ_BUTTONPARAMS, - [MGMSG.MOT_GET_BUTTONPARAMS], 1) - return st.unpack(" 512: - raise ValueError("LUT index should be in range [0;512] and not {}" - .format(lut_index)) - self.lut[lut_index] = output - - def get_output_lut(self): - return 0, 0 # FIXME: the API description here doesn't make any sense - - def set_output_lut_parameters(self, mode, cycle_length, num_cycles, - delay_time, precycle_rest, postcycle_rest): - self.mode = mode - self.cycle_length = cycle_length - self.num_cycles = num_cycles - self.delay_time = delay_time - self.precycle_rest = precycle_rest - self.postcycle_rest = postcycle_rest - - def get_output_lut_parameters(self): - return (self.mode, self.cycle_length, self.num_cycles, - self.delay_time, self.precycle_rest, self.postcycle_rest) - - def start_lut_output(self): - pass - - def stop_lut_output(self): - pass - - def set_eeprom_parameters(self, msg_id): - pass - - def set_tpz_display_settings(self, intensity): - self.intensity = intensity - - def get_tpz_display_settings(self): - return self.intensity - - def set_tpz_io_settings(self, voltage_limit, hub_analog_input): - if voltage_limit not in [75, 100, 150]: - raise ValueError("voltage_limit must be 75 V, 100 V or 150 V") - self.voltage_limit = voltage_limit - self.hub_analog_input = hub_analog_input - - def get_tpz_io_settings(self): - return self.voltage_limit, self.hub_analog_input - - -class TdcSim: - def close(self): - pass - - def module_identify(self): - pass - - def set_pot_parameters(self, zero_wnd, vel1, wnd1, vel2, wnd2, vel3, - wnd3, vel4): - self.zero_wnd = zero_wnd - self.vel1 = vel1 - self.wnd1 = wnd1 - self.vel2 = vel2 - self.wnd2 = wnd2 - self.vel3 = vel3 - self.wnd3 = wnd3 - self.vel4 = vel4 - - def get_pot_parameters(self): - return (self.zero_wnd, self.vel1, self.wnd1, self.vel2, self.wnd2, - self.vel3, self.wnd3, self.vel4) - - def hub_get_bay_used(self): - return False - - def set_position_counter(self, position): - self.position = position - - def get_position_counter(self): - return self.position - - def set_encoder_counter(self, encoder_count): - self.encoder_count = encoder_count - - def get_encoder_counter(self): - return self.encoder_count - - def set_velocity_parameters(self, acceleration, max_velocity): - self.acceleration = acceleration - self.max_velocity = max_velocity - - def 
get_velocity_parameters(self): - return self.acceleration, self.max_velocity - - def set_jog_parameters(self, mode, step_size, acceleration, - max_velocity, stop_mode): - self.jog_mode = mode - self.step_size = step_size - self.acceleration = acceleration - self.max_velocity = max_velocity - self.stop_mode = stop_mode - - def get_jog_parameters(self): - return (self.jog_mode, self.step_size, self.acceleration, - self.max_velocity, self.stop_mode) - - def set_gen_move_parameters(self, backlash_distance): - self.backlash_distance = backlash_distance - - def get_gen_move_parameters(self): - return self.backlash_distance - - def set_move_relative_parameters(self, relative_distance): - self.relative_distance = relative_distance - - def get_move_relative_parameters(self): - return self.relative_distance - - def set_move_absolute_parameters(self, absolute_position): - self.absolute_position = absolute_position - - def get_move_absolute_parameters(self): - return self.absolute_position - - def set_home_parameters(self, home_velocity): - self.home_velocity = home_velocity - - def get_home_parameters(self): - return self.home_velocity - - def move_home(self): - pass - - def set_limit_switch_parameters(self, cw_hw_limit, ccw_hw_limit): - self.cw_hw_limit = cw_hw_limit - self.ccw_hw_limit = ccw_hw_limit - - def get_limit_switch_parameters(self): - return self.cw_hw_limit, self.ccw_hw_limit - - def move_relative_memory(self): - pass - - def move_relative(self, relative_distance): - pass - - def move_absolute_memory(self): - pass - - def move_absolute(self, absolute_distance): - pass - - def move_jog(self, direction, async=False): - pass - - def move_velocity(self, direction): - pass - - def move_stop(self, stop_mode, async=False): - pass - - def set_dc_pid_parameters(self, proportional, integral, differential, - integral_limit, filter_control=0x0F): - self.proportional = proportional - self.integral = integral - self.differential = differential - self.integral_limit = integral_limit - self.filter_control = filter_control - - def get_dc_pid_parameters(self): - return (self.proportional, self.integral, self.differential, - self.integral_limit, self.filter_control) - - def set_av_modes(self, mode_bits): - self.mode_bits = mode_bits - - def get_av_modes(self): - return self.mode_bits - - def set_button_parameters(self, mode, position1, position2): - self.mode = mode - self.position1 = position1 - self.position2 = position2 - - def get_button_parameters(self): - return self.mode, self.position1, self.position2 - - def set_eeprom_parameters(self, msg_id): - pass - - def get_dc_status_update(self): - return 0, 0, 0x80000400 # FIXME: not implemented yet for simulation - - def get_status_bits(self): - return 0x80000400 # FIXME: not implemented yet for simulation - - def suspend_end_of_move_messages(self): - pass - - def resume_end_of_move_messages(self): - pass diff --git a/artiq/examples/README.rst b/artiq/examples/README.rst new file mode 100644 index 000000000..4a37fd15c --- /dev/null +++ b/artiq/examples/README.rst @@ -0,0 +1,11 @@ +ARTIQ experiment examples +========================= + +This directory contains several sample ARTIQ master configurations +and associated experiments that illustrate basic usage of various +hardware and software features. + +New users might want to peruse the ``no_hardware`` directory to +explore the argument/dataset machinery without needing access to +hardware, and the ``kc705_nist_clock`` directory for inspiration +on how to coordinate between host and FPGA core device code. 
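For orientation only (this is not part of the patch above), a minimal host-side experiment of the kind collected under ``no_hardware`` could look like the sketch below. The class name, argument name and dataset key are invented for illustration; the argument and dataset calls are the standard ``EnvExperiment`` API, and no core device is requested in ``build()``::

    import numpy as np

    from artiq.experiment import *


    class ArgumentDatasetDemo(EnvExperiment):
        """Host-only sketch exercising arguments and datasets."""
        def build(self):
            # Arguments declared here appear in the dashboard's submission form.
            self.setattr_argument("npoints", NumberValue(100, ndecimals=0, step=1))

        def run(self):
            n = int(self.npoints)
            # broadcast=True publishes the dataset so applets can plot it live.
            self.set_dataset("demo.parabola", np.full(n, np.nan), broadcast=True)
            for i in range(n):
                self.mutate_dataset("demo.parabola", i, i * i)

Submitting such an experiment from the dashboard exercises argument entry, scheduling and dataset broadcasting without any hardware attached.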
diff --git a/artiq/examples/artiq_ipython_notebook.ipynb b/artiq/examples/artiq_ipython_notebook.ipynb index 6bab291fe..7964cd5a4 100644 --- a/artiq/examples/artiq_ipython_notebook.ipynb +++ b/artiq/examples/artiq_ipython_notebook.ipynb @@ -34,8 +34,8 @@ "import pandas as pd\n", "import h5py\n", "\n", - "from artiq.protocols.pc_rpc import (Client, AsyncioClient,\n", - " BestEffortClient, AutoTarget)\n", + "from sipyco.pc_rpc import (Client, AsyncioClient,\n", + " BestEffortClient, AutoTarget)\n", "from artiq.master.databases import DeviceDB\n", "from artiq.master.worker_db import DeviceManager" ] @@ -72,8 +72,8 @@ "assert lda.get_attenuation() == 42\n", "\n", "# ... or we can wire it up ourselves if you know where it is\n", - "assert ddb.get(\"lda\")[\"host\"] == \"::1\"\n", - "assert ddb.get(\"lda\")[\"port\"] == 3253\n", + "assert ddb.get(\"lda\", resolve_alias=True)[\"host\"] == \"::1\"\n", + "assert ddb.get(\"lda\", resolve_alias=True)[\"port\"] == 3253\n", "\n", "# there are different Client types tailored to different use cases:\n", "\n", diff --git a/artiq/examples/drtio/device_db.py b/artiq/examples/drtio/device_db.py deleted file mode 100644 index 09f70c544..000000000 --- a/artiq/examples/drtio/device_db.py +++ /dev/null @@ -1,131 +0,0 @@ -core_addr = "sayma1.lab.m-labs.hk" - -device_db = { - "core": { - "type": "local", - "module": "artiq.coredevice.core", - "class": "Core", - "arguments": {"host": core_addr, "ref_period": 2e-9} - }, - "core_log": { - "type": "controller", - "host": "::1", - "port": 1068, - "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr - }, - "core_cache": { - "type": "local", - "module": "artiq.coredevice.cache", - "class": "CoreCache" - }, - - "led0": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0}, - }, - "led1": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 1}, - }, - "led2": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 2}, - }, - "led3": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 3}, - }, - "ttl_sma_out": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLInOut", - "arguments": {"channel": 4} - }, - "ttl_sma_in": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLInOut", - "arguments": {"channel": 5} - }, - - "rled0": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0x010000}, - }, - "rled1": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0x010001}, - }, - "rled2": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0x010002}, - }, - "rled3": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0x010003}, - }, - "rttl_sma_out": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLInOut", - "arguments": {"channel": 0x010004} - }, - "rttl_sma_in": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLInOut", - "arguments": {"channel": 0x010005} - }, - - "converter_spi": { - "type": "local", - "module": "artiq.coredevice.spi", - "class": "NRTSPIMaster", - }, - "ad9154_spi0": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "converter_spi", 
"chip_select": 2} - }, - "ad9154_spi1": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "converter_spi", "chip_select": 3} - }, - "rconverter_spi": { - "type": "local", - "module": "artiq.coredevice.spi", - "class": "NRTSPIMaster", - "arguments": {"busno": 0x010000} - }, - "rad9154_spi0": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "rconverter_spi", "chip_select": 2} - }, - "rad9154_spi1": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "rconverter_spi", "chip_select": 3} - }, -} diff --git a/artiq/examples/drtio/repository/ad9154_spi.py b/artiq/examples/drtio/repository/ad9154_spi.py deleted file mode 100644 index 8e55a5919..000000000 --- a/artiq/examples/drtio/repository/ad9154_spi.py +++ /dev/null @@ -1,23 +0,0 @@ -from artiq.coredevice.ad9154_reg import * -from artiq.experiment import * - - -class Test(EnvExperiment): - def build(self): - self.setattr_device("core") - self.ad9154_spi = self.get_device("ad9154_spi0") - self.rad9154_spi = self.get_device("rad9154_spi0") - - @kernel - def run(self): - self.ad9154_spi.setup_bus() - self.rad9154_spi.setup_bus() - - for i in range(5): - self.p("local PRODID: 0x%04x", (self.ad9154_spi.read(AD9154_PRODIDH) << 8) | - self.ad9154_spi.read(AD9154_PRODIDL)) - self.p("remote PRODID: 0x%04x", (self.rad9154_spi.read(AD9154_PRODIDH) << 8) | - self.rad9154_spi.read(AD9154_PRODIDL)) - - def p(self, f, *a): - print(f % a) diff --git a/artiq/examples/drtio/repository/blink_forever.py b/artiq/examples/drtio/repository/blink_forever.py deleted file mode 100644 index ededc57b9..000000000 --- a/artiq/examples/drtio/repository/blink_forever.py +++ /dev/null @@ -1,27 +0,0 @@ -from artiq.experiment import * - - -class BlinkForever(EnvExperiment): - def build(self): - self.setattr_device("core") - self.rleds = [self.get_device("rled" + str(i)) for i in range(4)] - self.leds = [self.get_device("led" + str(i)) for i in range(4)] - - @kernel - def run(self): - self.core.reset() - - while True: - with parallel: - for led in self.leds: - led.pulse(250*ms) - for led in self.rleds: - led.pulse(250*ms) - t = now_mu() - for led in self.leds: - at_mu(t) - led.pulse(500*ms) - for led in self.rleds: - at_mu(t) - led.pulse(500*ms) - delay(250*ms) diff --git a/artiq/examples/drtio/repository/pulse_rate.py b/artiq/examples/drtio/repository/pulse_rate.py deleted file mode 100644 index f430a2a67..000000000 --- a/artiq/examples/drtio/repository/pulse_rate.py +++ /dev/null @@ -1,25 +0,0 @@ -from artiq.experiment import * - - -class PulseRate(EnvExperiment): - def build(self): - self.setattr_device("core") - self.setattr_device("rttl_sma_out") - - @kernel - def run(self): - self.core.reset() - - dt = self.core.seconds_to_mu(300*ns) - while True: - for i in range(10000): - try: - self.rttl_sma_out.pulse_mu(dt) - delay_mu(dt) - except RTIOUnderflow: - dt += 1 - self.core.break_realtime() - break - else: - print(self.core.mu_to_seconds(dt)) - return diff --git a/artiq/examples/kasli/device_db.py b/artiq/examples/kasli/device_db.py new file mode 100644 index 000000000..4b20c7a28 --- /dev/null +++ b/artiq/examples/kasli/device_db.py @@ -0,0 +1,231 @@ +# Tester device database + +core_addr = "192.168.1.70" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1e-9} + }, + "core_log": { + 
"type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }, + + "i2c_switch0": { + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {"address": 0xe0} + }, + "i2c_switch1": { + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {"address": 0xe2} + }, +} + + +# DIO (EEM5) starting at RTIO channel 0 +for i in range(8): + device_db["ttl" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut" if i < 4 else "TTLOut", + "arguments": {"channel": i}, + } + device_db["ttl{}_counter".format(i)] = { + "type": "local", + "module": "artiq.coredevice.edge_counter", + "class": "EdgeCounter", + "arguments": {"channel": 8 + i}, + } + + +# Urukul (EEM1) starting at RTIO channel 12 +device_db.update( + eeprom_urukul0={ + "type": "local", + "module": "artiq.coredevice.kasli_i2c", + "class": "KasliEEPROM", + "arguments": {"port": "EEM1"} + }, + spi_urukul0={ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 12} + }, + ttl_urukul0_sync={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLClockGen", + "arguments": {"channel": 13, "acc_width": 4} + }, + ttl_urukul0_io_update={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 14} + }, + ttl_urukul0_sw0={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 15} + }, + ttl_urukul0_sw1={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 16} + }, + ttl_urukul0_sw2={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 17} + }, + ttl_urukul0_sw3={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 18} + }, + urukul0_cpld={ + "type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": { + "spi_device": "spi_urukul0", + "io_update_device": "ttl_urukul0_io_update", + "sync_device": "ttl_urukul0_sync", + "refclk": 125e6, + "clk_sel": 2 + } + } +) + +for i in range(4): + device_db["urukul0_ch" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": { + "pll_n": 32, + "chip_select": 4 + i, + "cpld_device": "urukul0_cpld", + "sw_device": "ttl_urukul0_sw" + str(i), + "sync_delay_seed": "eeprom_urukul0:" + str(64 + 4*i), + "io_update_delay": "eeprom_urukul0:" + str(64 + 4*i), + } + } + + +# Sampler (EEM3) starting at RTIO channel 19 +device_db["spi_sampler0_adc"] = { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 19} +} +device_db["spi_sampler0_pgia"] = { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 20} +} +device_db["spi_sampler0_cnv"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 21}, +} +device_db["sampler0"] = { + "type": "local", + "module": "artiq.coredevice.sampler", + "class": "Sampler", + "arguments": { + "spi_adc_device": "spi_sampler0_adc", + "spi_pgia_device": "spi_sampler0_pgia", + "cnv_device": 
"spi_sampler0_cnv" + } +} + + +# Zotino (EEM4) starting at RTIO channel 22 +device_db["spi_zotino0"] = { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 22} +} +device_db["ttl_zotino0_ldac"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 23} +} +device_db["ttl_zotino0_clr"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 24} +} +device_db["zotino0"] = { + "type": "local", + "module": "artiq.coredevice.zotino", + "class": "Zotino", + "arguments": { + "spi_device": "spi_zotino0", + "ldac_device": "ttl_zotino0_ldac", + "clr_device": "ttl_zotino0_clr" + } +} + + +device_db.update( + led0={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 25} + }, + led1={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 26} + }, +) + +device_db.update( + i2c_switch="i2c_switch0", + + ttl_out="ttl4", + ttl_out_serdes="ttl4", + + loop_out="ttl4", + loop_in="ttl0", + loop_in_counter="ttl0_counter", + + # Urukul CPLD with sync and io_update, IFC MODE 0b1000 + urukul_cpld="urukul0_cpld", + # Urukul AD9910 with switch TTL, internal 125 MHz MMCX connection + urukul_ad9910="urukul0_ch0", +) diff --git a/artiq/examples/kasli/idle_kernel.py b/artiq/examples/kasli/idle_kernel.py new file mode 100644 index 000000000..05184f731 --- /dev/null +++ b/artiq/examples/kasli/idle_kernel.py @@ -0,0 +1,21 @@ +from artiq.experiment import * + + +class IdleKernel(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("led0") + + @kernel + def run(self): + start_time = now_mu() + self.core.seconds_to_mu(500*ms) + while self.core.get_rtio_counter_mu() < start_time: + pass + self.core.reset() + while True: + self.led0.pulse(250*ms) + delay(125*ms) + self.led0.pulse(125*ms) + delay(125*ms) + self.led0.pulse(125*ms) + delay(250*ms) diff --git a/artiq/examples/kasli_drtioswitching/device_db.py b/artiq/examples/kasli_drtioswitching/device_db.py new file mode 100644 index 000000000..433eaf7bc --- /dev/null +++ b/artiq/examples/kasli_drtioswitching/device_db.py @@ -0,0 +1,34 @@ +core_addr = "192.168.1.70" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)} + }, + "core_log": { + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }, +} + +for i in range(3): + device_db["led" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": i << 16}, + } diff --git a/artiq/examples/kasli_drtioswitching/repository/blink.py b/artiq/examples/kasli_drtioswitching/repository/blink.py new file mode 100644 index 000000000..2af2d1233 --- /dev/null +++ b/artiq/examples/kasli_drtioswitching/repository/blink.py @@ -0,0 +1,16 @@ +from artiq.experiment import * + + +class Blink(EnvExperiment): + def build(self): + self.setattr_device("core") + self.leds = [self.get_device("led0"), self.get_device("led2")] + + @kernel + def run(self): + self.core.reset() + + while True: + for led in self.leds: + led.pulse(200*ms) + 
delay(200*ms) diff --git a/artiq/examples/kasli_sawgmaster/device_db.py b/artiq/examples/kasli_sawgmaster/device_db.py new file mode 100644 index 000000000..4cba2bbd2 --- /dev/null +++ b/artiq/examples/kasli_sawgmaster/device_db.py @@ -0,0 +1,177 @@ +core_addr = "192.168.1.70" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)} + }, + "core_log": { + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }, +} + +device_db.update( + spi_urukul0={ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 0} + }, + ttl_urukul0_io_update={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 1} + }, + ttl_urukul0_sw0={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 2} + }, + ttl_urukul0_sw1={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 3} + }, + ttl_urukul0_sw2={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 4} + }, + ttl_urukul0_sw3={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 5} + }, + urukul0_cpld={ + "type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": { + "spi_device": "spi_urukul0", + "io_update_device": "ttl_urukul0_io_update", + "refclk": 150e6, + "clk_sel": 2 + } + } +) + +for i in range(4): + device_db["urukul0_ch" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": { + "pll_n": 16, # 600MHz sample rate + "pll_vco": 2, + "chip_select": 4 + i, + "cpld_device": "urukul0_cpld", + "sw_device": "ttl_urukul0_sw" + str(i) + } + } + +""" +artiq_route routing.bin init +artiq_route routing.bin set 0 0 +artiq_route routing.bin set 1 1 0 +artiq_route routing.bin set 2 1 1 0 +artiq_route routing.bin set 3 2 0 +artiq_route routing.bin set 4 2 1 0 +artiq_coremgmt -D kasli config write -f routing_table routing.bin +""" + +for sayma in range(2): + amc_base = 0x010000 + sayma*0x020000 + rtm_base = 0x020000 + sayma*0x020000 + for i in range(4): + device_db["led" + str(4*sayma+i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + i} + } + for i in range(2): + device_db["ttl_mcx" + str(2*sayma+i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": amc_base + 4 + i} + } + for i in range(8): + device_db["sawg" + str(8*sayma+i)] = { + "type": "local", + "module": "artiq.coredevice.sawg", + "class": "SAWG", + "arguments": {"channel_base": amc_base + 6 + i*10, "parallelism": 4} + } + for basemod in range(2): + for i in range(4): + device_db["sawg_sw" + str(8*sayma+4*basemod+i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": rtm_base + basemod*9 + i} + } + att_idx = 2*sayma + basemod + device_db["basemod_att_rst_n"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 
rtm_base + basemod*9 + 4} + } + device_db["basemod_att_clk"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": rtm_base + basemod*9 + 5} + } + device_db["basemod_att_le"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": rtm_base + basemod*9 + 6} + } + device_db["basemod_att_mosi"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": rtm_base + basemod*9 + 7} + } + device_db["basemod_att_miso"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": rtm_base + basemod*9 + 8} + } + device_db["basemod_att"+str(att_idx)] = { + "type": "local", + "module": "artiq.coredevice.basemod_att", + "class": "BaseModAtt", + "arguments": { + "rst_n": "basemod_att_rst_n"+str(att_idx), + "clk": "basemod_att_clk"+str(att_idx), + "le": "basemod_att_le"+str(att_idx), + "mosi": "basemod_att_mosi"+str(att_idx), + "miso": "basemod_att_miso"+str(att_idx), + } + } + diff --git a/artiq/examples/kasli_sawgmaster/repository/basemod.py b/artiq/examples/kasli_sawgmaster/repository/basemod.py new file mode 100644 index 000000000..3ca9a1c86 --- /dev/null +++ b/artiq/examples/kasli_sawgmaster/repository/basemod.py @@ -0,0 +1,25 @@ +from artiq.experiment import * + + +class BaseMod(EnvExperiment): + def build(self): + self.setattr_device("core") + self.basemods = [self.get_device("basemod_att0"), self.get_device("basemod_att1")] + self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(8)] + + @kernel + def run(self): + self.core.reset() + for basemod in self.basemods: + self.core.break_realtime() + delay(10*ms) + basemod.reset() + delay(10*ms) + basemod.set(0.0, 0.0, 0.0, 0.0) + delay(10*ms) + print(basemod.get_mu()) + + self.core.break_realtime() + for rfsw in self.rfsws: + rfsw.on() + delay(1*ms) diff --git a/artiq/examples/kasli_sawgmaster/repository/sines_2sayma.py b/artiq/examples/kasli_sawgmaster/repository/sines_2sayma.py new file mode 100644 index 000000000..3a8204941 --- /dev/null +++ b/artiq/examples/kasli_sawgmaster/repository/sines_2sayma.py @@ -0,0 +1,37 @@ +from artiq.experiment import * + + +class Sines2Sayma(EnvExperiment): + def build(self): + self.setattr_device("core") + self.sawgs = [self.get_device("sawg"+str(i)) for i in range(16)] + + @kernel + def drtio_is_up(self): + for i in range(5): + if not self.core.get_rtio_destination_status(i): + return False + return True + + @kernel + def run(self): + while True: + print("waiting for DRTIO ready...") + while not self.drtio_is_up(): + pass + print("OK") + + self.core.reset() + + for sawg in self.sawgs: + delay(1*ms) + sawg.reset() + + for sawg in self.sawgs: + delay(1*ms) + sawg.amplitude1.set(.4) + # Do not use a sub-multiple of oscilloscope sample rates. + sawg.frequency0.set(9*MHz) + + while self.drtio_is_up(): + pass diff --git a/artiq/examples/kasli_sawgmaster/repository/sines_urukul_sayma.py b/artiq/examples/kasli_sawgmaster/repository/sines_urukul_sayma.py new file mode 100644 index 000000000..dfd8e46c9 --- /dev/null +++ b/artiq/examples/kasli_sawgmaster/repository/sines_urukul_sayma.py @@ -0,0 +1,89 @@ +from artiq.experiment import * + + +class SinesUrukulSayma(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("urukul0_cpld") + + # Urukul clock output syntonized to the RTIO clock. + # Can be used as HMC830 reference on Sayma RTM. 
+ # When using this reference, Sayma must be recalibrated every time Urukul + # is rebooted, as Urukul is not synchronized to the Kasli. + self.urukul_hmc_ref = self.get_device("urukul0_ch3") + + # Urukul measurement channels - compare with SAWG outputs. + # When testing sync, do not reboot Urukul, as it is not + # synchronized to the Kasli. + self.urukul_meas = [self.get_device("urukul0_ch" + str(i)) for i in range(3)] + # The same waveform is output on all first 4 SAWG channels (first DAC). + self.sawgs = [self.get_device("sawg"+str(i)) for i in range(4)] + self.basemod = self.get_device("basemod_att0") + self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(4)] + + + # DRTIO destinations: + # 0: local + # 1: Sayma AMC + # 2: Sayma RTM + @kernel + def drtio_is_up(self): + for i in range(3): + if not self.core.get_rtio_destination_status(i): + return False + return True + + @kernel + def run(self): + f = 9*MHz + dds_ftw = self.urukul_meas[0].frequency_to_ftw(f) + sawg_ftw = self.sawgs[0].frequency0.to_mu(f) + if dds_ftw != sawg_ftw: + print("DDS and SAWG FTWs do not match:", dds_ftw, sawg_ftw) + return + + self.core.reset() + self.urukul0_cpld.init() + + delay(1*ms) + self.urukul_hmc_ref.init() + self.urukul_hmc_ref.set_mu(0x40000000, asf=self.urukul_hmc_ref.amplitude_to_asf(0.6)) + self.urukul_hmc_ref.set_att(6.) + self.urukul_hmc_ref.sw.on() + + for urukul_ch in self.urukul_meas: + delay(1*ms) + urukul_ch.init() + urukul_ch.set_mu(dds_ftw, asf=urukul_ch.amplitude_to_asf(0.5)) + urukul_ch.set_att(6.) + urukul_ch.sw.on() + + while True: + print("waiting for DRTIO ready...") + while not self.drtio_is_up(): + pass + print("OK") + + self.core.reset() + + delay(10*ms) + self.basemod.reset() + delay(10*ms) + self.basemod.set(3.0, 3.0, 3.0, 3.0) + delay(10*ms) + for rfsw in self.rfsws: + delay(1*ms) + rfsw.on() + + for sawg in self.sawgs: + delay(1*ms) + sawg.reset() + + for sawg in self.sawgs: + delay(1*ms) + sawg.amplitude1.set(.4) + sawg.frequency0.set_mu(sawg_ftw) + sawg.phase0.set_mu(sawg_ftw*now_mu() >> 17) + + while self.drtio_is_up(): + pass diff --git a/artiq/examples/kasli_suservo/device_db.py b/artiq/examples/kasli_suservo/device_db.py new file mode 100644 index 000000000..d33bfb280 --- /dev/null +++ b/artiq/examples/kasli_suservo/device_db.py @@ -0,0 +1,274 @@ +core_addr = "10.0.16.121" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1e-9} + }, + "core_log": { + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }, + + "i2c_switch0": { + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {"address": 0xe0} + }, + "i2c_switch1": { + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {"address": 0xe2} + }, + + "ttl0": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": 0}, + }, + "ttl1": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": 1}, + }, + "ttl2": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": 2}, + }, + "ttl3": { + "type": "local", + "module": 
"artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": 3}, + }, + + "ttl4": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 4}, + }, + "ttl5": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 5}, + }, + "ttl6": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 6}, + }, + "ttl7": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 7}, + }, + "ttl8": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 8}, + }, + "ttl9": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 9}, + }, + "ttl10": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 10}, + }, + "ttl11": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 11}, + }, + "ttl12": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 12}, + }, + "ttl13": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 13}, + }, + "ttl14": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 14}, + }, + "ttl15": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 15}, + }, + + "suservo0_ch0": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 16, "servo_device": "suservo0"} + }, + "suservo0_ch1": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 17, "servo_device": "suservo0"} + }, + "suservo0_ch2": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 18, "servo_device": "suservo0"} + }, + "suservo0_ch3": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 19, "servo_device": "suservo0"} + }, + "suservo0_ch4": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 20, "servo_device": "suservo0"} + }, + "suservo0_ch5": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 21, "servo_device": "suservo0"} + }, + "suservo0_ch6": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 22, "servo_device": "suservo0"} + }, + "suservo0_ch7": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {"channel": 23, "servo_device": "suservo0"} + }, + + "suservo0": { + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "SUServo", + "arguments": { + "channel": 24, + "pgia_device": "spi_sampler0_pgia", + "cpld0_device": "urukul0_cpld", + "cpld1_device": "urukul1_cpld", + "dds0_device": "urukul0_dds", + "dds1_device": "urukul1_dds" + } + }, + + "spi_sampler0_pgia": { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 25} + }, + + "spi_urukul0": { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 26} + }, + "urukul0_cpld": { + "type": "local", + "module": 
"artiq.coredevice.urukul", + "class": "CPLD", + "arguments": { + "spi_device": "spi_urukul0", + "refclk": 100e6, + "clk_sel": 0 + } + }, + "urukul0_dds": { + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": { + "pll_n": 40, + "chip_select": 3, + "cpld_device": "urukul0_cpld", + } + }, + + "spi_urukul1": { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 27} + }, + "urukul1_cpld": { + "type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": { + "spi_device": "spi_urukul1", + "refclk": 100e6, + "clk_sel": 0 + } + }, + "urukul1_dds": { + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": { + "pll_n": 40, + "chip_select": 3, + "cpld_device": "urukul1_cpld", + } + }, + + "led0": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 28} + }, + "led1": { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 29} + } +} diff --git a/artiq/examples/kasli_suservo/repository/suservo.py b/artiq/examples/kasli_suservo/repository/suservo.py new file mode 100644 index 000000000..7bc887030 --- /dev/null +++ b/artiq/examples/kasli_suservo/repository/suservo.py @@ -0,0 +1,99 @@ +from artiq.experiment import * + + +class SUServo(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("led0") + self.setattr_device("suservo0") + for i in range(8): + self.setattr_device("suservo0_ch{}".format(i)) + + def run(self): + self.init() + + def p(self, d): + mask = 1 << 18 - 1 + for name, val in zip("ftw1 b1 pow cfg offset a1 ftw0 b0".split(), d): + val = -(val & mask) + (val & ~mask) + print("{}: {:#x} = {}".format(name, val, val)) + + @rpc(flags={"async"}) + def p1(self, adc, asf, st): + print("ADC: {:10s}, ASF: {:10s}, clipped: {}".format( + "#"*int(adc), "#"*int(asf*10), (st >> 8) & 1), end="\r") + + @kernel + def init(self): + self.core.break_realtime() + self.core.reset() + self.led() + + self.suservo0.init() + delay(1*us) + # ADC PGIA gain + for i in range(8): + self.suservo0.set_pgia_mu(i, 0) + delay(10*us) + # DDS attenuator + self.suservo0.cpld0.set_att(0, 10.) + delay(1*us) + # Servo is done and disabled + assert self.suservo0.get_status() & 0xff == 2 + + # set up profile 0 on channel 0: + delay(120*us) + self.suservo0_ch0.set_y( + profile=0, + y=0. # clear integrator + ) + self.suservo0_ch0.set_iir( + profile=0, + adc=7, # take data from Sampler channel 7 + kp=-.1, # -0.1 P gain + ki=-300./s, # low integrator gain + g=0., # no integrator gain limit + delay=0. # no IIR update delay after enabling + ) + # setpoint 0.5 (5 V with above PGIA gain setting) + # 71 MHz + # 0 phase + self.suservo0_ch0.set_dds( + profile=0, + offset=-.5, # 5 V with above PGIA settings + frequency=71*MHz, + phase=0.) 
+ # enable RF, IIR updates and profile 0 + self.suservo0_ch0.set(en_out=1, en_iir=1, profile=0) + # enable global servo iterations + self.suservo0.set_config(enable=1) + + # check servo enabled + assert self.suservo0.get_status() & 0x01 == 1 + delay(10*us) + + # read back profile data + data = [0] * 8 + self.suservo0_ch0.get_profile_mu(0, data) + self.p(data) + delay(10*ms) + + while True: + self.suservo0.set_config(0) + delay(10*us) + v = self.suservo0.get_adc(7) + delay(30*us) + w = self.suservo0_ch0.get_y(0) + delay(20*us) + x = self.suservo0.get_status() + delay(10*us) + self.suservo0.set_config(1) + self.p1(v, w, x) + delay(20*ms) + + @kernel + def led(self): + self.core.break_realtime() + for i in range(3): + self.led0.pulse(.1*s) + delay(.1*s) diff --git a/artiq/examples/master/device_db.py b/artiq/examples/kc705_nist_clock/device_db.py similarity index 55% rename from artiq/examples/master/device_db.py rename to artiq/examples/kc705_nist_clock/device_db.py index b4c9c70ce..1b3c3e615 100644 --- a/artiq/examples/master/device_db.py +++ b/artiq/examples/kc705_nist_clock/device_db.py @@ -1,8 +1,7 @@ # This is an example device database that needs to be adapted to your setup. -# The RTIO channel numbers here are for NIST CLOCK on KC705. # The list of devices here is not exhaustive. -core_addr = "kc705.lab.m-labs.hk" +core_addr = "192.168.1.50" device_db = { # Core device @@ -28,17 +27,6 @@ device_db = { "module": "artiq.coredevice.dma", "class": "CoreDMA" }, - "core_dds": { - "type": "local", - "module": "artiq.coredevice.dds", - "class": "DDSGroupAD9914", - "arguments": { - "sysclk": 3e9, - "first_dds_bus_channel": 32, - "dds_bus_count": 2, - "dds_channel_count": 3 - } - }, "i2c_switch": { "type": "local", @@ -120,49 +108,21 @@ device_db = { # Generic SPI "spi0": { "type": "local", - "module": "artiq.coredevice.spi", + "module": "artiq.coredevice.spi2", "class": "SPIMaster", "arguments": {"channel": 23} }, - "spi_mmc": { + "spi_mmc": { "type": "local", - "module": "artiq.coredevice.spi", + "module": "artiq.coredevice.spi2", "class": "SPIMaster", "arguments": {"channel": 26} }, - # FMC DIO used to connect to Zotino - "fmcdio_dirctl_clk": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 27} - }, - "fmcdio_dirctl_ser": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 28} - }, - "fmcdio_dirctl_latch": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 29} - }, - "fmcdio_dirctl": { - "type": "local", - "module": "artiq.coredevice.shiftreg", - "class": "ShiftReg", - "arguments": {"clk": "fmcdio_dirctl_clk", - "ser": "fmcdio_dirctl_ser", - "latch": "fmcdio_dirctl_latch"} - }, - # DAC "spi_ams101": { "type": "local", - "module": "artiq.coredevice.spi", + "module": "artiq.coredevice.spi2", "class": "SPIMaster", "arguments": {"channel": 22} }, @@ -172,66 +132,26 @@ device_db = { "class": "TTLOut", "arguments": {"channel": 20} }, - "spi_zotino": { - "type": "local", - "module": "artiq.coredevice.spi", - "class": "SPIMaster", - "arguments": {"channel": 30} - }, - "ttl_zotino_ldac": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 31} - }, - "dac_zotino": { - "type": "local", - "module": "artiq.coredevice.ad5360", - "class": "AD5360", - "arguments": {"spi_device": "spi_zotino", "ldac_device": "ttl_zotino_ldac"} - }, # AD9914 DDS - "dds0": { + "ad9914dds0": { "type": "local", - 
"module": "artiq.coredevice.dds", - "class": "DDSChannelAD9914", - "arguments": {"bus_channel": 32, "channel": 0}, + "module": "artiq.coredevice.ad9914", + "class": "AD9914", + "arguments": {"sysclk": 3e9, "bus_channel": 27, "channel": 0}, "comment": "Comments work in DDS panel as well" }, - "dds1": { + "ad9914dds1": { "type": "local", - "module": "artiq.coredevice.dds", - "class": "DDSChannelAD9914", - "arguments": {"bus_channel": 32, "channel": 1} + "module": "artiq.coredevice.ad9914", + "class": "AD9914", + "arguments": {"sysclk": 3e9, "bus_channel": 27, "channel": 1} }, - "dds2": { + "ad9914dds2": { "type": "local", - "module": "artiq.coredevice.dds", - "class": "DDSChannelAD9914", - "arguments": {"bus_channel": 32, "channel": 2} - }, - - # Controllers - "lda": { - "type": "controller", - "best_effort": True, - "host": "::1", - "port": 3253, - "command": "aqctl_lda -p {port} --bind {bind} --simulation" - }, - - "camera_sim": { - "type": "controller", - "host": "::1", - "port": 6283, - "target_name": "camera_sim", - "command": "python3 -m artiq.examples.remote_exec_controller" - }, - "camera_sim_rexec": { - "type": "controller_aux_target", - "controller": "camera_sim", - "target_name": "camera_sim_rexec" + "module": "artiq.coredevice.ad9914", + "class": "AD9914", + "arguments": {"sysclk": 3e9, "bus_channel": 27, "channel": 2} }, # Aliases @@ -244,8 +164,8 @@ device_db = { "loop_clock_in": "ttl7", "pmt": "ttl3", - "bd_dds": "dds0", + "bd_dds": "ad9914dds0", "bd_sw": "ttl0", - "bdd_dds": "dds1", + "bdd_dds": "ad9914dds1", "bdd_sw": "ttl1" } diff --git a/artiq/examples/master/idle_kernel.py b/artiq/examples/kc705_nist_clock/idle_kernel.py similarity index 100% rename from artiq/examples/master/idle_kernel.py rename to artiq/examples/kc705_nist_clock/idle_kernel.py diff --git a/artiq/examples/master/repository/coredevice_examples/simple/blink_forever.py b/artiq/examples/kc705_nist_clock/repository/blink_forever.py similarity index 100% rename from artiq/examples/master/repository/coredevice_examples/simple/blink_forever.py rename to artiq/examples/kc705_nist_clock/repository/blink_forever.py diff --git a/artiq/examples/master/repository/core_pause.py b/artiq/examples/kc705_nist_clock/repository/core_pause.py similarity index 100% rename from artiq/examples/master/repository/core_pause.py rename to artiq/examples/kc705_nist_clock/repository/core_pause.py diff --git a/artiq/examples/master/repository/utilities/dds_setter.py b/artiq/examples/kc705_nist_clock/repository/dds_setter.py similarity index 85% rename from artiq/examples/master/repository/utilities/dds_setter.py rename to artiq/examples/kc705_nist_clock/repository/dds_setter.py index ef13b1bd3..7f4b1447e 100644 --- a/artiq/examples/master/repository/utilities/dds_setter.py +++ b/artiq/examples/kc705_nist_clock/repository/dds_setter.py @@ -14,8 +14,8 @@ class DDSSetter(EnvExperiment): for k, v in sorted(device_db.items(), key=itemgetter(0)): if (isinstance(v, dict) and v["type"] == "local" - and v["module"] == "artiq.coredevice.dds" - and v["class"] in {"DDSChannelAD9914"}): + and v["module"] == "artiq.coredevice.ad9914" + and v["class"] == "AD9914"): self.dds[k] = { "driver": self.get_device(k), "frequency": self.get_argument( @@ -25,6 +25,7 @@ class DDSSetter(EnvExperiment): @kernel def set_dds(self, dds, frequency): + self.core.break_realtime() dds.set(frequency) delay(200*ms) diff --git a/artiq/examples/master/repository/coredevice_examples/simple/dds_test.py b/artiq/examples/kc705_nist_clock/repository/dds_test.py similarity index 
75% rename from artiq/examples/master/repository/coredevice_examples/simple/dds_test.py rename to artiq/examples/kc705_nist_clock/repository/dds_test.py index ec57f9e52..dceb10438 100644 --- a/artiq/examples/master/repository/coredevice_examples/simple/dds_test.py +++ b/artiq/examples/kc705_nist_clock/repository/dds_test.py @@ -6,10 +6,9 @@ class DDSTest(EnvExperiment): def build(self): self.setattr_device("core") - self.setattr_device("core_dds") - self.setattr_device("dds0") - self.setattr_device("dds1") - self.setattr_device("dds2") + self.dds0 = self.get_device("ad9914dds0") + self.dds1 = self.get_device("ad9914dds1") + self.dds2 = self.get_device("ad9914dds2") self.setattr_device("ttl0") self.setattr_device("ttl1") self.setattr_device("ttl2") @@ -19,9 +18,9 @@ class DDSTest(EnvExperiment): def run(self): self.core.reset() delay(200*us) - with self.core_dds.batch: - self.dds1.set(120*MHz) - self.dds2.set(200*MHz) + self.dds1.set(120*MHz) + delay(10*us) + self.dds2.set(200*MHz) delay(1*us) for i in range(10000): diff --git a/artiq/examples/master/repository/coredevice_examples/simple/dma_blink.py b/artiq/examples/kc705_nist_clock/repository/dma_blink.py similarity index 85% rename from artiq/examples/master/repository/coredevice_examples/simple/dma_blink.py rename to artiq/examples/kc705_nist_clock/repository/dma_blink.py index e2866483f..92c96327c 100644 --- a/artiq/examples/master/repository/coredevice_examples/simple/dma_blink.py +++ b/artiq/examples/kc705_nist_clock/repository/dma_blink.py @@ -21,6 +21,7 @@ class DMABlink(EnvExperiment): def run(self): self.core.reset() self.record() + handle = self.core_dma.get_handle("blink") self.core.break_realtime() for i in range(5): - self.core_dma.playback("blink") + self.core_dma.playback_handle(handle) diff --git a/artiq/examples/master/repository/coredevice_examples/simple/handover.py b/artiq/examples/kc705_nist_clock/repository/handover.py similarity index 100% rename from artiq/examples/master/repository/coredevice_examples/simple/handover.py rename to artiq/examples/kc705_nist_clock/repository/handover.py diff --git a/artiq/examples/master/repository/coredevice_examples/simple/mandelbrot.py b/artiq/examples/kc705_nist_clock/repository/mandelbrot.py similarity index 100% rename from artiq/examples/master/repository/coredevice_examples/simple/mandelbrot.py rename to artiq/examples/kc705_nist_clock/repository/mandelbrot.py diff --git a/artiq/examples/master/repository/coredevice_examples/photon_histogram.py b/artiq/examples/kc705_nist_clock/repository/photon_histogram.py similarity index 86% rename from artiq/examples/master/repository/coredevice_examples/photon_histogram.py rename to artiq/examples/kc705_nist_clock/repository/photon_histogram.py index f58a59a10..4a9166a8a 100644 --- a/artiq/examples/master/repository/coredevice_examples/photon_histogram.py +++ b/artiq/examples/kc705_nist_clock/repository/photon_histogram.py @@ -6,7 +6,6 @@ class PhotonHistogram(EnvExperiment): def build(self): self.setattr_device("core") - self.setattr_device("core_dds") self.setattr_device("bd_dds") self.setattr_device("bd_sw") self.setattr_device("bdd_dds") @@ -22,9 +21,10 @@ class PhotonHistogram(EnvExperiment): @kernel def program_cooling(self): - with self.core_dds.batch: - self.bd_dds.set(200*MHz) - self.bdd_dds.set(300*MHz) + delay_mu(-self.bd_dds.set_duration_mu) + self.bd_dds.set(200*MHz) + delay_mu(self.bd_dds.set_duration_mu) + self.bdd_dds.set(300*MHz) @kernel def cool_detect(self): @@ -38,13 +38,13 @@ class PhotonHistogram(EnvExperiment): 
self.bd_dds.set(self.detect_f) with parallel: self.bd_sw.pulse(self.detect_t) - self.pmt.gate_rising(self.detect_t) + gate_end_mu = self.pmt.gate_rising(self.detect_t) self.program_cooling() self.bd_sw.on() self.bdd_sw.on() - return self.pmt.count() + return self.pmt.count(gate_end_mu) @kernel def run(self): diff --git a/artiq/examples/master/repository/speed_benchmark.py b/artiq/examples/kc705_nist_clock/repository/speed_benchmark.py similarity index 100% rename from artiq/examples/master/repository/speed_benchmark.py rename to artiq/examples/kc705_nist_clock/repository/speed_benchmark.py diff --git a/artiq/examples/master/repository/coredevice_examples/tdr.py b/artiq/examples/kc705_nist_clock/repository/tdr.py similarity index 94% rename from artiq/examples/master/repository/coredevice_examples/tdr.py rename to artiq/examples/kc705_nist_clock/repository/tdr.py index a32015a7b..21dc9338a 100644 --- a/artiq/examples/master/repository/coredevice_examples/tdr.py +++ b/artiq/examples/kc705_nist_clock/repository/tdr.py @@ -44,7 +44,7 @@ class TDR(EnvExperiment): try: self.many(n, self.core.seconds_to_mu(pulse)) except PulseNotReceivedError: - print("to few edges: cable too long or wiring bad") + print("too few edges: cable too long or wiring bad") else: print(self.t) t_rise = mu_to_seconds(self.t[0], self.core)/n - latency @@ -66,8 +66,8 @@ class TDR(EnvExperiment): self.pmt0.gate_both_mu(2*p) self.ttl2.pulse_mu(p) for i in range(len(self.t)): - ti = self.pmt0.timestamp_mu() + ti = self.pmt0.timestamp_mu(now_mu()) if ti <= 0: raise PulseNotReceivedError() self.t[i] = int(self.t[i] + ti - t0) - self.pmt0.count() # flush + self.pmt0.count(now_mu()) # flush diff --git a/artiq/examples/master/repository/coredevice_examples/simple/ad5360.py b/artiq/examples/master/repository/coredevice_examples/simple/ad5360.py deleted file mode 100644 index 79d1f01c6..000000000 --- a/artiq/examples/master/repository/coredevice_examples/simple/ad5360.py +++ /dev/null @@ -1,21 +0,0 @@ -from artiq.experiment import * - - -class AD5360Test(EnvExperiment): - def build(self): - self.setattr_device("core") - self.setattr_device("fmcdio_dirctl") - self.dac = self.get_device("dac_zotino") - self.setattr_device("led") - - @kernel - def run(self): - self.core.reset() - delay(5*ms) # build slack for shift register set - self.fmcdio_dirctl.set(self, 0x00008800) - self.dac.setup_bus(write_div=30, read_div=40) - self.dac.write_offsets() - self.led.on() - delay(400*us) - self.led.off() - self.dac.set([i << 10 for i in range(32)]) diff --git a/artiq/examples/metlino_sayma_ttl/device_db.py b/artiq/examples/metlino_sayma_ttl/device_db.py new file mode 100644 index 000000000..c8c3acb8e --- /dev/null +++ b/artiq/examples/metlino_sayma_ttl/device_db.py @@ -0,0 +1,95 @@ +core_addr = "192.168.1.65" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)} + }, + "core_log": { + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + } +} + +# master peripherals +for i in range(4): + device_db["led" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": i}, +} + +# DEST#1 peripherals +amc_base = 0x070000 
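+# As in the other DRTIO examples (e.g. kasli_drtioswitching, where
+# channel = i << 16), the upper 16 bits of an RTIO channel number select
+# the DRTIO destination; amc_base = 0x070000 therefore addresses the
+# peripherals behind destination 7, which the demo experiment in this
+# example waits on before running.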
+rtm_base = 0x020000 + +for i in range(4): + device_db["led" + str(4+i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + i}, + } + +#DIO (EEM0) starting at RTIO channel 0x000056 +for i in range(8): + device_db["ttl" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + 0x000056 + i}, + } + +#DIO (EEM1) starting at RTIO channel 0x00005e +for i in range(8): + device_db["ttl" + str(8+i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + 0x00005e + i}, + } + +device_db["fmcdio_dirctl_clk"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + 0x000066} +} + +device_db["fmcdio_dirctl_ser"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + 0x000067} +} + +device_db["fmcdio_dirctl_latch"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": amc_base + 0x000068} +} + +device_db["fmcdio_dirctl"] = { + "type": "local", + "module": "artiq.coredevice.shiftreg", + "class": "ShiftReg", + "arguments": {"clk": "fmcdio_dirctl_clk", + "ser": "fmcdio_dirctl_ser", + "latch": "fmcdio_dirctl_latch"} +} diff --git a/artiq/examples/metlino_sayma_ttl/repository/demo.py b/artiq/examples/metlino_sayma_ttl/repository/demo.py new file mode 100644 index 000000000..bb273ce2c --- /dev/null +++ b/artiq/examples/metlino_sayma_ttl/repository/demo.py @@ -0,0 +1,129 @@ +import sys +import os +import select + +from artiq.experiment import * +from artiq.coredevice.fmcdio_vhdci_eem import * + + +def chunker(seq, size): + res = [] + for el in seq: + res.append(el) + if len(res) == size: + yield res + res = [] + if res: + yield res + + +def is_enter_pressed() -> TBool: + if os.name == "nt": + if msvcrt.kbhit() and msvcrt.getch() == b"\r": + return True + else: + return False + else: + if select.select([sys.stdin, ], [], [], 0.0)[0]: + sys.stdin.read(1) + return True + else: + return False + + +class Demo(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("fmcdio_dirctl") + + self.leds = dict() + self.ttl_outs = dict() + + ddb = self.get_device_db() + for name, desc in ddb.items(): + if isinstance(desc, dict) and desc["type"] == "local": + module, cls = desc["module"], desc["class"] + if (module, cls) == ("artiq.coredevice.ttl", "TTLOut"): + dev = self.get_device(name) + if "led" in name: # guess + self.leds[name] = dev + elif "ttl" in name: # to exclude fmcdio_dirctl + self.ttl_outs[name] = dev + + self.leds = sorted(self.leds.items(), key=lambda x: x[1].channel) + self.ttl_outs = sorted(self.ttl_outs.items(), key=lambda x: x[1].channel) + + self.dirctl_word = ( + shiftreg_bits(0, dio_bank0_out_pins | dio_bank1_out_pins) | + shiftreg_bits(1, dio_bank0_out_pins | dio_bank1_out_pins) + ) + + @kernel + def init(self): + self.core.break_realtime() + print("*** Waiting for DRTIO ready...") + drtio_indices = [7] + for i in drtio_indices: + while not self.drtio_is_up(i): + pass + + self.fmcdio_dirctl.set(self.dirctl_word) + + @kernel + def drtio_is_up(self, drtio_index): + if not self.core.get_rtio_destination_status(drtio_index): + return False + print("DRTIO #", drtio_index, "is ready\n") + return True + + @kernel + def test_led(self, led): + while not is_enter_pressed(): + self.core.break_realtime() + # do 
not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for i in range(3): + led.pulse(100*ms) + delay(100*ms) + + def test_leds(self): + print("*** Testing LEDs.") + print("Check for blinking. Press ENTER when done.") + + for led_name, led_dev in self.leds: + print("Testing LED: {}".format(led_name)) + self.test_led(led_dev) + + @kernel + def test_ttl_out_chunk(self, ttl_chunk): + while not is_enter_pressed(): + self.core.break_realtime() + for _ in range(50000): + i = 0 + for ttl in ttl_chunk: + i += 1 + for _ in range(i): + ttl.pulse(1*us) + delay(1*us) + delay(10*us) + + def test_ttl_outs(self): + print("*** Testing TTL outputs.") + print("Outputs are tested in groups of 4. Touch each TTL connector") + print("with the oscilloscope probe tip, and check that the number of") + print("pulses corresponds to its number in the group.") + print("Press ENTER when done.") + + for ttl_chunk in chunker(self.ttl_outs, 4): + print("Testing TTL outputs: {}.".format(", ".join(name for name, dev in ttl_chunk))) + self.test_ttl_out_chunk([dev for name, dev in ttl_chunk]) + + def run(self): + self.core.reset() + + if self.leds: + self.test_leds() + if self.ttl_outs: + self.test_ttl_outs() diff --git a/artiq/examples/sim/device_db.py b/artiq/examples/no_hardware/device_db.py similarity index 66% rename from artiq/examples/sim/device_db.py rename to artiq/examples/no_hardware/device_db.py index 2fe351a5b..6e3c29af4 100644 --- a/artiq/examples/sim/device_db.py +++ b/artiq/examples/no_hardware/device_db.py @@ -41,4 +41,27 @@ device_db = { "class": "WaveOutput", "arguments": {"name": "state_detection"} }, + + # Controllers + "lda": { + "type": "controller", + "best_effort": True, + "host": "::1", + "port": 3253, + "command": "aqctl_lda -p {port} --bind {bind} --simulation" + }, + + "camera_sim": { + "type": "controller", + "host": "::1", + "port": 6283, + "target_name": "camera_sim", + "command": "python3 -m artiq.examples.remote_exec_controller" + }, + "camera_sim_rexec": { + "type": "controller_aux_target", + "controller": "camera_sim", + "target_name": "camera_sim_rexec" + }, + } diff --git a/artiq/examples/sim/al_spectroscopy.py b/artiq/examples/no_hardware/repository/al_spectroscopy.py similarity index 90% rename from artiq/examples/sim/al_spectroscopy.py rename to artiq/examples/no_hardware/repository/al_spectroscopy.py index de1cc51cc..d48c2ead7 100644 --- a/artiq/examples/sim/al_spectroscopy.py +++ b/artiq/examples/no_hardware/repository/al_spectroscopy.py @@ -21,7 +21,7 @@ class AluminumSpectroscopy(EnvExperiment): state_0_count = 0 for count in range(100): self.mains_sync.gate_rising(1*s/60) - at_mu(self.mains_sync.timestamp_mu() + 100*us) + at_mu(self.mains_sync.timestamp_mu(now_mu()) + 100*us) delay(10*us) self.laser_cooling.pulse(100*MHz, 100*us) delay(5*us) @@ -35,8 +35,7 @@ class AluminumSpectroscopy(EnvExperiment): delay(5*us) with parallel: self.state_detection.pulse(100*MHz, 10*us) - self.pmt.gate_rising(10*us) - photon_count = self.pmt.count() + photon_count = self.pmt.count(self.pmt.gate_rising(10*us)) if (photon_count < self.photon_limit_low or photon_count > self.photon_limit_high): break diff --git a/artiq/examples/master/repository/arguments_demo.py b/artiq/examples/no_hardware/repository/arguments_demo.py similarity index 100% rename from artiq/examples/master/repository/arguments_demo.py rename to artiq/examples/no_hardware/repository/arguments_demo.py diff --git 
a/artiq/examples/master/repository/code_applet.py b/artiq/examples/no_hardware/repository/code_applet.py similarity index 94% rename from artiq/examples/master/repository/code_applet.py rename to artiq/examples/no_hardware/repository/code_applet.py index a1e5f2f56..4026cea55 100644 --- a/artiq/examples/master/repository/code_applet.py +++ b/artiq/examples/no_hardware/repository/code_applet.py @@ -23,7 +23,7 @@ class CreateCodeApplet(EnvExperiment): "code_applet_dataset", code=f.read(), group="autoapplet") for i in reversed(range(10)): self.set_dataset("code_applet_dataset", i, - broadcast=True, save=False) + broadcast=True, archive=False) time.sleep(1) self.ccb.issue("disable_applet", "code_applet_example", group="autoapplet") diff --git a/artiq/examples/master/repository/custom_applet.py b/artiq/examples/no_hardware/repository/custom_applet.py similarity index 100% rename from artiq/examples/master/repository/custom_applet.py rename to artiq/examples/no_hardware/repository/custom_applet.py diff --git a/artiq/examples/master/repository/flopping_f_simulation.py b/artiq/examples/no_hardware/repository/flopping_f_simulation.py similarity index 91% rename from artiq/examples/master/repository/flopping_f_simulation.py rename to artiq/examples/no_hardware/repository/flopping_f_simulation.py index c8f9b94ea..d79aa11ba 100644 --- a/artiq/examples/master/repository/flopping_f_simulation.py +++ b/artiq/examples/no_hardware/repository/flopping_f_simulation.py @@ -35,12 +35,12 @@ class FloppingF(EnvExperiment): l = len(self.frequency_scan) self.set_dataset("flopping_f_frequency", np.full(l, np.nan), - broadcast=True, save=False) + broadcast=True, archive=False) self.set_dataset("flopping_f_brightness", np.full(l, np.nan), broadcast=True) self.set_dataset("flopping_f_fit", np.full(l, np.nan), - broadcast=True, save=False) + broadcast=True, archive=False) self.ccb.issue("create_applet", "flopping_f", "${artiq_applet}plot_xy " @@ -66,14 +66,14 @@ class FloppingF(EnvExperiment): frequency = np.fromiter(self.frequency_scan, np.float) assert frequency.shape == brightness.shape self.set_dataset("flopping_f_frequency", frequency, - broadcast=True, save=False) + broadcast=True, archive=False) popt, pcov = curve_fit(model, frequency, brightness, p0=[self.get_dataset("flopping_freq", 1500.0, archive=False)]) perr = np.sqrt(np.diag(pcov)) if perr < 0.1: F0 = float(popt) - self.set_dataset("flopping_freq", F0, persist=True, save=False) + self.set_dataset("flopping_freq", F0, persist=True, archive=False) self.set_dataset("flopping_f_fit", np.array([model(x, F0) for x in frequency]), - broadcast=True, save=False) + broadcast=True, archive=False) diff --git a/artiq/examples/master/repository/histograms.py b/artiq/examples/no_hardware/repository/histograms.py similarity index 81% rename from artiq/examples/master/repository/histograms.py rename to artiq/examples/no_hardware/repository/histograms.py index c7a0d1a3c..8f12d8eb6 100644 --- a/artiq/examples/master/repository/histograms.py +++ b/artiq/examples/no_hardware/repository/histograms.py @@ -13,15 +13,15 @@ class Histograms(EnvExperiment): bin_boundaries = np.linspace(-10, 30, nbins + 1) self.set_dataset("hd_bins", bin_boundaries, - broadcast=True, save=False) + broadcast=True, archive=False) xs = np.empty(npoints) xs.fill(np.nan) self.set_dataset("hd_xs", xs, - broadcast=True, save=False) + broadcast=True, archive=False) self.set_dataset("hd_counts", np.empty((npoints, nbins)), - broadcast=True, save=False) + broadcast=True, archive=False) for i in range(npoints): 
histogram, _ = np.histogram(np.random.normal(i, size=1000), diff --git a/artiq/examples/master/repository/multi_scan.py b/artiq/examples/no_hardware/repository/multi_scan.py similarity index 100% rename from artiq/examples/master/repository/multi_scan.py rename to artiq/examples/no_hardware/repository/multi_scan.py diff --git a/artiq/examples/master/repository/remote_exec_demo.py b/artiq/examples/no_hardware/repository/remote_exec_demo.py similarity index 78% rename from artiq/examples/master/repository/remote_exec_demo.py rename to artiq/examples/no_hardware/repository/remote_exec_demo.py index a4028f764..f7998bd1d 100644 --- a/artiq/examples/master/repository/remote_exec_demo.py +++ b/artiq/examples/no_hardware/repository/remote_exec_demo.py @@ -1,8 +1,9 @@ import time import inspect +from sipyco.remote_exec import connect_global_rpc + from artiq.experiment import * -from artiq.protocols.remote_exec import connect_global_rpc import remote_exec_processing @@ -25,10 +26,10 @@ class RemoteExecDemo(EnvExperiment): def transfer_parameters(self, parameters): w, h, cx, cy = parameters - self.set_dataset("rexec_demo.gaussian_w", w, save=False, broadcast=True) - self.set_dataset("rexec_demo.gaussian_h", h, save=False, broadcast=True) - self.set_dataset("rexec_demo.gaussian_cx", cx, save=False, broadcast=True) - self.set_dataset("rexec_demo.gaussian_cy", cy, save=False, broadcast=True) + self.set_dataset("rexec_demo.gaussian_w", w, archive=False, broadcast=True) + self.set_dataset("rexec_demo.gaussian_h", h, archive=False, broadcast=True) + self.set_dataset("rexec_demo.gaussian_cx", cx, archive=False, broadcast=True) + self.set_dataset("rexec_demo.gaussian_cy", cy, archive=False, broadcast=True) def fps_meter(self): t = time.monotonic() @@ -37,7 +38,7 @@ class RemoteExecDemo(EnvExperiment): dt = t - self.last_pt_update if dt >= 5: pt = dt/self.iter_count - self.set_dataset("rexec_demo.picture_pt", pt, save=False, broadcast=True) + self.set_dataset("rexec_demo.picture_pt", pt, archive=False, broadcast=True) self.last_pt_update = t self.iter_count = 0 else: @@ -50,7 +51,7 @@ class RemoteExecDemo(EnvExperiment): data = self.camera_sim.get_picture() if self.show_picture: self.set_dataset("rexec_demo.picture", data, - save=False, broadcast=True) + archive=False, broadcast=True) if self.enable_fit: p = remote_exec_processing.fit(data, self.get_dataset) self.transfer_parameters(p) diff --git a/artiq/examples/master/repository/remote_exec_processing.py b/artiq/examples/no_hardware/repository/remote_exec_processing.py similarity index 100% rename from artiq/examples/master/repository/remote_exec_processing.py rename to artiq/examples/no_hardware/repository/remote_exec_processing.py diff --git a/artiq/examples/master/repository/run_forever.py b/artiq/examples/no_hardware/repository/run_forever.py similarity index 100% rename from artiq/examples/master/repository/run_forever.py rename to artiq/examples/no_hardware/repository/run_forever.py diff --git a/artiq/examples/sim/simple_simulation.py b/artiq/examples/no_hardware/repository/simple_simulation.py similarity index 100% rename from artiq/examples/sim/simple_simulation.py rename to artiq/examples/no_hardware/repository/simple_simulation.py diff --git a/artiq/examples/master/repository/utilities/terminate_all.py b/artiq/examples/no_hardware/repository/terminate_all.py similarity index 100% rename from artiq/examples/master/repository/utilities/terminate_all.py rename to artiq/examples/no_hardware/repository/terminate_all.py diff --git 
a/artiq/examples/master/repository/thumbnail.py b/artiq/examples/no_hardware/repository/thumbnail.py similarity index 100% rename from artiq/examples/master/repository/thumbnail.py rename to artiq/examples/no_hardware/repository/thumbnail.py diff --git a/artiq/examples/remote_exec_controller.py b/artiq/examples/remote_exec_controller.py index 421202414..7c97a09c6 100755 --- a/artiq/examples/remote_exec_controller.py +++ b/artiq/examples/remote_exec_controller.py @@ -4,7 +4,7 @@ import numpy as np from numba import jit import logging -from artiq.protocols.remote_exec import simple_rexec_server_loop +from sipyco.remote_exec import simple_rexec_server_loop @jit(nopython=True) diff --git a/artiq/examples/sayma/device_db.py b/artiq/examples/sayma/device_db.py deleted file mode 100644 index 01ef43913..000000000 --- a/artiq/examples/sayma/device_db.py +++ /dev/null @@ -1,125 +0,0 @@ -core_addr = "sayma1.lab.m-labs.hk" - -device_db = { - "core": { - "type": "local", - "module": "artiq.coredevice.core", - "class": "Core", - "arguments": {"host": core_addr, "ref_period": 1/(150e6)} - }, - "core_log": { - "type": "controller", - "host": "::1", - "port": 1068, - "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr - }, - "core_cache": { - "type": "local", - "module": "artiq.coredevice.cache", - "class": "CoreCache" - }, - - "converter_spi": { - "type": "local", - "module": "artiq.coredevice.spi", - "class": "NRTSPIMaster", - }, - "ad9154_spi0": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "converter_spi", "chip_select": 2} - }, - "ad9154_spi1": { - "type": "local", - "module": "artiq.coredevice.ad9154_spi", - "class": "AD9154", - "arguments": {"spi_device": "converter_spi", "chip_select": 3} - }, - - "led0": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 0} - }, - "led1": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 1} - }, - "led1": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 2} - }, - "led1": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 3} - }, - "ttl_sma_out": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 4} - }, - "ttl_sma_in": { - "type": "local", - "module": "artiq.coredevice.ttl", - "class": "TTLOut", - "arguments": {"channel": 5} - }, - - "sawg0": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 6, "parallelism": 4} - }, - "sawg1": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 16, "parallelism": 4} - }, - "sawg2": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 26, "parallelism": 4} - }, - "sawg3": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 36, "parallelism": 4} - }, - "sawg4": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 46, "parallelism": 4} - }, - "sawg5": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 56, "parallelism": 4} - }, - "sawg6": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 66, 
"parallelism": 4} - }, - "sawg7": { - "type": "local", - "module": "artiq.coredevice.sawg", - "class": "SAWG", - "arguments": {"channel_base": 76, "parallelism": 4} - }, -} diff --git a/artiq/examples/sayma/repository/blink_led.py b/artiq/examples/sayma/repository/blink_led.py deleted file mode 100644 index d0fc99cb8..000000000 --- a/artiq/examples/sayma/repository/blink_led.py +++ /dev/null @@ -1,16 +0,0 @@ -from artiq.experiment import * - - -class BlinkSaymaLED(EnvExperiment): - def build(self): - self.setattr_device("core") - self.setattr_device("led0") - - @kernel - def run(self): - self.core.reset() - while True: - for _ in range(3): - self.led0.pulse(100*ms) - delay(100*ms) - delay(500*ms) diff --git a/artiq/examples/sayma/repository/demo.py b/artiq/examples/sayma/repository/demo.py deleted file mode 100644 index f1d43fd4e..000000000 --- a/artiq/examples/sayma/repository/demo.py +++ /dev/null @@ -1,48 +0,0 @@ -from artiq.experiment import * - - -class SAWGTest(EnvExperiment): - def build(self): - self.setattr_device("core") - self.setattr_device("ttl_sma_out") - - self.setattr_device("sawg0") - self.setattr_device("sawg1") - self.setattr_device("sawg2") - self.setattr_device("sawg3") - - @kernel - def run(self): - self.core.reset() - - while True: - self.sawg0.amplitude1.set(0.) - self.sawg0.frequency0.set(0*MHz) - self.sawg1.amplitude1.set(0.) - self.sawg1.frequency0.set(0*MHz) - delay(20*ms) - - self.sawg0.amplitude1.set(.4) - self.sawg0.frequency0.set(10*MHz) - self.sawg0.phase0.set(0.) - self.sawg1.amplitude1.set(.4) - self.sawg1.frequency0.set(10*MHz) - self.sawg1.phase0.set(0.) - self.ttl_sma_out.pulse(200*ns) - self.sawg1.amplitude1.set(.1) - delay(200*ns) - self.sawg1.amplitude1.set(-.4) - self.ttl_sma_out.pulse(200*ns) - self.sawg1.amplitude1.set(.4) - delay(200*ns) - self.sawg1.phase0.set(.25) - self.ttl_sma_out.pulse(200*ns) - self.sawg1.phase0.set(.5) - delay(200*ns) - self.sawg0.phase0.set(.5) - self.ttl_sma_out.pulse(200*ns) - self.sawg1.frequency0.set(30*MHz) - delay(200*ns) - self.sawg1.frequency0.set(10*MHz) - self.sawg1.phase0.set(0.) - self.ttl_sma_out.pulse(200*ns) diff --git a/artiq/examples/sayma/repository/demo_2tone.py b/artiq/examples/sayma/repository/demo_2tone.py deleted file mode 100644 index 756b25c33..000000000 --- a/artiq/examples/sayma/repository/demo_2tone.py +++ /dev/null @@ -1,62 +0,0 @@ -from artiq.experiment import * - - -class SAWGTestTwoTone(EnvExperiment): - def build(self): - self.setattr_device("core") - self.setattr_device("led0") - self.setattr_device("ttl_sma_out") - - self.setattr_device("sawg0") - self.setattr_device("sawg1") - self.setattr_device("sawg2") - self.setattr_device("sawg3") - - @kernel - def run(self): - self.core.reset() - delay(1*ms) - - self.sawg0.reset() - self.sawg1.reset() - self.sawg2.reset() - self.sawg3.reset() - - self.sawg0.config.set_clr(1, 1, 1) - delay(10*us) - self.sawg0.config.set_out_max(1.) - delay(10*us) - self.sawg0.config.set_out_min(-1.) - delay(10*us) - - while True: - t_up = t_hold = t_down = 800*ns - a1 = .3 - a2 = .4 - order = 3 - - delay(20*ms) - self.led0.on() - self.ttl_sma_out.on() - self.sawg0.frequency0.set(10*MHz) - self.sawg0.phase0.set(0.) - self.sawg0.frequency1.set(1*MHz) - self.sawg0.phase1.set(0.) - self.sawg0.frequency2.set(9*MHz) - self.sawg0.phase2.set(0.) 
- with parallel: - self.sawg0.amplitude1.smooth(.0, a1, t_up, order) - self.sawg0.amplitude2.smooth(.0, a2, t_up, order) - self.sawg0.amplitude1.set(a1) - self.sawg0.amplitude2.set(a2) - delay(t_hold) - with parallel: - self.sawg0.amplitude1.smooth(a1, .0, t_down, order) - self.sawg0.amplitude2.smooth(a2, .0, t_down, order) - self.sawg0.amplitude1.set(.0) - self.sawg0.amplitude2.set(.0) - - self.sawg1.amplitude1.set(.0) - self.sawg1.amplitude2.set(.0) - self.ttl_sma_out.off() - self.led0.off() diff --git a/artiq/examples/sayma/repository/test_ad9154_status.py b/artiq/examples/sayma/repository/test_ad9154_status.py deleted file mode 100644 index fccebd615..000000000 --- a/artiq/examples/sayma/repository/test_ad9154_status.py +++ /dev/null @@ -1,145 +0,0 @@ -from artiq.coredevice.ad9154_reg import * -from artiq.experiment import * - - -class Test(EnvExperiment): - def build(self): - self.setattr_device("core") - self.ad9154_spi = self.get_device("ad9154_spi0") - - @kernel - def run(self): - self.ad9154_spi.setup_bus() - self.print_prodid() - self.print_status() - self.print_temp() - - def p(self, f, *a): - print(f % a) - - @kernel - def print_prodid(self): - self.p("PRODID: 0x%04x", (self.ad9154_spi.read(AD9154_PRODIDH) << 8) | - self.ad9154_spi.read(AD9154_PRODIDL)) - - @kernel - def print_temp(self): - self.ad9154_spi.write(AD9154_DIE_TEMP_CTRL0, AD9154_AUXADC_RESERVED_SET(0x10) | - AD9154_AUXADC_ENABLE_SET(1)) - self.ad9154_spi.write(AD9154_DIE_TEMP_UPDATE, 1) - self.p("temp_code %d", self.ad9154_spi.read(AD9154_DIE_TEMP0) | - (self.ad9154_spi.read(AD9154_DIE_TEMP1) << 8)) - self.ad9154_spi.write(AD9154_DIE_TEMP_CTRL0, AD9154_AUXADC_RESERVED_SET(0x10) | - AD9154_AUXADC_ENABLE_SET(0)) - - @kernel - def print_status(self): - x = self.ad9154_spi.read(AD9154_IRQ_STATUS0) - self.p("LANEFIFOERR: %d, SERPLLLOCK: %d, SERPLLLOST: %d, " - "DACPLLLOCK: %d, DACPLLLOST: %d", - AD9154_LANEFIFOERR_GET(x), AD9154_SERPLLLOCK_GET(x), - AD9154_SERPLLLOST_GET(x), AD9154_DACPLLLOCK_GET(x), - AD9154_DACPLLLOST_GET(x)) - x = self.ad9154_spi.read(AD9154_IRQ_STATUS1) - self.p("PRBS0: %d, PRBS1: %d, PRBS2: %d, PRBS3: %d", - AD9154_PRBS0_GET(x), AD9154_PRBS1_GET(x), - AD9154_PRBS2_GET(x), AD9154_PRBS3_GET(x)) - x = self.ad9154_spi.read(AD9154_IRQ_STATUS2) - self.p("SYNC_TRIP0: %d, SYNC_WLIM0: %d, SYNC_ROTATE0: %d, " - "SYNC_LOCK0: %d, NCO_ALIGN0: %d, BLNKDONE0: %d, " - "PDPERR0: %d", - AD9154_SYNC_TRIP0_GET(x), AD9154_SYNC_WLIM0_GET(x), - AD9154_SYNC_ROTATE0_GET(x), AD9154_SYNC_LOCK0_GET(x), - AD9154_NCO_ALIGN0_GET(x), AD9154_BLNKDONE0_GET(x), - AD9154_PDPERR0_GET(x)) - x = self.ad9154_spi.read(AD9154_IRQ_STATUS3) - self.p("SYNC_TRIP1: %d, SYNC_WLIM1: %d, SYNC_ROTATE1: %d, " - "SYNC_LOCK1: %d, NCO_ALIGN1: %d, BLNKDONE1: %d, " - "PDPERR1: %d", - AD9154_SYNC_TRIP1_GET(x), AD9154_SYNC_WLIM1_GET(x), - AD9154_SYNC_ROTATE1_GET(x), AD9154_SYNC_LOCK1_GET(x), - AD9154_NCO_ALIGN1_GET(x), AD9154_BLNKDONE1_GET(x), - AD9154_PDPERR1_GET(x)) - x = self.ad9154_spi.read(AD9154_JESD_CHECKS) - self.p("ERR_INTSUPP: %d, ERR_SUBCLASS: %d, ERR_KUNSUPP: %d, " - "ERR_JESDBAD: %d, ERR_WINLIMIT: %d, ERR_DLYOVER: %d", - AD9154_ERR_INTSUPP_GET(x), AD9154_ERR_SUBCLASS_GET(x), - AD9154_ERR_KUNSUPP_GET(x), AD9154_ERR_JESDBAD_GET(x), - AD9154_ERR_WINLIMIT_GET(x), AD9154_ERR_DLYOVER_GET(x)) - - x = self.ad9154_spi.read(AD9154_DACPLLSTATUS) - self.p("DACPLL_LOCK: %d, VCO_CAL_PROGRESS: %d, CP_CAL_VALID: %d, " - "CP_OVERRANGE_L: %d, CP_OVERRANGE_H: %d", - AD9154_DACPLL_LOCK_GET(x), AD9154_VCO_CAL_PROGRESS_GET(x), - AD9154_CP_CAL_VALID_GET(x), 
AD9154_CP_OVERRANGE_L_GET(x), - AD9154_CP_OVERRANGE_H_GET(x)) - - x = self.ad9154_spi.read(AD9154_PLL_STATUS) - self.p("PLL_LOCK_RB: %d, CURRENTS_READY_RB: %d, " - "VCO_CAL_IN_PROGRESS_RB: %d, PLL_CAL_VALID_RB: %d, " - "PLL_OVERRANGE_L_RB: %d, PLL_OVERRANGE_H_RB: %d", - AD9154_SERDES_PLL_LOCK_RB_GET(x), - AD9154_SERDES_CURRENTS_READY_RB_GET(x), - AD9154_SERDES_VCO_CAL_IN_PROGRESS_RB_GET(x), - AD9154_SERDES_PLL_CAL_VALID_RB_GET(x), - AD9154_SERDES_PLL_OVERRANGE_L_RB_GET(x), - AD9154_SERDES_PLL_OVERRANGE_H_RB_GET(x)) - - self.p("CODEGRPSYNC: 0x%02x", self.ad9154_spi.read(AD9154_CODEGRPSYNCFLG)) - self.p("FRAMESYNC: 0x%02x", self.ad9154_spi.read(AD9154_FRAMESYNCFLG)) - self.p("GOODCHECKSUM: 0x%02x", self.ad9154_spi.read(AD9154_GOODCHKSUMFLG)) - self.p("INITIALLANESYNC: 0x%02x", self.ad9154_spi.read(AD9154_INITLANESYNCFLG)) - - x = self.ad9154_spi.read(AD9154_SYNC_CURRERR_H) - self.p("SYNC_CURRERR: 0x%04x", self.ad9154_spi.read(AD9154_SYNC_CURRERR_L) | - (AD9154_CURRERROR_H_GET(x) << 8)) - self.p("SYNC_CURROVER: %d, SYNC_CURRUNDER: %d", - AD9154_CURROVER_GET(x), AD9154_CURRUNDER_GET(x)) - x = self.ad9154_spi.read(AD9154_SYNC_LASTERR_H) - self.p("SYNC_LASTERR: 0x%04x", self.ad9154_spi.read(AD9154_SYNC_LASTERR_L) | - (AD9154_LASTERROR_H_GET(x) << 8)) - self.p("SYNC_LASTOVER: %d, SYNC_LASTUNDER: %d", - AD9154_LASTOVER_GET(x), AD9154_LASTUNDER_GET(x)) - x = self.ad9154_spi.read(AD9154_SYNC_STATUS) - self.p("SYNC_TRIP: %d, SYNC_WLIM: %d, SYNC_ROTATE: %d, " - "SYNC_LOCK: %d, SYNC_BUSY: %d", - AD9154_SYNC_TRIP_GET(x), AD9154_SYNC_WLIM_GET(x), - AD9154_SYNC_ROTATE_GET(x), AD9154_SYNC_LOCK_GET(x), - AD9154_SYNC_BUSY_GET(x)) - - self.p("LANE_FIFO_FULL: 0x%02x", self.ad9154_spi.read(AD9154_FIFO_STATUS_REG_0)) - self.p("LANE_FIFO_EMPTY: 0x%02x", self.ad9154_spi.read(AD9154_FIFO_STATUS_REG_1)) - self.p("DID_REG: 0x%02x", self.ad9154_spi.read(AD9154_DID_REG)) - self.p("BID_REG: 0x%02x", self.ad9154_spi.read(AD9154_BID_REG)) - self.p("SCR_L_REG: 0x%02x", self.ad9154_spi.read(AD9154_SCR_L_REG)) - self.p("F_REG: 0x%02x", self.ad9154_spi.read(AD9154_F_REG)) - self.p("K_REG: 0x%02x", self.ad9154_spi.read(AD9154_K_REG)) - self.p("M_REG: 0x%02x", self.ad9154_spi.read(AD9154_M_REG)) - self.p("CS_N_REG: 0x%02x", self.ad9154_spi.read(AD9154_CS_N_REG)) - self.p("NP_REG: 0x%02x", self.ad9154_spi.read(AD9154_NP_REG)) - self.p("S_REG: 0x%02x", self.ad9154_spi.read(AD9154_S_REG)) - self.p("HD_CF_REG: 0x%02x", self.ad9154_spi.read(AD9154_HD_CF_REG)) - self.p("RES1_REG: 0x%02x", self.ad9154_spi.read(AD9154_RES1_REG)) - self.p("RES2_REG: 0x%02x", self.ad9154_spi.read(AD9154_RES2_REG)) - self.p("LIDx_REG: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x", - self.ad9154_spi.read(AD9154_LID0_REG), self.ad9154_spi.read(AD9154_LID1_REG), - self.ad9154_spi.read(AD9154_LID2_REG), self.ad9154_spi.read(AD9154_LID3_REG), - self.ad9154_spi.read(AD9154_LID4_REG), self.ad9154_spi.read(AD9154_LID5_REG), - self.ad9154_spi.read(AD9154_LID6_REG), self.ad9154_spi.read(AD9154_LID7_REG)) - self.p("CHECKSUMx_REG: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x", - self.ad9154_spi.read(AD9154_CHECKSUM0_REG), self.ad9154_spi.read(AD9154_CHECKSUM1_REG), - self.ad9154_spi.read(AD9154_CHECKSUM2_REG), self.ad9154_spi.read(AD9154_CHECKSUM3_REG), - self.ad9154_spi.read(AD9154_CHECKSUM4_REG), self.ad9154_spi.read(AD9154_CHECKSUM5_REG), - self.ad9154_spi.read(AD9154_CHECKSUM6_REG), self.ad9154_spi.read(AD9154_CHECKSUM7_REG)) - self.p("COMPSUMx_REG: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x", - 
self.ad9154_spi.read(AD9154_COMPSUM0_REG), self.ad9154_spi.read(AD9154_COMPSUM1_REG), - self.ad9154_spi.read(AD9154_COMPSUM2_REG), self.ad9154_spi.read(AD9154_COMPSUM3_REG), - self.ad9154_spi.read(AD9154_COMPSUM4_REG), self.ad9154_spi.read(AD9154_COMPSUM5_REG), - self.ad9154_spi.read(AD9154_COMPSUM6_REG), self.ad9154_spi.read(AD9154_COMPSUM7_REG)) - self.p("BADDISPARITY: 0x%02x", self.ad9154_spi.read(AD9154_BADDISPARITY)) - self.p("NITDISPARITY: 0x%02x", self.ad9154_spi.read(AD9154_NIT_W)) - self.p("UNEXPECTEDCONTROL: 0x%02x", self.ad9154_spi.read(AD9154_UNEXPECTEDCONTROL_W)) - self.p("DYN_LINK_LATENCY_0: 0x%02x", - self.ad9154_spi.read(AD9154_DYN_LINK_LATENCY_0)) - self.p("DYN_LINK_LATENCY_1: 0x%02x", - self.ad9154_spi.read(AD9154_DYN_LINK_LATENCY_1)) diff --git a/artiq/examples/sayma_master/device_db.py b/artiq/examples/sayma_master/device_db.py new file mode 100644 index 000000000..51eede704 --- /dev/null +++ b/artiq/examples/sayma_master/device_db.py @@ -0,0 +1,166 @@ +core_addr = "192.168.1.60" + +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)} + }, + "core_log": { + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr + }, + "core_cache": { + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }, + "core_dma": { + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }, +} + +for i in range(4): + device_db["led" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": i}, + } + + +for i in range(2): + device_db["ttl" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLInOut", + "arguments": {"channel": 4 + i}, + } + + +device_db.update( + fmcdio_dirctl_clk={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 6} + }, + fmcdio_dirctl_ser={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 7} + }, + fmcdio_dirctl_latch={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 8} + }, + fmcdio_dirctl={ + "type": "local", + "module": "artiq.coredevice.shiftreg", + "class": "ShiftReg", + "arguments": {"clk": "fmcdio_dirctl_clk", + "ser": "fmcdio_dirctl_ser", + "latch": "fmcdio_dirctl_latch"} + } +) + +device_db.update( + spi_urukul0={ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 17} + }, + ttl_urukul0_io_update={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 18} + }, + ttl_urukul0_sw0={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 19} + }, + ttl_urukul0_sw1={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 20} + }, + ttl_urukul0_sw2={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 21} + }, + ttl_urukul0_sw3={ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 22} + }, + urukul0_cpld={ + "type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": { + "spi_device": "spi_urukul0", + "io_update_device": "ttl_urukul0_io_update", + "refclk": 125e6, + "clk_sel": 0 + 
} + } +) + +for i in range(4): + device_db["urukul0_ch" + str(i)] = { + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": { + "pll_n": 32, + "chip_select": 4 + i, + "cpld_device": "urukul0_cpld", + "sw_device": "ttl_urukul0_sw" + str(i) + } + } + + +device_db["spi_zotino0"] = { + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {"channel": 23} +} +device_db["ttl_zotino0_ldac"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 24} +} +device_db["ttl_zotino0_clr"] = { + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {"channel": 25} +} +device_db["zotino0"] = { + "type": "local", + "module": "artiq.coredevice.zotino", + "class": "Zotino", + "arguments": { + "spi_device": "spi_zotino0", + "ldac_device": "ttl_zotino0_ldac", + "clr_device": "ttl_zotino0_clr" + } +} diff --git a/artiq/examples/sayma_master/repository/demo.py b/artiq/examples/sayma_master/repository/demo.py new file mode 100644 index 000000000..9b40f4b42 --- /dev/null +++ b/artiq/examples/sayma_master/repository/demo.py @@ -0,0 +1,41 @@ +from artiq.experiment import * +from artiq.coredevice.fmcdio_vhdci_eem import * + + +class Demo(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("fmcdio_dirctl") + + self.ttls = [self.get_device("ttl" + str(i)) for i in range(8)] + self.setattr_device("urukul0_cpld") + self.urukul_chs = [self.get_device("urukul0_ch" + str(i)) for i in range(4)] + self.setattr_device("zotino0") + + self.dirctl_word = ( + shiftreg_bits(1, urukul_out_pins) | + shiftreg_bits(0, urukul_aux_out_pins) | + shiftreg_bits(2, dio_bank0_out_pins | dio_bank1_out_pins) | + shiftreg_bits(3, zotino_out_pins)) + + @kernel + def run(self): + self.core.reset() + delay(10*ms) + self.fmcdio_dirctl.set(self.dirctl_word) + delay(10*ms) + + self.urukul0_cpld.init() + delay(10*ms) + + self.zotino0.init() + delay(1*ms) + for i in range(32): + self.zotino0.write_dac(i, i/4) + delay(1*ms) + + while True: + for ttl in self.ttls: + ttl.pulse(100*ms) + for urukul_ch in self.urukul_chs: + urukul_ch.sw.pulse(100*ms) diff --git a/artiq/firmware/Cargo.lock b/artiq/firmware/Cargo.lock index 0cb9aef5b..2874347c8 100644 --- a/artiq/firmware/Cargo.lock +++ b/artiq/firmware/Cargo.lock @@ -1,51 +1,82 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
[[package]] name = "alloc_list" version = "0.0.0" -[[package]] -name = "alloc_stub" -version = "0.0.0" - -[[package]] -name = "amp" -version = "0.0.0" -dependencies = [ - "board 0.0.0", -] - [[package]] name = "bitflags" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "board" +name = "board_artiq" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "build_artiq 0.0.0", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "board_misoc 0.0.0", + "build_misoc 0.0.0", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "io 0.0.0", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proto_artiq 0.0.0", ] [[package]] -name = "build_artiq" +name = "board_misoc" version = "0.0.0" dependencies = [ - "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "build_misoc 0.0.0", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smoltcp 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bootloader" +version = "0.0.0" +dependencies = [ + "board_misoc 0.0.0", + "build_misoc 0.0.0", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "smoltcp 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "build_const" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "build_misoc" +version = "0.0.0" + [[package]] name = "byteorder" -version = "1.2.1" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "compiler_builtins" -version = "0.1.0" -source = "git+https://github.com/rust-lang-nursery/compiler-builtins?rev=631b568#631b5687b24af413fdbffa4c2644484e60660b00" +name = "cc" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crc" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-cfg 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -53,194 +84,210 @@ name = "cslice" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "drtioaux" -version = "0.0.0" -dependencies = [ - "board 0.0.0", - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "std_artiq 0.0.0", -] - [[package]] name = "dyld" version = "0.0.0" [[package]] -name = "fringe" -version = "1.1.0" -source = 
"git+https://github.com/m-labs/libfringe?rev=bd23494#bd2349467157969324ca7da5d2ae033c7ffac0c0" +name = "eh" +version = "0.0.0" dependencies = [ - "libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", + "cslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "gcc" -version = "0.3.54" +name = "failure" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "kernel32-sys" -version = "0.2.2" +name = "failure_derive" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fringe" +version = "1.1.0" +source = "git+https://github.com/m-labs/libfringe?rev=b8a6d8f#b8a6d8f68df0edaa3d67d9f3b7b62af9d3bb64a5" +dependencies = [ + "libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "io" +version = "0.0.0" +dependencies = [ + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ksupport" version = "0.0.0" dependencies = [ - "alloc_stub 0.0.0", - "amp 0.0.0", - "board 0.0.0", - "build_artiq 0.0.0", - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "board_artiq 0.0.0", + "board_misoc 0.0.0", + "build_misoc 0.0.0", "cslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "dyld 0.0.0", - "proto 0.0.0", - "std_artiq 0.0.0", + "eh 0.0.0", + "io 0.0.0", + "proto_artiq 0.0.0", ] [[package]] name = "libc" -version = "0.2.34" +version = "0.2.40" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "log" -version = "0.3.8" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "log_buffer" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "logger_artiq" version = "0.0.0" dependencies = [ - "board 0.0.0", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "log_buffer 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "board_misoc 0.0.0", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log_buffer 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "managed" -version = "0.4.0" -source = "git+https://github.com/m-labs/rust-managed.git?rev=629a6786a1cf1692015f464ed16c04eafa5cb8d1#629a6786a1cf1692015f464ed16c04eafa5cb8d1" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "proto" +name = "proto_artiq" version = "0.0.0" dependencies = [ - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "cslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "dyld 0.0.0", - "log 0.3.8 
(registry+https://github.com/rust-lang/crates.io-index)", - "std_artiq 0.0.0", + "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "io 0.0.0", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "runtime" version = "0.0.0" dependencies = [ "alloc_list 0.0.0", - "amp 0.0.0", - "board 0.0.0", - "build_artiq 0.0.0", - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.0 (git+https://github.com/rust-lang-nursery/compiler-builtins?rev=631b568)", + "board_artiq 0.0.0", + "board_misoc 0.0.0", + "build_misoc 0.0.0", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "cslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "drtioaux 0.0.0", - "fringe 1.1.0 (git+https://github.com/m-labs/libfringe?rev=bd23494)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "eh 0.0.0", + "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fringe 1.1.0 (git+https://github.com/m-labs/libfringe?rev=b8a6d8f)", + "io 0.0.0", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "logger_artiq 0.0.0", - "proto 0.0.0", - "smoltcp 0.4.0 (git+https://github.com/m-labs/smoltcp?rev=960b001)", - "std_artiq 0.0.0", -] - -[[package]] -name = "rustc-cfg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "same-file" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "managed 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proto_artiq 0.0.0", + "smoltcp 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unwind_backtrace 0.0.0", ] [[package]] name = "satman" version = "0.0.0" dependencies = [ - "alloc_list 0.0.0", - "board 0.0.0", - "build_artiq 0.0.0", - "compiler_builtins 0.1.0 (git+https://github.com/rust-lang-nursery/compiler-builtins?rev=631b568)", - "drtioaux 0.0.0", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "logger_artiq 0.0.0", - "std_artiq 0.0.0", + "board_artiq 0.0.0", + "board_misoc 0.0.0", + "build_misoc 0.0.0", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "smoltcp" -version = "0.4.0" -source = "git+https://github.com/m-labs/smoltcp?rev=960b001#960b0012a09d37dde1d86b28bb5531316f606bfd" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "managed 0.4.0 (git+https://github.com/m-labs/rust-managed.git?rev=629a6786a1cf1692015f464ed16c04eafa5cb8d1)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "managed 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "std_artiq" +name = "syn" +version = "0.11.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synstructure" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unwind_backtrace" version = "0.0.0" -[[package]] -name = "walkdir" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [metadata] -"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" -"checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23" -"checksum compiler_builtins 0.1.0 (git+https://github.com/rust-lang-nursery/compiler-builtins?rev=631b568)" = "" +"checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789" +"checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" +"checksum byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "74c0b906e9446b0a2e4f760cdb3fa4b2c48cdc6db8766a845c54b6ff063fd2e9" +"checksum cc 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0ebb87d1116151416c0cf66a0e3fb6430cccd120fd6300794b4dfaa050ac40ba" +"checksum cfg-if 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "405216fd8fe65f718daa7102ea808a946b6ce40c742998fbfd3463645552de18" +"checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" "checksum cslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0f8cb7306107e4b10e64994de6d3274bd08996a7c1322a27b86482392f96be0a" -"checksum fringe 1.1.0 (git+https://github.com/m-labs/libfringe?rev=bd23494)" = "" -"checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb" -"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum libc 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)" 
= "36fbc8a8929c632868295d0178dd8f63fc423fd7537ad0738372bd010b3ac9b0" -"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" -"checksum log_buffer 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ec57723b84bbe7bdf76aa93169c9b59e67473317c6de3a83cb2a0f8ccb2aa493" -"checksum managed 0.4.0 (git+https://github.com/m-labs/rust-managed.git?rev=629a6786a1cf1692015f464ed16c04eafa5cb8d1)" = "" -"checksum rustc-cfg 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56a596b5718bf5e059d59a30af12f7f462a152de147aa462b70892849ee18704" -"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" -"checksum smoltcp 0.4.0 (git+https://github.com/m-labs/smoltcp?rev=960b001)" = "" -"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff" -"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" -"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" +"checksum failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "934799b6c1de475a012a02dab0ace1ace43789ee4b99bcfbf1a2e3e8ced5de82" +"checksum failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c7cdda555bb90c9bb67a3b670a0f42de8e73f5981524123ad8578aafec8ddb8b" +"checksum fringe 1.1.0 (git+https://github.com/m-labs/libfringe?rev=b8a6d8f)" = "" +"checksum libc 0.2.40 (registry+https://github.com/rust-lang/crates.io-index)" = "6fd41f331ac7c5b8ac259b8bf82c75c0fb2e469bbf37d2becbba9a6a2221965b" +"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2" +"checksum log_buffer 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f033173c9486b7fe97a79c895c0a3483ae395ab6744c985d10078950e2492419" +"checksum managed 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba6713e624266d7600e9feae51b1926c6a6a6bebb18ec5a8e11a5f1d5661baba" +"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" +"checksum smoltcp 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0fe46639fd2ec79eadf8fe719f237a7a0bd4dac5d957f1ca5bbdbc1c3c39e53a" +"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +"checksum synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a761d12e6d8dcb4dcf952a7a89b475e3a9d69e4a69307e01a470977642914bd" +"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" diff --git a/artiq/firmware/Cargo.toml b/artiq/firmware/Cargo.toml index af13ba5ff..1d2f871f3 100644 --- a/artiq/firmware/Cargo.toml +++ b/artiq/firmware/Cargo.toml @@ -1,2 +1,7 @@ [workspace] -members = ["runtime", "ksupport", "satman"] +members = ["bootloader", "runtime", "ksupport", "satman"] + 
+[profile.dev] +incremental = false # incompatible with LTO +lto = true +debug = 2 diff --git a/artiq/firmware/bootloader/Cargo.toml b/artiq/firmware/bootloader/Cargo.toml new file mode 100644 index 000000000..e89902308 --- /dev/null +++ b/artiq/firmware/bootloader/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors = ["M-Labs"] +name = "bootloader" +version = "0.0.0" +build = "build.rs" + +[lib] +name = "bootloader" +crate-type = ["staticlib"] +path = "main.rs" + +[build-dependencies] +build_misoc = { path = "../libbuild_misoc" } + +[dependencies] +byteorder = { version = "1.0", default-features = false } +crc = { version = "1.7", default-features = false } +board_misoc = { path = "../libboard_misoc", features = ["uart_console", "smoltcp"] } +smoltcp = { version = "0.6.0", default-features = false, features = ["ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp"] } diff --git a/artiq/firmware/bootloader/Makefile b/artiq/firmware/bootloader/Makefile new file mode 100644 index 000000000..ee70ff22f --- /dev/null +++ b/artiq/firmware/bootloader/Makefile @@ -0,0 +1,17 @@ +include ../include/generated/variables.mak +include $(MISOC_DIRECTORY)/software/common.mak + +RUSTFLAGS += -Cpanic=abort + +all:: bootloader.bin + +.PHONY: $(RUSTOUT)/libbootloader.a +$(RUSTOUT)/libbootloader.a: + $(cargo) --manifest-path $(BOOTLOADER_DIRECTORY)/Cargo.toml + +bootloader.elf: $(RUSTOUT)/libbootloader.a + $(link) -T $(BOOTLOADER_DIRECTORY)/bootloader.ld + +%.bin: %.elf + $(objcopy) -O binary + $(MSCIMG) $@ diff --git a/artiq/firmware/bootloader/bootloader.ld b/artiq/firmware/bootloader/bootloader.ld new file mode 100644 index 000000000..22aea8cb6 --- /dev/null +++ b/artiq/firmware/bootloader/bootloader.ld @@ -0,0 +1,53 @@ +INCLUDE generated/output_format.ld +INCLUDE generated/regions.ld +ENTRY(_reset_handler) + +SECTIONS +{ + .vectors : + { + _begin = .; + *(.vectors) + } > rom + + .text : + { + *(.text .text.*) + } > rom + + /* + * The compiler_builtins crate includes some GOTPC relocations, which require a GOT symbol, + * but don't actually need a GOT. This really ought to be fixed on rustc level, but I'm afraid + * it will add further complications to our build system that aren't pulling their weight. + */ + _GLOBAL_OFFSET_TABLE_ = .; + + .rodata : + { + *(.rodata.*) + . = ALIGN(4); + _end = .; + } > rom + + .crc ALIGN(4) : + { + _crc = .; + . += 4; + } + + .bss : + { + _fbss = .; + *(.bss .bss.*) + . = ALIGN(4); + _ebss = .; + } > sram + + .stack : + { + /* Ensure we have a certain amount of space available for stack. */ + /*. = ORIGIN(sram) + LENGTH(sram) - 0x1a00; */ + . 
= ORIGIN(sram) + LENGTH(sram) - 4; + _fstack = .; + } > sram +} diff --git a/artiq/firmware/bootloader/build.rs b/artiq/firmware/bootloader/build.rs new file mode 100644 index 000000000..3548ea5ff --- /dev/null +++ b/artiq/firmware/bootloader/build.rs @@ -0,0 +1,5 @@ +extern crate build_misoc; + +fn main() { + build_misoc::cfg(); +} diff --git a/artiq/firmware/bootloader/main.rs b/artiq/firmware/bootloader/main.rs new file mode 100644 index 000000000..c698dee78 --- /dev/null +++ b/artiq/firmware/bootloader/main.rs @@ -0,0 +1,549 @@ +#![no_std] +#![feature(panic_implementation, panic_info_message)] + +extern crate crc; +extern crate byteorder; +extern crate smoltcp; +#[macro_use] +extern crate board_misoc; + +use core::{ptr, slice}; +use crc::crc32; +use byteorder::{ByteOrder, BigEndian}; +use board_misoc::{ident, cache, sdram, config, boot, mem as board_mem}; +#[cfg(has_slave_fpga_cfg)] +use board_misoc::slave_fpga; +#[cfg(has_ethmac)] +use board_misoc::{clock, ethmac, net_settings}; +use board_misoc::uart_console::Console; + +fn check_integrity() -> bool { + extern { + static _begin: u8; + static _end: u8; + static _crc: u32; + } + + unsafe { + let length = &_end as *const u8 as usize - + &_begin as *const u8 as usize; + let bootloader = slice::from_raw_parts(&_begin as *const u8, length); + crc32::checksum_ieee(bootloader) == _crc + } +} + +fn memory_test(total: &mut usize, wrong: &mut usize) -> bool { + const MEMORY: *mut u32 = board_mem::MAIN_RAM_BASE as *mut u32; + + *total = 0; + *wrong = 0; + + macro_rules! test { + ( + $prepare:stmt; + for $i:ident in ($range:expr) { + MEMORY[$index:expr] = $data:expr + } + ) => ({ + $prepare; + for $i in $range { + unsafe { ptr::write_volatile(MEMORY.offset($index as isize), $data) }; + *total += 1; + } + + cache::flush_cpu_dcache(); + cache::flush_l2_cache(); + + $prepare; + for $i in $range { + if unsafe { ptr::read_volatile(MEMORY.offset($index as isize)) } != $data { + *wrong += 1; + } + } + }) + } + + fn prng32(seed: &mut u32) -> u32 { *seed = 1664525 * *seed + 1013904223; *seed } + fn prng16(seed: &mut u16) -> u16 { *seed = 25173 * *seed + 13849; *seed } + + for _ in 0..4 { + // Test data bus + test!((); for i in (0..0x100) { MEMORY[i] = 0xAAAAAAAA }); + test!((); for i in (0..0x100) { MEMORY[i] = 0x55555555 }); + + // Test counter addressing with random data + test!(let mut seed = 0; + for i in (0..0x100000) { MEMORY[i] = prng32(&mut seed) }); + + // Test random addressing with counter data + test!(let mut seed = 0; + for i in (0..0x10000) { MEMORY[prng16(&mut seed)] = i }); + } + *wrong == 0 +} + +fn startup() -> bool { + if check_integrity() { + println!("Bootloader CRC passed"); + } else { + println!("Bootloader CRC failed"); + return false + } + + println!("Gateware ident {}", ident::read(&mut [0; 64])); + + println!("Initializing SDRAM..."); + + if unsafe { sdram::init(Some(&mut Console)) } { + println!("SDRAM initialized"); + } else { + println!("SDRAM initialization failed"); + return false + } + + let (mut total, mut wrong) = (0, 0); + if memory_test(&mut total, &mut wrong) { + println!("Memory test passed"); + } else { + println!("Memory test failed ({}/{} words incorrect)", wrong, total); + return false + } + + true +} + +#[cfg(has_slave_fpga_cfg)] +fn load_slave_fpga() { + println!("Loading slave FPGA gateware..."); + + const GATEWARE: *mut u8 = board_misoc::csr::CONFIG_SLAVE_FPGA_GATEWARE as *mut u8; + + let header = unsafe { slice::from_raw_parts(GATEWARE, 8) }; + let magic = BigEndian::read_u32(&header[0..]); + let length = 
BigEndian::read_u32(&header[4..]) as usize; + println!(" magic: 0x{:08x}, length: 0x{:08x}", magic, length); + if magic != 0x5352544d { + println!(" ...Error: bad magic"); + return + } + if length > 0x220000 { + println!(" ...Error: too long (corrupted?)"); + return + } + let payload = unsafe { slice::from_raw_parts(GATEWARE.offset(8), length) }; + + if let Err(e) = slave_fpga::prepare() { + println!(" ...Error during preparation: {}", e); + return + } + if let Err(e) = slave_fpga::input(payload) { + println!(" ...Error during loading: {}", e); + return + } + if let Err(e) = slave_fpga::startup() { + println!(" ...Error during startup: {}", e); + return + } + + println!(" ...done"); +} + +fn flash_boot() { + const FIRMWARE: *mut u8 = board_mem::FLASH_BOOT_ADDRESS as *mut u8; + const MAIN_RAM: *mut u8 = board_mem::MAIN_RAM_BASE as *mut u8; + + println!("Booting from flash..."); + + let header = unsafe { slice::from_raw_parts(FIRMWARE, 8) }; + let length = BigEndian::read_u32(&header[0..]) as usize; + let expected_crc = BigEndian::read_u32(&header[4..]); + + if length == 0 || length == 0xffffffff { + println!("No firmware present"); + return + } else if length > 4 * 1024 * 1024 { + println!("Firmware too large (is it corrupted?)"); + return + } + + let firmware_in_flash = unsafe { slice::from_raw_parts(FIRMWARE.offset(8), length) }; + let actual_crc_flash = crc32::checksum_ieee(firmware_in_flash); + + if actual_crc_flash == expected_crc { + let firmware_in_sdram = unsafe { slice::from_raw_parts_mut(MAIN_RAM, length) }; + firmware_in_sdram.copy_from_slice(firmware_in_flash); + + let actual_crc_sdram = crc32::checksum_ieee(firmware_in_sdram); + if actual_crc_sdram == expected_crc { + println!("Starting firmware."); + unsafe { boot::jump(MAIN_RAM as usize) } + } else { + println!("Firmware CRC failed in SDRAM (actual {:08x}, expected {:08x})", + actual_crc_sdram, expected_crc); + } + } else { + println!("Firmware CRC failed in flash (actual {:08x}, expected {:08x})", + actual_crc_flash, expected_crc); + } +} + +#[cfg(has_ethmac)] +enum NetConnState { + WaitCommand, + FirmwareLength(usize, u8), + FirmwareDownload(usize, usize), + FirmwareWaitO, + FirmwareWaitK, + #[cfg(has_slave_fpga_cfg)] + GatewareLength(usize, u8), + #[cfg(has_slave_fpga_cfg)] + GatewareDownload(usize, usize), + #[cfg(has_slave_fpga_cfg)] + GatewareWaitO, + #[cfg(has_slave_fpga_cfg)] + GatewareWaitK +} + +#[cfg(has_ethmac)] +struct NetConn { + state: NetConnState, + firmware_downloaded: bool +} + +#[cfg(has_ethmac)] +impl NetConn { + pub fn new() -> NetConn { + NetConn { + state: NetConnState::WaitCommand, + firmware_downloaded: false + } + } + + pub fn reset(&mut self) { + self.state = NetConnState::WaitCommand; + self.firmware_downloaded = false; + } + + // buf must contain at least one byte + // this function must consume at least one byte + fn input_partial(&mut self, buf: &[u8], mut boot_callback: impl FnMut()) -> Result { + match self.state { + NetConnState::WaitCommand => { + match buf[0] { + b'F' => { + println!("Received firmware load command"); + self.state = NetConnState::FirmwareLength(0, 0); + Ok(1) + }, + #[cfg(has_slave_fpga_cfg)] + b'G' => { + println!("Received gateware load command"); + self.state = NetConnState::GatewareLength(0, 0); + Ok(1) + } + b'B' => { + if self.firmware_downloaded { + println!("Received boot command"); + boot_callback(); + self.state = NetConnState::WaitCommand; + Ok(1) + } else { + println!("Received boot command, but no firmware downloaded"); + Err(()) + } + }, + _ => { + 
println!("Received unknown netboot command: 0x{:02x}", buf[0]); + Err(()) + } + } + }, + + NetConnState::FirmwareLength(firmware_length, recv_bytes) => { + let firmware_length = (firmware_length << 8) | (buf[0] as usize); + let recv_bytes = recv_bytes + 1; + if recv_bytes == 4 { + self.state = NetConnState::FirmwareDownload(firmware_length, 0); + } else { + self.state = NetConnState::FirmwareLength(firmware_length, recv_bytes); + } + Ok(1) + }, + NetConnState::FirmwareDownload(firmware_length, recv_bytes) => { + let max_length = firmware_length - recv_bytes; + let buf = if buf.len() > max_length { + &buf[..max_length] + } else { + &buf[..] + }; + let length = buf.len(); + + let firmware_in_sdram = unsafe { slice::from_raw_parts_mut((board_mem::MAIN_RAM_BASE + recv_bytes) as *mut u8, length) }; + firmware_in_sdram.copy_from_slice(buf); + + let recv_bytes = recv_bytes + length; + if recv_bytes == firmware_length { + self.state = NetConnState::FirmwareWaitO; + Ok(length) + } else { + self.state = NetConnState::FirmwareDownload(firmware_length, recv_bytes); + Ok(length) + } + }, + NetConnState::FirmwareWaitO => { + if buf[0] == b'O' { + self.state = NetConnState::FirmwareWaitK; + Ok(1) + } else { + println!("End-of-firmware confirmation failed"); + Err(()) + } + }, + NetConnState::FirmwareWaitK => { + if buf[0] == b'K' { + println!("Firmware successfully downloaded"); + self.state = NetConnState::WaitCommand; + self.firmware_downloaded = true; + Ok(1) + } else { + println!("End-of-firmware confirmation failed"); + Err(()) + } + } + + #[cfg(has_slave_fpga_cfg)] + NetConnState::GatewareLength(gateware_length, recv_bytes) => { + let gateware_length = (gateware_length << 8) | (buf[0] as usize); + let recv_bytes = recv_bytes + 1; + if recv_bytes == 4 { + if let Err(e) = slave_fpga::prepare() { + println!(" Error during slave FPGA preparation: {}", e); + return Err(()) + } + self.state = NetConnState::GatewareDownload(gateware_length, 0); + } else { + self.state = NetConnState::GatewareLength(gateware_length, recv_bytes); + } + Ok(1) + }, + #[cfg(has_slave_fpga_cfg)] + NetConnState::GatewareDownload(gateware_length, recv_bytes) => { + let max_length = gateware_length - recv_bytes; + let buf = if buf.len() > max_length { + &buf[..max_length] + } else { + &buf[..] 
+ }; + let length = buf.len(); + + if let Err(e) = slave_fpga::input(buf) { + println!("Error during slave FPGA loading: {}", e); + return Err(()) + } + + let recv_bytes = recv_bytes + length; + if recv_bytes == gateware_length { + self.state = NetConnState::GatewareWaitO; + Ok(length) + } else { + self.state = NetConnState::GatewareDownload(gateware_length, recv_bytes); + Ok(length) + } + }, + #[cfg(has_slave_fpga_cfg)] + NetConnState::GatewareWaitO => { + if buf[0] == b'O' { + self.state = NetConnState::GatewareWaitK; + Ok(1) + } else { + println!("End-of-gateware confirmation failed"); + Err(()) + } + }, + #[cfg(has_slave_fpga_cfg)] + NetConnState::GatewareWaitK => { + if buf[0] == b'K' { + if let Err(e) = slave_fpga::startup() { + println!("Error during slave FPGA startup: {}", e); + return Err(()) + } + println!("Gateware successfully downloaded"); + self.state = NetConnState::WaitCommand; + Ok(1) + } else { + println!("End-of-gateware confirmation failed"); + Err(()) + } + } + } + } + + fn input(&mut self, buf: &[u8], mut boot_callback: impl FnMut()) -> Result<(), ()> { + let mut remaining = &buf[..]; + while !remaining.is_empty() { + let read_cnt = self.input_partial(remaining, &mut boot_callback)?; + remaining = &remaining[read_cnt..]; + } + Ok(()) + } +} + +#[cfg(has_ethmac)] +fn network_boot() { + use smoltcp::wire::IpCidr; + + println!("Initializing network..."); + + let mut net_device = unsafe { ethmac::EthernetDevice::new() }; + net_device.reset_phy_if_any(); + + let mut neighbor_map = [None; 2]; + let neighbor_cache = + smoltcp::iface::NeighborCache::new(&mut neighbor_map[..]); + let net_addresses = net_settings::get_adresses(); + println!("Network addresses: {}", net_addresses); + let mut ip_addrs = [ + IpCidr::new(net_addresses.ipv4_addr, 0), + IpCidr::new(net_addresses.ipv6_ll_addr, 0), + IpCidr::new(net_addresses.ipv6_ll_addr, 0) + ]; + let mut interface = match net_addresses.ipv6_addr { + Some(addr) => { + ip_addrs[2] = IpCidr::new(addr, 0); + smoltcp::iface::EthernetInterfaceBuilder::new(net_device) + .ethernet_addr(net_addresses.hardware_addr) + .ip_addrs(&mut ip_addrs[..]) + .neighbor_cache(neighbor_cache) + .finalize() + } + None => + smoltcp::iface::EthernetInterfaceBuilder::new(net_device) + .ethernet_addr(net_addresses.hardware_addr) + .ip_addrs(&mut ip_addrs[..2]) + .neighbor_cache(neighbor_cache) + .finalize() + }; + + let mut rx_storage = [0; 4096]; + let mut tx_storage = [0; 128]; + + let mut socket_set_entries: [_; 1] = Default::default(); + let mut sockets = + smoltcp::socket::SocketSet::new(&mut socket_set_entries[..]); + + let tcp_rx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut rx_storage[..]); + let tcp_tx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut tx_storage[..]); + let tcp_socket = smoltcp::socket::TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer); + let tcp_handle = sockets.add(tcp_socket); + + let mut net_conn = NetConn::new(); + let mut boot_time = None; + + println!("Waiting for connections..."); + + loop { + let timestamp = clock::get_ms() as i64; + { + let socket = &mut *sockets.get::(tcp_handle); + + match boot_time { + None => { + if !socket.is_open() { + socket.listen(4269).unwrap() // 0x10ad + } + + if socket.may_recv() { + if socket.recv(|data| { + (data.len(), net_conn.input(data, || { boot_time = Some(timestamp + 20); }).is_err()) + }).unwrap() { + net_conn.reset(); + socket.close(); + } + } else if socket.may_send() { + net_conn.reset(); + socket.close(); + } + }, + Some(boot_time) => { + if timestamp > boot_time { + 
println!("Starting firmware."); + unsafe { boot::jump(board_mem::MAIN_RAM_BASE) } + } + } + } + } + + match interface.poll(&mut sockets, smoltcp::time::Instant::from_millis(timestamp)) { + Ok(_) => (), + Err(smoltcp::Error::Unrecognized) => (), + Err(err) => println!("Network error: {}", err) + } + } +} + +#[no_mangle] +pub extern fn main() -> i32 { + println!(""); + println!(r" __ __ _ ____ ____ "); + println!(r"| \/ (_) ___| ___ / ___|"); + println!(r"| |\/| | \___ \ / _ \| | "); + println!(r"| | | | |___) | (_) | |___ "); + println!(r"|_| |_|_|____/ \___/ \____|"); + println!(""); + println!("MiSoC Bootloader"); + println!("Copyright (c) 2017-2020 M-Labs Limited"); + println!(""); + + #[cfg(has_ethmac)] + clock::init(); + + if startup() { + println!(""); + if !config::read_str("no_flash_boot", |r| r == Ok("1")) { + #[cfg(has_slave_fpga_cfg)] + load_slave_fpga(); + flash_boot(); + } else { + println!("Flash booting has been disabled."); + } + #[cfg(has_ethmac)] + network_boot(); + } else { + println!("Halting."); + } + + println!("No boot medium."); + loop {} +} + +#[no_mangle] +pub extern fn exception(vect: u32, _regs: *const u32, pc: u32, ea: u32) { + panic!("exception {} at PC {:#08x}, EA {:#08x}", vect, pc, ea) +} + +#[no_mangle] +pub extern fn abort() { + println!("aborted"); + loop {} +} + +#[no_mangle] // https://github.com/rust-lang/rust/issues/{38281,51647} +#[panic_implementation] +pub fn panic_fmt(info: &core::panic::PanicInfo) -> ! { + #[cfg(has_error_led)] + unsafe { + board_misoc::csr::error_led::out_write(1); + } + + if let Some(location) = info.location() { + print!("panic at {}:{}:{}", location.file(), location.line(), location.column()); + } else { + print!("panic at unknown location"); + } + if let Some(message) = info.message() { + println!(": {}", message); + } else { + println!(""); + } + loop {} +} diff --git a/artiq/firmware/cargosha256.nix b/artiq/firmware/cargosha256.nix new file mode 100644 index 000000000..2bf5725cf --- /dev/null +++ b/artiq/firmware/cargosha256.nix @@ -0,0 +1 @@ +"0ml6j4sxqrayqk25xkrikwg713mahfqa60nrx1jhrj8c2h3p07yk" diff --git a/artiq/firmware/ksupport/Cargo.toml b/artiq/firmware/ksupport/Cargo.toml index c785d0aa3..ba53a7c9a 100644 --- a/artiq/firmware/ksupport/Cargo.toml +++ b/artiq/firmware/ksupport/Cargo.toml @@ -10,14 +10,13 @@ path = "lib.rs" crate-type = ["staticlib"] [build-dependencies] -build_artiq = { path = "../libbuild_artiq" } +build_misoc = { path = "../libbuild_misoc" } [dependencies] -byteorder = { version = "1.0", default-features = false } cslice = { version = "0.3" } -alloc_stub = { path = "../liballoc_stub" } -std_artiq = { path = "../libstd_artiq" } +eh = { path = "../libeh" } +io = { path = "../libio", features = ["byteorder"] } dyld = { path = "../libdyld" } -board = { path = "../libboard" } -proto = { path = "../libproto" } -amp = { path = "../libamp" } +board_misoc = { path = "../libboard_misoc" } +board_artiq = { path = "../libboard_artiq" } +proto_artiq = { path = "../libproto_artiq" } diff --git a/artiq/firmware/ksupport/Makefile b/artiq/firmware/ksupport/Makefile index c45aea7a4..297bad7bd 100644 --- a/artiq/firmware/ksupport/Makefile +++ b/artiq/firmware/ksupport/Makefile @@ -5,32 +5,23 @@ CFLAGS += \ -I$(LIBUNWIND_DIRECTORY) \ -I$(LIBUNWIND_DIRECTORY)/../unwinder/include \ -I$(MISOC_DIRECTORY)/software/include/dyld -CFLAGS += -DNDEBUG LDFLAGS += --eh-frame-hdr \ - -L../libcompiler-rt \ - -L../libbase \ -L../libm \ + -L../libprintf \ -L../libunwind RUSTFLAGS += -Cpanic=unwind -all: ksupport.elf +all:: 
ksupport.elf .PHONY: $(RUSTOUT)/libksupport.a $(RUSTOUT)/libksupport.a: $(cargo) --manifest-path $(KSUPPORT_DIRECTORY)/Cargo.toml ksupport.elf: $(RUSTOUT)/libksupport.a glue.o - $(LD) $(LDFLAGS) -T $(KSUPPORT_DIRECTORY)/ksupport.ld -o $@ $^ \ - -lunwind -lcompiler-rt -lbase -lm - @chmod -x $@ + $(link) -T $(KSUPPORT_DIRECTORY)/ksupport.ld \ + -lunwind-elf -lprintf-float -lm %.o: $(KSUPPORT_DIRECTORY)/%.c $(compile) - -clean: - $(RM) *.o ksupport.elf - $(RM) -rf cargo - -.PHONY: all clean diff --git a/artiq/firmware/ksupport/api.rs b/artiq/firmware/ksupport/api.rs index 1cf3ce884..84e69dad5 100644 --- a/artiq/firmware/ksupport/api.rs +++ b/artiq/firmware/ksupport/api.rs @@ -1,3 +1,5 @@ +use board_misoc::csr; + macro_rules! api { ($i:ident) => ({ extern { static $i: u8; } @@ -31,8 +33,6 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(__ltdf2), api!(__nedf2), api!(__gtdf2), - api!(__negsf2), - api!(__negdf2), api!(__addsf3), api!(__subsf3), api!(__mulsf3), @@ -57,35 +57,72 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(__fixdfsi), api!(__fixdfdi), api!(__fixunsdfsi), - api!(__clzsi2), - api!(__ctzsi2), api!(__udivdi3), api!(__umoddi3), api!(__moddi3), api!(__powidf2), /* libc */ - api!(abort = ::abort), api!(memcmp, extern { fn memcmp(a: *const u8, b: *mut u8, size: usize); }), /* libm */ - api!(sqrt), - api!(round), + // commented out functions are not available with the libm used here, but are available in NAR3. + api!(acos), + api!(acosh), + api!(asin), + api!(asinh), + api!(atan), + api!(atan2), + api!(atanh), + api!(cbrt), + api!(ceil), + api!(copysign), + api!(cos), + api!(cosh), + api!(erf), + api!(erfc), + api!(exp), + //api!(exp2), + //api!(exp10), + api!(expm1), + api!(fabs), api!(floor), + // api!(fmax), + // api!(fmin), + //api!(fma), + api!(fmod), + api!(hypot), + api!(j0), + api!(j1), + api!(jn), + api!(lgamma), + api!(log), + //api!(log2), + api!(log10), + api!(nextafter), + api!(pow), + api!(round), + api!(sin), + api!(sinh), + api!(sqrt), + api!(tan), + api!(tanh), + //api!(tgamma), + //api!(trunc), + api!(y0), + api!(y1), + api!(yn), /* exceptions */ api!(_Unwind_Resume = ::unwind::_Unwind_Resume), - api!(__artiq_personality = ::eh::personality), - api!(__artiq_raise = ::eh::raise), - api!(__artiq_reraise = ::eh::reraise), + api!(__artiq_personality = ::eh_artiq::personality), + api!(__artiq_raise = ::eh_artiq::raise), + api!(__artiq_reraise = ::eh_artiq::reraise), /* proxified syscalls */ api!(core_log), - api!(now = &::NOW as *const _), - - api!(watchdog_set = ::watchdog_set), - api!(watchdog_clear = ::watchdog_clear), + api!(now = csr::rtio::NOW_HI_ADDR as *const _), api!(rpc_send = ::rpc_send), api!(rpc_send_async = ::rpc_send_async), @@ -94,17 +131,19 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(cache_get = ::cache_get), api!(cache_put = ::cache_put), - api!(mfspr = ::board::spr::mfspr), - api!(mtspr = ::board::spr::mtspr), + api!(mfspr = ::board_misoc::spr::mfspr), + api!(mtspr = ::board_misoc::spr::mtspr), /* direct syscalls */ api!(rtio_init = ::rtio::init), + api!(rtio_get_destination_status = ::rtio::get_destination_status), api!(rtio_get_counter = ::rtio::get_counter), api!(rtio_log), api!(rtio_output = ::rtio::output), api!(rtio_output_wide = ::rtio::output_wide), api!(rtio_input_timestamp = ::rtio::input_timestamp), api!(rtio_input_data = ::rtio::input_data), + api!(rtio_input_timestamped_data = ::rtio::input_timestamped_data), api!(dma_record_start = ::dma_record_start), api!(dma_record_stop = 
::dma_record_stop), @@ -112,12 +151,6 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(dma_retrieve = ::dma_retrieve), api!(dma_playback = ::dma_playback), - api!(drtio_get_channel_state = ::rtio::drtio_dbg::get_channel_state), - api!(drtio_reset_channel_state = ::rtio::drtio_dbg::reset_channel_state), - api!(drtio_get_fifo_space = ::rtio::drtio_dbg::get_fifo_space), - api!(drtio_get_packet_counts = ::rtio::drtio_dbg::get_packet_counts), - api!(drtio_get_fifo_space_req_count = ::rtio::drtio_dbg::get_fifo_space_req_count), - api!(i2c_start = ::nrt_bus::i2c::start), api!(i2c_restart = ::nrt_bus::i2c::restart), api!(i2c_stop = ::nrt_bus::i2c::stop), @@ -125,7 +158,6 @@ static mut API: &'static [(&'static str, *const ())] = &[ api!(i2c_read = ::nrt_bus::i2c::read), api!(spi_set_config = ::nrt_bus::spi::set_config), - api!(spi_set_xfer = ::nrt_bus::spi::set_xfer), api!(spi_write = ::nrt_bus::spi::write), api!(spi_read = ::nrt_bus::spi::read), ]; diff --git a/artiq/firmware/ksupport/build.rs b/artiq/firmware/ksupport/build.rs index 6f802af66..ab6acff39 100644 --- a/artiq/firmware/ksupport/build.rs +++ b/artiq/firmware/ksupport/build.rs @@ -1,6 +1,6 @@ -extern crate build_artiq; +extern crate build_misoc; fn main() { - build_artiq::misoc_cfg(); + build_misoc::cfg(); println!("cargo:rustc-cfg={}", "ksupport"); } diff --git a/artiq/firmware/ksupport/eh.rs b/artiq/firmware/ksupport/eh.rs deleted file mode 100644 index 6138969a4..000000000 --- a/artiq/firmware/ksupport/eh.rs +++ /dev/null @@ -1,451 +0,0 @@ -// Portions of the code in this file are derived from code by: -// -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -#![allow(non_upper_case_globals, non_camel_case_types, dead_code)] - -use core::{ptr, mem}; -use cslice::CSlice; -use unwind as uw; -use libc::{c_int, c_void}; - -type _Unwind_Stop_Fn = extern "C" fn(version: c_int, - actions: uw::_Unwind_Action, - exception_class: uw::_Unwind_Exception_Class, - exception_object: *mut uw::_Unwind_Exception, - context: *mut uw::_Unwind_Context, - stop_parameter: *mut c_void) - -> uw::_Unwind_Reason_Code; -extern { - fn _Unwind_ForcedUnwind(exception: *mut uw::_Unwind_Exception, - stop_fn: _Unwind_Stop_Fn, - stop_parameter: *mut c_void) -> uw::_Unwind_Reason_Code; -} - -const DW_EH_PE_omit: u8 = 0xFF; -const DW_EH_PE_absptr: u8 = 0x00; - -const DW_EH_PE_uleb128: u8 = 0x01; -const DW_EH_PE_udata2: u8 = 0x02; -const DW_EH_PE_udata4: u8 = 0x03; -const DW_EH_PE_udata8: u8 = 0x04; -const DW_EH_PE_sleb128: u8 = 0x09; -const DW_EH_PE_sdata2: u8 = 0x0A; -const DW_EH_PE_sdata4: u8 = 0x0B; -const DW_EH_PE_sdata8: u8 = 0x0C; - -const DW_EH_PE_pcrel: u8 = 0x10; -const DW_EH_PE_textrel: u8 = 0x20; -const DW_EH_PE_datarel: u8 = 0x30; -const DW_EH_PE_funcrel: u8 = 0x40; -const DW_EH_PE_aligned: u8 = 0x50; - -const DW_EH_PE_indirect: u8 = 0x80; - -#[derive(Clone)] -struct DwarfReader { - pub ptr: *const u8, -} - -impl DwarfReader { - fn new(ptr: *const u8) -> DwarfReader { - DwarfReader { ptr: ptr } - } - - // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned - // on a 4-byte boundary. This may cause problems on platforms with strict - // alignment requirements. By wrapping data in a "packed" struct, we are - // telling the backend to generate "misalignment-safe" code. 
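(The DwarfReader being removed above decodes ULEB128/SLEB128 values out of the packed LSDA byte stream. As a rough standalone sketch, not taken from this patch, the ULEB128 decoding it performs works as below; the byte sequence in the check is the worked example from the DWARF specification.)

// Sketch only: decode one ULEB128 value from a byte slice, returning the value
// and the number of bytes consumed. Each byte contributes its low 7 bits,
// least-significant group first; a set high bit marks continuation.
fn read_uleb128(bytes: &[u8]) -> (u64, usize) {
    let (mut result, mut shift, mut len) = (0u64, 0u32, 0usize);
    for &byte in bytes {
        result |= ((byte & 0x7F) as u64) << shift;
        shift += 7;
        len += 1;
        if byte & 0x80 == 0 {
            break;
        }
    }
    (result, len)
}

fn main() {
    // DWARF specification example: 0xE5 0x8E 0x26 encodes 624485.
    assert_eq!(read_uleb128(&[0xE5, 0x8E, 0x26]), (624485, 3));
}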
- unsafe fn read(&mut self) -> T { - let result = ptr::read_unaligned(self.ptr as *const T); - self.ptr = self.ptr.offset(mem::size_of::() as isize); - result - } - - // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable - // Length Data". - unsafe fn read_uleb128(&mut self) -> u64 { - let mut shift: usize = 0; - let mut result: u64 = 0; - let mut byte: u8; - loop { - byte = self.read::(); - result |= ((byte & 0x7F) as u64) << shift; - shift += 7; - if byte & 0x80 == 0 { - break; - } - } - result - } - - unsafe fn read_sleb128(&mut self) -> i64 { - let mut shift: usize = 0; - let mut result: u64 = 0; - let mut byte: u8; - loop { - byte = self.read::(); - result |= ((byte & 0x7F) as u64) << shift; - shift += 7; - if byte & 0x80 == 0 { - break; - } - } - // sign-extend - if shift < 8 * mem::size_of::() && (byte & 0x40) != 0 { - result |= (!0 as u64) << shift; - } - result as i64 - } - - unsafe fn read_encoded_pointer(&mut self, encoding: u8) -> usize { - fn round_up(unrounded: usize, align: usize) -> usize { - debug_assert!(align.is_power_of_two()); - (unrounded + align - 1) & !(align - 1) - } - - debug_assert!(encoding != DW_EH_PE_omit); - - // DW_EH_PE_aligned implies it's an absolute pointer value - if encoding == DW_EH_PE_aligned { - self.ptr = round_up(self.ptr as usize, mem::size_of::()) as *const u8; - return self.read::() - } - - let value_ptr = self.ptr; - let mut result = match encoding & 0x0F { - DW_EH_PE_absptr => self.read::(), - DW_EH_PE_uleb128 => self.read_uleb128() as usize, - DW_EH_PE_udata2 => self.read::() as usize, - DW_EH_PE_udata4 => self.read::() as usize, - DW_EH_PE_udata8 => self.read::() as usize, - DW_EH_PE_sleb128 => self.read_sleb128() as usize, - DW_EH_PE_sdata2 => self.read::() as usize, - DW_EH_PE_sdata4 => self.read::() as usize, - DW_EH_PE_sdata8 => self.read::() as usize, - _ => panic!(), - }; - - result += match encoding & 0x70 { - DW_EH_PE_absptr => 0, - // relative to address of the encoded value, despite the name - DW_EH_PE_pcrel => value_ptr as usize, - _ => panic!(), - }; - - if encoding & DW_EH_PE_indirect != 0 { - result = *(result as *const usize); - } - - result - } -} - -fn encoding_size(encoding: u8) -> usize { - if encoding == DW_EH_PE_omit { - return 0 - } - - match encoding & 0x0F { - DW_EH_PE_absptr => mem::size_of::(), - DW_EH_PE_udata2 => 2, - DW_EH_PE_udata4 => 4, - DW_EH_PE_udata8 => 8, - DW_EH_PE_sdata2 => 2, - DW_EH_PE_sdata4 => 4, - DW_EH_PE_sdata8 => 8, - _ => panic!() - } -} - -pub enum EHAction { - None, - Cleanup(usize), - Catch(usize), - Terminate, -} - -unsafe fn find_eh_action(lsda: *const u8, func_start: usize, ip: usize, - exn_name: CSlice) -> EHAction { - if lsda.is_null() { - return EHAction::None - } - - let mut reader = DwarfReader::new(lsda); - - let start_encoding = reader.read::(); - // base address for landing pad offsets - let lpad_base = if start_encoding != DW_EH_PE_omit { - reader.read_encoded_pointer(start_encoding) - } else { - func_start - }; - - let ttype_encoding = reader.read::(); - let ttype_encoding_size = encoding_size(ttype_encoding) as isize; - - let class_info; - if ttype_encoding != DW_EH_PE_omit { - let class_info_offset = reader.read_uleb128(); - class_info = reader.ptr.offset(class_info_offset as isize); - } else { - class_info = ptr::null(); - } - assert!(!class_info.is_null()); - - let call_site_encoding = reader.read::(); - let call_site_table_length = reader.read_uleb128(); - let action_table = reader.ptr.offset(call_site_table_length as isize); - - while reader.ptr < 
action_table { - let cs_start = reader.read_encoded_pointer(call_site_encoding); - let cs_len = reader.read_encoded_pointer(call_site_encoding); - let cs_lpad = reader.read_encoded_pointer(call_site_encoding); - let cs_action = reader.read_uleb128(); - - if ip < func_start + cs_start { - // Callsite table is sorted by cs_start, so if we've passed the ip, we - // may stop searching. - break - } - if ip > func_start + cs_start + cs_len { - continue - } - - if cs_lpad == 0 { - return EHAction::None - } - - let lpad = lpad_base + cs_lpad; - if cs_action == 0 { - return EHAction::Cleanup(lpad) - } - - let action_entry = action_table.offset((cs_action - 1) as isize); - let mut action_reader = DwarfReader::new(action_entry); - loop { - let type_info_offset = action_reader.read_sleb128() as isize; - let action_offset = action_reader.clone().read_sleb128() as isize; - assert!(type_info_offset >= 0); - - if type_info_offset > 0 { - let type_info_ptr_ptr = class_info.offset(-type_info_offset * ttype_encoding_size); - let type_info_ptr = DwarfReader::new(type_info_ptr_ptr) - .read_encoded_pointer(ttype_encoding); - let type_info = *(type_info_ptr as *const CSlice); - - if type_info.as_ref() == exn_name.as_ref() { - return EHAction::Catch(lpad) - } - - if type_info.len() == 0 { - // This is a catch-all clause. We don't compare type_info_ptr with null here - // because, in PIC mode, the OR1K LLVM backend emits a literal zero - // encoded with DW_EH_PE_pcrel, which of course doesn't result in - // a proper null pointer. - return EHAction::Catch(lpad) - } - } - - if action_offset == 0 { - break - } else { - action_reader.ptr = action_reader.ptr.offset(action_offset) - } - } - - return EHAction::None - } - - // the function has a personality but no landing pads; this is fine - EHAction::None -} - -#[repr(C)] -#[derive(Clone, Copy)] -pub struct Exception<'a> { - pub name: CSlice<'a, u8>, - pub file: CSlice<'a, u8>, - pub line: u32, - pub column: u32, - pub function: CSlice<'a, u8>, - pub message: CSlice<'a, u8>, - pub param: [i64; 3] -} - -const EXCEPTION_CLASS: uw::_Unwind_Exception_Class = 0x4d_4c_42_53_41_52_54_51; /* 'MLBSARTQ' */ - -const MAX_BACKTRACE_SIZE: usize = 128; - -#[repr(C)] -struct ExceptionInfo { - uw_exception: uw::_Unwind_Exception, - exception: Option>, - handled: bool, - backtrace: [usize; MAX_BACKTRACE_SIZE], - backtrace_size: usize -} - -#[cfg(target_arch = "x86_64")] -const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX - -#[cfg(any(target_arch = "or1k"))] -const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 - -#[export_name="__artiq_personality"] -pub extern fn personality(version: c_int, - actions: uw::_Unwind_Action, - uw_exception_class: uw::_Unwind_Exception_Class, - uw_exception: *mut uw::_Unwind_Exception, - context: *mut uw::_Unwind_Context) - -> uw::_Unwind_Reason_Code { - unsafe { - if version != 1 || uw_exception_class != EXCEPTION_CLASS { - return uw::_URC_FATAL_PHASE1_ERROR - } - - let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8; - let ip = uw::_Unwind_GetIP(context) - 1; - let func_start = uw::_Unwind_GetRegionStart(context); - - let exception_info = &mut *(uw_exception as *mut ExceptionInfo); - let exception = &exception_info.exception.unwrap(); - - let eh_action = find_eh_action(lsda, func_start, ip, exception.name); - if actions as u32 & uw::_UA_SEARCH_PHASE as u32 != 0 { - match eh_action { - EHAction::None | - EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND, - EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND, - 
EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR, - } - } else { - match eh_action { - EHAction::None => return uw::_URC_CONTINUE_UNWIND, - EHAction::Cleanup(lpad) | - EHAction::Catch(lpad) => { - if actions as u32 & uw::_UA_HANDLER_FRAME as u32 != 0 { - exception_info.handled = true - } - - // Pass a pair of the unwinder exception and ARTIQ exception - // (which immediately follows). - uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, - uw_exception as uw::_Unwind_Word); - uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, - exception as *const _ as uw::_Unwind_Word); - uw::_Unwind_SetIP(context, lpad); - return uw::_URC_INSTALL_CONTEXT; - } - EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR, - } - } - } -} - -extern fn cleanup(_unwind_code: uw::_Unwind_Reason_Code, - uw_exception: *mut uw::_Unwind_Exception) { - unsafe { - let exception_info = &mut *(uw_exception as *mut ExceptionInfo); - - exception_info.exception = None; - } -} - -extern fn uncaught_exception(_version: c_int, - actions: uw::_Unwind_Action, - _uw_exception_class: uw::_Unwind_Exception_Class, - uw_exception: *mut uw::_Unwind_Exception, - context: *mut uw::_Unwind_Context, - _stop_parameter: *mut c_void) - -> uw::_Unwind_Reason_Code { - unsafe { - let exception_info = &mut *(uw_exception as *mut ExceptionInfo); - - if exception_info.backtrace_size < exception_info.backtrace.len() { - let ip = uw::_Unwind_GetIP(context); - exception_info.backtrace[exception_info.backtrace_size] = ip; - exception_info.backtrace_size += 1; - } - - if actions as u32 & uw::_UA_END_OF_STACK as u32 != 0 { - ::terminate(&exception_info.exception.unwrap(), - exception_info.backtrace[..exception_info.backtrace_size].as_mut()) - } else { - uw::_URC_NO_REASON - } - } -} - -// We can unfortunately not use mem::zeroed in a static, so Option<> is used as a workaround. -// See https://github.com/rust-lang/rust/issues/39498. -static mut INFLIGHT: ExceptionInfo = ExceptionInfo { - uw_exception: uw::_Unwind_Exception { - exception_class: EXCEPTION_CLASS, - exception_cleanup: cleanup, - private: [0; uw::unwinder_private_data_size], - }, - exception: None, - handled: false, - backtrace: [0; MAX_BACKTRACE_SIZE], - backtrace_size: 0 -}; - -#[export_name="__artiq_raise"] -#[unwind] -pub unsafe extern fn raise(exception: *const Exception) -> ! { - // Zing! The Exception<'a> to Exception<'static> transmute is not really sound in case - // the exception is ever captured. Fortunately, they currently aren't, and we save - // on the hassle of having to allocate exceptions somewhere except on stack. - INFLIGHT.exception = Some(mem::transmute::>(*exception)); - INFLIGHT.handled = false; - - let result = uw::_Unwind_RaiseException(&mut INFLIGHT.uw_exception); - assert!(result == uw::_URC_END_OF_STACK); - - INFLIGHT.backtrace_size = 0; - let _result = _Unwind_ForcedUnwind(&mut INFLIGHT.uw_exception, - uncaught_exception, ptr::null_mut()); - unreachable!() -} - -#[export_name="__artiq_reraise"] -#[unwind] -pub unsafe extern fn reraise() -> ! { - if INFLIGHT.handled { - raise(&INFLIGHT.exception.unwrap()) - } else { - uw::_Unwind_Resume(&mut INFLIGHT.uw_exception) - } -} - -// Stub implementations for the functions the panic_unwind crate expects to be provided. -// These all do nothing in libunwind, but aren't built for OR1K. 
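The `DwarfReader` removed above walks the LSDA using the DWARF variable-length integer encodings its comment refers to (ULEB128/SLEB128, DWARF spec section 7.6, "Variable Length Data"). As a standalone illustration of that encoding only, here is a minimal, safe sketch of LEB128 decoding over a byte slice; the slice-based `read_uleb128`/`read_sleb128` helpers are hypothetical and merely stand in for the raw-pointer `DwarfReader` methods in the firmware.

```
// Minimal sketch of DWARF ULEB128/SLEB128 decoding over a byte slice.
// Each byte contributes 7 payload bits, LSB first; bit 0x80 marks continuation.
fn read_uleb128(input: &mut &[u8]) -> u64 {
    let mut result: u64 = 0;
    let mut shift = 0;
    loop {
        let byte = input[0];
        *input = &input[1..];
        result |= ((byte & 0x7f) as u64) << shift;
        shift += 7;
        if byte & 0x80 == 0 {
            return result;
        }
    }
}

fn read_sleb128(input: &mut &[u8]) -> i64 {
    let mut result: u64 = 0;
    let mut shift = 0;
    let mut byte: u8;
    loop {
        byte = input[0];
        *input = &input[1..];
        result |= ((byte & 0x7f) as u64) << shift;
        shift += 7;
        if byte & 0x80 == 0 {
            break;
        }
    }
    // Sign-extend when the last byte has its sign bit (0x40) set.
    if shift < 64 && (byte & 0x40) != 0 {
        result |= !0u64 << shift;
    }
    result as i64
}

fn main() {
    // 624485 encodes as [0xE5, 0x8E, 0x26]; -2 encodes as [0x7E].
    let unsigned_bytes = [0xE5u8, 0x8E, 0x26];
    let signed_bytes = [0x7Eu8];
    let mut unsigned: &[u8] = &unsigned_bytes;
    let mut signed: &[u8] = &signed_bytes;
    assert_eq!(read_uleb128(&mut unsigned), 624485);
    assert_eq!(read_sleb128(&mut signed), -2);
}
```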
-pub mod stubs { - #![allow(bad_style, unused_variables)] - - use super::{uw, c_int}; - - #[export_name="_Unwind_GetIPInfo"] - pub unsafe extern fn _Unwind_GetIPInfo(ctx: *mut uw::_Unwind_Context, - ip_before_insn: *mut c_int) -> uw::_Unwind_Word { - *ip_before_insn = 0; - uw::_Unwind_GetIP(ctx) - } - - #[export_name="_Unwind_GetTextRelBase"] - pub unsafe extern fn _Unwind_GetTextRelBase(ctx: *mut uw::_Unwind_Context) -> uw::_Unwind_Ptr { - unimplemented!() - } - - #[export_name="_Unwind_GetDataRelBase"] - pub unsafe extern fn _Unwind_GetDataRelBase(ctx: *mut uw::_Unwind_Context) -> uw::_Unwind_Ptr { - unimplemented!() - } -} diff --git a/artiq/firmware/ksupport/eh_artiq.rs b/artiq/firmware/ksupport/eh_artiq.rs new file mode 100644 index 000000000..8bfc171a0 --- /dev/null +++ b/artiq/firmware/ksupport/eh_artiq.rs @@ -0,0 +1,203 @@ +// Portions of the code in this file are derived from code by: +// +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![allow(private_no_mangle_fns, non_camel_case_types)] + +use core::{ptr, mem}; +use cslice::CSlice; +use unwind as uw; +use libc::{c_int, c_void}; + +use eh::dwarf::{self, EHAction}; + +type _Unwind_Stop_Fn = extern "C" fn(version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context, + stop_parameter: *mut c_void) + -> uw::_Unwind_Reason_Code; +extern { + fn _Unwind_ForcedUnwind(exception: *mut uw::_Unwind_Exception, + stop_fn: _Unwind_Stop_Fn, + stop_parameter: *mut c_void) -> uw::_Unwind_Reason_Code; +} + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct Exception<'a> { + pub name: CSlice<'a, u8>, + pub file: CSlice<'a, u8>, + pub line: u32, + pub column: u32, + pub function: CSlice<'a, u8>, + pub message: CSlice<'a, u8>, + pub param: [i64; 3] +} + +const EXCEPTION_CLASS: uw::_Unwind_Exception_Class = 0x4d_4c_42_53_41_52_54_51; /* 'MLBSARTQ' */ + +const MAX_BACKTRACE_SIZE: usize = 128; + +#[repr(C)] +struct ExceptionInfo { + uw_exception: uw::_Unwind_Exception, + exception: Option>, + handled: bool, + backtrace: [usize; MAX_BACKTRACE_SIZE], + backtrace_size: usize +} + +#[cfg(target_arch = "x86_64")] +const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX + +#[cfg(any(target_arch = "or1k"))] +const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 + +#[export_name="__artiq_personality"] +pub extern fn personality(version: c_int, + actions: uw::_Unwind_Action, + uw_exception_class: uw::_Unwind_Exception_Class, + uw_exception: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + unsafe { + if version != 1 || uw_exception_class != EXCEPTION_CLASS { + return uw::_URC_FATAL_PHASE1_ERROR + } + + let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8; + let ip = uw::_Unwind_GetIP(context) - 1; + let func_start = uw::_Unwind_GetRegionStart(context); + + let exception_info = &mut *(uw_exception as *mut ExceptionInfo); + let exception = &exception_info.exception.unwrap(); + + let eh_action = dwarf::find_eh_action(lsda, func_start, ip, exception.name); + if actions as u32 & uw::_UA_SEARCH_PHASE as u32 != 0 { + match eh_action { + EHAction::None | + EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND, + EHAction::Catch(_) => return 
uw::_URC_HANDLER_FOUND, + EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR, + } + } else { + match eh_action { + EHAction::None => return uw::_URC_CONTINUE_UNWIND, + EHAction::Cleanup(lpad) | + EHAction::Catch(lpad) => { + if actions as u32 & uw::_UA_HANDLER_FRAME as u32 != 0 { + exception_info.handled = true + } + + // Pass a pair of the unwinder exception and ARTIQ exception + // (which immediately follows). + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, + uw_exception as uw::_Unwind_Word); + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, + exception as *const _ as uw::_Unwind_Word); + uw::_Unwind_SetIP(context, lpad); + return uw::_URC_INSTALL_CONTEXT; + } + EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR, + } + } + } +} + +extern fn cleanup(_unwind_code: uw::_Unwind_Reason_Code, + uw_exception: *mut uw::_Unwind_Exception) { + unsafe { + let exception_info = &mut *(uw_exception as *mut ExceptionInfo); + + exception_info.exception = None; + } +} + +extern fn uncaught_exception(_version: c_int, + actions: uw::_Unwind_Action, + _uw_exception_class: uw::_Unwind_Exception_Class, + uw_exception: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context, + _stop_parameter: *mut c_void) + -> uw::_Unwind_Reason_Code { + unsafe { + let exception_info = &mut *(uw_exception as *mut ExceptionInfo); + + if exception_info.backtrace_size < exception_info.backtrace.len() { + let ip = uw::_Unwind_GetIP(context); + exception_info.backtrace[exception_info.backtrace_size] = ip; + exception_info.backtrace_size += 1; + } + + if actions as u32 & uw::_UA_END_OF_STACK as u32 != 0 { + ::terminate(&exception_info.exception.unwrap(), + exception_info.backtrace[..exception_info.backtrace_size].as_mut()) + } else { + uw::_URC_NO_REASON + } + } +} + +// We can unfortunately not use mem::zeroed in a static, so Option<> is used as a workaround. +// See https://github.com/rust-lang/rust/issues/39498. +static mut INFLIGHT: ExceptionInfo = ExceptionInfo { + uw_exception: uw::_Unwind_Exception { + exception_class: EXCEPTION_CLASS, + exception_cleanup: cleanup, + private: [0; uw::unwinder_private_data_size], + }, + exception: None, + handled: true, + backtrace: [0; MAX_BACKTRACE_SIZE], + backtrace_size: 0 +}; + +#[export_name="__artiq_raise"] +#[unwind(allowed)] +pub unsafe extern fn raise(exception: *const Exception) -> ! { + // Zing! The Exception<'a> to Exception<'static> transmute is not really sound in case + // the exception is ever captured. Fortunately, they currently aren't, and we save + // on the hassle of having to allocate exceptions somewhere except on stack. + INFLIGHT.exception = Some(mem::transmute::>(*exception)); + INFLIGHT.handled = false; + + let result = uw::_Unwind_RaiseException(&mut INFLIGHT.uw_exception); + assert!(result == uw::_URC_END_OF_STACK); + + INFLIGHT.backtrace_size = 0; + let _result = _Unwind_ForcedUnwind(&mut INFLIGHT.uw_exception, + uncaught_exception, ptr::null_mut()); + unreachable!() +} + +#[export_name="__artiq_reraise"] +#[unwind(allowed)] +pub unsafe extern fn reraise() -> ! 
{ + use cslice::AsCSlice; + + if INFLIGHT.handled { + match INFLIGHT.exception { + Some(ref exception) => raise(exception), + None => raise(&Exception { + name: "0:artiq.coredevice.exceptions.RuntimeError".as_c_slice(), + file: file!().as_c_slice(), + line: line!(), + column: column!(), + // https://github.com/rust-lang/rfcs/pull/1719 + function: "__artiq_reraise".as_c_slice(), + message: "No active exception to reraise".as_c_slice(), + param: [0, 0, 0] + }) + } + } else { + uw::_Unwind_Resume(&mut INFLIGHT.uw_exception) + } +} diff --git a/artiq/firmware/ksupport/glue.c b/artiq/firmware/ksupport/glue.c index 475e22a84..7f87d5e3c 100644 --- a/artiq/firmware/ksupport/glue.c +++ b/artiq/firmware/ksupport/glue.c @@ -12,13 +12,21 @@ struct slice { }; void send_to_core_log(struct slice str); -void send_to_rtio_log(long long int timestamp, struct slice data); +void send_to_rtio_log(struct slice data); #define KERNELCPU_EXEC_ADDRESS 0x40800000 -#define KERNELCPU_PAYLOAD_ADDRESS 0x40840000 +#define KERNELCPU_PAYLOAD_ADDRESS 0x40860000 #define KERNELCPU_LAST_ADDRESS 0x4fffffff #define KSUPPORT_HEADER_SIZE 0x80 +FILE *stderr; + +/* called by libunwind */ +char *getenv(const char *var) +{ + return NULL; +} + /* called by libunwind */ int fprintf(FILE *stream, const char *fmt, ...) { @@ -131,8 +139,8 @@ int core_log(const char *fmt, ...) } /* called by kernel */ -void rtio_log(long long int timestamp, const char *fmt, ...); -void rtio_log(long long int timestamp, const char *fmt, ...) +void rtio_log(const char *fmt, ...); +void rtio_log(const char *fmt, ...) { va_list args; @@ -146,5 +154,5 @@ void rtio_log(long long int timestamp, const char *fmt, ...) va_end(args); struct slice str = { buf, size }; - send_to_rtio_log(timestamp, str); + send_to_rtio_log(str); } diff --git a/artiq/firmware/ksupport/ksupport.ld b/artiq/firmware/ksupport/ksupport.ld index 333db0739..bc6a97ab1 100644 --- a/artiq/firmware/ksupport/ksupport.ld +++ b/artiq/firmware/ksupport/ksupport.ld @@ -1,23 +1,21 @@ INCLUDE generated/output_format.ld -STARTUP(crt0-or1k.o) -ENTRY(_start) - INCLUDE generated/regions.ld +ENTRY(_reset_handler) /* First 4M of main memory are reserved for runtime * code/data/heap, then comes kernel memory. * Next 4M of main memory are reserved for * the background RPC queue. - * First 256K of kernel memory are for support code. + * First 384K of kernel memory are for support code. * Support code is loaded at ORIGIN-0x80 so that ELF headers * are also loaded. */ MEMORY { - ksupport (RWX) : ORIGIN = 0x40800000, LENGTH = 0x40000 + ksupport (RWX) : ORIGIN = 0x40800000, LENGTH = 0x60000 } /* Kernel stack is at the end of main RAM. */ -PROVIDE(_fstack = ORIGIN(main_ram) + LENGTH(main_ram) - 4); +_fstack = ORIGIN(main_ram) + LENGTH(main_ram) - 4; /* Force ld to make the ELF header as loadable. */ PHDRS @@ -28,16 +26,18 @@ PHDRS SECTIONS { + .vectors : { + *(.vectors) + } :text + .text : { - _ftext = .; - *(.text .stub .text.* .gnu.linkonce.t.*) - _etext = .; + *(.text .text.*) } :text /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ .got : { - _GLOBAL_OFFSET_TABLE_ = .; + PROVIDE(_GLOBAL_OFFSET_TABLE_ = .); *(.got) } :text @@ -47,11 +47,7 @@ SECTIONS .rodata : { - . = ALIGN(4); - _frodata = .; - *(.rodata .rodata.* .gnu.linkonce.r.*) - *(.rodata1) - _erodata = .; + *(.rodata .rodata.*) } > ksupport .eh_frame : @@ -66,24 +62,13 @@ SECTIONS .data : { - . 
= ALIGN(4); - _fdata = .; - *(.data .data.* .gnu.linkonce.d.*) - *(.data1) - *(.sdata .sdata.* .gnu.linkonce.s.*) - _edata = .; + *(.data .data.*) } .bss : { - . = ALIGN(4); _fbss = .; - *(.dynsbss) - *(.sbss .sbss.* .gnu.linkonce.sb.*) - *(.scommon) - *(.dynbss) - *(.bss .bss.* .gnu.linkonce.b.*) - *(COMMON) + *(.bss .bss.*) . = ALIGN(4); _ebss = .; } diff --git a/artiq/firmware/ksupport/lib.rs b/artiq/firmware/ksupport/lib.rs index 1ce29e05b..5e42fd100 100644 --- a/artiq/firmware/ksupport/lib.rs +++ b/artiq/firmware/ksupport/lib.rs @@ -1,31 +1,27 @@ -#![feature(lang_items, asm, libc, panic_unwind, unwind_attributes, global_allocator)] +#![feature(lang_items, asm, panic_unwind, libc, unwind_attributes, + panic_implementation, panic_info_message, nll)] #![no_std] -extern crate unwind; extern crate libc; -extern crate byteorder; +extern crate unwind; extern crate cslice; -extern crate alloc_stub; -extern crate std_artiq as std; - -extern crate board; +extern crate eh; +extern crate io; extern crate dyld; -extern crate proto; -extern crate amp; +extern crate board_misoc; +extern crate board_artiq; +extern crate proto_artiq; use core::{mem, ptr, slice, str}; -use std::io::Cursor; use cslice::{CSlice, AsCSlice}; -use alloc_stub::StubAlloc; -use board::csr; +use io::Cursor; use dyld::Library; -use proto::{kernel_proto, rpc_proto}; -use proto::kernel_proto::*; -use amp::{mailbox, rpc_queue}; - -#[global_allocator] -static mut ALLOC: StubAlloc = StubAlloc; +use board_artiq::{mailbox, rpc_queue}; +use proto_artiq::{kernel_proto, rpc_proto}; +use kernel_proto::*; +#[cfg(has_rtio_dma)] +use board_misoc::csr; fn send(request: &Message) { unsafe { mailbox::send(request as *const _ as usize) } @@ -52,10 +48,20 @@ macro_rules! recv { } } -#[no_mangle] -#[lang = "panic_fmt"] -pub extern fn panic_fmt(args: core::fmt::Arguments, file: &'static str, line: u32) -> ! { - send(&Log(format_args!("panic at {}:{}: {}\n", file, line, args))); +#[no_mangle] // https://github.com/rust-lang/rust/issues/{38281,51647} +#[panic_implementation] +pub fn panic_fmt(info: &core::panic::PanicInfo) -> ! { + if let Some(location) = info.location() { + send(&Log(format_args!("panic at {}:{}:{}", + location.file(), location.line(), location.column()))); + } else { + send(&Log(format_args!("panic at unknown location"))); + } + if let Some(message) = info.message() { + send(&Log(format_args!(": {}\n", message))); + } else { + send(&Log(format_args!("\n"))); + } send(&RunAborted); loop {} } @@ -72,30 +78,29 @@ macro_rules! println { macro_rules! 
raise { ($name:expr, $message:expr, $param0:expr, $param1:expr, $param2:expr) => ({ use cslice::AsCSlice; - let exn = $crate::eh::Exception { - name: concat!("0:artiq.coredevice.exceptions.", $name).as_bytes().as_c_slice(), - file: file!().as_bytes().as_c_slice(), + let exn = $crate::eh_artiq::Exception { + name: concat!("0:artiq.coredevice.exceptions.", $name).as_c_slice(), + file: file!().as_c_slice(), line: line!(), column: column!(), // https://github.com/rust-lang/rfcs/pull/1719 - function: "(Rust function)".as_bytes().as_c_slice(), - message: $message.as_bytes().as_c_slice(), + function: "(Rust function)".as_c_slice(), + message: $message.as_c_slice(), param: [$param0, $param1, $param2] }; #[allow(unused_unsafe)] - unsafe { $crate::eh::raise(&exn) } + unsafe { $crate::eh_artiq::raise(&exn) } }); ($name:expr, $message:expr) => ({ raise!($name, $message, 0, 0, 0) }); } -pub mod eh; +mod eh_artiq; mod api; mod rtio; mod nrt_bus; -static mut NOW: u64 = 0; static mut LIBRARY: Option> = None; #[no_mangle] @@ -110,10 +115,11 @@ pub extern fn send_to_core_log(text: CSlice) { } #[no_mangle] -pub extern fn send_to_rtio_log(timestamp: i64, text: CSlice) { - rtio::log(timestamp, text.as_ref()) +pub extern fn send_to_rtio_log(text: CSlice) { + rtio::log(text.as_ref()) } +#[unwind(aborts)] extern fn rpc_send(service: u32, tag: CSlice, data: *const *const ()) { while !rpc_queue::empty() {} send(&RpcSend { @@ -124,6 +130,7 @@ extern fn rpc_send(service: u32, tag: CSlice, data: *const *const ()) { }) } +#[unwind(aborts)] extern fn rpc_send_async(service: u32, tag: CSlice, data: *const *const ()) { while rpc_queue::full() {} rpc_queue::enqueue(|mut slice| { @@ -132,9 +139,9 @@ extern fn rpc_send_async(service: u32, tag: CSlice, data: *const *const ()) rpc_proto::send_args(&mut writer, service, tag.as_ref(), data)?; writer.position() }; - proto::WriteExt::write_u32(&mut slice, length as u32) + io::ProtoWrite::write_u32(&mut slice, length as u32) }).unwrap_or_else(|err| { - assert!(err.kind() == std::io::ErrorKind::WriteZero); + assert!(err == io::Error::UnexpectedEnd); while !rpc_queue::empty() {} send(&RpcSend { @@ -146,6 +153,7 @@ extern fn rpc_send_async(service: u32, tag: CSlice, data: *const *const ()) }) } +#[unwind(allowed)] extern fn rpc_recv(slot: *mut ()) -> usize { send(&RpcRecvRequest(slot)); recv!(&RpcRecvReply(ref result) => { @@ -153,7 +161,7 @@ extern fn rpc_recv(slot: *mut ()) -> usize { &Ok(alloc_size) => alloc_size, &Err(ref exception) => unsafe { - eh::raise(&eh::Exception { + eh_artiq::raise(&eh_artiq::Exception { name: exception.name.as_bytes().as_c_slice(), file: exception.file.as_bytes().as_c_slice(), line: exception.line, @@ -167,7 +175,7 @@ extern fn rpc_recv(slot: *mut ()) -> usize { }) } -fn terminate(exception: &eh::Exception, mut backtrace: &mut [usize]) -> ! { +fn terminate(exception: &eh_artiq::Exception, backtrace: &mut [usize]) -> ! { let mut cursor = 0; for index in 0..backtrace.len() { if backtrace[index] > kernel_proto::KERNELCPU_PAYLOAD_ADDRESS { @@ -177,7 +185,6 @@ fn terminate(exception: &eh::Exception, mut backtrace: &mut [usize]) -> ! { } let backtrace = &mut backtrace.as_mut()[0..cursor]; - send(&NowSave(unsafe { NOW })); send(&RunException { exception: kernel_proto::Exception { name: str::from_utf8(exception.name.as_ref()).unwrap(), @@ -193,19 +200,7 @@ fn terminate(exception: &eh::Exception, mut backtrace: &mut [usize]) -> ! 
{ loop {} } -extern fn watchdog_set(ms: i64) -> i32 { - if ms < 0 { - raise!("ValueError", "cannot set a watchdog with a negative timeout") - } - - send(&WatchdogSetRequest { ms: ms as u64 }); - recv!(&WatchdogSetReply { id } => id) as i32 -} - -extern fn watchdog_clear(id: i32) { - send(&WatchdogClear { id: id as usize }) -} - +#[unwind(aborts)] extern fn cache_get(key: CSlice) -> CSlice<'static, i32> { send(&CacheGetRequest { key: str::from_utf8(key.as_ref()).unwrap() @@ -213,6 +208,7 @@ extern fn cache_get(key: CSlice) -> CSlice<'static, i32> { recv!(&CacheGetReply { value } => value.as_c_slice()) } +#[unwind(allowed)] extern fn cache_put(key: CSlice, list: CSlice) { send(&CachePutRequest { key: str::from_utf8(key.as_ref()).unwrap(), @@ -229,15 +225,12 @@ const DMA_BUFFER_SIZE: usize = 64 * 1024; struct DmaRecorder { active: bool, - #[allow(dead_code)] - padding: [u8; 3], //https://github.com/rust-lang/rust/issues/41315 data_len: usize, buffer: [u8; DMA_BUFFER_SIZE], } static mut DMA_RECORDER: DmaRecorder = DmaRecorder { active: false, - padding: [0; 3], data_len: 0, buffer: [0; DMA_BUFFER_SIZE], }; @@ -249,6 +242,7 @@ fn dma_record_flush() { } } +#[unwind(allowed)] extern fn dma_record_start(name: CSlice) { let name = str::from_utf8(name.as_ref()).unwrap(); @@ -268,6 +262,7 @@ extern fn dma_record_start(name: CSlice) { } } +#[unwind(allowed)] extern fn dma_record_stop(duration: i64) { unsafe { dma_record_flush(); @@ -289,22 +284,29 @@ extern fn dma_record_stop(duration: i64) { } } -extern fn dma_record_output(timestamp: i64, channel: i32, address: i32, word: i32) { - dma_record_output_wide(timestamp, channel, address, [word].as_c_slice()) -} - -extern fn dma_record_output_wide(timestamp: i64, channel: i32, address: i32, words: CSlice) { - assert!(words.len() <= 16); // enforce the hardware limit - +#[unwind(aborts)] +#[inline(always)] +unsafe fn dma_record_output_prepare(timestamp: i64, target: i32, + words: usize) -> &'static mut [u8] { // See gateware/rtio/dma.py. - let header_length = /*length*/1 + /*channel*/3 + /*timestamp*/8 + /*address*/2; - let length = header_length + /*data*/words.len() * 4; + const HEADER_LENGTH: usize = /*length*/1 + /*channel*/3 + /*timestamp*/8 + /*address*/1; + let length = HEADER_LENGTH + /*data*/words * 4; - let header = [ + if DMA_RECORDER.buffer.len() - DMA_RECORDER.data_len < length { + dma_record_flush() + } + + let record = &mut DMA_RECORDER.buffer[DMA_RECORDER.data_len.. 
+ DMA_RECORDER.data_len + length]; + DMA_RECORDER.data_len += length; + + let (header, data) = record.split_at_mut(HEADER_LENGTH); + + header.copy_from_slice(&[ (length >> 0) as u8, - (channel >> 0) as u8, - (channel >> 8) as u8, - (channel >> 16) as u8, + (target >> 8) as u8, + (target >> 16) as u8, + (target >> 24) as u8, (timestamp >> 0) as u8, (timestamp >> 8) as u8, (timestamp >> 16) as u8, @@ -313,34 +315,46 @@ extern fn dma_record_output_wide(timestamp: i64, channel: i32, address: i32, wor (timestamp >> 40) as u8, (timestamp >> 48) as u8, (timestamp >> 56) as u8, - (address >> 0) as u8, - (address >> 8) as u8, - ]; + (target >> 0) as u8, + ]); - let mut data = [0; 16 * 4]; - for (i, &word) in words.as_ref().iter().enumerate() { - let part = [ + data +} + +#[unwind(aborts)] +extern fn dma_record_output(target: i32, word: i32) { + unsafe { + let timestamp = *(csr::rtio::NOW_HI_ADDR as *const i64); + let data = dma_record_output_prepare(timestamp, target, 1); + data.copy_from_slice(&[ (word >> 0) as u8, (word >> 8) as u8, (word >> 16) as u8, (word >> 24) as u8, - ]; - data[i * 4..(i + 1) * 4].copy_from_slice(&part[..]); - } - let data = &data[..words.len() * 4]; - - unsafe { - if DMA_RECORDER.buffer.len() - DMA_RECORDER.data_len < length { - dma_record_flush() - } - let mut dst = &mut DMA_RECORDER.buffer[DMA_RECORDER.data_len.. - DMA_RECORDER.data_len + length]; - dst[..header_length].copy_from_slice(&header[..]); - dst[header_length..].copy_from_slice(&data[..]); - DMA_RECORDER.data_len += length; + ]); } } +#[unwind(aborts)] +extern fn dma_record_output_wide(target: i32, words: CSlice) { + assert!(words.len() <= 16); // enforce the hardware limit + + unsafe { + let timestamp = *(csr::rtio::NOW_HI_ADDR as *const i64); + let mut data = dma_record_output_prepare(timestamp, target, words.len()); + for word in words.as_ref().iter() { + data[..4].copy_from_slice(&[ + (word >> 0) as u8, + (word >> 8) as u8, + (word >> 16) as u8, + (word >> 24) as u8, + ]); + data = &mut data[4..]; + } + } +} + +#[unwind(aborts)] extern fn dma_erase(name: CSlice) { let name = str::from_utf8(name.as_ref()).unwrap(); @@ -353,6 +367,7 @@ struct DmaTrace { address: i32, } +#[unwind(allowed)] extern fn dma_retrieve(name: CSlice) -> DmaTrace { let name = str::from_utf8(name.as_ref()).unwrap(); @@ -372,7 +387,8 @@ extern fn dma_retrieve(name: CSlice) -> DmaTrace { }) } -#[cfg(has_rtio)] +#[cfg(has_rtio_dma)] +#[unwind(allowed)] extern fn dma_playback(timestamp: i64, ptr: i32) { assert!(ptr % 64 == 0); @@ -385,29 +401,29 @@ extern fn dma_playback(timestamp: i64, ptr: i32) { while csr::rtio_dma::enable_read() != 0 {} csr::cri_con::selected_write(0); - let status = csr::rtio_dma::error_status_read(); - if status != 0 { + let error = csr::rtio_dma::error_read(); + if error != 0 { let timestamp = csr::rtio_dma::error_timestamp_read(); let channel = csr::rtio_dma::error_channel_read(); - if status & rtio::RTIO_O_STATUS_UNDERFLOW != 0 { - csr::rtio_dma::error_underflow_reset_write(1); + csr::rtio_dma::error_write(1); + if error & 1 != 0 { raise!("RTIOUnderflow", "RTIO underflow at {0} mu, channel {1}", - timestamp as i64, channel as i64, 0) + timestamp as i64, channel as i64, 0); } - if status & rtio::RTIO_O_STATUS_SEQUENCE_ERROR != 0 { - csr::rtio_dma::error_sequence_error_reset_write(1); - raise!("RTIOSequenceError", - "RTIO sequence error at {0} mu, channel {1}", - timestamp as i64, channel as i64, 0) + if error & 2 != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, output, at {0} mu, 
channel {1}", + timestamp as i64, channel as i64, 0); } } } } -#[cfg(not(has_rtio))] -extern fn dma_playback(timestamp: i64, ptr: i32) { - unimplemented!("not(has_rtio)") +#[cfg(not(has_rtio_dma))] +#[unwind(allowed)] +extern fn dma_playback(_timestamp: i64, _ptr: i32) { + unimplemented!("not(has_rtio_dma)") } unsafe fn attribute_writeback(typeinfo: *const ()) { @@ -477,27 +493,38 @@ pub unsafe fn main() { ptr::write_bytes(__bss_start as *mut u8, 0, (_end - __bss_start) as usize); - send(&NowInitRequest); - recv!(&NowInitReply(now) => NOW = now); (mem::transmute::(__modinit__))(); - send(&NowSave(NOW)); if let Some(typeinfo) = typeinfo { attribute_writeback(typeinfo as *const ()); } + // Make sure all async RPCs are processed before exiting. + // Otherwise, if the comms and kernel CPU run in the following sequence: + // + // comms kernel + // ----------------------- ----------------------- + // check for async RPC + // post async RPC + // post RunFinished + // check for mailbox + // + // the async RPC would be missed. + send(&RpcFlush); + send(&RunFinished); loop {} } #[no_mangle] -pub extern fn exception_handler(vect: u32, _regs: *const u32, pc: u32, ea: u32) { +#[unwind(allowed)] +pub extern fn exception(vect: u32, _regs: *const u32, pc: u32, ea: u32) { panic!("exception {:?} at PC 0x{:x}, EA 0x{:x}", vect, pc, ea) } -// We don't export this because libbase does. -// #[no_mangle] +#[no_mangle] +#[unwind(allowed)] pub extern fn abort() { panic!("aborted") } diff --git a/artiq/firmware/ksupport/nrt_bus.rs b/artiq/firmware/ksupport/nrt_bus.rs index c847bcf82..9e226dd96 100644 --- a/artiq/firmware/ksupport/nrt_bus.rs +++ b/artiq/firmware/ksupport/nrt_bus.rs @@ -50,17 +50,9 @@ pub mod spi { use ::recv; use kernel_proto::*; - pub extern fn set_config(busno: i32, flags: i32, write_div: i32, read_div: i32) { + pub extern fn set_config(busno: i32, flags: i32, length: i32, div: i32, cs: i32) { send(&SpiSetConfigRequest { busno: busno as u32, flags: flags as u8, - write_div: write_div as u8, read_div: read_div as u8 }); - recv!(&SpiBasicReply { succeeded } => if !succeeded { - raise!("SPIError", "SPI bus could not be accessed"); - }); - } - - pub extern fn set_xfer(busno: i32, chip_select: i32, write_length: i32, read_length: i32) { - send(&SpiSetXferRequest { busno: busno as u32, chip_select: chip_select as u16, - write_length: write_length as u8, read_length: read_length as u8 }); + length: length as u8, div: div as u8, cs: cs as u8 }); recv!(&SpiBasicReply { succeeded } => if !succeeded { raise!("SPIError", "SPI bus could not be accessed"); }); diff --git a/artiq/firmware/ksupport/rtio.rs b/artiq/firmware/ksupport/rtio.rs index 64fd1e4e4..a83a45ec1 100644 --- a/artiq/firmware/ksupport/rtio.rs +++ b/artiq/firmware/ksupport/rtio.rs @@ -1,24 +1,41 @@ +#[repr(C)] +pub struct TimestampedData { + timestamp: i64, + data: i32, +} #[cfg(has_rtio)] mod imp { use core::ptr::{read_volatile, write_volatile}; use cslice::CSlice; + use rtio::TimestampedData; - use board::csr; + use board_misoc::csr; use ::send; + use ::recv; use kernel_proto::*; - pub const RTIO_O_STATUS_WAIT: u8 = 1; - pub const RTIO_O_STATUS_UNDERFLOW: u8 = 2; - pub const RTIO_O_STATUS_SEQUENCE_ERROR: u8 = 4; - pub const RTIO_I_STATUS_WAIT_EVENT: u8 = 1; - pub const RTIO_I_STATUS_OVERFLOW: u8 = 2; - pub const RTIO_I_STATUS_WAIT_STATUS: u8 = 4; + pub const RTIO_O_STATUS_WAIT: u8 = 1; + pub const RTIO_O_STATUS_UNDERFLOW: u8 = 2; + pub const RTIO_O_STATUS_DESTINATION_UNREACHABLE: u8 = 4; + pub const RTIO_I_STATUS_WAIT_EVENT: u8 = 1; + pub const 
RTIO_I_STATUS_OVERFLOW: u8 = 2; + pub const RTIO_I_STATUS_WAIT_STATUS: u8 = 4; + pub const RTIO_I_STATUS_DESTINATION_UNREACHABLE: u8 = 8; pub extern fn init() { send(&RtioInitRequest); } + pub extern fn get_destination_status(destination: i32) -> bool { + if 0 <= destination && destination <= 255 { + send(&RtioDestinationStatusRequest { destination: destination as u8 }); + recv!(&RtioDestinationStatusReply { up } => up) + } else { + false + } + } + pub extern fn get_counter() -> i64 { unsafe { csr::rtio::counter_update_write(1); @@ -26,6 +43,7 @@ mod imp { } } + // writing the LSB of o_data (offset=0) triggers the RTIO write #[inline(always)] pub unsafe fn rtio_o_data_write(offset: usize, data: u32) { write_volatile( @@ -40,59 +58,53 @@ mod imp { } #[inline(never)] - unsafe fn process_exceptional_status(timestamp: i64, channel: i32, status: u8) { + unsafe fn process_exceptional_status(channel: i32, status: u8) { + let timestamp = *(csr::rtio::NOW_HI_ADDR as *const i64); if status & RTIO_O_STATUS_WAIT != 0 { while csr::rtio::o_status_read() & RTIO_O_STATUS_WAIT != 0 {} } if status & RTIO_O_STATUS_UNDERFLOW != 0 { raise!("RTIOUnderflow", "RTIO underflow at {0} mu, channel {1}, slack {2} mu", - timestamp, channel as i64, timestamp - get_counter()) + timestamp, channel as i64, timestamp - get_counter()); } - if status & RTIO_O_STATUS_SEQUENCE_ERROR != 0 { - raise!("RTIOSequenceError", - "RTIO sequence error at {0} mu, channel {1}", - timestamp, channel as i64, 0) + if status & RTIO_O_STATUS_DESTINATION_UNREACHABLE != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, output, at {0} mu, channel {1}", + timestamp, channel as i64, 0); } } - pub extern fn output(timestamp: i64, channel: i32, addr: i32, data: i32) { + pub extern fn output(target: i32, data: i32) { unsafe { - csr::rtio::chan_sel_write(channel as _); - // writing timestamp clears o_data - csr::rtio::timestamp_write(timestamp as u64); - csr::rtio::o_address_write(addr as _); + csr::rtio::target_write(target as u32); + // writing target clears o_data rtio_o_data_write(0, data as _); - csr::rtio::o_we_write(1); let status = csr::rtio::o_status_read(); if status != 0 { - process_exceptional_status(timestamp, channel, status); + process_exceptional_status(target >> 8, status); } } } - pub extern fn output_wide(timestamp: i64, channel: i32, addr: i32, data: CSlice) { + pub extern fn output_wide(target: i32, data: CSlice) { unsafe { - csr::rtio::chan_sel_write(channel as _); - // writing timestamp clears o_data - csr::rtio::timestamp_write(timestamp as u64); - csr::rtio::o_address_write(addr as _); - for i in 0..data.len() { + csr::rtio::target_write(target as u32); + // writing target clears o_data + for i in (0..data.len()).rev() { rtio_o_data_write(i, data[i] as _) } - csr::rtio::o_we_write(1); let status = csr::rtio::o_status_read(); if status != 0 { - process_exceptional_status(timestamp, channel, status); + process_exceptional_status(target >> 8, status); } } } - pub extern fn input_timestamp(timeout: i64, channel: i32) -> u64 { + pub extern fn input_timestamp(timeout: i64, channel: i32) -> i64 { unsafe { - csr::rtio::chan_sel_write(channel as _); - csr::rtio::timestamp_write(timeout as u64); - csr::rtio::i_request_write(1); + csr::rtio::target_write((channel as u32) << 8); + csr::rtio::i_timeout_write(timeout as u64); let mut status = RTIO_I_STATUS_WAIT_STATUS; while status & RTIO_I_STATUS_WAIT_STATUS != 0 { @@ -105,18 +117,22 @@ mod imp { channel as i64, 0, 0); } if status & RTIO_I_STATUS_WAIT_EVENT != 0 { - 
return !0 + return -1 + } + if status & RTIO_I_STATUS_DESTINATION_UNREACHABLE != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, input, on channel {0}", + channel as i64, 0, 0); } - csr::rtio::i_timestamp_read() + csr::rtio::i_timestamp_read() as i64 } } pub extern fn input_data(channel: i32) -> i32 { unsafe { - csr::rtio::chan_sel_write(channel as _); - csr::rtio::timestamp_write(0xffffffff_ffffffff); - csr::rtio::i_request_write(1); + csr::rtio::target_write((channel as u32) << 8); + csr::rtio::i_timeout_write(0xffffffff_ffffffff); let mut status = RTIO_I_STATUS_WAIT_STATUS; while status & RTIO_I_STATUS_WAIT_STATUS != 0 { @@ -124,21 +140,55 @@ mod imp { } if status & RTIO_I_STATUS_OVERFLOW != 0 { - csr::rtio::i_overflow_reset_write(1); raise!("RTIOOverflow", "RTIO input overflow on channel {0}", channel as i64, 0, 0); } + if status & RTIO_I_STATUS_DESTINATION_UNREACHABLE != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, input, on channel {0}", + channel as i64, 0, 0); + } rtio_i_data_read(0) as i32 } } - #[cfg(has_rtio_log)] - pub fn log(timestamp: i64, data: &[u8]) { + pub extern fn input_timestamped_data(timeout: i64, channel: i32) -> TimestampedData { unsafe { - csr::rtio::chan_sel_write(csr::CONFIG_RTIO_LOG_CHANNEL); - csr::rtio::timestamp_write(timestamp as u64); + csr::rtio::target_write((channel as u32) << 8); + csr::rtio::i_timeout_write(timeout as u64); + + let mut status = RTIO_I_STATUS_WAIT_STATUS; + while status & RTIO_I_STATUS_WAIT_STATUS != 0 { + status = csr::rtio::i_status_read(); + } + + if status & RTIO_I_STATUS_OVERFLOW != 0 { + raise!("RTIOOverflow", + "RTIO input overflow on channel {0}", + channel as i64, 0, 0); + } + if status & RTIO_I_STATUS_WAIT_EVENT != 0 { + return TimestampedData { timestamp: -1, data: 0 } + } + if status & RTIO_I_STATUS_DESTINATION_UNREACHABLE != 0 { + raise!("RTIODestinationUnreachable", + "RTIO destination unreachable, input, on channel {0}", + channel as i64, 0, 0); + } + + TimestampedData { + timestamp: csr::rtio::i_timestamp_read() as i64, + data: rtio_i_data_read(0) as i32 + } + } + } + + #[cfg(has_rtio_log)] + pub fn log(data: &[u8]) { + unsafe { + csr::rtio::target_write(csr::CONFIG_RTIO_LOG_CHANNEL << 8); let mut word: u32 = 0; for i in 0..data.len() { @@ -146,20 +196,18 @@ mod imp { word |= data[i] as u32; if i % 4 == 3 { rtio_o_data_write(0, word); - csr::rtio::o_we_write(1); word = 0; } } if word != 0 { rtio_o_data_write(0, word); - csr::rtio::o_we_write(1); } } } #[cfg(not(has_rtio_log))] - pub fn log(_timestamp: i64, _data: &[u8]) { + pub fn log(_data: &[u8]) { unimplemented!("not(has_rtio_log)") } } @@ -167,24 +215,29 @@ mod imp { #[cfg(not(has_rtio))] mod imp { use cslice::CSlice; + use rtio::TimestampedData; pub extern fn init() { unimplemented!("not(has_rtio)") } + pub extern fn get_destination_status(_destination: i32) -> bool { + unimplemented!("not(has_rtio)") + } + pub extern fn get_counter() -> i64 { unimplemented!("not(has_rtio)") } - pub extern fn output(_timestamp: i64, _channel: i32, _addr: i32, _data: i32) { + pub extern fn output(_target: i32, _data: i32) { unimplemented!("not(has_rtio)") } - pub extern fn output_wide(_timestamp: i64, _channel: i32, _addr: i32, _data: CSlice) { + pub extern fn output_wide(_target: i32, _data: CSlice) { unimplemented!("not(has_rtio)") } - pub extern fn input_timestamp(_timeout: i64, _channel: i32) -> u64 { + pub extern fn input_timestamp(_timeout: i64, _channel: i32) -> i64 { unimplemented!("not(has_rtio)") } @@ -192,47 
+245,13 @@ mod imp { unimplemented!("not(has_rtio)") } - pub fn log(_timestamp: i64, _data: &[u8]) { + pub extern fn input_timestamped_data(_timeout: i64, _channel: i32) -> TimestampedData { + unimplemented!("not(has_rtio)") + } + + pub fn log(_data: &[u8]) { unimplemented!("not(has_rtio)") } } pub use self::imp::*; - -pub mod drtio_dbg { - use ::send; - use ::recv; - use kernel_proto::*; - - #[repr(C)] - pub struct ChannelState(i32, i64); - - pub extern fn get_channel_state(channel: i32) -> ChannelState { - send(&DrtioChannelStateRequest { channel: channel as u32 }); - recv!(&DrtioChannelStateReply { fifo_space, last_timestamp } - => ChannelState(fifo_space as i32, last_timestamp as i64)) - } - - pub extern fn reset_channel_state(channel: i32) { - send(&DrtioResetChannelStateRequest { channel: channel as u32 }) - } - - pub extern fn get_fifo_space(channel: i32) { - send(&DrtioGetFifoSpaceRequest { channel: channel as u32 }) - } - - #[repr(C)] - pub struct PacketCounts(i32, i32); - - pub extern fn get_packet_counts(linkno: i32) -> PacketCounts { - send(&DrtioPacketCountRequest { linkno: linkno as u8 }); - recv!(&DrtioPacketCountReply { tx_cnt, rx_cnt } - => PacketCounts(tx_cnt as i32, rx_cnt as i32)) - } - - pub extern fn get_fifo_space_req_count(linkno: i32) -> i32 { - send(&DrtioFifoSpaceReqCountRequest { linkno: linkno as u8 }); - recv!(&DrtioFifoSpaceReqCountReply { cnt } - => cnt as i32) - } -} diff --git a/artiq/firmware/liballoc_list/lib.rs b/artiq/firmware/liballoc_list/lib.rs index 4854575bb..f01262007 100644 --- a/artiq/firmware/liballoc_list/lib.rs +++ b/artiq/firmware/liballoc_list/lib.rs @@ -1,10 +1,7 @@ -#![feature(alloc, allocator_api)] #![no_std] -extern crate alloc; - -use core::{mem, fmt}; -use alloc::allocator::{Layout, AllocErr, Alloc}; +use core::{ptr, mem, fmt}; +use core::alloc::{GlobalAlloc, Layout}; // The minimum alignment guaranteed by the architecture. const MIN_ALIGN: usize = 4; @@ -42,10 +39,10 @@ impl ListAlloc { } } -unsafe impl<'a> Alloc for &'a ListAlloc { - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { +unsafe impl GlobalAlloc for ListAlloc { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { if layout.align() > MIN_ALIGN { - return Err(AllocErr::Unsupported { details: "alignment too large" }) + panic!("cannot allocate with alignment {}", layout.align()) } let header_size = mem::size_of::
(); @@ -83,7 +80,7 @@ unsafe impl<'a> Alloc for &'a ListAlloc { if (*curr).size >= size { (*curr).magic = MAGIC_BUSY; - return Ok(curr.offset(1) as *mut u8) + return curr.offset(1) as *mut u8 } }, _ => panic!("heap corruption detected at {:p}", curr) @@ -92,20 +89,16 @@ unsafe impl<'a> Alloc for &'a ListAlloc { curr = (*curr).next; } - Err(AllocErr::Exhausted { request: layout }) + ptr::null_mut() } - unsafe fn dealloc(&mut self, ptr: *mut u8, _layout: Layout) { + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { let curr = (ptr as *mut Header).offset(-1); if (*curr).magic != MAGIC_BUSY { panic!("heap corruption detected at {:p}", curr) } (*curr).magic = MAGIC_FREE; } - - fn oom(&mut self, err: AllocErr) -> ! { - panic!("heap view: {}\ncannot allocate: {:?}", self, err) - } } impl fmt::Display for ListAlloc { diff --git a/artiq/firmware/liballoc_stub/lib.rs b/artiq/firmware/liballoc_stub/lib.rs deleted file mode 100644 index 941b5e16f..000000000 --- a/artiq/firmware/liballoc_stub/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -#![feature(alloc, allocator_api)] -#![no_std] - -extern crate alloc; - -use alloc::allocator::{Layout, AllocErr, Alloc}; - -pub struct StubAlloc; - -unsafe impl<'a> Alloc for &'a StubAlloc { - unsafe fn alloc(&mut self, _layout: Layout) -> Result<*mut u8, AllocErr> { - unimplemented!() - } - - unsafe fn dealloc(&mut self, _ptr: *mut u8, _layout: Layout) { - unimplemented!() - } -} diff --git a/artiq/firmware/libamp/Cargo.toml b/artiq/firmware/libamp/Cargo.toml deleted file mode 100644 index 2607f80c2..000000000 --- a/artiq/firmware/libamp/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -authors = ["M-Labs"] -name = "amp" -version = "0.0.0" - -[lib] -name = "amp" -path = "lib.rs" - -[dependencies] -board = { path = "../libboard" } diff --git a/artiq/firmware/libamp/lib.rs b/artiq/firmware/libamp/lib.rs deleted file mode 100644 index df46ef0c0..000000000 --- a/artiq/firmware/libamp/lib.rs +++ /dev/null @@ -1,6 +0,0 @@ -#![no_std] - -extern crate board; - -pub mod mailbox; -pub mod rpc_queue; diff --git a/artiq/firmware/libboard/Cargo.toml b/artiq/firmware/libboard/Cargo.toml deleted file mode 100644 index 825c9cf81..000000000 --- a/artiq/firmware/libboard/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -authors = ["M-Labs"] -name = "board" -version = "0.0.0" -build = "build.rs" - -[lib] -name = "board" -path = "lib.rs" - -[build-dependencies] -build_artiq = { path = "../libbuild_artiq" } - -[dependencies] -log = { version = "0.3", default-features = false } -bitflags = { version = "1.0" } - -[features] -uart_console = [] diff --git a/artiq/firmware/libboard/build.rs b/artiq/firmware/libboard/build.rs deleted file mode 100644 index 03ad582f8..000000000 --- a/artiq/firmware/libboard/build.rs +++ /dev/null @@ -1,29 +0,0 @@ -extern crate build_artiq; - -use std::env; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; -use std::process::Command; - -fn gen_hmc7043_writes() { - println!("cargo:rerun-if-changed=hmc7043_gen_writes.py"); - println!("cargo:rerun-if-changed=hmc7043_guiexport_6gbps.py"); - - let hmc7043_writes = - Command::new("python3") - .arg("hmc7043_gen_writes.py") - .arg("hmc7043_guiexport_6gbps.py") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok()) - .unwrap(); - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - let mut f = File::create(out_dir.join("hmc7043_writes.rs")).unwrap(); - write!(f, "{}", hmc7043_writes).unwrap(); -} - -fn main() { - build_artiq::misoc_cfg(); - gen_hmc7043_writes(); -} diff 
--git a/artiq/firmware/libboard/clock.rs b/artiq/firmware/libboard/clock.rs deleted file mode 100644 index 31fe7bafc..000000000 --- a/artiq/firmware/libboard/clock.rs +++ /dev/null @@ -1,83 +0,0 @@ -use csr; - -const INIT: u64 = ::core::i64::MAX as u64; -const FREQ: u64 = csr::CONFIG_CLOCK_FREQUENCY as u64; - -pub fn init() { - unsafe { - csr::timer0::en_write(0); - csr::timer0::load_write(INIT); - csr::timer0::reload_write(INIT); - csr::timer0::en_write(1); - } -} - -pub fn get_us() -> u64 { - unsafe { - csr::timer0::update_value_write(1); - (INIT - csr::timer0::value_read()) / (FREQ / 1_000_000) - } -} - -pub fn get_ms() -> u64 { - unsafe { - csr::timer0::update_value_write(1); - (INIT - csr::timer0::value_read()) / (FREQ / 1_000) - } -} - -pub fn spin_us(interval: u64) { - unsafe { - csr::timer0::update_value_write(1); - let threshold = csr::timer0::value_read() - interval * (FREQ / 1_000_000); - while csr::timer0::value_read() > threshold { - csr::timer0::update_value_write(1) - } - } -} - -#[derive(Debug, Clone, Copy)] -struct Watchdog { - active: bool, - threshold: u64 -} - -pub const MAX_WATCHDOGS: usize = 16; - -#[derive(Debug)] -pub struct WatchdogSet { - watchdogs: [Watchdog; MAX_WATCHDOGS] -} - -impl WatchdogSet { - pub fn new() -> WatchdogSet { - WatchdogSet { - watchdogs: [Watchdog { active: false, threshold: 0 }; MAX_WATCHDOGS] - } - } - - pub fn set_ms(&mut self, interval: u64) -> Result { - for (index, watchdog) in self.watchdogs.iter_mut().enumerate() { - if !watchdog.active { - watchdog.active = true; - watchdog.threshold = get_ms() + interval; - return Ok(index) - } - } - - Err(()) - } - - pub fn clear(&mut self, index: usize) { - if index < MAX_WATCHDOGS { - self.watchdogs[index].active = false - } - } - - pub fn expired(&self) -> bool { - self.watchdogs.iter() - .filter(|wd| wd.active) - .min_by_key(|wd| wd.threshold) - .map_or(false, |wd| get_ms() > wd.threshold) - } -} diff --git a/artiq/firmware/libboard/hmc7043_gen_writes.py b/artiq/firmware/libboard/hmc7043_gen_writes.py deleted file mode 100755 index 3f0a8cb47..000000000 --- a/artiq/firmware/libboard/hmc7043_gen_writes.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 - -# The HMC7043 GUI exports register write lists into Python files. -# This script converts them into Rust arrays. 
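The comment above summarizes what the deleted hmc7043_gen_writes.py helper did: replay a GUI-exported register script against a stub `dut` object that records every `write(address, value)` call, then print the captured pairs as a Rust constant. Judging from the script's print statements and the first few writes in hmc7043_guiexport_6gbps.py, the generated hmc7043_writes.rs would have looked roughly like the sketch below; the array length shown and the trailing demonstration loop are illustrative only.

```
// Illustrative shape of the autogenerated hmc7043_writes.rs
// (only the first few register writes from the GUI export are shown).
const HMC7043_WRITES: [(u16, u8); 4] = [
    (0x0000, 0x00),
    (0x0001, 0x00),
    (0x0002, 0x00),
    (0x0003, 0x24),
];

fn main() {
    // The firmware would walk this table and program each HMC7043 register in order;
    // printing here is just for demonstration on a host.
    for &(address, value) in HMC7043_WRITES.iter() {
        println!("hmc7043 write: 0x{:04x} <- 0x{:02x}", address, value);
    }
}
```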
- -import sys -import runpy - - -class DUT: - def __init__(self): - self.writes = [] - - def write(self, address, value): - self.writes.append((address, value)) - - -def main(): - dut = DUT() - runpy.run_path(sys.argv[1], {"dut": dut}) - - print("// This file was autogenerated by hmc7043_gen_writes.py") - print("const HMC7043_WRITES: [(u16, u8); {}] = [".format(len(dut.writes))) - for address, value in dut.writes: - print(" (0x{:04x}, 0x{:02x}),".format(address, value)) - print("];") - - -if __name__ == "__main__": - main() diff --git a/artiq/firmware/libboard/hmc7043_guiexport_6gbps.py b/artiq/firmware/libboard/hmc7043_guiexport_6gbps.py deleted file mode 100644 index 77841ea03..000000000 --- a/artiq/firmware/libboard/hmc7043_guiexport_6gbps.py +++ /dev/null @@ -1,696 +0,0 @@ -# glbl_cfg1_swrst[0:0] = 0x0 -dut.write(0x0, 0x0) - -# glbl_cfg1_sleep[0:0] = 0x0 -# glbl_cfg1_restart[1:1] = 0x0 -# sysr_cfg1_pulsor_req[2:2] = 0x0 -# grpx_cfg1_mute[3:3] = 0x0 -# dist_cfg1_perf_floor[6:6] = 0x0 -# sysr_cfg1_reseed_req[7:7] = 0x0 -dut.write(0x1, 0x0) - -# sysr_cfg1_rev[0:0] = 0x0 -# sysr_cfg1_slipN_req[1:1] = 0x0 -dut.write(0x2, 0x0) - -# glbl_cfg1_ena_sysr[2:2] = 0x1 -# glbl_cfg2_ena_vcos[4:3] = 0x0 -# glbl_cfg1_ena_sysri[5:5] = 0x1 -dut.write(0x3, 0x24) - -# glbl_cfg7_ena_clkgr[6:0] = 0x3B -dut.write(0x4, 0x3B) - -# glbl_cfg1_clear_alarms[0:0] = 0x0 -dut.write(0x6, 0x0) - -# glbl_reserved[0:0] = 0x0 -dut.write(0x7, 0x0) - -# glbl_cfg5_ibuf0_en[0:0] = 0x0 -# glbl_cfg5_ibuf0_mode[4:1] = 0x7 -dut.write(0xA, 0xE) - -# glbl_cfg5_ibuf1_en[0:0] = 0x1 -# glbl_cfg5_ibuf1_mode[4:1] = 0x7 -dut.write(0xB, 0xF) - -# glbl_cfg5_gpi1_en[0:0] = 0x0 -# glbl_cfg5_gpi1_sel[4:1] = 0x0 -dut.write(0x46, 0x0) - -# glbl_cfg8_gpo1_en[0:0] = 0x1 -# glbl_cfg8_gpo1_mode[1:1] = 0x1 -# glbl_cfg8_gpo1_sel[7:2] = 0x7 -dut.write(0x50, 0x1F) - -# glbl_cfg2_sdio_en[0:0] = 0x1 -# glbl_cfg2_sdio_mode[1:1] = 0x1 -dut.write(0x54, 0x3) - -# sysr_cfg3_pulsor_mode[2:0] = 0x1 -dut.write(0x5A, 0x1) - -# sysr_cfg1_synci_invpol[0:0] = 0x0 -# sysr_cfg1_ext_sync_retimemode[2:2] = 0x1 -dut.write(0x5B, 0x4) - -# sysr_cfg16_divrat_lsb[7:0] = 0x0 -dut.write(0x5C, 0x0) - -# sysr_cfg16_divrat_msb[3:0] = 0x6 -dut.write(0x5D, 0x6) - -# dist_cfg1_extvco_islowfreq_sel[0:0] = 0x0 -# dist_cfg1_extvco_div2_sel[1:1] = 0x1 -dut.write(0x64, 0x2) - -# clkgrpx_cfg1_alg_dly_lowpwr_sel[0:0] = 0x0 -dut.write(0x65, 0x0) - -# alrm_cfg1_sysr_unsyncd_allow[1:1] = 0x0 -# alrm_cfg1_clkgrpx_validph_allow[2:2] = 0x0 -# alrm_cfg1_sync_req_allow[4:4] = 0x1 -dut.write(0x71, 0x10) - -# glbl_ro8_chipid_lob[7:0] = 0x1 -dut.write(0x78, 0x1) - -# glbl_ro8_chipid_mid[7:0] = 0x52 -dut.write(0x79, 0x52) - -# glbl_ro8_chipid_hib[7:0] = 0x4 -dut.write(0x7A, 0x4) - -# alrm_ro1_sysr_unsyncd_now[1:1] = 0x1 -# alrm_ro1_clkgrpx_validph_now[2:2] = 0x0 -# alrm_ro1_sync_req_now[4:4] = 0x1 -dut.write(0x7D, 0x12) - -# sysr_ro4_fsmstate[3:0] = 0x2 -# grpx_ro1_outdivfsm_busy[4:4] = 0x0 -dut.write(0x91, 0x2) - -# reg_98[7:0] = 0x0 -dut.write(0x98, 0x0) - -# reg_99[7:0] = 0x0 -dut.write(0x99, 0x0) - -# reg_9A[7:0] = 0x0 -dut.write(0x9A, 0x0) - -# reg_9B[7:0] = 0xAA -dut.write(0x9B, 0xAA) - -# reg_9C[7:0] = 0xAA -dut.write(0x9C, 0xAA) - -# reg_9D[7:0] = 0xAA -dut.write(0x9D, 0xAA) - -# reg_9E[7:0] = 0xAA -dut.write(0x9E, 0xAA) - -# reg_9F[7:0] = 0x4D -dut.write(0x9F, 0x4D) - -# reg_A0[7:0] = 0xDF -dut.write(0xA0, 0xDF) - -# reg_A1[7:0] = 0x97 -dut.write(0xA1, 0x97) - -# reg_A2[7:0] = 0x3 -dut.write(0xA2, 0x3) - -# reg_A3[7:0] = 0x0 -dut.write(0xA3, 0x0) - -# reg_A4[7:0] = 0x0 -dut.write(0xA4, 0x0) - 
-# reg_AD[7:0] = 0x0 -dut.write(0xAD, 0x0) - -# reg_AE[7:0] = 0x8 -dut.write(0xAE, 0x8) - -# reg_AF[7:0] = 0x50 -dut.write(0xAF, 0x50) - -# reg_B0[7:0] = 0x4 -dut.write(0xB0, 0x4) - -# reg_B1[7:0] = 0xD -dut.write(0xB1, 0xD) - -# reg_B2[7:0] = 0x0 -dut.write(0xB2, 0x0) - -# reg_B3[7:0] = 0x0 -dut.write(0xB3, 0x0) - -# reg_B5[7:0] = 0x0 -dut.write(0xB5, 0x0) - -# reg_B6[7:0] = 0x0 -dut.write(0xB6, 0x0) - -# reg_B7[7:0] = 0x0 -dut.write(0xB7, 0x0) - -# reg_B8[7:0] = 0x0 -dut.write(0xB8, 0x0) - -# clkgrp1_div1_cfg1_en[0:0] = 0x1 -# clkgrp1_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp1_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp1_div1_cfg1_rev[4:4] = 0x1 -# clkgrp1_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp1_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp1_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xC8, 0x73) - -# clkgrp1_div1_cfg12_divrat_lsb[7:0] = 0x1 -dut.write(0xC9, 0x1) - -# clkgrp1_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xCA, 0x0) - -# clkgrp1_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xCB, 0x0) - -# clkgrp1_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xCC, 0x0) - -# clkgrp1_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xCD, 0x0) - -# clkgrp1_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0xCE, 0x0) - -# clkgrp1_div1_cfg2_sel_outmux[1:0] = 0x3 -# clkgrp1_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0xCF, 0x3) - -# clkgrp1_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp1_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp1_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp1_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp1_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0xD0, 0x8) - -# clkgrp1_div2_cfg1_en[0:0] = 0x1 -# clkgrp1_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp1_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp1_div2_cfg1_rev[4:4] = 0x1 -# clkgrp1_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp1_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp1_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xD2, 0x71) - -# clkgrp1_div2_cfg12_divrat_lsb[7:0] = 0x40 -dut.write(0xD3, 0x40) - -# clkgrp1_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xD4, 0x0) - -# clkgrp1_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xD5, 0x0) - -# clkgrp1_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xD6, 0x0) - -# clkgrp1_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xD7, 0x0) - -# clkgrp1_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0xD8, 0x0) - -# clkgrp1_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp1_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0xD9, 0x0) - -# clkgrp1_div2_cfg5_drvr_res[1:0] = 0x1 -# clkgrp1_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp1_div2_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp1_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp1_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0xDA, 0x9) - -# clkgrp2_div1_cfg1_en[0:0] = 0x1 -# clkgrp2_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp2_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp2_div1_cfg1_rev[4:4] = 0x1 -# clkgrp2_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp2_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp2_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xDC, 0x73) - -# clkgrp2_div1_cfg12_divrat_lsb[7:0] = 0x1 -dut.write(0xDD, 0x1) - -# clkgrp2_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xDE, 0x0) - -# clkgrp2_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xDF, 0x0) - -# clkgrp2_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xE0, 0x0) - -# clkgrp2_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xE1, 0x0) - -# clkgrp2_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0xE2, 0x0) - -# clkgrp2_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp2_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0xE3, 0x0) - -# clkgrp2_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp2_div1_cfg5_drvr_spare[2:2] = 0x0 -# 
clkgrp2_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp2_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp2_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0xE4, 0x8) - -# clkgrp2_div2_cfg1_en[0:0] = 0x1 -# clkgrp2_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp2_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp2_div2_cfg1_rev[4:4] = 0x1 -# clkgrp2_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp2_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp2_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xE6, 0x71) - -# clkgrp2_div2_cfg12_divrat_lsb[7:0] = 0x40 -dut.write(0xE7, 0x40) - -# clkgrp2_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xE8, 0x0) - -# clkgrp2_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xE9, 0x0) - -# clkgrp2_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xEA, 0x0) - -# clkgrp2_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xEB, 0x0) - -# clkgrp2_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0xEC, 0x0) - -# clkgrp2_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp2_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0xED, 0x0) - -# clkgrp2_div2_cfg5_drvr_res[1:0] = 0x1 -# clkgrp2_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp2_div2_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp2_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp2_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0xEE, 0x9) - -# clkgrp3_div1_cfg1_en[0:0] = 0x1 -# clkgrp3_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp3_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp3_div1_cfg1_rev[4:4] = 0x1 -# clkgrp3_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp3_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp3_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xF0, 0x73) - -# clkgrp3_div1_cfg12_divrat_lsb[7:0] = 0x2 -dut.write(0xF1, 0x2) - -# clkgrp3_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xF2, 0x0) - -# clkgrp3_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xF3, 0x0) - -# clkgrp3_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xF4, 0x0) - -# clkgrp3_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xF5, 0x0) - -# clkgrp3_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0xF6, 0x0) - -# clkgrp3_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp3_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0xF7, 0x0) - -# clkgrp3_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp3_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp3_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp3_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp3_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0xF8, 0x8) - -# clkgrp3_div2_cfg1_en[0:0] = 0x0 -# clkgrp3_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp3_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp3_div2_cfg1_rev[4:4] = 0x1 -# clkgrp3_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp3_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp3_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0xFA, 0x70) - -# clkgrp3_div2_cfg12_divrat_lsb[7:0] = 0x80 -dut.write(0xFB, 0x80) - -# clkgrp3_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0xFC, 0x0) - -# clkgrp3_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0xFD, 0x0) - -# clkgrp3_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0xFE, 0x0) - -# clkgrp3_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0xFF, 0x0) - -# clkgrp3_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x100, 0x0) - -# clkgrp3_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp3_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x101, 0x0) - -# clkgrp3_div2_cfg5_drvr_res[1:0] = 0x3 -# clkgrp3_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp3_div2_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp3_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp3_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0x102, 0xB) - -# clkgrp4_div1_cfg1_en[0:0] = 0x1 -# clkgrp4_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp4_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp4_div1_cfg1_rev[4:4] = 0x1 -# clkgrp4_div1_cfg1_slipmask[5:5] = 0x1 -# 
clkgrp4_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp4_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x104, 0x73) - -# clkgrp4_div1_cfg12_divrat_lsb[7:0] = 0x4 -dut.write(0x105, 0x4) - -# clkgrp4_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x106, 0x0) - -# clkgrp4_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x107, 0x0) - -# clkgrp4_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x108, 0x0) - -# clkgrp4_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x109, 0x0) - -# clkgrp4_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x10A, 0x0) - -# clkgrp4_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp4_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x10B, 0x0) - -# clkgrp4_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp4_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp4_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp4_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp4_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0x10C, 0x8) - -# clkgrp4_div2_cfg1_en[0:0] = 0x1 -# clkgrp4_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp4_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp4_div2_cfg1_rev[4:4] = 0x1 -# clkgrp4_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp4_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp4_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x10E, 0x71) - -# clkgrp4_div2_cfg12_divrat_lsb[7:0] = 0x40 -dut.write(0x10F, 0x40) - -# clkgrp4_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x110, 0x0) - -# clkgrp4_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x111, 0x0) - -# clkgrp4_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x112, 0x0) - -# clkgrp4_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x113, 0x0) - -# clkgrp4_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x114, 0x0) - -# clkgrp4_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp4_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x115, 0x0) - -# clkgrp4_div2_cfg5_drvr_res[1:0] = 0x3 -# clkgrp4_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp4_div2_cfg5_drvr_mode[4:3] = 0x2 -# clkgrp4_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp4_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0x116, 0x13) - -# clkgrp5_div1_cfg1_en[0:0] = 0x1 -# clkgrp5_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp5_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp5_div1_cfg1_rev[4:4] = 0x1 -# clkgrp5_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp5_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp5_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x118, 0x73) - -# clkgrp5_div1_cfg12_divrat_lsb[7:0] = 0x4 -dut.write(0x119, 0x4) - -# clkgrp5_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x11A, 0x0) - -# clkgrp5_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x11B, 0x0) - -# clkgrp5_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x11C, 0x0) - -# clkgrp5_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x11D, 0x0) - -# clkgrp5_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x11E, 0x0) - -# clkgrp5_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp5_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x11F, 0x0) - -# clkgrp5_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp5_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp5_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp5_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp5_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0x120, 0x8) - -# clkgrp5_div2_cfg1_en[0:0] = 0x1 -# clkgrp5_div2_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp5_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp5_div2_cfg1_rev[4:4] = 0x1 -# clkgrp5_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp5_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp5_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x122, 0x73) - -# clkgrp5_div2_cfg12_divrat_lsb[7:0] = 0x4 -dut.write(0x123, 0x4) - -# clkgrp5_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x124, 0x0) - -# clkgrp5_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x125, 0x0) - -# 
clkgrp5_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x126, 0x0) - -# clkgrp5_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x127, 0x0) - -# clkgrp5_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x128, 0x0) - -# clkgrp5_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp5_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x129, 0x0) - -# clkgrp5_div2_cfg5_drvr_res[1:0] = 0x3 -# clkgrp5_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp5_div2_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp5_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp5_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0x12A, 0xB) - -# clkgrp6_div1_cfg1_en[0:0] = 0x1 -# clkgrp6_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp6_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp6_div1_cfg1_rev[4:4] = 0x1 -# clkgrp6_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp6_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp6_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x12C, 0x73) - -# clkgrp6_div1_cfg12_divrat_lsb[7:0] = 0x4 -dut.write(0x12D, 0x4) - -# clkgrp6_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x12E, 0x0) - -# clkgrp6_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x12F, 0x0) - -# clkgrp6_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x130, 0x0) - -# clkgrp6_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x131, 0x0) - -# clkgrp6_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x132, 0x0) - -# clkgrp6_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp6_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x133, 0x0) - -# clkgrp6_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp6_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp6_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp6_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp6_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0x134, 0x8) - -# clkgrp6_div2_cfg1_en[0:0] = 0x1 -# clkgrp6_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp6_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp6_div2_cfg1_rev[4:4] = 0x1 -# clkgrp6_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp6_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp6_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x136, 0x71) - -# clkgrp6_div2_cfg12_divrat_lsb[7:0] = 0x80 -dut.write(0x137, 0x80) - -# clkgrp6_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x138, 0x0) - -# clkgrp6_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x139, 0x0) - -# clkgrp6_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x13A, 0x0) - -# clkgrp6_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x13B, 0x0) - -# clkgrp6_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x13C, 0x0) - -# clkgrp6_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp6_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x13D, 0x0) - -# clkgrp6_div2_cfg5_drvr_res[1:0] = 0x1 -# clkgrp6_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp6_div2_cfg5_drvr_mode[4:3] = 0x2 -# clkgrp6_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp6_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0x13E, 0x11) - -# clkgrp7_div1_cfg1_en[0:0] = 0x0 -# clkgrp7_div1_cfg1_phdelta_mslip[1:1] = 0x1 -# clkgrp7_div1_cfg2_startmode[3:2] = 0x0 -# clkgrp7_div1_cfg1_rev[4:4] = 0x1 -# clkgrp7_div1_cfg1_slipmask[5:5] = 0x1 -# clkgrp7_div1_cfg1_reseedmask[6:6] = 0x1 -# clkgrp7_div1_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x140, 0x72) - -# clkgrp7_div1_cfg12_divrat_lsb[7:0] = 0x2 -dut.write(0x141, 0x2) - -# clkgrp7_div1_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x142, 0x0) - -# clkgrp7_div1_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x143, 0x0) - -# clkgrp7_div1_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x144, 0x0) - -# clkgrp7_div1_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x145, 0x0) - -# clkgrp7_div1_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x146, 0x0) - -# clkgrp7_div1_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp7_div1_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x147, 0x0) - -# 
clkgrp7_div1_cfg5_drvr_res[1:0] = 0x0 -# clkgrp7_div1_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp7_div1_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp7_div1_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp7_div1_cfg2_mutesel[7:6] = 0x0 -dut.write(0x148, 0x8) - -# clkgrp7_div2_cfg1_en[0:0] = 0x0 -# clkgrp7_div2_cfg1_phdelta_mslip[1:1] = 0x0 -# clkgrp7_div2_cfg2_startmode[3:2] = 0x0 -# clkgrp7_div2_cfg1_rev[4:4] = 0x1 -# clkgrp7_div2_cfg1_slipmask[5:5] = 0x1 -# clkgrp7_div2_cfg1_reseedmask[6:6] = 0x1 -# clkgrp7_div2_cfg1_hi_perf[7:7] = 0x0 -dut.write(0x14A, 0x70) - -# clkgrp7_div2_cfg12_divrat_lsb[7:0] = 0x80 -dut.write(0x14B, 0x80) - -# clkgrp7_div2_cfg12_divrat_msb[3:0] = 0x0 -dut.write(0x14C, 0x0) - -# clkgrp7_div2_cfg5_fine_delay[4:0] = 0x0 -dut.write(0x14D, 0x0) - -# clkgrp7_div2_cfg5_sel_coarse_delay[4:0] = 0x0 -dut.write(0x14E, 0x0) - -# clkgrp7_div2_cfg12_mslip_lsb[7:0] = 0x0 -dut.write(0x14F, 0x0) - -# clkgrp7_div2_cfg12_mslip_msb[3:0] = 0x0 -dut.write(0x150, 0x0) - -# clkgrp7_div2_cfg2_sel_outmux[1:0] = 0x0 -# clkgrp7_div2_cfg1_drvr_sel_testclk[2:2] = 0x0 -dut.write(0x151, 0x0) - -# clkgrp7_div2_cfg5_drvr_res[1:0] = 0x3 -# clkgrp7_div2_cfg5_drvr_spare[2:2] = 0x0 -# clkgrp7_div2_cfg5_drvr_mode[4:3] = 0x1 -# clkgrp7_div2_cfg_outbuf_dyn[5:5] = 0x0 -# clkgrp7_div2_cfg2_mutesel[7:6] = 0x0 -dut.write(0x152, 0xB) \ No newline at end of file diff --git a/artiq/firmware/libboard/hmc830_7043.rs b/artiq/firmware/libboard/hmc830_7043.rs deleted file mode 100644 index ab106cd2c..000000000 --- a/artiq/firmware/libboard/hmc830_7043.rs +++ /dev/null @@ -1,185 +0,0 @@ -/* - * HMC830 config: - * 100MHz input, 1.2GHz output - * fvco = (refclk / r_divider) * n_divider - * fout = fvco/2 - * - * HMC7043 config: - * dac clock: 600MHz (div=2) - * fpga clock: 150MHz (div=8) - * sysref clock: 9.375MHz (div=128) - */ - -mod clock_mux { - use csr; - - const CLK_SRC_EXT_SEL : u8 = 1 << 0; - const REF_CLK_SRC_SEL : u8 = 1 << 1; - const DAC_CLK_SRC_SEL : u8 = 1 << 2; - - pub fn init() { - unsafe { - csr::clock_mux::out_write( - 1*CLK_SRC_EXT_SEL | // use ext clk from sma - 1*REF_CLK_SRC_SEL | - 1*DAC_CLK_SRC_SEL); - } - } -} - -mod hmc830 { - use clock; - use csr; - - const HMC830_WRITES: [(u8, u32); 16] = [ - (0x0, 0x20), - (0x1, 0x2), - (0x2, 0x2), // r_divider - (0x5, 0x1628), - (0x5, 0x60a0), - (0x5, 0xe110), - (0x5, 0x2818), - (0x5, 0x0), - (0x6, 0x303ca), - (0x7, 0x14d), - (0x8, 0xc1beff), - (0x9, 0x153fff), - (0xa, 0x2046), - (0xb, 0x7c061), - (0xf, 0x81), - (0x3, 0x30), // n_divider - ]; - - fn spi_setup() { - unsafe { - csr::converter_spi::offline_write(1); - csr::converter_spi::cs_polarity_write(0b0001); - csr::converter_spi::clk_polarity_write(0); - csr::converter_spi::clk_phase_write(0); - csr::converter_spi::lsb_first_write(0); - csr::converter_spi::half_duplex_write(0); - csr::converter_spi::clk_div_write_write(8); - csr::converter_spi::clk_div_read_write(8); - csr::converter_spi::cs_write(1 << csr::CONFIG_CONVERTER_SPI_HMC830_CS); - csr::converter_spi::offline_write(0); - } - } - - fn write(addr: u8, data: u32) { - let cmd = (0 << 6) | addr; - let val = ((cmd as u32) << 24) | data; - unsafe { - csr::converter_spi::xfer_len_write_write(32); - csr::converter_spi::xfer_len_read_write(0); - csr::converter_spi::data_write_write(val << (32-31)); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} - } - } - - fn read(addr: u8) -> u32 { - let cmd = (1 << 6) | addr; - let val = (cmd as u32) << 24; - unsafe { - csr::converter_spi::xfer_len_write_write(7); - 
csr::converter_spi::xfer_len_read_write(25); - csr::converter_spi::data_write_write(val << (32-31)); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} - csr::converter_spi::data_read_read() & 0xffffff - } - } - - pub fn init() -> Result<(), &'static str> { - spi_setup(); - let id = read(0x00); - if id != 0xa7975 { - error!("invalid HMC830 ID: 0x{:08x}", id); - return Err("invalid HMC830 identification"); - } else { - info!("HMC830 found"); - } - info!("HMC830 configuration..."); - for &(addr, data) in HMC830_WRITES.iter() { - write(addr, data); - } - - let t = clock::get_ms(); - info!("waiting for lock..."); - while read(0x12) & 0x02 == 0 { - if clock::get_ms() > t + 2000 { - return Err("HMC830 lock timeout"); - } - } - - Ok(()) - } -} - -mod hmc7043 { - use csr; - - include!(concat!(env!("OUT_DIR"), "/hmc7043_writes.rs")); - - fn spi_setup() { - unsafe { - csr::converter_spi::offline_write(1); - csr::converter_spi::cs_polarity_write(0b0001); - csr::converter_spi::clk_polarity_write(0); - csr::converter_spi::clk_phase_write(0); - csr::converter_spi::lsb_first_write(0); - csr::converter_spi::half_duplex_write(1); - csr::converter_spi::clk_div_write_write(8); - csr::converter_spi::clk_div_read_write(8); - csr::converter_spi::cs_write(1 << csr::CONFIG_CONVERTER_SPI_HMC7043_CS); - csr::converter_spi::offline_write(0); - } - } - - fn write(addr: u16, data: u8) { - let cmd = (0 << 15) | addr; - let val = ((cmd as u32) << 8) | data as u32; - unsafe { - csr::converter_spi::xfer_len_write_write(24); - csr::converter_spi::xfer_len_read_write(0); - csr::converter_spi::data_write_write(val << (32-24)); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} - } - } - - fn read(addr: u16) -> u8 { - let cmd = (1 << 15) | addr; - let val = (cmd as u32) << 8; - unsafe { - csr::converter_spi::xfer_len_write_write(16); - csr::converter_spi::xfer_len_read_write(8); - csr::converter_spi::data_write_write(val << (32-24)); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} - csr::converter_spi::data_read_read() as u8 - } - } - - pub fn init() -> Result<(), &'static str> { - spi_setup(); - let id = (read(0x78) as u32) << 16 | (read(0x79) as u32) << 8 | read(0x7a) as u32; - if id != 0xf17904 { - error!("invalid HMC7043 ID: 0x{:08x}", id); - return Err("invalid HMC7043 identification"); - } else { - info!("HMC7043 found"); - } - info!("HMC7043 configuration..."); - for &(addr, data) in HMC7043_WRITES.iter() { - write(addr, data); - } - Ok(()) - } -} - -pub fn init() -> Result<(), &'static str> { - clock_mux::init(); - hmc830::init()?; - hmc7043::init() -} diff --git a/artiq/firmware/libboard/i2c.rs b/artiq/firmware/libboard/i2c.rs deleted file mode 100644 index 6f3880645..000000000 --- a/artiq/firmware/libboard/i2c.rs +++ /dev/null @@ -1,190 +0,0 @@ -#[cfg(has_i2c)] -use csr; - -#[cfg(has_i2c)] -mod io { - use csr; - use clock; - - pub fn half_period() { clock::spin_us(100) } - fn sda_bit(busno: u8) -> u8 { 1 << (2 * busno + 1) } - fn scl_bit(busno: u8) -> u8 { 1 << (2 * busno) } - - pub fn sda_i(busno: u8) -> bool { - unsafe { - csr::i2c::in_read() & sda_bit(busno) != 0 - } - } - - pub fn sda_oe(busno: u8, oe: bool) { - unsafe { - let reg = csr::i2c::oe_read(); - let reg = if oe { reg | sda_bit(busno) } else { reg & !sda_bit(busno) }; - csr::i2c::oe_write(reg) - } - } - - pub fn sda_o(busno: u8, o: bool) { - unsafe { - let reg = csr::i2c::out_read(); - let reg = if o { 
reg | sda_bit(busno) } else { reg & !sda_bit(busno) }; - csr::i2c::out_write(reg) - } - } - - pub fn scl_oe(busno: u8, oe: bool) { - unsafe { - let reg = csr::i2c::oe_read(); - let reg = if oe { reg | scl_bit(busno) } else { reg & !scl_bit(busno) }; - csr::i2c::oe_write(reg) - } - } - - pub fn scl_o(busno: u8, o: bool) { - unsafe { - let reg = csr::i2c::out_read(); - let reg = if o { reg | scl_bit(busno) } else { reg & !scl_bit(busno) }; - csr::i2c::out_write(reg) - } - } -} - -#[cfg(has_i2c)] -pub fn init() { - for busno in 0..csr::CONFIG_I2C_BUS_COUNT { - let busno = busno as u8; - // Set SCL as output, and high level - io::scl_o(busno, true); - io::scl_oe(busno, true); - // Prepare a zero level on SDA so that sda_oe pulls it down - io::sda_o(busno, false); - // Release SDA - io::sda_oe(busno, false); - - // Check the I2C bus is ready - io::half_period(); - io::half_period(); - if !io::sda_i(busno) { - error!("SDA is stuck low on bus #{}", busno) - } - } -} - -#[cfg(has_i2c)] -pub fn start(busno: u8) -> Result<(), ()> { - if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { - return Err(()) - } - // Set SCL high then SDA low - io::scl_o(busno, true); - io::half_period(); - io::sda_oe(busno, true); - io::half_period(); - Ok(()) -} - -#[cfg(has_i2c)] -pub fn restart(busno: u8) -> Result<(), ()> { - if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { - return Err(()) - } - // Set SCL low then SDA high */ - io::scl_o(busno, false); - io::half_period(); - io::sda_oe(busno, false); - io::half_period(); - // Do a regular start - start(busno)?; - Ok(()) -} - -#[cfg(has_i2c)] -pub fn stop(busno: u8) -> Result<(), ()> { - if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { - return Err(()) - } - // First, make sure SCL is low, so that the target releases the SDA line - io::scl_o(busno, false); - io::half_period(); - // Set SCL high then SDA high - io::sda_oe(busno, true); - io::scl_o(busno, true); - io::half_period(); - io::sda_oe(busno, false); - io::half_period(); - Ok(()) -} - -#[cfg(has_i2c)] -pub fn write(busno: u8, data: u8) -> Result { - if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { - return Err(()) - } - // MSB first - for bit in (0..8).rev() { - // Set SCL low and set our bit on SDA - io::scl_o(busno, false); - io::sda_oe(busno, data & (1 << bit) == 0); - io::half_period(); - // Set SCL high ; data is shifted on the rising edge of SCL - io::scl_o(busno, true); - io::half_period(); - } - // Check ack - // Set SCL low, then release SDA so that the I2C target can respond - io::scl_o(busno, false); - io::half_period(); - io::sda_oe(busno, false); - // Set SCL high and check for ack - io::scl_o(busno, true); - io::half_period(); - // returns true if acked (I2C target pulled SDA low) - Ok(!io::sda_i(busno)) -} - -#[cfg(has_i2c)] -pub fn read(busno: u8, ack: bool) -> Result { - if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { - return Err(()) - } - // Set SCL low first, otherwise setting SDA as input may cause a transition - // on SDA with SCL high which will be interpreted as START/STOP condition. 
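    // (On I2C, a falling edge on SDA while SCL is high is a START condition and
    // a rising edge on SDA while SCL is high is a STOP condition, so SDA may
    // only change state while SCL is low.)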
- io::scl_o(busno, false); - io::half_period(); // make sure SCL has settled low - io::sda_oe(busno, false); - - let mut data: u8 = 0; - - // MSB first - for bit in (0..8).rev() { - io::scl_o(busno, false); - io::half_period(); - // Set SCL high and shift data - io::scl_o(busno, true); - io::half_period(); - if io::sda_i(busno) { data |= 1 << bit } - } - // Send ack - // Set SCL low and pull SDA low when acking - io::scl_o(busno, false); - if ack { io::sda_oe(busno, true) } - io::half_period(); - // then set SCL high - io::scl_o(busno, true); - io::half_period(); - - Ok(data) -} - -#[cfg(not(has_i2c))] -pub fn init() {} -#[cfg(not(has_i2c))] -pub fn start(_busno: u8) -> Result<(), ()> { Err(()) } -#[cfg(not(has_i2c))] -pub fn restart(_busno: u8) -> Result<(), ()> { Err(()) } -#[cfg(not(has_i2c))] -pub fn stop(_busno: u8) -> Result<(), ()> { Err(()) } -#[cfg(not(has_i2c))] -pub fn write(_busno: u8, _data: u8) -> Result { Err(()) } -#[cfg(not(has_i2c))] -pub fn read(_busno: u8, _ack: bool) -> Result { Err(()) } diff --git a/artiq/firmware/libboard/irq.rs b/artiq/firmware/libboard/irq.rs deleted file mode 100644 index ad9c917b9..000000000 --- a/artiq/firmware/libboard/irq.rs +++ /dev/null @@ -1,25 +0,0 @@ -use super::spr::*; - -pub fn get_ie() -> bool { - unsafe { mfspr(SPR_SR) & SPR_SR_IEE != 0 } -} - -pub fn set_ie(ie: bool) { - if ie { - unsafe { mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_IEE) } - } else { - unsafe { mtspr(SPR_SR, mfspr(SPR_SR) & !SPR_SR_IEE) } - } -} - -pub fn get_mask() -> u32 { - unsafe { mfspr(SPR_PICMR) } -} - -pub fn set_mask(mask: u32) { - unsafe { mtspr(SPR_PICMR, mask) } -} - -pub fn pending() -> u32 { - unsafe { mfspr(SPR_PICSR) } -} diff --git a/artiq/firmware/libboard/lib.rs b/artiq/firmware/libboard/lib.rs deleted file mode 100644 index d25a54d80..000000000 --- a/artiq/firmware/libboard/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![feature(asm, lang_items)] -#![no_std] - -#[macro_use] -extern crate log; -#[macro_use] -extern crate bitflags; - -use core::{cmp, ptr, str}; - -include!(concat!(env!("BUILDINC_DIRECTORY"), "/generated/mem.rs")); -include!(concat!(env!("BUILDINC_DIRECTORY"), "/generated/csr.rs")); -pub mod spr; -pub mod irq; -pub mod cache; -pub mod pcr; -pub mod clock; -pub mod uart; -#[cfg(feature = "uart_console")] -pub mod uart_console; - -#[cfg(has_spiflash)] -pub mod spiflash; - -pub mod i2c; -pub mod spi; - -#[cfg(has_si5324)] -pub mod si5324; - -#[cfg(has_serwb_phy_amc)] -pub mod serwb; -#[cfg(has_hmc830_7043)] -pub mod hmc830_7043; -#[cfg(has_ad9154)] -#[allow(dead_code)] -mod ad9154_reg; -#[cfg(has_ad9154)] -pub mod ad9154; - -pub mod boot; - -#[cfg(feature = "uart_console")] -pub use uart_console::Console; - -pub fn ident(buf: &mut [u8]) -> &str { - unsafe { - let len = ptr::read_volatile(csr::IDENTIFIER_MEM_BASE); - let len = cmp::min(len as usize, buf.len()); - for i in 0..len { - buf[i] = ptr::read_volatile(csr::IDENTIFIER_MEM_BASE.offset(1 + i as isize)) as u8 - } - str::from_utf8_unchecked(&buf[..len]) - } -} diff --git a/artiq/firmware/libboard/serwb.rs b/artiq/firmware/libboard/serwb.rs deleted file mode 100644 index 0d76ff5f7..000000000 --- a/artiq/firmware/libboard/serwb.rs +++ /dev/null @@ -1,46 +0,0 @@ -use csr; - -pub fn wait_init() { - info!("waiting for AMC/RTM serwb bridge to be ready..."); - unsafe { - csr::serwb_phy_amc::control_reset_write(1); - while csr::serwb_phy_amc::control_ready_read() == 0 { - if csr::serwb_phy_amc::control_error_read() == 1 { - warn!("AMC/RTM serwb bridge initialization failed, retrying."); - 
csr::serwb_phy_amc::control_reset_write(1); - } - } - } - info!("done."); - - // Try reading the identifier register on the other side of the bridge. - let rtm_identifier = unsafe { - csr::rtm_identifier::identifier_read() - }; - if rtm_identifier != 0x5352544d { - error!("incorrect RTM identifier: 0x{:08x}", rtm_identifier); - // proceed anyway - } - - unsafe { - debug!("AMC serwb settings:"); - debug!(" delay_min_found: {}", csr::serwb_phy_amc::control_delay_min_found_read()); - debug!(" delay_min: {}", csr::serwb_phy_amc::control_delay_min_read()); - debug!(" delay_max_found: {}", csr::serwb_phy_amc::control_delay_max_found_read()); - debug!(" delay_max: {}", csr::serwb_phy_amc::control_delay_max_read()); - debug!(" delay: {}", csr::serwb_phy_amc::control_delay_read()); - debug!(" bitslip: {}", csr::serwb_phy_amc::control_bitslip_read()); - debug!(" ready: {}", csr::serwb_phy_amc::control_ready_read()); - debug!(" error: {}", csr::serwb_phy_amc::control_error_read()); - - debug!("RTM serwb settings:"); - debug!(" delay_min_found: {}", csr::serwb_phy_rtm::control_delay_min_found_read()); - debug!(" delay_min: {}", csr::serwb_phy_rtm::control_delay_min_read()); - debug!(" delay_max_found: {}", csr::serwb_phy_rtm::control_delay_max_found_read()); - debug!(" delay_max: {}", csr::serwb_phy_rtm::control_delay_max_read()); - debug!(" delay: {}", csr::serwb_phy_rtm::control_delay_read()); - debug!(" bitslip: {}", csr::serwb_phy_rtm::control_bitslip_read()); - debug!(" ready: {}", csr::serwb_phy_rtm::control_ready_read()); - debug!(" error: {}", csr::serwb_phy_rtm::control_error_read()); - } -} diff --git a/artiq/firmware/libboard/si5324.rs b/artiq/firmware/libboard/si5324.rs deleted file mode 100644 index 5cea4f814..000000000 --- a/artiq/firmware/libboard/si5324.rs +++ /dev/null @@ -1,216 +0,0 @@ -use core::result; -use i2c; -use clock; -use csr; - -type Result = result::Result; - -const BUSNO: u8 = 0; -const ADDRESS: u8 = 0x68; - -#[cfg(any(soc_platform = "sayma_amc", soc_platform = "kc705"))] -fn pca9548_select(address: u8, channel: u8) -> Result<()> { - i2c::start(BUSNO).unwrap(); - if !i2c::write(BUSNO, (address << 1)).unwrap() { - return Err("PCA9548 failed to ack write address") - } - if !i2c::write(BUSNO, 1 << channel).unwrap() { - return Err("PCA9548 failed to ack control word") - } - i2c::stop(BUSNO).unwrap(); - Ok(()) -} - -fn reset(en: bool) { - unsafe { - csr::si5324_rst_n::out_write(if en { 0 } else { 1 }) - } -} - -// NOTE: the logical parameters DO NOT MAP to physical values written -// into registers. They have to be mapped; see the datasheet. -// DSPLLsim reports the logical parameters in the design summary, not -// the physical register values. 
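For illustration, a short sketch of how the mapping below behaves. The function name and all numbers here are invented and do not correspond to a validated Si5324 configuration; the point is only that the *_HS dividers 4..11 are encoded as 0b000..0b111 and the LS/N3x dividers are written minus one, as map_frequency_settings() implements:

```
fn mapping_example() -> Result<(), &'static str> {
    // Invented logical settings, chosen only to satisfy the range checks
    // performed by map_frequency_settings().
    let logical = FrequencySettings {
        n1_hs: 6,     // encoded as 6 - 4 = 0b010
        nc1_ls: 8,    // written as 8 - 1 = 7
        n2_hs: 10,    // encoded as 10 - 4 = 0b110
        n2_ls: 300,   // written as 299 (must be even before mapping)
        n31: 75,      // written as 74
        n32: 75,      // written as 74
        bwsel: 10,    // passed through unchanged
    };
    let r = map_frequency_settings(&logical)?;
    assert_eq!((r.n1_hs, r.nc1_ls, r.n2_hs, r.n2_ls), (0b010, 7, 0b110, 299));
    Ok(())
}
```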
-pub struct FrequencySettings { - pub n1_hs: u8, - pub nc1_ls: u32, - pub n2_hs: u8, - pub n2_ls: u32, - pub n31: u32, - pub n32: u32, - pub bwsel: u8 -} - -fn map_frequency_settings(settings: &FrequencySettings) -> Result { - if settings.nc1_ls != 0 && (settings.nc1_ls % 2) == 1 { - return Err("NC1_LS must be 0 or even") - } - if settings.nc1_ls > (1 << 20) { - return Err("NC1_LS is too high") - } - if (settings.n2_ls % 2) == 1 { - return Err("N2_LS must be even") - } - if settings.n2_ls > (1 << 20) { - return Err("N2_LS is too high") - } - if settings.n31 > (1 << 19) { - return Err("N31 is too high") - } - if settings.n32 > (1 << 19) { - return Err("N32 is too high") - } - let r = FrequencySettings { - n1_hs: match settings.n1_hs { - 4 => 0b000, - 5 => 0b001, - 6 => 0b010, - 7 => 0b011, - 8 => 0b100, - 9 => 0b101, - 10 => 0b110, - 11 => 0b111, - _ => return Err("N1_HS has an invalid value") - }, - nc1_ls: settings.nc1_ls - 1, - n2_hs: match settings.n2_hs { - 4 => 0b000, - 5 => 0b001, - 6 => 0b010, - 7 => 0b011, - 8 => 0b100, - 9 => 0b101, - 10 => 0b110, - 11 => 0b111, - _ => return Err("N2_HS has an invalid value") - }, - n2_ls: settings.n2_ls - 1, - n31: settings.n31 - 1, - n32: settings.n32 - 1, - bwsel: settings.bwsel - }; - Ok(r) -} - -fn write(reg: u8, val: u8) -> Result<()> { - i2c::start(BUSNO).unwrap(); - if !i2c::write(BUSNO, (ADDRESS << 1)).unwrap() { - return Err("Si5324 failed to ack write address") - } - if !i2c::write(BUSNO, reg).unwrap() { - return Err("Si5324 failed to ack register") - } - if !i2c::write(BUSNO, val).unwrap() { - return Err("Si5324 failed to ack value") - } - i2c::stop(BUSNO).unwrap(); - Ok(()) -} - -fn read(reg: u8) -> Result { - i2c::start(BUSNO).unwrap(); - if !i2c::write(BUSNO, (ADDRESS << 1)).unwrap() { - return Err("Si5324 failed to ack write address") - } - if !i2c::write(BUSNO, reg).unwrap() { - return Err("Si5324 failed to ack register") - } - i2c::restart(BUSNO).unwrap(); - if !i2c::write(BUSNO, (ADDRESS << 1) | 1).unwrap() { - return Err("Si5324 failed to ack read address") - } - let val = i2c::read(BUSNO, false).unwrap(); - i2c::stop(BUSNO).unwrap(); - Ok(val) -} - -fn ident() -> Result { - Ok(((read(134)? as u16) << 8) | (read(135)? as u16)) -} - -fn has_xtal() -> Result { - Ok((read(129)? & 0x01) == 0) // LOSX_INT=0 -} - -fn has_clkin2() -> Result { - Ok((read(129)? & 0x04) == 0) // LOS2_INT=0 -} - -fn locked() -> Result { - Ok((read(130)? & 0x01) == 0) // LOL_INT=0 -} - -fn monitor_lock() -> Result<()> { - let t = clock::get_ms(); - while !locked()? { - // Yes, lock can be really slow. - if clock::get_ms() > t + 20000 { - return Err("Si5324 lock timeout"); - } - } - info!("Si5324 is locked"); - Ok(()) -} - -pub fn setup(settings: &FrequencySettings) -> Result<()> { - let s = map_frequency_settings(settings)?; - - reset(true); - clock::spin_us(1_000); - reset(false); - clock::spin_us(10_000); - - #[cfg(soc_platform = "kc705")] - pca9548_select(0x74, 7)?; - #[cfg(soc_platform = "sayma_amc")] - pca9548_select(0x70, 4)?; - - if ident()? != 0x0182 { - return Err("Si5324 does not have expected product number"); - } - - write(0, read(0)? | 0x40)?; // FREE_RUN=1 - write(2, (read(2)? & 0x0f) | (s.bwsel << 4))?; - write(21, read(21)? & 0xfe)?; // CKSEL_PIN=0 - write(3, (read(3)? & 0x3f) | (0b01 << 6) | 0x10)?; // CKSEL_REG=b01 SQ_ICAL=1 - write(4, (read(4)? & 0x3f) | (0b00 << 6))?; // AUTOSEL_REG=b00 - write(6, (read(6)? 
& 0xc0) | 0b111111)?; // SFOUT2_REG=b111 SFOUT1_REG=b111 - write(25, (s.n1_hs << 5 ) as u8)?; - write(31, (s.nc1_ls >> 16) as u8)?; - write(32, (s.nc1_ls >> 8 ) as u8)?; - write(33, (s.nc1_ls) as u8)?; - write(34, (s.nc1_ls >> 16) as u8)?; // write to NC2_LS as well - write(35, (s.nc1_ls >> 8 ) as u8)?; - write(36, (s.nc1_ls) as u8)?; - write(40, (s.n2_hs << 5 ) as u8 | (s.n2_ls >> 16) as u8)?; - write(41, (s.n2_ls >> 8 ) as u8)?; - write(42, (s.n2_ls) as u8)?; - write(43, (s.n31 >> 16) as u8)?; - write(44, (s.n31 >> 8) as u8)?; - write(45, (s.n31) as u8)?; - write(46, (s.n32 >> 16) as u8)?; - write(47, (s.n32 >> 8) as u8)?; - write(48, (s.n32) as u8)?; - write(137, read(137)? | 0x01)?; // FASTLOCK=1 - write(136, read(136)? | 0x40)?; // ICAL=1 - - if !has_xtal()? { - return Err("Si5324 misses XA/XB signal"); - } - if !has_clkin2()? { - return Err("Si5324 misses CLKIN2 signal"); - } - monitor_lock()?; - - Ok(()) -} - -pub fn select_ext_input(external: bool) -> Result<()> { - if external { - write(3, (read(3)? & 0x3f) | (0b00 << 6))?; // CKSEL_REG=b00 - } else { - write(3, (read(3)? & 0x3f) | (0b01 << 6))?; // CKSEL_REG=b01 - } - monitor_lock()?; - Ok(()) -} diff --git a/artiq/firmware/libboard/spi.rs b/artiq/firmware/libboard/spi.rs deleted file mode 100644 index 3c0611311..000000000 --- a/artiq/firmware/libboard/spi.rs +++ /dev/null @@ -1,66 +0,0 @@ -#[cfg(has_converter_spi)] -use csr; - -#[cfg(has_converter_spi)] -pub fn set_config(busno: u8, flags: u8, write_div: u8, read_div: u8) -> Result<(), ()> { - if busno != 0 { - return Err(()) - } - unsafe { - csr::converter_spi::offline_write(1); - csr::converter_spi::cs_polarity_write(flags >> 3 & 1); - csr::converter_spi::clk_polarity_write(flags >> 4 & 1); - csr::converter_spi::clk_phase_write(flags >> 5 & 1); - csr::converter_spi::lsb_first_write(flags >> 6 & 1); - csr::converter_spi::half_duplex_write(flags >> 7 & 1); - csr::converter_spi::clk_div_write_write(write_div); - csr::converter_spi::clk_div_read_write(read_div); - csr::converter_spi::offline_write(0); - } - Ok(()) -} - -#[cfg(has_converter_spi)] -pub fn set_xfer(busno: u8, chip_select: u16, write_length: u8, read_length: u8) -> Result<(), ()> { - if busno != 0 { - return Err(()) - } - unsafe { - csr::converter_spi::cs_write(chip_select as _); - csr::converter_spi::xfer_len_write_write(write_length); - csr::converter_spi::xfer_len_read_write(read_length); - } - Ok(()) -} - -#[cfg(has_converter_spi)] -pub fn write(busno: u8, data: u32) -> Result<(), ()> { - if busno != 0 { - return Err(()) - } - unsafe { - csr::converter_spi::data_write_write(data); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} - } - Ok(()) -} - -#[cfg(has_converter_spi)] -pub fn read(busno: u8) -> Result { - if busno != 0 { - return Err(()) - } - Ok(unsafe { - csr::converter_spi::data_read_read() - }) -} - -#[cfg(not(has_converter_spi))] -pub fn set_config(_busno: u8, _flags: u8, _write_div: u8, _read_div: u8) -> Result<(), ()> { Err(()) } -#[cfg(not(has_converter_spi))] -pub fn set_xfer(_busno: u8,_chip_select: u16, _write_length: u8, _read_length: u8) -> Result<(), ()> { Err(()) } -#[cfg(not(has_converter_spi))] -pub fn write(_busno: u8,_data: u32) -> Result<(), ()> { Err(()) } -#[cfg(not(has_converter_spi))] -pub fn read(_busno: u8,) -> Result { Err(()) } diff --git a/artiq/firmware/libboard/spiflash.rs b/artiq/firmware/libboard/spiflash.rs deleted file mode 100644 index 26c5cbbc6..000000000 --- a/artiq/firmware/libboard/spiflash.rs +++ /dev/null @@ -1,123 
+0,0 @@ -#![allow(dead_code)] - -use core::cmp; -use csr; - -const CMD_PP: u8 = 0x02; -const CMD_WRDI: u8 = 0x04; -const CMD_RDSR: u8 = 0x05; -const CMD_WREN: u8 = 0x06; -const CMD_SE: u8 = 0xd8; - -const PIN_CLK: u8 = 1 << 1; -const PIN_CS_N: u8 = 1 << 2; -const PIN_DQ_I: u8 = 1 << 3; - -const SR_WIP: u8 = 1; - -fn write_byte(mut byte: u8) { - unsafe { - csr::spiflash::bitbang_write(0); - for _ in 0..8 { - csr::spiflash::bitbang_write((byte & 0x80) >> 7); - csr::spiflash::bitbang_write((byte & 0x80) >> 7 | PIN_CLK); - byte <<= 1; - } - csr::spiflash::bitbang_write(0); - } -} - -fn write_addr(mut addr: usize) { - unsafe { - csr::spiflash::bitbang_write(0); - for _ in 0..24 { - csr::spiflash::bitbang_write(((addr & 0x800000) >> 23) as u8); - csr::spiflash::bitbang_write(((addr & 0x800000) >> 23) as u8 | PIN_CLK); - addr <<= 1; - } - csr::spiflash::bitbang_write(0); - } -} - -fn wait_until_ready() { - unsafe { - loop { - let mut sr = 0; - write_byte(CMD_RDSR); - for _ in 0..8 { - sr <<= 1; - csr::spiflash::bitbang_write(PIN_DQ_I | PIN_CLK); - sr |= csr::spiflash::miso_read(); - csr::spiflash::bitbang_write(PIN_DQ_I); - } - csr::spiflash::bitbang_write(0); - csr::spiflash::bitbang_write(PIN_CS_N); - if sr & SR_WIP == 0 { - return - } - } - } -} - -pub fn erase_sector(addr: usize) { - unsafe { - let sector_addr = addr & !(csr::CONFIG_SPIFLASH_SECTOR_SIZE as usize - 1); - - csr::spiflash::bitbang_en_write(1); - - wait_until_ready(); - - write_byte(CMD_WREN); - csr::spiflash::bitbang_write(PIN_CS_N); - - write_byte(CMD_SE); - write_addr(sector_addr); - csr::spiflash::bitbang_write(PIN_CS_N); - - wait_until_ready(); - - csr::spiflash::bitbang_en_write(0); - } -} - -fn write_page(addr: usize, data: &[u8]) { - unsafe { - csr::spiflash::bitbang_en_write(1); - - wait_until_ready(); - - write_byte(CMD_WREN); - csr::spiflash::bitbang_write(PIN_CS_N); - write_byte(CMD_PP); - write_addr(addr); - for &byte in data { - write_byte(byte) - } - - csr::spiflash::bitbang_write(PIN_CS_N); - csr::spiflash::bitbang_write(0); - - wait_until_ready(); - - csr::spiflash::bitbang_en_write(0); - } -} - -const PAGE_SIZE: usize = csr::CONFIG_SPIFLASH_PAGE_SIZE as usize; -const PAGE_MASK: usize = PAGE_SIZE - 1; - -pub fn write(mut addr: usize, mut data: &[u8]) { - if addr & PAGE_MASK != 0 { - let size = cmp::min((PAGE_SIZE - (addr & PAGE_MASK)) as usize, data.len()); - write_page(addr, &data[..size]); - addr += size; - data = &data[size..]; - } - - while data.len() > 0 { - let size = cmp::min(PAGE_SIZE as usize, data.len()); - write_page(addr, &data[..size]); - addr += size; - data = &data[size..]; - } -} diff --git a/artiq/firmware/libboard_artiq/Cargo.toml b/artiq/firmware/libboard_artiq/Cargo.toml new file mode 100644 index 000000000..0f13ca2de --- /dev/null +++ b/artiq/firmware/libboard_artiq/Cargo.toml @@ -0,0 +1,26 @@ +[package] +authors = ["M-Labs"] +name = "board_artiq" +version = "0.0.0" +build = "build.rs" + +[lib] +name = "board_artiq" +path = "lib.rs" + +[build-dependencies] +build_misoc = { path = "../libbuild_misoc" } + +[dependencies] +failure = { version = "0.1", default-features = false } +failure_derive = { version = "0.1", default-features = false } +bitflags = "1.0" +byteorder = { version = "1.0", default-features = false } +crc = { version = "1.7", default-features = false } +log = { version = "0.4", default-features = false } +io = { path = "../libio", features = ["byteorder"] } +board_misoc = { path = "../libboard_misoc" } +proto_artiq = { path = "../libproto_artiq" } + +[features] +uart_console = [] 
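The rewritten converter drivers below (ad9154.rs, hmc830_7043.rs) all share one access pattern for the new converter_spi core. The following is a sketch only, not code from this commit: the helper name is invented, only the full-duplex 24-bit case is shown, and the minus-one/minus-two register encodings and register widths are assumptions inferred from the `length_write(24 - 1)` and `div_write(16 - 2)` idioms used in the drivers.

```
use board_misoc::csr;

// Sketch of the common converter_spi access pattern (full-duplex, 24-bit word).
fn converter_spi_xfer24(cs_mask: u32, word: u32) -> u32 {
    unsafe {
        while csr::converter_spi::idle_read() == 0 {}    // previous transfer finished
        csr::converter_spi::offline_write(0);
        csr::converter_spi::end_write(1);                // deassert CS after the transfer
        csr::converter_spi::cs_polarity_write(0b0001);
        csr::converter_spi::length_write(24 - 1);        // transfer length, assumed minus-one encoded
        csr::converter_spi::div_write(16 - 2);           // clock divider, assumed minus-two encoded
        csr::converter_spi::cs_write(cs_mask as _);      // e.g. 1 << CONFIG_CONVERTER_SPI_..._CS
        while csr::converter_spi::writable_read() == 0 {}
        csr::converter_spi::data_write(word << 8);       // MSB-aligned, as in the write() helpers below
        while csr::converter_spi::writable_read() == 0 {}
        csr::converter_spi::data_read() as u32           // returned MISO bits, if any
    }
}
```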
diff --git a/artiq/firmware/libboard/ad9154.rs b/artiq/firmware/libboard_artiq/ad9154.rs similarity index 60% rename from artiq/firmware/libboard/ad9154.rs rename to artiq/firmware/libboard_artiq/ad9154.rs index 9da8ec748..de32ff2e2 100644 --- a/artiq/firmware/libboard/ad9154.rs +++ b/artiq/firmware/libboard_artiq/ad9154.rs @@ -1,80 +1,42 @@ -use csr; -use clock; +use board_misoc::{csr, clock}; use ad9154_reg; fn spi_setup(dacno: u8) { unsafe { - csr::converter_spi::offline_write(1); + while csr::converter_spi::idle_read() == 0 {} + csr::converter_spi::offline_write(0); + csr::converter_spi::end_write(1); csr::converter_spi::cs_polarity_write(0b0001); csr::converter_spi::clk_polarity_write(0); csr::converter_spi::clk_phase_write(0); csr::converter_spi::lsb_first_write(0); csr::converter_spi::half_duplex_write(0); - csr::converter_spi::clk_div_write_write(16); - csr::converter_spi::clk_div_read_write(16); - csr::converter_spi::xfer_len_write_write(24); - csr::converter_spi::xfer_len_read_write(0); + csr::converter_spi::length_write(24 - 1); + csr::converter_spi::div_write(16 - 2); csr::converter_spi::cs_write(1 << (csr::CONFIG_CONVERTER_SPI_FIRST_AD9154_CS + dacno as u32)); - csr::converter_spi::offline_write(0); } } fn write(addr: u16, data: u8) { unsafe { - csr::converter_spi::data_write_write( + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_write( ((addr as u32) << 16) | ((data as u32) << 8)); - while csr::converter_spi::pending_read() != 0 {} - while csr::converter_spi::active_read() != 0 {} } } fn read(addr: u16) -> u8 { unsafe { write((1 << 15) | addr, 0); - csr::converter_spi::data_read_read() as u8 - } -} - -fn jesd_unreset() { - unsafe { - csr::ad9154_crg::jreset_write(0) - } -} - -fn jesd_enable(dacno: u8, en: bool) { - unsafe { - (csr::AD9154[dacno as usize].jesd_control_enable_write)(if en {1} else {0}) - } -} - -fn jesd_ready(dacno: u8) -> bool { - unsafe { - (csr::AD9154[dacno as usize].jesd_control_ready_read)() != 0 - } -} - -fn jesd_prbs(dacno: u8, en: bool) { - unsafe { - (csr::AD9154[dacno as usize].jesd_control_prbs_config_write)(if en {1} else {0}) - } -} - -fn jesd_stpl(dacno: u8, en: bool) { - unsafe { - (csr::AD9154[dacno as usize].jesd_control_stpl_enable_write)(if en {1} else {0}) - } -} - -fn jesd_jsync(dacno: u8) -> bool { - unsafe { - (csr::AD9154[dacno as usize].jesd_control_jsync_read)() != 0 + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_read() as u8 } } // ad9154 mode 1 -// linerate 6Gbps -// deviceclock_fpga=150MHz -// deviceclock_dac=600MHz +// linerate 5Gbps or 6Gbps +// deviceclock_fpga 125MHz or 150MHz +// deviceclock_dac 500MHz or 600MHz struct JESDSettings { did: u8, @@ -132,7 +94,8 @@ const JESD_SETTINGS: JESDSettings = JESDSettings { jesdv: 1 }; -fn dac_setup(linerate: u64) -> Result<(), &'static str> { +pub fn reset_and_detect(dacno: u8) -> Result<(), &'static str> { + spi_setup(dacno); // reset write(ad9154_reg::SPI_INTFCONFA, 1*ad9154_reg::SOFTRESET_M | 1*ad9154_reg::SOFTRESET | @@ -147,19 +110,25 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { 1*ad9154_reg::SDOACTIVE_M | 1*ad9154_reg::SDOACTIVE); clock::spin_us(100); if (read(ad9154_reg::PRODIDH) as u16) << 8 | (read(ad9154_reg::PRODIDL) as u16) != 0x9154 { - return Err("AD9154 not found"); + return Err("invalid AD9154 identification"); } else { - info!("AD9154 found"); + info!("AD9154-{} found", dacno); } + Ok(()) +} - info!("AD9154 configuration..."); +pub fn setup(dacno: u8, linerate: u64) -> Result<(), &'static str> 
{ + spi_setup(dacno); + info!("AD9154-{} initializing...", dacno); write(ad9154_reg::PWRCNTRL0, 0*ad9154_reg::PD_DAC0 | 0*ad9154_reg::PD_DAC1 | 0*ad9154_reg::PD_DAC2 | 0*ad9154_reg::PD_DAC3 | 0*ad9154_reg::PD_BG); clock::spin_us(100); write(ad9154_reg::TXENMASK1, 0*ad9154_reg::DACA_MASK | - 0*ad9154_reg::DACB_MASK); // TX not controlled by TXEN pins + 0*ad9154_reg::DACB_MASK); // DAC PD not controlled by TXEN pins + write(ad9154_reg::PWRCNTRL3, 1*ad9154_reg::ENA_SPI_TXEN | + 1*ad9154_reg::SPI_TXEN); write(ad9154_reg::CLKCFG0, 0*ad9154_reg::REF_CLKDIV_EN | 1*ad9154_reg::RF_SYNC_EN | 1*ad9154_reg::DUTY_EN | 0*ad9154_reg::PD_CLK_REC | @@ -177,7 +146,7 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { write(ad9154_reg::SPI_PAGEINDX, 0x3); // A and B dual - write(ad9154_reg::INTERP_MODE, 0); // 1x + write(ad9154_reg::INTERP_MODE, 0x03); // 4x write(ad9154_reg::MIX_MODE, 0); write(ad9154_reg::DATA_FORMAT, 0*ad9154_reg::BINARY_FORMAT); // s16 write(ad9154_reg::DATAPATH_CTRL, @@ -337,7 +306,7 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { 0x5*ad9154_reg::SPI_CP_LEVEL_THRESHOLD_LOW | 0*ad9154_reg::SPI_CP_LEVEL_DET_PD); write(ad9154_reg::VCO_VARACTOR_CTRL_0, - 0xe*ad9154_reg::SPI_VCO_VARACTOR_OFFSET | + 0xe*ad9154_reg::SPI_VCO_VARACTOR_OFFSET | 0x7*ad9154_reg::SPI_VCO_VARACTOR_REF_TCF); write(ad9154_reg::VCO_VARACTOR_CTRL_1, 0x6*ad9154_reg::SPI_VCO_VARACTOR_REF); @@ -349,7 +318,7 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { let t = clock::get_ms(); while read(ad9154_reg::PLL_STATUS) & ad9154_reg::SERDES_PLL_LOCK_RB == 0 { if clock::get_ms() > t + 200 { - return Err("AD9154 SERDES PLL lock timeout"); + return Err("SERDES PLL lock timeout"); } } @@ -362,22 +331,12 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { write(ad9154_reg::LMFC_VAR_0, 0x0a); // receive buffer delay write(ad9154_reg::LMFC_VAR_1, 0x0a); write(ad9154_reg::SYNC_ERRWINDOW, 0); // +- 1/2 DAC clock + // datasheet seems to say ENABLE and ARM should be separate steps, + // so enable now so it can be armed in sync(). 
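    // (Concretely: the SYNC_CONTROL write just below sets SYNCENABLE=1 with
    // SYNCARM=0; the arming write, SYNCENABLE=1 and SYNCARM=1 together with
    // SYNCCLRSTKY, is issued in sync() further down, which then checks
    // SYNC_STATUS for BUSY, LOCK and TRIP.)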
write(ad9154_reg::SYNC_CONTROL, - 0x9*ad9154_reg::SYNCMODE | 0*ad9154_reg::SYNCENABLE | - 0*ad9154_reg::SYNCARM | 1*ad9154_reg::SYNCCLRSTKY | - 1*ad9154_reg::SYNCCLRLAST); - write(ad9154_reg::SYNC_CONTROL, - 0x9*ad9154_reg::SYNCMODE | 1*ad9154_reg::SYNCENABLE | - 0*ad9154_reg::SYNCARM | 1*ad9154_reg::SYNCCLRSTKY | - 1*ad9154_reg::SYNCCLRLAST); - write(ad9154_reg::SYNC_CONTROL, - 0x9*ad9154_reg::SYNCMODE | 1*ad9154_reg::SYNCENABLE | - 1*ad9154_reg::SYNCARM | 0*ad9154_reg::SYNCCLRSTKY | - 0*ad9154_reg::SYNCCLRLAST); - clock::spin_us(1000); // ensure at least one sysref edge - if read(ad9154_reg::SYNC_STATUS) & ad9154_reg::SYNC_LOCK == 0 { - return Err("AD9154 no sync lock"); - } + 0x1*ad9154_reg::SYNCMODE | 1*ad9154_reg::SYNCENABLE | + 0*ad9154_reg::SYNCARM | 0*ad9154_reg::SYNCCLRSTKY); + write(ad9154_reg::XBAR_LN_0_1, 0*ad9154_reg::LOGICAL_LANE0_SRC | 1*ad9154_reg::LOGICAL_LANE1_SRC); write(ad9154_reg::XBAR_LN_2_3, @@ -390,99 +349,201 @@ fn dac_setup(linerate: u64) -> Result<(), &'static str> { write(ad9154_reg::GENERAL_JRX_CTRL_0, 0x1*ad9154_reg::LINK_EN | 0*ad9154_reg::LINK_PAGE | 0*ad9154_reg::LINK_MODE | 0*ad9154_reg::CHECKSUM_MODE); + info!(" ...done"); Ok(()) } -fn dac_monitor() { - write(ad9154_reg::IRQ_STATUS0, 0x00); - write(ad9154_reg::IRQ_STATUS1, 0x00); - write(ad9154_reg::IRQ_STATUS2, 0x00); - write(ad9154_reg::IRQ_STATUS3, 0x00); - - write(ad9154_reg::IRQEN_STATUSMODE0, - ad9154_reg::IRQEN_SMODE_LANEFIFOERR | - ad9154_reg::IRQEN_SMODE_SERPLLLOCK | - ad9154_reg::IRQEN_SMODE_SERPLLLOST | - ad9154_reg::IRQEN_SMODE_DACPLLLOCK | - ad9154_reg::IRQEN_SMODE_DACPLLLOST); - - write(ad9154_reg::IRQEN_STATUSMODE1, - ad9154_reg::IRQEN_SMODE_PRBS0 | - ad9154_reg::IRQEN_SMODE_PRBS1 | - ad9154_reg::IRQEN_SMODE_PRBS2 | - ad9154_reg::IRQEN_SMODE_PRBS3); - - write(ad9154_reg::IRQEN_STATUSMODE2, - ad9154_reg::IRQEN_SMODE_SYNC_TRIP0 | - ad9154_reg::IRQEN_SMODE_SYNC_WLIM0 | - ad9154_reg::IRQEN_SMODE_SYNC_ROTATE0 | - ad9154_reg::IRQEN_SMODE_SYNC_LOCK0 | - ad9154_reg::IRQEN_SMODE_NCO_ALIGN0 | - ad9154_reg::IRQEN_SMODE_BLNKDONE0 | - ad9154_reg::IRQEN_SMODE_PDPERR0); - - write(ad9154_reg::IRQEN_STATUSMODE3, - ad9154_reg::IRQEN_SMODE_SYNC_TRIP1 | - ad9154_reg::IRQEN_SMODE_SYNC_WLIM1 | - ad9154_reg::IRQEN_SMODE_SYNC_ROTATE1 | - ad9154_reg::IRQEN_SMODE_SYNC_LOCK1 | - ad9154_reg::IRQEN_SMODE_NCO_ALIGN1 | - ad9154_reg::IRQEN_SMODE_BLNKDONE1 | - ad9154_reg::IRQEN_SMODE_PDPERR1); - - write(ad9154_reg::IRQ_STATUS0, 0x00); - write(ad9154_reg::IRQ_STATUS1, 0x00); - write(ad9154_reg::IRQ_STATUS2, 0x00); - write(ad9154_reg::IRQ_STATUS3, 0x00); +pub fn status(dacno: u8) { + spi_setup(dacno); + info!("Printing status of AD9154-{}", dacno); + info!("PRODID: 0x{:04x}", (read(ad9154_reg::PRODIDH) as u16) << 8 | (read(ad9154_reg::PRODIDL) as u16)); + info!("SERDES_PLL_LOCK: {}", + (read(ad9154_reg::PLL_STATUS) & ad9154_reg::SERDES_PLL_LOCK_RB)); + info!(""); + info!("CODEGRPSYNC: 0x{:02x}", read(ad9154_reg::CODEGRPSYNCFLG)); + info!("FRAMESYNC: 0x{:02x}", read(ad9154_reg::FRAMESYNCFLG)); + info!("GOODCHECKSUM: 0x{:02x}", read(ad9154_reg::GOODCHKSUMFLG)); + info!("INITLANESYNC: 0x{:02x}", read(ad9154_reg::INITLANESYNCFLG)); + info!(""); + info!("DID_REG: 0x{:02x}", read(ad9154_reg::DID_REG)); + info!("BID_REG: 0x{:02x}", read(ad9154_reg::BID_REG)); + info!("SCR_L_REG: 0x{:02x}", read(ad9154_reg::SCR_L_REG)); + info!("F_REG: 0x{:02x}", read(ad9154_reg::F_REG)); + info!("K_REG: 0x{:02x}", read(ad9154_reg::K_REG)); + info!("M_REG: 0x{:02x}", read(ad9154_reg::M_REG)); + info!("CS_N_REG: 0x{:02x}", 
read(ad9154_reg::CS_N_REG)); + info!("NP_REG: 0x{:02x}", read(ad9154_reg::NP_REG)); + info!("S_REG: 0x{:02x}", read(ad9154_reg::S_REG)); + info!("HD_CF_REG: 0x{:02x}", read(ad9154_reg::HD_CF_REG)); + info!("RES1_REG: 0x{:02x}", read(ad9154_reg::RES1_REG)); + info!("RES2_REG: 0x{:02x}", read(ad9154_reg::RES2_REG)); + info!("LIDx_REG: 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x}", + read(ad9154_reg::LID0_REG), + read(ad9154_reg::LID1_REG), + read(ad9154_reg::LID2_REG), + read(ad9154_reg::LID3_REG), + read(ad9154_reg::LID4_REG), + read(ad9154_reg::LID5_REG), + read(ad9154_reg::LID6_REG), + read(ad9154_reg::LID7_REG)); + info!("CHECKSUMx_REG: 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x}", + read(ad9154_reg::CHECKSUM0_REG), + read(ad9154_reg::CHECKSUM1_REG), + read(ad9154_reg::CHECKSUM2_REG), + read(ad9154_reg::CHECKSUM3_REG), + read(ad9154_reg::CHECKSUM4_REG), + read(ad9154_reg::CHECKSUM5_REG), + read(ad9154_reg::CHECKSUM6_REG), + read(ad9154_reg::CHECKSUM7_REG)); + info!("COMPSUMx_REG: 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x}", + read(ad9154_reg::COMPSUM0_REG), + read(ad9154_reg::COMPSUM1_REG), + read(ad9154_reg::COMPSUM2_REG), + read(ad9154_reg::COMPSUM3_REG), + read(ad9154_reg::COMPSUM4_REG), + read(ad9154_reg::COMPSUM5_REG), + read(ad9154_reg::COMPSUM6_REG), + read(ad9154_reg::COMPSUM7_REG)); + info!("BADDISPARITY: 0x{:02x}", read(ad9154_reg::BADDISPARITY)); + info!("NITDISPARITY: 0x{:02x}", read(ad9154_reg::NIT_W)); } -fn dac_cfg(dacno: u8) -> Result<(), &'static str> { +pub fn prbs(dacno: u8) -> Result<(), &'static str> { + let mut prbs_errors: u32 = 0; spi_setup(dacno); - jesd_enable(dacno, false); - jesd_prbs(dacno, false); - jesd_stpl(dacno, false); - clock::spin_us(10000); - jesd_enable(dacno, true); - dac_setup(6_000_000_000)?; - jesd_enable(dacno, false); - clock::spin_us(10000); - jesd_enable(dacno, true); - dac_monitor(); - clock::spin_us(50000); - let t = clock::get_ms(); - while !jesd_ready(dacno) { - if clock::get_ms() > t + 200 { - return Err("JESD ready timeout"); + + /* follow phy prbs testing (p58 of ad9154 datasheet) */ + info!("AD9154-{} running PRBS test...", dacno); + + /* step 2: select prbs mode */ + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL); + + /* step 3: enable test for all lanes */ + write(ad9154_reg::PHY_PRBS_TEST_EN, 0xff); + + /* step 4: reset */ + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL | + 1*ad9154_reg::PHY_TEST_RESET); + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL); + + /* step 5: prbs threshold */ + write(ad9154_reg::PHY_PRBS_TEST_THRESHOLD_LOBITS, 0); + write(ad9154_reg::PHY_PRBS_TEST_THRESHOLD_MIDBITS, 0); + write(ad9154_reg::PHY_PRBS_TEST_THRESHOLD_HIBITS, 0); + + /* step 6: start */ + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL); + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL | + 1*ad9154_reg::PHY_TEST_START); + + /* step 7: wait 500 ms */ + clock::spin_us(500000); + + /* step 8 : stop */ + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + 0b00*ad9154_reg::PHY_PRBS_PAT_SEL); + + for i in 0..8 { + /* step 9.a: select src err */ + write(ad9154_reg::PHY_PRBS_TEST_CTRL, + i*ad9154_reg::PHY_SRC_ERR_CNT); + /* step 9.b: retrieve number of errors */ + let lane_errors = (read(ad9154_reg::PHY_PRBS_TEST_ERRCNT_LOBITS) as u32) | + ((read(ad9154_reg::PHY_PRBS_TEST_ERRCNT_MIDBITS) as u32) << 8) | + 
((read(ad9154_reg::PHY_PRBS_TEST_ERRCNT_HIBITS) as u32) << 16); + if lane_errors > 0 { + warn!(" PRBS errors on lane{}: {:06x}", i, lane_errors); + } + prbs_errors += lane_errors + } + + if prbs_errors > 0 { + return Err("PRBS failed") + } + info!(" ...passed"); + Ok(()) +} + +pub fn stpl(dacno: u8, m: u8, s: u8) -> Result<(), &'static str> { + spi_setup(dacno); + + info!("AD9154-{} running STPL test...", dacno); + + fn prng(seed: u32) -> u32 { + return ((seed + 1)*0x31415979 + 1) & 0xffff; + } + + for i in 0..m { + let mut data: u32; + let mut errors: u8 = 0; + for j in 0..s { + /* select converter */ + write(ad9154_reg::SHORT_TPL_TEST_0, + 0b0*ad9154_reg::SHORT_TPL_TEST_EN | + 0b0*ad9154_reg::SHORT_TPL_TEST_RESET | + i*ad9154_reg::SHORT_TPL_DAC_SEL | + j*ad9154_reg::SHORT_TPL_SP_SEL); + + /* set expected value */ + data = prng(((i as u32) << 8) | (j as u32)); + write(ad9154_reg::SHORT_TPL_TEST_1, (data & 0x00ff) as u8); + write(ad9154_reg::SHORT_TPL_TEST_2, ((data & 0xff00) >> 8) as u8); + + /* enable stpl */ + write(ad9154_reg::SHORT_TPL_TEST_0, + 0b1*ad9154_reg::SHORT_TPL_TEST_EN | + 0b0*ad9154_reg::SHORT_TPL_TEST_RESET | + i*ad9154_reg::SHORT_TPL_DAC_SEL | + j*ad9154_reg::SHORT_TPL_SP_SEL); + + /* reset stpl */ + write(ad9154_reg::SHORT_TPL_TEST_0, + 0b1*ad9154_reg::SHORT_TPL_TEST_EN | + 0b1*ad9154_reg::SHORT_TPL_TEST_RESET | + i*ad9154_reg::SHORT_TPL_DAC_SEL | + j*ad9154_reg::SHORT_TPL_SP_SEL); + + /* release reset stpl */ + write(ad9154_reg::SHORT_TPL_TEST_0, + 0b1*ad9154_reg::SHORT_TPL_TEST_EN | + 0b0*ad9154_reg::SHORT_TPL_TEST_RESET | + i*ad9154_reg::SHORT_TPL_DAC_SEL | + j*ad9154_reg::SHORT_TPL_SP_SEL); + errors += read(ad9154_reg::SHORT_TPL_TEST_3); + } + info!(" c{} errors: {}", i, errors); + if errors > 0 { + return Err("STPL failed") } } - clock::spin_us(10000); - if read(ad9154_reg::CODEGRPSYNCFLG) != 0xff { - return Err("bad CODEGRPSYNCFLG") - } - if !jesd_jsync(dacno) { - return Err("bad SYNC") - } - if read(ad9154_reg::FRAMESYNCFLG) != 0xff { - return Err("bad FRAMESYNCFLG") - } - if read(ad9154_reg::GOODCHKSUMFLG) != 0xff { - return Err("bad GOODCHECKSUMFLG") - } - if read(ad9154_reg::INITLANESYNCFLG) != 0xff { - return Err("bad INITLANESYNCFLG") - } + + info!(" ...passed"); Ok(()) } -pub fn init() -> Result<(), &'static str> { - // Release the JESD clock domain reset late, as we need to - // set up clock chips before. 
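    // (i.e. hmc830_7043::init() had to run first so that the DAC and FPGA
    // device clocks were already present when the JESD core left reset)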
- jesd_unreset(); +pub fn sync(dacno: u8) -> Result { + spi_setup(dacno); - for dacno in 0..csr::AD9154.len() { - let dacno = dacno as u8; - debug!("setting up AD9154-{} DAC...", dacno); - dac_cfg(dacno)?; + write(ad9154_reg::SYNC_CONTROL, + 0x1*ad9154_reg::SYNCMODE | 1*ad9154_reg::SYNCENABLE | + 1*ad9154_reg::SYNCARM | 1*ad9154_reg::SYNCCLRSTKY); + clock::spin_us(1000); // ensure at least one sysref edge + let sync_status = read(ad9154_reg::SYNC_STATUS); + + if sync_status & ad9154_reg::SYNC_BUSY != 0 { + return Err("sync logic busy"); } - Ok(()) + if sync_status & ad9154_reg::SYNC_LOCK == 0 { + return Err("no sync lock"); + } + if sync_status & ad9154_reg::SYNC_TRIP == 0 { + return Err("no sysref edge"); + } + let realign_occured = sync_status & ad9154_reg::SYNC_ROTATE != 0; + Ok(realign_occured) } diff --git a/artiq/firmware/libboard/ad9154_reg.rs b/artiq/firmware/libboard_artiq/ad9154_reg.rs similarity index 99% rename from artiq/firmware/libboard/ad9154_reg.rs rename to artiq/firmware/libboard_artiq/ad9154_reg.rs index 3830f89c8..3af180496 100644 --- a/artiq/firmware/libboard/ad9154_reg.rs +++ b/artiq/firmware/libboard_artiq/ad9154_reg.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + pub const SPI_INTFCONFA : u16 = 0x000; pub const SOFTRESET : u8 = 1 << 0; pub const LSBFIRST : u8 = 1 << 1; diff --git a/artiq/firmware/libboard_artiq/build.rs b/artiq/firmware/libboard_artiq/build.rs new file mode 100644 index 000000000..3548ea5ff --- /dev/null +++ b/artiq/firmware/libboard_artiq/build.rs @@ -0,0 +1,5 @@ +extern crate build_misoc; + +fn main() { + build_misoc::cfg(); +} diff --git a/artiq/firmware/libboard_artiq/drtio_routing.rs b/artiq/firmware/libboard_artiq/drtio_routing.rs new file mode 100644 index 000000000..10ce489b1 --- /dev/null +++ b/artiq/firmware/libboard_artiq/drtio_routing.rs @@ -0,0 +1,107 @@ +use board_misoc::config; +#[cfg(has_drtio_routing)] +use board_misoc::csr; +use core::fmt; + +#[cfg(has_drtio_routing)] +pub const DEST_COUNT: usize = 256; +#[cfg(not(has_drtio_routing))] +pub const DEST_COUNT: usize = 0; +pub const MAX_HOPS: usize = 32; +pub const INVALID_HOP: u8 = 0xff; + +pub struct RoutingTable(pub [[u8; MAX_HOPS]; DEST_COUNT]); + +impl RoutingTable { + // default routing table is for star topology with no repeaters + pub fn default_master(default_n_links: usize) -> RoutingTable { + let mut ret = RoutingTable([[INVALID_HOP; MAX_HOPS]; DEST_COUNT]); + let n_entries = default_n_links + 1; // include local RTIO + for i in 0..n_entries { + ret.0[i][0] = i as u8; + } + for i in 1..n_entries { + ret.0[i][1] = 0x00; + } + ret + } + + // use this by default on satellite, as they receive + // the routing table from the master + pub fn default_empty() -> RoutingTable { + RoutingTable([[INVALID_HOP; MAX_HOPS]; DEST_COUNT]) + } +} + +impl fmt::Display for RoutingTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RoutingTable {{")?; + for i in 0..DEST_COUNT { + if self.0[i][0] != INVALID_HOP { + write!(f, " {}:", i)?; + for j in 0..MAX_HOPS { + if self.0[i][j] == INVALID_HOP { + break; + } + write!(f, " {}", self.0[i][j])?; + } + write!(f, ";")?; + } + } + write!(f, " }}")?; + Ok(()) + } +} + +pub fn config_routing_table(default_n_links: usize) -> RoutingTable { + let mut ret = RoutingTable::default_master(default_n_links); + let ok = config::read("routing_table", |result| { + if let Ok(data) = result { + if data.len() == DEST_COUNT*MAX_HOPS { + for i in 0..DEST_COUNT { + for j in 0..MAX_HOPS { + ret.0[i][j] = data[i*MAX_HOPS+j]; + } + } + return true; + 
} + } + false + }); + if !ok { + warn!("could not read routing table from configuration, using default"); + } + info!("routing table: {}", ret); + ret +} + +#[cfg(has_drtio_routing)] +pub fn interconnect_enable(routing_table: &RoutingTable, rank: u8, destination: u8) { + let hop = routing_table.0[destination as usize][rank as usize]; + unsafe { + csr::routing_table::destination_write(destination); + csr::routing_table::hop_write(hop); + } +} + +#[cfg(has_drtio_routing)] +pub fn interconnect_disable(destination: u8) { + unsafe { + csr::routing_table::destination_write(destination); + csr::routing_table::hop_write(INVALID_HOP); + } +} + +#[cfg(has_drtio_routing)] +pub fn interconnect_enable_all(routing_table: &RoutingTable, rank: u8) { + for i in 0..DEST_COUNT { + interconnect_enable(routing_table, rank, i as u8); + } +} + +#[cfg(has_drtio_routing)] +pub fn interconnect_disable_all() { + for i in 0..DEST_COUNT { + interconnect_disable(i as u8); + } +} diff --git a/artiq/firmware/libboard_artiq/drtioaux.rs b/artiq/firmware/libboard_artiq/drtioaux.rs new file mode 100644 index 000000000..f72072702 --- /dev/null +++ b/artiq/firmware/libboard_artiq/drtioaux.rs @@ -0,0 +1,152 @@ +use core::slice; +use crc; + +use io::{ProtoRead, ProtoWrite, Cursor, Error as IoError}; +use board_misoc::{csr::DRTIOAUX, mem::DRTIOAUX_MEM, clock}; +use proto_artiq::drtioaux_proto::Error as ProtocolError; + +pub use proto_artiq::drtioaux_proto::Packet; + +// this is parametric over T because there's no impl Fail for !. +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "gateware reported error")] + GatewareError, + #[fail(display = "packet CRC failed")] + CorruptedPacket, + + #[fail(display = "link is down")] + LinkDown, + #[fail(display = "timed out waiting for data")] + TimedOut, + #[fail(display = "unexpected reply")] + UnexpectedReply, + + #[fail(display = "routing error")] + RoutingError, + + #[fail(display = "protocol error: {}", _0)] + Protocol(#[cause] ProtocolError) +} + +impl From> for Error { + fn from(value: ProtocolError) -> Error { + Error::Protocol(value) + } +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Protocol(ProtocolError::Io(value)) + } +} + +pub fn reset(linkno: u8) { + let linkno = linkno as usize; + unsafe { + // clear buffer first to limit race window with buffer overflow + // error. We assume the CPU is fast enough so that no two packets + // will be received between the buffer and the error flag are cleared. 
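    // (Writing 1 to aux_rx_present below acknowledges and frees the receive
    // buffer, and writing 1 to aux_rx_error clears the error flag, matching
    // how receive() and has_rx_error() use these registers.)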
+ (DRTIOAUX[linkno].aux_rx_present_write)(1); + (DRTIOAUX[linkno].aux_rx_error_write)(1); + } +} + +fn has_rx_error(linkno: u8) -> bool { + let linkno = linkno as usize; + unsafe { + let error = (DRTIOAUX[linkno].aux_rx_error_read)() != 0; + if error { + (DRTIOAUX[linkno].aux_rx_error_write)(1) + } + error + } +} + +fn receive(linkno: u8, f: F) -> Result, Error> + where F: FnOnce(&[u8]) -> Result> +{ + let linkidx = linkno as usize; + unsafe { + if (DRTIOAUX[linkidx].aux_rx_present_read)() == 1 { + let ptr = DRTIOAUX_MEM[linkidx].base + DRTIOAUX_MEM[linkidx].size / 2; + let len = (DRTIOAUX[linkidx].aux_rx_length_read)(); + let result = f(slice::from_raw_parts(ptr as *mut u8, len as usize)); + (DRTIOAUX[linkidx].aux_rx_present_write)(1); + Ok(Some(result?)) + } else { + Ok(None) + } + } +} + +pub fn recv(linkno: u8) -> Result, Error> { + if has_rx_error(linkno) { + return Err(Error::GatewareError) + } + + receive(linkno, |buffer| { + if buffer.len() < 8 { + return Err(IoError::UnexpectedEnd.into()) + } + + let mut reader = Cursor::new(buffer); + + let checksum_at = buffer.len() - 4; + let checksum = crc::crc32::checksum_ieee(&reader.get_ref()[0..checksum_at]); + reader.set_position(checksum_at); + if reader.read_u32()? != checksum { + return Err(Error::CorruptedPacket) + } + reader.set_position(0); + + Ok(Packet::read_from(&mut reader)?) + }) +} + +pub fn recv_timeout(linkno: u8, timeout_ms: Option) -> Result> { + let timeout_ms = timeout_ms.unwrap_or(10); + let limit = clock::get_ms() + timeout_ms; + while clock::get_ms() < limit { + match recv(linkno)? { + None => (), + Some(packet) => return Ok(packet), + } + } + Err(Error::TimedOut) +} + +fn transmit(linkno: u8, f: F) -> Result<(), Error> + where F: FnOnce(&mut [u8]) -> Result> +{ + let linkno = linkno as usize; + unsafe { + while (DRTIOAUX[linkno].aux_tx_read)() != 0 {} + let ptr = DRTIOAUX_MEM[linkno].base; + let len = DRTIOAUX_MEM[linkno].size / 2; + let len = f(slice::from_raw_parts_mut(ptr as *mut u8, len))?; + (DRTIOAUX[linkno].aux_tx_length_write)(len as u16); + (DRTIOAUX[linkno].aux_tx_write)(1); + Ok(()) + } +} + +pub fn send(linkno: u8, packet: &Packet) -> Result<(), Error> { + transmit(linkno, |buffer| { + let mut writer = Cursor::new(buffer); + + packet.write_to(&mut writer)?; + + let padding = 4 - (writer.position() % 4); + if padding != 4 { + for _ in 0..padding { + writer.write_u8(0)?; + } + } + + let checksum = crc::crc32::checksum_ieee(&writer.get_ref()[0..writer.position()]); + writer.write_u32(checksum)?; + + Ok(writer.position()) + }) +} diff --git a/artiq/firmware/libboard_artiq/grabber.rs b/artiq/firmware/libboard_artiq/grabber.rs new file mode 100644 index 000000000..e9266015a --- /dev/null +++ b/artiq/firmware/libboard_artiq/grabber.rs @@ -0,0 +1,159 @@ +use board_misoc::csr; + +#[derive(PartialEq, Clone, Copy)] +enum State { + Reset, + ExitReset, + Lock, + Align, + Watch +} + +#[derive(Clone, Copy)] +struct Info { + state: State, + frame_size: (u16, u16), +} + +static mut INFO: [Info; csr::GRABBER_LEN] = + [Info { state: State::Reset, frame_size: (0, 0) }; csr::GRABBER_LEN]; + +fn get_pll_reset(g: usize) -> bool { + unsafe { (csr::GRABBER[g].pll_reset_read)() != 0 } +} + +fn set_pll_reset(g: usize, reset: bool) { + let val = if reset { 1 } else { 0 }; + unsafe { (csr::GRABBER[g].pll_reset_write)(val) } +} + +fn pll_locked(g: usize) -> bool { + unsafe { (csr::GRABBER[g].pll_locked_read)() != 0 } +} + +fn clock_pattern_ok(g: usize) -> bool { + unsafe { (csr::GRABBER[g].clk_sampled_read)() == 0b1100011 } +} + +fn 
clock_pattern_ok_filter(g: usize) -> bool { + for _ in 0..128 { + if !clock_pattern_ok(g) { + return false; + } + } + true +} + +fn phase_shift(g: usize, direction: u8) { + unsafe { + (csr::GRABBER[g].phase_shift_write)(direction); + while (csr::GRABBER[g].phase_shift_done_read)() == 0 {} + } +} + +fn clock_align(g: usize) -> bool { + while clock_pattern_ok_filter(g) { + phase_shift(g, 1); + } + phase_shift(g, 1); + + let mut count = 0; + while !clock_pattern_ok_filter(g) { + phase_shift(g, 1); + count += 1; + if count > 1024 { + return false; + } + } + + let mut window = 1; + phase_shift(g, 1); + while clock_pattern_ok_filter(g) { + phase_shift(g, 1); + window += 1; + } + + for _ in 0..window/2 { + phase_shift(g, 0); + } + + true +} + +fn get_last_pixels(g: usize) -> (u16, u16) { + unsafe { ((csr::GRABBER[g].last_x_read)(), + (csr::GRABBER[g].last_y_read)()) } +} + +fn get_video_clock(g: usize) -> u32 { + let freq_count = unsafe { + (csr::GRABBER[g].freq_count_read)() + } as u32; + 2*freq_count*(csr::CONFIG_CLOCK_FREQUENCY/1000)/(511*1000) +} + +pub fn tick() { + for g in 0..csr::GRABBER.len() { + let next = match unsafe { INFO[g].state } { + State::Reset => { + set_pll_reset(g, true); + unsafe { INFO[g].frame_size = (0, 0); } + State::ExitReset + } + State::ExitReset => { + if get_pll_reset(g) { + set_pll_reset(g, false); + State::Lock + } else { + State::ExitReset + } + } + State::Lock => { + if pll_locked(g) { + info!("grabber{} locked: {}MHz", g, get_video_clock(g)); + State::Align + } else { + State::Lock + } + } + State::Align => { + if pll_locked(g) { + if clock_align(g) { + info!("grabber{} alignment success", g); + State::Watch + } else { + info!("grabber{} alignment failure", g); + State::Reset + } + } else { + info!("grabber{} lock lost", g); + State::Reset + } + } + State::Watch => { + if pll_locked(g) { + if clock_pattern_ok(g) { + let last_xy = get_last_pixels(g); + if last_xy != unsafe { INFO[g].frame_size } { + // x capture is on ~LVAL which is after + // the last increment on DVAL + // y capture is on ~FVAL which coincides with the + // last increment on ~LVAL + info!("grabber{} frame size: {}x{}", + g, last_xy.0, last_xy.1 + 1); + unsafe { INFO[g].frame_size = last_xy } + } + State::Watch + } else { + info!("grabber{} alignment lost", g); + State::Reset + } + } else { + info!("grabber{} lock lost", g); + State::Reset + } + } + }; + unsafe { INFO[g].state = next; } + } +} diff --git a/artiq/firmware/libboard_artiq/hmc830_7043.rs b/artiq/firmware/libboard_artiq/hmc830_7043.rs new file mode 100644 index 000000000..adf753141 --- /dev/null +++ b/artiq/firmware/libboard_artiq/hmc830_7043.rs @@ -0,0 +1,419 @@ +mod hmc830 { + use board_misoc::{csr, clock}; + + fn spi_setup() { + unsafe { + while csr::converter_spi::idle_read() == 0 {} + csr::converter_spi::offline_write(0); + csr::converter_spi::end_write(1); + csr::converter_spi::cs_polarity_write(0b0001); + csr::converter_spi::clk_polarity_write(0); + csr::converter_spi::clk_phase_write(0); + csr::converter_spi::lsb_first_write(0); + csr::converter_spi::half_duplex_write(0); + csr::converter_spi::length_write(32 - 1); + csr::converter_spi::div_write(16 - 2); + csr::converter_spi::cs_write(1 << csr::CONFIG_CONVERTER_SPI_HMC830_CS); + } + } + + pub fn select_spi_mode() { + spi_setup(); + unsafe { + // rising egde on CS since cs_polarity still 0 + // selects "HMC Mode" + // do a dummy cycle with cs still high to clear CS + csr::converter_spi::length_write(0); + csr::converter_spi::data_write(0); + while 
csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::length_write(32 - 1); + } + } + + fn write(addr: u8, data: u32) { + let val = ((addr as u32) << 24) | data; + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_write(val << 1); // last clk cycle loads data + } + } + + fn read(addr: u8) -> u32 { + // SDO (miso/read bits) is technically CPHA=1, while SDI is CPHA=0 + // trust that the 8.2ns+0.2ns/pF provide enough hold time on top of + // the SPI round trip delay and stick with CPHA=0 + write((1 << 6) | addr, 0); + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_read() & 0xffffff + } + } + + pub fn detect() -> Result<(), &'static str> { + spi_setup(); + let id = read(0x00); + if id != 0xa7975 { + error!("invalid HMC830 ID: 0x{:08x}", id); + return Err("invalid HMC830 identification"); + } + + Ok(()) + } + + pub fn init() { + // Configure HMC830 for integer-N operation + // See "PLLs with integrated VCO- RF Applications Product & Operating + // Guide" + spi_setup(); + info!("loading HMC830 configuration..."); + + write(0x0, 0x20); // software reset + write(0x0, 0x00); // normal operation + write(0x6, 0x307ca); // integer-N mode (NB data sheet table 5.8 not self-consistent) + write(0x7, 0x4d); // digital lock detect, 1/2 cycle window (6.5ns window) + write(0x9, 0x2850); // charge pump: 1.6mA, no offset + write(0xa, 0x2045); // for wideband devices like the HMC830 + write(0xb, 0x7c061); // for HMC830 + + // VCO subsystem registers + // NB software reset does not seem to reset these registers, so always + // program them all! + write(0x5, 0xf88); // 1: defaults + write(0x5, 0x6010); // 2: mute output until output divider set + write(0x5, 0x2818); // 3: wideband PLL defaults + write(0x5, 0x60a0); // 4: HMC830 magic value + write(0x5, 0x1628); // 5: HMC830 magic value + write(0x5, 0x7fb0); // 6: HMC830 magic value + write(0x5, 0x0); // ready for VCO auto-cal + + info!(" ...done"); + } + + pub fn set_dividers(r_div: u32, n_div: u32, m_div: u32, out_div: u32) { + // VCO frequency: f_vco = (f_ref/r_div)*(n_int + n_frac/2**24) + // VCO frequency range [1.5GHz, 3GHz] + // Output frequency: f_out = f_vco/out_div + // Max PFD frequency: 125MHz for integer-N, 100MHz for fractional + // (mode B) + // Max reference frequency: 350MHz, however f_ref >= 200MHz requires + // setting 0x08[21]=1 + // + // Warning: Output divider is not synchronized! Set to 1 for deterministic + // phase at the output. + // + // :param r_div: reference divider [1, 16383] + // :param n_div: VCO divider, integer part. Integer-N mode: [16, 2**19-1] + // fractional mode: [20, 2**19-4] + // :param m_div: VCO divider, fractional part [0, 2**24-1] + // :param out_div: output divider [1, 62] (0 mutes output) + info!("setting HMC830 dividers..."); + write(0x5, 0x6010 + (out_div << 7) + (((out_div <= 2) as u32) << 15)); + write(0x5, 0x0); // ready for VCO auto-cal + write(0x2, r_div); + write(0x4, m_div); + write(0x3, n_div); + + info!(" ...done"); + } + + pub fn check_locked() -> Result<(), &'static str> { + info!("waiting for HMC830 lock..."); + let t = clock::get_ms(); + while read(0x12) & 0x02 == 0 { + if clock::get_ms() > t + 2000 { + error!("lock timeout. 
Register dump:"); + for addr in 0x00..0x14 { + // These registers don't exist (in the data sheet at least) + if addr == 0x0d || addr == 0x0e { continue; } + error!(" [0x{:02x}] = 0x{:04x}", addr, read(addr)); + } + return Err("lock timeout"); + } + } + info!(" ...locked"); + + Ok(()) + } +} + +pub mod hmc7043 { + use board_misoc::{csr, clock}; + + // Warning: dividers are not synchronized with HMC830 clock input! + // Set DAC_CLK_DIV to 1 or 0 for deterministic phase. + // (0 bypasses the divider and reduces noise) + const DAC_CLK_DIV: u16 = 0; + const FPGA_CLK_DIV: u16 = 16; // Keep in sync with jdcg.rs + const SYSREF_DIV: u16 = 256; // Keep in sync with jdcg.rs + const HMC_SYSREF_DIV: u16 = SYSREF_DIV*8; // must be <= 4MHz + + // enabled, divider, output config, is sysref + const OUTPUT_CONFIG: [(bool, u16, u8, bool); 14] = [ + (true, DAC_CLK_DIV, 0x08, false), // 0: DAC1_CLK + (true, SYSREF_DIV, 0x01, true), // 1: DAC1_SYSREF + (true, DAC_CLK_DIV, 0x08, false), // 2: DAC0_CLK + (true, SYSREF_DIV, 0x01, true), // 3: DAC0_SYSREF + (true, SYSREF_DIV, 0x10, true), // 4: AMC_FPGA_SYSREF0 + (false, FPGA_CLK_DIV, 0x10, true), // 5: AMC_FPGA_SYSREF1 + (false, 0, 0x10, false), // 6: unused + (true, FPGA_CLK_DIV, 0x10, true), // 7: RTM_FPGA_SYSREF0 + (true, FPGA_CLK_DIV, 0x08, false), // 8: GTP_CLK0_IN + (false, 0, 0x10, false), // 9: unused + (false, 0, 0x10, false), // 10: unused + (false, 0, 0x08, false), // 11: unused / uFL + (false, 0, 0x10, false), // 12: unused + (false, FPGA_CLK_DIV, 0x10, true), // 13: RTM_FPGA_SYSREF1 + ]; + + fn spi_setup() { + unsafe { + while csr::converter_spi::idle_read() == 0 {} + csr::converter_spi::offline_write(0); + csr::converter_spi::end_write(1); + csr::converter_spi::cs_polarity_write(0b0001); + csr::converter_spi::clk_polarity_write(0); + csr::converter_spi::clk_phase_write(0); + csr::converter_spi::lsb_first_write(0); + csr::converter_spi::half_duplex_write(0); // change mid-transaction for reads + csr::converter_spi::length_write(24 - 1); + csr::converter_spi::div_write(16 - 2); + csr::converter_spi::cs_write(1 << csr::CONFIG_CONVERTER_SPI_HMC7043_CS); + } + } + + fn spi_wait_idle() { + unsafe { + while csr::converter_spi::idle_read() == 0 {} + } + } + + fn write(addr: u16, data: u8) { + let cmd = (0 << 15) | addr; + let val = ((cmd as u32) << 8) | data as u32; + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_write(val << 8); + } + } + + fn read(addr: u16) -> u8 { + let cmd = (1 << 15) | addr; + let val = cmd as u32; + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::end_write(0); + csr::converter_spi::length_write(16 - 1); + csr::converter_spi::data_write(val << 16); + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::end_write(1); + csr::converter_spi::half_duplex_write(1); + csr::converter_spi::length_write(8 - 1); + csr::converter_spi::data_write(0); + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::half_duplex_write(0); + csr::converter_spi::length_write(24 - 1); + csr::converter_spi::data_read() as u8 + } + } + + pub const CHIP_ID: u32 = 0xf17904; + + pub fn get_id() -> u32 { + spi_setup(); + (read(0x78) as u32) << 16 | (read(0x79) as u32) << 8 | read(0x7a) as u32 + } + + pub fn detect() -> Result<(), &'static str> { + let id = get_id(); + if id != CHIP_ID { + error!("invalid HMC7043 ID: 0x{:08x}", id); + return Err("invalid HMC7043 identification"); + } + + Ok(()) + } + + pub fn enable() { + info!("enabling HMC7043"); + + 
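+        // The CSR write below presumably deasserts the HMC7043 hardware reset
+        // line; the 10 ms spin then gives the chip time to come up before the
+        // soft reset and output muting that follow.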
unsafe { + csr::hmc7043_reset::out_write(0); + } + clock::spin_us(10_000); + + spi_setup(); + write(0x0, 0x1); // Software reset + write(0x0, 0x0); // Normal operation + write(0x1, 0x48); // mute all outputs + } + + const GPO_MUX_CLK_OUT_PHASE: u8 = 3; + const GPO_MUX_FORCE1: u8 = 10; + const GPO_MUX_FORCE0: u8 = 11; + + /* Read an HMC7043 internal status bit through the GPO interface. + * This method is required to work around bugs in the register interface. + */ + fn gpo_indirect_read(mux_setting: u8) -> bool { + write(0x50, (mux_setting << 2) | 0x3); + spi_wait_idle(); + unsafe { + csr::hmc7043_gpo::in_read() == 1 + } + } + + pub fn init() { + spi_setup(); + info!("loading configuration..."); + + write(0x3, 0x14); // Disable the RFSYNCIN reseeder + write(0xA, 0x06); // Disable the RFSYNCIN input buffer + write(0xB, 0x07); // Enable the CLKIN input as LVPECL + write(0x9F, 0x4d); // Unexplained high-performance mode + write(0xA0, 0xdf); // Unexplained high-performance mode + + // Enable required output groups + let mut output_group_en = 0; + for channel in 0..OUTPUT_CONFIG.len() { + let enabled = OUTPUT_CONFIG[channel].0; + if enabled { + let group = channel/2; + output_group_en |= 1 << group; + } + } + write(0x4, output_group_en); + + // Set SYSREF timer divider. + // We don't need this "feature", but the HMC7043 won't work without. + write(0x5c, (HMC_SYSREF_DIV & 0xff) as u8); + write(0x5d, ((HMC_SYSREF_DIV & 0xf00) >> 8) as u8); + + for channel in 0..OUTPUT_CONFIG.len() { + let channel_base = 0xc8 + 0x0a*(channel as u16); + let (enabled, divider, outcfg, is_sysref) = OUTPUT_CONFIG[channel]; + + if enabled { + if !is_sysref { + // DCLK channel: enable high-performance mode + write(channel_base, 0xd1); + } else { + // SYSREF channel: disable hi-perf mode, enable slip + write(channel_base, 0x71); + } + } else { + write(channel_base, 0x10); + } + write(channel_base + 0x1, (divider & 0xff) as u8); + write(channel_base + 0x2, ((divider & 0xf00) >> 8) as u8); + + // bypass analog phase shift on DCLK channels to reduce noise + if !is_sysref { + if divider != 0 { + write(channel_base + 0x7, 0x00); // enable divider + } else { + write(channel_base + 0x7, 0x03); // bypass divider for lowest noise + } + } else { + write(channel_base + 0x7, 0x01); + } + + write(channel_base + 0x8, outcfg) + } + + write(0x1, 0x4a); // Reset dividers and FSMs + write(0x1, 0x48); + write(0x1, 0xc8); // Synchronize dividers + write(0x1, 0x40); // Unmute, high-performance/low-noise mode + + clock::spin_us(10_000); + + info!(" ...done"); + } + + pub fn test_gpo() -> Result<(), &'static str> { + info!("testing GPO..."); + for trial in 0..10 { + if !gpo_indirect_read(GPO_MUX_FORCE1) { + info!(" ...failed. GPO I/O did not go high (#{})", trial + 1); + return Err("GPO is not functioning"); + } + if gpo_indirect_read(GPO_MUX_FORCE0) { + info!(" ...failed. 
GPO I/O did not return low (#{})", trial + 1); + return Err("GPO is not functioning"); + } + } + info!(" ...passed"); + Ok(()) + } + + pub fn check_phased() -> Result<(), &'static str> { + if !gpo_indirect_read(GPO_MUX_CLK_OUT_PHASE) { + return Err("GPO reported phases did not align"); + } + // Should be the same as the GPO read + let sysref_fsm_status = read(0x91); + if sysref_fsm_status != 0x2 { + error!("Bad SYSREF FSM status: {:02x}", sysref_fsm_status); + return Err("Bad SYSREF FSM status"); + } + Ok(()) + } + + pub fn unmute() { + /* + * Never missing an opportunity to be awful, the HMC7043 produces broadband noise + * prior to intialization, which can upset the AMC FPGA. + * External circuitry mutes it. + */ + unsafe { + csr::hmc7043_out_en::out_write(1); + } + } + + pub fn sysref_delay_dac(dacno: u8, phase_offset: u8) { + spi_setup(); + if dacno == 0 { + write(0x00e9, phase_offset); + } else if dacno == 1 { + write(0x00d5, phase_offset); + } else { + unimplemented!(); + } + clock::spin_us(100); + } + + pub fn sysref_slip() { + spi_setup(); + write(0x0002, 0x02); + write(0x0002, 0x00); + clock::spin_us(100); + } +} + +pub fn init() -> Result<(), &'static str> { + #[cfg(all(hmc830_ref = "125", rtio_frequency = "125.0"))] + const DIV: (u32, u32, u32, u32) = (2, 32, 0, 1); // 125MHz -> 2.0GHz + #[cfg(all(hmc830_ref = "150", rtio_frequency = "150.0"))] + const DIV: (u32, u32, u32, u32) = (2, 32, 0, 1); // 150MHz -> 2.4GHz + + /* do not use other SPI devices before HMC830 SPI mode selection */ + hmc830::select_spi_mode(); + hmc830::detect()?; + hmc830::init(); + + hmc830::set_dividers(DIV.0, DIV.1, DIV.2, DIV.3); + + hmc830::check_locked()?; + + if hmc7043::get_id() == hmc7043::CHIP_ID { + error!("HMC7043 detected while in reset (board rework missing?)"); + } + hmc7043::enable(); + hmc7043::detect()?; + hmc7043::init(); + hmc7043::test_gpo()?; + hmc7043::check_phased()?; + hmc7043::unmute(); + + Ok(()) +} diff --git a/artiq/firmware/libboard_artiq/lib.rs b/artiq/firmware/libboard_artiq/lib.rs new file mode 100644 index 000000000..448b10f9e --- /dev/null +++ b/artiq/firmware/libboard_artiq/lib.rs @@ -0,0 +1,44 @@ +#![feature(asm, lang_items, never_type)] +#![no_std] + +extern crate failure; +#[cfg(has_drtio)] +#[macro_use] +extern crate failure_derive; +#[macro_use] +extern crate bitflags; +extern crate byteorder; +extern crate crc; +#[macro_use] +extern crate log; +extern crate io; +extern crate board_misoc; +extern crate proto_artiq; + +pub mod pcr; + +pub mod spi; + +#[cfg(has_kernel_cpu)] +pub mod mailbox; +#[cfg(has_kernel_cpu)] +pub mod rpc_queue; + +#[cfg(has_si5324)] +pub mod si5324; +#[cfg(has_wrpll)] +pub mod wrpll; + +#[cfg(has_hmc830_7043)] +pub mod hmc830_7043; +#[cfg(has_ad9154)] +mod ad9154_reg; +#[cfg(has_ad9154)] +pub mod ad9154; + +#[cfg(has_grabber)] +pub mod grabber; + +#[cfg(has_drtio)] +pub mod drtioaux; +pub mod drtio_routing; diff --git a/artiq/firmware/libamp/mailbox.rs b/artiq/firmware/libboard_artiq/mailbox.rs similarity index 95% rename from artiq/firmware/libamp/mailbox.rs rename to artiq/firmware/libboard_artiq/mailbox.rs index db1f5af47..9c1f374f6 100644 --- a/artiq/firmware/libamp/mailbox.rs +++ b/artiq/firmware/libboard_artiq/mailbox.rs @@ -1,5 +1,5 @@ use core::ptr::{read_volatile, write_volatile}; -use board::{mem, cache}; +use board_misoc::{mem, cache}; const MAILBOX: *mut usize = mem::MAILBOX_BASE as *mut usize; static mut LAST: usize = 0; diff --git a/artiq/firmware/libboard/pcr.rs b/artiq/firmware/libboard_artiq/pcr.rs similarity index 97% rename 
from artiq/firmware/libboard/pcr.rs rename to artiq/firmware/libboard_artiq/pcr.rs index 59518e8bf..d16575398 100644 --- a/artiq/firmware/libboard/pcr.rs +++ b/artiq/firmware/libboard_artiq/pcr.rs @@ -1,4 +1,4 @@ -use spr::*; +use board_misoc::spr::*; bitflags! { pub struct Counters: u32 { diff --git a/artiq/firmware/libamp/rpc_queue.rs b/artiq/firmware/libboard_artiq/rpc_queue.rs similarity index 98% rename from artiq/firmware/libamp/rpc_queue.rs rename to artiq/firmware/libboard_artiq/rpc_queue.rs index 8f9f40f96..8b25372e5 100644 --- a/artiq/firmware/libamp/rpc_queue.rs +++ b/artiq/firmware/libboard_artiq/rpc_queue.rs @@ -1,6 +1,6 @@ use core::ptr::{read_volatile, write_volatile}; use core::slice; -use board::{mem, cache}; +use board_misoc::{mem, cache}; const SEND_MAILBOX: *mut usize = (mem::MAILBOX_BASE + 4) as *mut usize; const RECV_MAILBOX: *mut usize = (mem::MAILBOX_BASE + 8) as *mut usize; diff --git a/artiq/firmware/libboard_artiq/si5324.rs b/artiq/firmware/libboard_artiq/si5324.rs new file mode 100644 index 000000000..f5e816f14 --- /dev/null +++ b/artiq/firmware/libboard_artiq/si5324.rs @@ -0,0 +1,346 @@ +use core::result; +use board_misoc::{clock, i2c}; +#[cfg(not(si5324_soft_reset))] +use board_misoc::csr; + +type Result = result::Result; + +const BUSNO: u8 = 0; +const ADDRESS: u8 = 0x68; + +#[cfg(not(si5324_soft_reset))] +fn hard_reset() { + unsafe { csr::si5324_rst_n::out_write(0); } + clock::spin_us(1_000); + unsafe { csr::si5324_rst_n::out_write(1); } + clock::spin_us(10_000); +} + +// NOTE: the logical parameters DO NOT MAP to physical values written +// into registers. They have to be mapped; see the datasheet. +// DSPLLsim reports the logical parameters in the design summary, not +// the physical register values. +pub struct FrequencySettings { + pub n1_hs: u8, + pub nc1_ls: u32, + pub n2_hs: u8, + pub n2_ls: u32, + pub n31: u32, + pub n32: u32, + pub bwsel: u8, + pub crystal_ref: bool +} + +pub enum Input { + Ckin1, + Ckin2, +} + +fn map_frequency_settings(settings: &FrequencySettings) -> Result { + if settings.nc1_ls != 0 && (settings.nc1_ls % 2) == 1 { + return Err("NC1_LS must be 0 or even") + } + if settings.nc1_ls > (1 << 20) { + return Err("NC1_LS is too high") + } + if (settings.n2_ls % 2) == 1 { + return Err("N2_LS must be even") + } + if settings.n2_ls > (1 << 20) { + return Err("N2_LS is too high") + } + if settings.n31 > (1 << 19) { + return Err("N31 is too high") + } + if settings.n32 > (1 << 19) { + return Err("N32 is too high") + } + let r = FrequencySettings { + n1_hs: match settings.n1_hs { + 4 => 0b000, + 5 => 0b001, + 6 => 0b010, + 7 => 0b011, + 8 => 0b100, + 9 => 0b101, + 10 => 0b110, + 11 => 0b111, + _ => return Err("N1_HS has an invalid value") + }, + nc1_ls: settings.nc1_ls - 1, + n2_hs: match settings.n2_hs { + 4 => 0b000, + 5 => 0b001, + 6 => 0b010, + 7 => 0b011, + 8 => 0b100, + 9 => 0b101, + 10 => 0b110, + 11 => 0b111, + _ => return Err("N2_HS has an invalid value") + }, + n2_ls: settings.n2_ls - 1, + n31: settings.n31 - 1, + n32: settings.n32 - 1, + bwsel: settings.bwsel, + crystal_ref: settings.crystal_ref + }; + Ok(r) +} + +fn write(reg: u8, val: u8) -> Result<()> { + i2c::start(BUSNO).unwrap(); + if !i2c::write(BUSNO, ADDRESS << 1).unwrap() { + return Err("Si5324 failed to ack write address") + } + if !i2c::write(BUSNO, reg).unwrap() { + return Err("Si5324 failed to ack register") + } + if !i2c::write(BUSNO, val).unwrap() { + return Err("Si5324 failed to ack value") + } + i2c::stop(BUSNO).unwrap(); + Ok(()) +} + +#[cfg(si5324_soft_reset)] 
+fn write_no_ack_value(reg: u8, val: u8) -> Result<()> { + i2c::start(BUSNO).unwrap(); + if !i2c::write(BUSNO, ADDRESS << 1).unwrap() { + return Err("Si5324 failed to ack write address") + } + if !i2c::write(BUSNO, reg).unwrap() { + return Err("Si5324 failed to ack register") + } + i2c::write(BUSNO, val).unwrap(); + i2c::stop(BUSNO).unwrap(); + Ok(()) +} + +fn read(reg: u8) -> Result { + i2c::start(BUSNO).unwrap(); + if !i2c::write(BUSNO, ADDRESS << 1).unwrap() { + return Err("Si5324 failed to ack write address") + } + if !i2c::write(BUSNO, reg).unwrap() { + return Err("Si5324 failed to ack register") + } + i2c::restart(BUSNO).unwrap(); + if !i2c::write(BUSNO, (ADDRESS << 1) | 1).unwrap() { + return Err("Si5324 failed to ack read address") + } + let val = i2c::read(BUSNO, false).unwrap(); + i2c::stop(BUSNO).unwrap(); + Ok(val) +} + +fn ident() -> Result { + Ok(((read(134)? as u16) << 8) | (read(135)? as u16)) +} + +#[cfg(si5324_soft_reset)] +fn soft_reset() -> Result<()> { + write_no_ack_value(136, read(136)? | 0x80)?; + clock::spin_us(10_000); + Ok(()) +} + +fn has_xtal() -> Result { + Ok((read(129)? & 0x01) == 0) // LOSX_INT=0 +} + +fn has_ckin(input: Input) -> Result { + match input { + Input::Ckin1 => Ok((read(129)? & 0x02) == 0), // LOS1_INT=0 + Input::Ckin2 => Ok((read(129)? & 0x04) == 0), // LOS2_INT=0 + } +} + +fn locked() -> Result { + Ok((read(130)? & 0x01) == 0) // LOL_INT=0 +} + +fn monitor_lock() -> Result<()> { + info!("waiting for Si5324 lock..."); + let t = clock::get_ms(); + while !locked()? { + // Yes, lock can be really slow. + if clock::get_ms() > t + 20000 { + return Err("Si5324 lock timeout"); + } + } + info!(" ...locked"); + Ok(()) +} + +fn init() -> Result<()> { + #[cfg(not(si5324_soft_reset))] + hard_reset(); + + #[cfg(soc_platform = "kasli")] + { + i2c::pca9548_select(BUSNO, 0x70, 0)?; + i2c::pca9548_select(BUSNO, 0x71, 1 << 3)?; + } + #[cfg(soc_platform = "sayma_amc")] + i2c::pca9548_select(BUSNO, 0x70, 1 << 4)?; + #[cfg(soc_platform = "sayma_rtm")] + i2c::pca9548_select(BUSNO, 0x77, 1 << 5)?; + #[cfg(soc_platform = "metlino")] + i2c::pca9548_select(BUSNO, 0x70, 1 << 4)?; + #[cfg(soc_platform = "kc705")] + i2c::pca9548_select(BUSNO, 0x74, 1 << 7)?; + + if ident()? != 0x0182 { + return Err("Si5324 does not have expected product number"); + } + + #[cfg(si5324_soft_reset)] + soft_reset()?; + Ok(()) +} + +pub fn bypass(input: Input) -> Result<()> { + let cksel_reg = match input { + Input::Ckin1 => 0b00, + Input::Ckin2 => 0b01, + }; + init()?; + write(21, read(21)? & 0xfe)?; // CKSEL_PIN=0 + write(3, (read(3)? & 0x3f) | (cksel_reg << 6))?; // CKSEL_REG + write(4, (read(4)? & 0x3f) | (0b00 << 6))?; // AUTOSEL_REG=b00 + write(6, (read(6)? & 0xc0) | 0b111111)?; // SFOUT2_REG=b111 SFOUT1_REG=b111 + write(0, (read(0)? & 0xfd) | 0x02)?; // BYPASS_REG=1 + Ok(()) +} + +pub fn setup(settings: &FrequencySettings, input: Input) -> Result<()> { + let s = map_frequency_settings(settings)?; + let cksel_reg = match input { + Input::Ckin1 => 0b00, + Input::Ckin2 => 0b01, + }; + + init()?; + if settings.crystal_ref { + write(0, read(0)? | 0x40)?; // FREE_RUN=1 + } + write(2, (read(2)? & 0x0f) | (s.bwsel << 4))?; + write(21, read(21)? & 0xfe)?; // CKSEL_PIN=0 + write(3, (read(3)? & 0x2f) | (cksel_reg << 6) | 0x10)?; // CKSEL_REG, SQ_ICAL=1 + write(4, (read(4)? & 0x3f) | (0b00 << 6))?; // AUTOSEL_REG=b00 + write(6, (read(6)? 
& 0xc0) | 0b111111)?; // SFOUT2_REG=b111 SFOUT1_REG=b111 + write(25, (s.n1_hs << 5 ) as u8)?; + write(31, (s.nc1_ls >> 16) as u8)?; + write(32, (s.nc1_ls >> 8 ) as u8)?; + write(33, (s.nc1_ls) as u8)?; + write(34, (s.nc1_ls >> 16) as u8)?; // write to NC2_LS as well + write(35, (s.nc1_ls >> 8 ) as u8)?; + write(36, (s.nc1_ls) as u8)?; + write(40, (s.n2_hs << 5 ) as u8 | (s.n2_ls >> 16) as u8)?; + write(41, (s.n2_ls >> 8 ) as u8)?; + write(42, (s.n2_ls) as u8)?; + write(43, (s.n31 >> 16) as u8)?; + write(44, (s.n31 >> 8) as u8)?; + write(45, (s.n31) as u8)?; + write(46, (s.n32 >> 16) as u8)?; + write(47, (s.n32 >> 8) as u8)?; + write(48, (s.n32) as u8)?; + write(137, read(137)? | 0x01)?; // FASTLOCK=1 + write(136, read(136)? | 0x40)?; // ICAL=1 + + if !has_xtal()? { + return Err("Si5324 misses XA/XB signal"); + } + if !has_ckin(input)? { + return Err("Si5324 misses clock input signal"); + } + + monitor_lock()?; + Ok(()) +} + +pub fn select_input(input: Input) -> Result<()> { + let cksel_reg = match input { + Input::Ckin1 => 0b00, + Input::Ckin2 => 0b01, + }; + write(3, (read(3)? & 0x3f) | (cksel_reg << 6))?; + if !has_ckin(input)? { + return Err("Si5324 misses clock input signal"); + } + monitor_lock()?; + Ok(()) +} + +#[cfg(has_siphaser)] +pub mod siphaser { + use super::*; + use board_misoc::{csr, clock}; + + pub fn select_recovered_clock(rc: bool) -> Result<()> { + write(3, (read(3)? & 0xdf) | (1 << 5))?; // DHOLD=1 + unsafe { + csr::siphaser::switch_clocks_write(if rc { 1 } else { 0 }); + } + write(3, (read(3)? & 0xdf) | (0 << 5))?; // DHOLD=0 + monitor_lock()?; + Ok(()) + } + + fn phase_shift(direction: u8) { + unsafe { + csr::siphaser::phase_shift_write(direction); + while csr::siphaser::phase_shift_done_read() == 0 {} + } + // wait for the Si5324 loop to stabilize + clock::spin_us(500); + } + + fn has_error() -> bool { + unsafe { + csr::siphaser::error_write(1); + } + clock::spin_us(5000); + unsafe { + csr::siphaser::error_read() != 0 + } + } + + fn find_edge(target: bool) -> Result { + let mut nshifts = 0; + + let mut previous = has_error(); + loop { + phase_shift(1); + nshifts += 1; + let current = has_error(); + if previous != target && current == target { + return Ok(nshifts); + } + if nshifts > 5000 { + return Err("failed to find timing error edge"); + } + previous = current; + } + } + + pub fn calibrate_skew() -> Result<()> { + let jitter_margin = 32; + let lead = find_edge(false)?; + for _ in 0..jitter_margin { + phase_shift(1); + } + let width = find_edge(true)? + jitter_margin; + // width is 360 degrees (one full rotation of the phase between s/h limits) minus jitter + info!("calibration successful, lead: {}, width: {} ({}deg)", lead, width, width*360/(56*8)); + + // Apply reverse phase shift for half the width to get into the + // middle of the working region. 
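+        // Illustrative numbers (hypothetical, not a measurement): with
+        // 56*8 = 448 phase-shift steps per clock period, a measured width of
+        // 224 steps would be reported as 224*360/448 = 180 degrees, and the
+        // loop below would step back (direction 0) 112 times to sit in the
+        // middle of that window.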
+ for _ in 0..width/2 { + phase_shift(0); + } + + Ok(()) + } +} diff --git a/artiq/firmware/libboard_artiq/spi.rs b/artiq/firmware/libboard_artiq/spi.rs new file mode 100644 index 000000000..78556b24b --- /dev/null +++ b/artiq/firmware/libboard_artiq/spi.rs @@ -0,0 +1,63 @@ +#[cfg(has_converter_spi)] +mod imp { + use board_misoc::csr; + + pub fn set_config(busno: u8, flags: u8, length: u8, div: u8, cs: u8) -> Result<(), ()> { + if busno != 0 { + return Err(()) + } + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::offline_write(flags >> 0 & 1); + csr::converter_spi::end_write(flags >> 1 & 1); + // input (in RTIO): flags >> 2 & 1 + // cs_polarity is a mask in the CSR interface + // only affect the bits that are selected + let mut cs_polarity = csr::converter_spi::cs_polarity_read(); + if flags >> 3 & 1 != 0 { + cs_polarity |= cs; + } else { + cs_polarity &= !cs; + } + csr::converter_spi::cs_polarity_write(cs_polarity); + csr::converter_spi::clk_polarity_write(flags >> 4 & 1); + csr::converter_spi::clk_phase_write(flags >> 5 & 1); + csr::converter_spi::lsb_first_write(flags >> 6 & 1); + csr::converter_spi::half_duplex_write(flags >> 7 & 1); + csr::converter_spi::length_write(length - 1); + csr::converter_spi::div_write(div - 2); + csr::converter_spi::cs_write(cs); + } + Ok(()) + } + + pub fn write(busno: u8, data: u32) -> Result<(), ()> { + if busno != 0 { + return Err(()) + } + unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_write(data); + } + Ok(()) + } + + pub fn read(busno: u8) -> Result { + if busno != 0 { + return Err(()) + } + Ok(unsafe { + while csr::converter_spi::writable_read() == 0 {} + csr::converter_spi::data_read() + }) + } +} + +#[cfg(not(has_converter_spi))] +mod imp { + pub fn set_config(_busno: u8, _flags: u8, _length: u8, _div: u8, _cs: u8) -> Result<(), ()> { Err(()) } + pub fn write(_busno: u8,_data: u32) -> Result<(), ()> { Err(()) } + pub fn read(_busno: u8,) -> Result { Err(()) } +} + +pub use self::imp::*; diff --git a/artiq/firmware/libboard_artiq/wrpll.rs b/artiq/firmware/libboard_artiq/wrpll.rs new file mode 100644 index 000000000..4b0fa9754 --- /dev/null +++ b/artiq/firmware/libboard_artiq/wrpll.rs @@ -0,0 +1,538 @@ +use board_misoc::{csr, clock}; + +mod i2c { + use board_misoc::{csr, clock}; + + #[derive(Debug, Clone, Copy)] + pub enum Dcxo { + Main, + Helper + } + + fn half_period() { clock::spin_us(1) } + const SDA_MASK: u8 = 2; + const SCL_MASK: u8 = 1; + + fn sda_i(dcxo: Dcxo) -> bool { + let reg = match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_in_read() }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_in_read() }, + }; + reg & SDA_MASK != 0 + } + + fn sda_oe(dcxo: Dcxo, oe: bool) { + let reg = match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_oe_read() }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_oe_read() }, + }; + let reg = if oe { reg | SDA_MASK } else { reg & !SDA_MASK }; + match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_oe_write(reg) }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_oe_write(reg) } + } + } + + fn sda_o(dcxo: Dcxo, o: bool) { + let reg = match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_out_read() }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_out_read() }, + }; + let reg = if o { reg | SDA_MASK } else { reg & !SDA_MASK }; + match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_out_write(reg) }, + Dcxo::Helper => unsafe { 
csr::wrpll::helper_dcxo_gpio_out_write(reg) } + } + } + + fn scl_oe(dcxo: Dcxo, oe: bool) { + let reg = match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_oe_read() }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_oe_read() }, + }; + let reg = if oe { reg | SCL_MASK } else { reg & !SCL_MASK }; + match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_oe_write(reg) }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_oe_write(reg) } + } + } + + fn scl_o(dcxo: Dcxo, o: bool) { + let reg = match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_out_read() }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_out_read() }, + }; + let reg = if o { reg | SCL_MASK } else { reg & !SCL_MASK }; + match dcxo { + Dcxo::Main => unsafe { csr::wrpll::main_dcxo_gpio_out_write(reg) }, + Dcxo::Helper => unsafe { csr::wrpll::helper_dcxo_gpio_out_write(reg) } + } + } + + pub fn init(dcxo: Dcxo) -> Result<(), &'static str> { + // Set SCL as output, and high level + scl_o(dcxo, true); + scl_oe(dcxo, true); + // Prepare a zero level on SDA so that sda_oe pulls it down + sda_o(dcxo, false); + // Release SDA + sda_oe(dcxo, false); + + // Check the I2C bus is ready + half_period(); + half_period(); + if !sda_i(dcxo) { + // Try toggling SCL a few times + for _bit in 0..8 { + scl_o(dcxo, false); + half_period(); + scl_o(dcxo, true); + half_period(); + } + } + + if !sda_i(dcxo) { + return Err("SDA is stuck low and doesn't get unstuck"); + } + Ok(()) + } + + pub fn start(dcxo: Dcxo) { + // Set SCL high then SDA low + scl_o(dcxo, true); + half_period(); + sda_oe(dcxo, true); + half_period(); + } + + pub fn stop(dcxo: Dcxo) { + // First, make sure SCL is low, so that the target releases the SDA line + scl_o(dcxo, false); + half_period(); + // Set SCL high then SDA high + sda_oe(dcxo, true); + scl_o(dcxo, true); + half_period(); + sda_oe(dcxo, false); + half_period(); + } + + pub fn write(dcxo: Dcxo, data: u8) -> bool { + // MSB first + for bit in (0..8).rev() { + // Set SCL low and set our bit on SDA + scl_o(dcxo, false); + sda_oe(dcxo, data & (1 << bit) == 0); + half_period(); + // Set SCL high ; data is shifted on the rising edge of SCL + scl_o(dcxo, true); + half_period(); + } + // Check ack + // Set SCL low, then release SDA so that the I2C target can respond + scl_o(dcxo, false); + half_period(); + sda_oe(dcxo, false); + // Set SCL high and check for ack + scl_o(dcxo, true); + half_period(); + // returns true if acked (I2C target pulled SDA low) + !sda_i(dcxo) + } + + pub fn read(dcxo: Dcxo, ack: bool) -> u8 { + // Set SCL low first, otherwise setting SDA as input may cause a transition + // on SDA with SCL high which will be interpreted as START/STOP condition. 
+ scl_o(dcxo, false); + half_period(); // make sure SCL has settled low + sda_oe(dcxo, false); + + let mut data: u8 = 0; + + // MSB first + for bit in (0..8).rev() { + scl_o(dcxo, false); + half_period(); + // Set SCL high and shift data + scl_o(dcxo, true); + half_period(); + if sda_i(dcxo) { data |= 1 << bit } + } + // Send ack + // Set SCL low and pull SDA low when acking + scl_o(dcxo, false); + if ack { sda_oe(dcxo, true) } + half_period(); + // then set SCL high + scl_o(dcxo, true); + half_period(); + + data + } +} + +mod si549 { + use board_misoc::clock; + use super::i2c; + + #[cfg(any(soc_platform = "metlino", soc_platform = "sayma_amc", soc_platform = "sayma_rtm"))] + pub const ADDRESS: u8 = 0x55; + #[cfg(soc_platform = "kasli")] + pub const ADDRESS: u8 = 0x67; + + pub fn write(dcxo: i2c::Dcxo, reg: u8, val: u8) -> Result<(), &'static str> { + i2c::start(dcxo); + if !i2c::write(dcxo, ADDRESS << 1) { + return Err("Si549 failed to ack write address") + } + if !i2c::write(dcxo, reg) { + return Err("Si549 failed to ack register") + } + if !i2c::write(dcxo, val) { + return Err("Si549 failed to ack value") + } + i2c::stop(dcxo); + Ok(()) + } + + pub fn write_no_ack_value(dcxo: i2c::Dcxo, reg: u8, val: u8) -> Result<(), &'static str> { + i2c::start(dcxo); + if !i2c::write(dcxo, ADDRESS << 1) { + return Err("Si549 failed to ack write address") + } + if !i2c::write(dcxo, reg) { + return Err("Si549 failed to ack register") + } + i2c::write(dcxo, val); + i2c::stop(dcxo); + Ok(()) + } + + pub fn read(dcxo: i2c::Dcxo, reg: u8) -> Result { + i2c::start(dcxo); + if !i2c::write(dcxo, ADDRESS << 1) { + return Err("Si549 failed to ack write address") + } + if !i2c::write(dcxo, reg) { + return Err("Si549 failed to ack register") + } + i2c::stop(dcxo); + + i2c::start(dcxo); + if !i2c::write(dcxo, (ADDRESS << 1) | 1) { + return Err("Si549 failed to ack read address") + } + let val = i2c::read(dcxo, false); + i2c::stop(dcxo); + + Ok(val) + } + + pub fn program(dcxo: i2c::Dcxo, hsdiv: u16, lsdiv: u8, fbdiv: u64) -> Result<(), &'static str> { + i2c::init(dcxo)?; + + write(dcxo, 255, 0x00)?; // PAGE + write_no_ack_value(dcxo, 7, 0x80)?; // RESET + clock::spin_us(100_000); // required? not specified in datasheet. + + write(dcxo, 255, 0x00)?; // PAGE + write(dcxo, 69, 0x00)?; // Disable FCAL override. + // Note: Value 0x00 from Table 5.6 is inconsistent with Table 5.7, + // which shows bit 0 as reserved and =1. + write(dcxo, 17, 0x00)?; // Synchronously disable output + + // The Si549 has no ID register, so we check that it responds correctly + // by writing values to a RAM-like register and reading them back. 
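+        // Register 23 is only a convenient scratch location for this probe
+        // (values 0..=254 are written and read back); it is rewritten with the
+        // real hsdiv LSB immediately after the loop, so the probe leaves no
+        // configuration behind.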
+ for test_value in 0..255 { + write(dcxo, 23, test_value)?; + let readback = read(dcxo, 23)?; + if readback != test_value { + return Err("Si549 detection failed"); + } + } + + write(dcxo, 23, hsdiv as u8)?; + write(dcxo, 24, (hsdiv >> 8) as u8 | (lsdiv << 4))?; + write(dcxo, 26, fbdiv as u8)?; + write(dcxo, 27, (fbdiv >> 8) as u8)?; + write(dcxo, 28, (fbdiv >> 16) as u8)?; + write(dcxo, 29, (fbdiv >> 24) as u8)?; + write(dcxo, 30, (fbdiv >> 32) as u8)?; + write(dcxo, 31, (fbdiv >> 40) as u8)?; + + write(dcxo, 7, 0x08)?; // Start FCAL + write(dcxo, 17, 0x01)?; // Synchronously enable output + + Ok(()) + } + + // Si549 digital frequency trim ("all-digital PLL" register) + // ∆ f_out = adpll * 0.0001164e-6 (0.1164 ppb/lsb) + // max trim range is +- 950 ppm + pub fn set_adpll(dcxo: i2c::Dcxo, adpll: i32) -> Result<(), &'static str> { + write(dcxo, 231, adpll as u8)?; + write(dcxo, 232, (adpll >> 8) as u8)?; + write(dcxo, 233, (adpll >> 16) as u8)?; + clock::spin_us(100); + Ok(()) + } + + pub fn get_adpll(dcxo: i2c::Dcxo) -> Result { + let b1 = read(dcxo, 231)? as i32; + let b2 = read(dcxo, 232)? as i32; + let b3 = read(dcxo, 233)? as i8 as i32; + Ok(b3 << 16 | b2 << 8 | b1) + } +} + +// to do: load from gateware config +const DDMTD_COUNTER_N: u32 = 15; +const DDMTD_COUNTER_M: u32 = (1 << DDMTD_COUNTER_N); +const F_SYS: f64 = csr::CONFIG_CLOCK_FREQUENCY as f64; + +const F_MAIN: f64 = 125.0e6; +const F_HELPER: f64 = F_MAIN * DDMTD_COUNTER_M as f64 / (DDMTD_COUNTER_M + 1) as f64; +const F_BEAT: f64 = F_MAIN - F_HELPER; +const TIME_STEP: f32 = 1./F_BEAT as f32; + +fn ddmtd_tag_to_s(mu: f32) -> f32 { + return (mu as f32)*TIME_STEP; +} + +fn get_frequencies() -> (u32, u32, u32) { + unsafe { + csr::wrpll::frequency_counter_update_en_write(1); + // wait for at least one full update cycle (> 2 timer periods) + clock::spin_us(200_000); + csr::wrpll::frequency_counter_update_en_write(0); + let helper = csr::wrpll::frequency_counter_counter_helper_read(); + let main = csr::wrpll::frequency_counter_counter_rtio_read(); + let cdr = csr::wrpll::frequency_counter_counter_rtio_rx0_read(); + (helper, main, cdr) + } +} + +fn log_frequencies() -> (u32, u32, u32) { + let (f_helper, f_main, f_cdr) = get_frequencies(); + let conv_khz = |f| 4*(f as u64)*(csr::CONFIG_CLOCK_FREQUENCY as u64)/(1000*(1 << 23)); + info!("helper clock frequency: {}kHz ({})", conv_khz(f_helper), f_helper); + info!("main clock frequency: {}kHz ({})", conv_khz(f_main), f_main); + info!("CDR clock frequency: {}kHz ({})", conv_khz(f_cdr), f_cdr); + (f_helper, f_main, f_cdr) +} + +fn get_tags() -> (i32, i32, u16, u16) { + unsafe { + csr::wrpll::tag_arm_write(1); + while csr::wrpll::tag_arm_read() != 0 {} + + let main_diff = csr::wrpll::main_diff_tag_read() as i32; + let helper_diff = csr::wrpll::helper_diff_tag_read() as i32; + let ref_tag = csr::wrpll::ref_tag_read(); + let main_tag = csr::wrpll::main_tag_read(); + (main_diff, helper_diff, ref_tag, main_tag) + } +} + +fn print_tags() { + const NUM_TAGS: usize = 30; + let mut main_diffs = [0; NUM_TAGS]; // input to main loop filter + let mut helper_diffs = [0; NUM_TAGS]; // input to helper loop filter + let mut ref_tags = [0; NUM_TAGS]; + let mut main_tags = [0; NUM_TAGS]; + let mut jitter = [0 as f32; NUM_TAGS]; + + for i in 0..NUM_TAGS { + let (main_diff, helper_diff, ref_tag, main_tag) = get_tags(); + main_diffs[i] = main_diff; + helper_diffs[i] = helper_diff; + ref_tags[i] = ref_tag; + main_tags[i] = main_tag; + } + info!("DDMTD ref tags: {:?}", ref_tags); + info!("DDMTD main tags: {:?}", 
main_tags);
+    info!("DDMTD main diffs: {:?}", main_diffs);
+    info!("DDMTD helper diffs: {:?}", helper_diffs);
+
+    // look at the difference between the main DCXO and reference...
+    let t0 = main_diffs[0];
+    main_diffs.iter_mut().for_each(|x| *x -= t0);
+
+    // crude estimate of the average drift per tag interval (assumes no unwrapping issues...)
+    let delta = main_diffs[main_diffs.len()-1] as f32 / (main_diffs.len()-1) as f32;
+    info!("delta: {:?} tags", delta);
+    let delta_f: f32 = delta/DDMTD_COUNTER_M as f32 * F_BEAT as f32;
+    info!("MAIN <-> ref frequency difference: {:?} Hz ({:?} ppm)", delta_f, delta_f/F_HELPER as f32 * 1e6);
+
+    jitter.iter_mut().enumerate().for_each(|(i, x)| *x = main_diffs[i] as f32 - delta*(i as f32));
+    info!("jitter: {:?} tags", jitter);
+
+    let var = jitter.iter().map(|x| x*x).fold(0 as f32, |acc, x| acc + x as f32) / NUM_TAGS as f32;
+    info!("variance: {:?} tags^2", var);
+}
+
+pub fn init() {
+    info!("initializing WR PLL...");
+
+    unsafe { csr::wrpll::helper_reset_write(1); }
+
+    unsafe {
+        csr::wrpll::helper_dcxo_i2c_address_write(si549::ADDRESS);
+        csr::wrpll::main_dcxo_i2c_address_write(si549::ADDRESS);
+    }
+
+    #[cfg(rtio_frequency = "125.0")]
+    let (h_hsdiv, h_lsdiv, h_fbdiv) = (0x05c, 0, 0x04b5badb98a);
+    #[cfg(rtio_frequency = "125.0")]
+    let (m_hsdiv, m_lsdiv, m_fbdiv) = (0x05c, 0, 0x04b5c447213);
+
+    si549::program(i2c::Dcxo::Main, m_hsdiv, m_lsdiv, m_fbdiv)
+        .expect("cannot initialize main Si549");
+    si549::program(i2c::Dcxo::Helper, h_hsdiv, h_lsdiv, h_fbdiv)
+        .expect("cannot initialize helper Si549");
+    // Si549 Settling Time for Large Frequency Change.
+    // Datasheet said 10ms but it lied.
+    clock::spin_us(50_000);
+
+    unsafe { csr::wrpll::helper_reset_write(0); }
+    clock::spin_us(1);
+}
+
+pub fn diagnostics() {
+    info!("WRPLL diagnostics...");
+    info!("Untrimmed oscillator frequencies:");
+    log_frequencies();
+
+    info!("Increase helper DCXO frequency by +10ppm (1.25kHz):");
+    si549::set_adpll(i2c::Dcxo::Helper, 85911).expect("ADPLL write failed");
+    // to do: add check on frequency?
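+    // Where 85911 comes from: set_adpll trims in steps of 0.0001164 ppm/LSB,
+    // so a +10 ppm shift needs 10/0.0001164 ≈ 85911 LSB, i.e. about 1.25 kHz
+    // at the ~125 MHz helper frequency.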
+    log_frequencies();
+}
+
+fn trim_dcxos(f_helper: u32, f_main: u32, f_cdr: u32) -> Result<(i32, i32), &'static str> {
+    info!("Trimming oscillator frequencies...");
+    const DCXO_STEP: i64 = (1.0e6/0.0001164) as i64;
+    const ADPLL_MAX: i64 = (950.0/0.0001164) as i64;
+
+    const TIMER_WIDTH: u32 = 23;
+    const COUNTER_DIV: u32 = 2;
+
+    // how many counts we expect to measure
+    const SYS_COUNTS: i64 = (1 << (TIMER_WIDTH - COUNTER_DIV)) as i64;
+    const EXP_MAIN_COUNTS: i64 = ((SYS_COUNTS as f64) * (F_MAIN/F_SYS)) as i64;
+    const EXP_HELPER_COUNTS: i64 = ((SYS_COUNTS as f64) * (F_HELPER/F_SYS)) as i64;
+
+    // calibrate the SYS clock to the CDR clock and correct the measured counts
+    // assume frequency errors are small so we can make an additive correction
+    // positive error means sys clock is too fast
+    let sys_err: i64 = EXP_MAIN_COUNTS - (f_cdr as i64);
+    let main_err: i64 = EXP_MAIN_COUNTS - (f_main as i64) - sys_err;
+    let helper_err: i64 = EXP_HELPER_COUNTS - (f_helper as i64) - sys_err;
+
+    info!("sys count err {}", sys_err);
+    info!("main counts err {}", main_err);
+    info!("helper counts err {}", helper_err);
+
+    // calculate required adjustment to the ADPLL register; see
+    // https://www.silabs.com/documents/public/data-sheets/si549-datasheet.pdf
+    // section 5.6
+    let helper_adpll: i64 = helper_err*DCXO_STEP/EXP_HELPER_COUNTS;
+    let main_adpll: i64 = main_err*DCXO_STEP/EXP_MAIN_COUNTS;
+    if helper_adpll.abs() > ADPLL_MAX {
+        return Err("helper DCXO offset too large");
+    }
+    if main_adpll.abs() > ADPLL_MAX {
+        return Err("main DCXO offset too large");
+    }
+
+    info!("ADPLL offsets: helper={} main={}", helper_adpll, main_adpll);
+    Ok((helper_adpll as i32, main_adpll as i32))
+}
+
+fn statistics(data: &[u16]) -> (f32, f32) {
+    let sum = data.iter().fold(0 as u32, |acc, x| acc + *x as u32);
+    let mean = sum as f32 / data.len() as f32;
+
+    let squared_sum = data.iter().fold(0 as u32, |acc, x| acc + (*x as u32).pow(2));
+    // Var[x] = E[x^2] - E[x]^2
+    let variance = (squared_sum as f32 / data.len() as f32) - mean*mean;
+    return (mean, variance)
+}
+
+fn select_recovered_clock_int(rc: bool) -> Result<(), &'static str> {
+    info!("Untrimmed oscillator frequencies:");
+    let (f_helper, f_main, f_cdr) = log_frequencies();
+    if rc {
+        let (helper_adpll, main_adpll) = trim_dcxos(f_helper, f_main, f_cdr)?;
+        // to do: add assertion on max frequency shift here?
+        si549::set_adpll(i2c::Dcxo::Helper, helper_adpll).expect("ADPLL write failed");
+        si549::set_adpll(i2c::Dcxo::Main, main_adpll).expect("ADPLL write failed");
+
+        log_frequencies();
+        clock::spin_us(100_000); // TO DO: remove/reduce!
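+        // For reference: trim_dcxos turns each fractional count error
+        // (err/EXP_*_COUNTS) into ADPLL LSB via DCXO_STEP = 1e6/0.0001164
+        // (≈ 8591 LSB per ppm) and rejects anything beyond ±950 ppm (ADPLL_MAX).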
+ print_tags(); + + info!("increasing main DCXO by 1ppm (125Hz):"); + si549::set_adpll(i2c::Dcxo::Main, main_adpll + 8591).expect("ADPLL write failed"); + clock::spin_us(100_000); + print_tags(); + + si549::set_adpll(i2c::Dcxo::Main, main_adpll).expect("ADPLL write failed"); + + unsafe { + csr::wrpll::adpll_offset_helper_write(helper_adpll as u32); + csr::wrpll::adpll_offset_main_write(main_adpll as u32); + csr::wrpll::helper_dcxo_gpio_enable_write(0); + csr::wrpll::main_dcxo_gpio_enable_write(0); + csr::wrpll::helper_dcxo_errors_write(0xff); + csr::wrpll::main_dcxo_errors_write(0xff); + csr::wrpll::collector_reset_write(0); + } + clock::spin_us(1_000); // wait for the collector to produce meaningful output + unsafe { + csr::wrpll::filter_reset_write(0); + } + + clock::spin_us(100_000); + + print_tags(); +// let mut tags = [0; 10]; +// for i in 0..tags.len() { +// tags[i] = get_ddmtd_helper_tag(); +// } +// info!("DDMTD helper tags: {:?}", tags); + + unsafe { + csr::wrpll::filter_reset_write(1); + csr::wrpll::collector_reset_write(1); + } + clock::spin_us(50_000); + unsafe { + csr::wrpll::helper_dcxo_gpio_enable_write(1); + csr::wrpll::main_dcxo_gpio_enable_write(1); + } + unsafe { + info!("error {} {}", + csr::wrpll::helper_dcxo_errors_read(), + csr::wrpll::main_dcxo_errors_read()); + } + info!("new ADPLL: {} {}", + si549::get_adpll(i2c::Dcxo::Helper)?, + si549::get_adpll(i2c::Dcxo::Main)?); + } else { + si549::set_adpll(i2c::Dcxo::Helper, 0).expect("ADPLL write failed"); + si549::set_adpll(i2c::Dcxo::Main, 0).expect("ADPLL write failed"); + } + Ok(()) +} + +pub fn select_recovered_clock(rc: bool) { + if rc { + info!("switching to recovered clock"); + } else { + info!("switching to local XO clock"); + } + match select_recovered_clock_int(rc) { + Ok(()) => info!("clock transition completed"), + Err(e) => error!("clock transition failed: {}", e) + } +} diff --git a/artiq/firmware/libboard_misoc/Cargo.toml b/artiq/firmware/libboard_misoc/Cargo.toml new file mode 100644 index 000000000..81ebf979f --- /dev/null +++ b/artiq/firmware/libboard_misoc/Cargo.toml @@ -0,0 +1,21 @@ +[package] +authors = ["M-Labs"] +name = "board_misoc" +version = "0.0.0" +build = "build.rs" + +[lib] +name = "board_misoc" +path = "lib.rs" + +[build-dependencies] +cc = "1.0" +build_misoc = { path = "../libbuild_misoc" } + +[dependencies] +byteorder = { version = "1.0", default-features = false } +log = { version = "0.4", default-features = false, optional = true } +smoltcp = { version = "0.6.0", default-features = false, optional = true } + +[features] +uart_console = [] diff --git a/artiq/firmware/libboard_misoc/build.rs b/artiq/firmware/libboard_misoc/build.rs new file mode 100644 index 000000000..c0a5971eb --- /dev/null +++ b/artiq/firmware/libboard_misoc/build.rs @@ -0,0 +1,18 @@ +extern crate build_misoc; +extern crate cc; + +use std::env; +use std::path::Path; + +fn main() { + build_misoc::cfg(); + + let triple = env::var("TARGET").unwrap(); + let arch = triple.split("-").next().unwrap(); + let vectors_path = Path::new(arch).join("vectors.S"); + + println!("cargo:rerun-if-changed={}", vectors_path.to_str().unwrap()); + cc::Build::new() + .file(vectors_path) + .compile("vectors"); +} diff --git a/artiq/firmware/libboard_misoc/clock.rs b/artiq/firmware/libboard_misoc/clock.rs new file mode 100644 index 000000000..deddd8c39 --- /dev/null +++ b/artiq/firmware/libboard_misoc/clock.rs @@ -0,0 +1,38 @@ +use core::i64; +use csr; + +const INIT: u64 = i64::MAX as u64; +const FREQ: u64 = csr::CONFIG_CLOCK_FREQUENCY as u64; + 
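+// The timer counts down from INIT, so elapsed time is INIT minus the current
+// value, scaled by ticks per microsecond/millisecond. A minimal usage sketch
+// (illustrative only, not an API added here; `done()` stands in for whatever
+// condition is being polled) of the timeout idiom built on this module, as
+// used by e.g. hmc830::check_locked and drtioaux::recv_timeout:
+//
+//     let limit = clock::get_ms() + timeout_ms;
+//     while !done() {
+//         if clock::get_ms() > limit {
+//             return Err("timeout");
+//         }
+//     }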
+pub fn init() { + unsafe { + csr::timer0::en_write(0); + csr::timer0::load_write(INIT); + csr::timer0::reload_write(INIT); + csr::timer0::en_write(1); + } +} + +pub fn get_us() -> u64 { + unsafe { + csr::timer0::update_value_write(1); + (INIT - csr::timer0::value_read()) / (FREQ / 1_000_000) + } +} + +pub fn get_ms() -> u64 { + unsafe { + csr::timer0::update_value_write(1); + (INIT - csr::timer0::value_read()) / (FREQ / 1_000) + } +} + +pub fn spin_us(interval: u64) { + unsafe { + csr::timer0::update_value_write(1); + let threshold = csr::timer0::value_read() - interval * (FREQ / 1_000_000); + while csr::timer0::value_read() > threshold { + csr::timer0::update_value_write(1) + } + } +} diff --git a/artiq/firmware/libboard_misoc/config.rs b/artiq/firmware/libboard_misoc/config.rs new file mode 100644 index 000000000..7ed368efd --- /dev/null +++ b/artiq/firmware/libboard_misoc/config.rs @@ -0,0 +1,313 @@ +use core::{str, fmt}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Error { + AlreadyLocked, + SpaceExhausted, + Truncated { offset: usize }, + InvalidSize { offset: usize, size: usize }, + MissingSeparator { offset: usize }, + Utf8Error(str::Utf8Error), + NoFlash, +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &Error::AlreadyLocked => + write!(f, "attempt at reentrant access"), + &Error::SpaceExhausted => + write!(f, "space exhausted"), + &Error::Truncated { offset }=> + write!(f, "truncated record at offset {}", offset), + &Error::InvalidSize { offset, size } => + write!(f, "invalid record size {} at offset {}", size, offset), + &Error::MissingSeparator { offset } => + write!(f, "missing separator at offset {}", offset), + &Error::Utf8Error(err) => + write!(f, "{}", err), + &Error::NoFlash => + write!(f, "flash memory is not present"), + } + } +} + +#[cfg(has_spiflash)] +mod imp { + use core::str; + use byteorder::{ByteOrder, BigEndian}; + use cache; + use spiflash; + use super::Error; + use core::fmt; + use core::fmt::Write; + + struct FmtWrapper<'a> { + buf: &'a mut [u8], + offset: usize, + } + + impl<'a> FmtWrapper<'a> { + fn new(buf: &'a mut [u8]) -> Self { + FmtWrapper { + buf: buf, + offset: 0, + } + } + + fn contents(&self) -> &[u8] { + &self.buf[..self.offset] + } + } + + impl<'a> fmt::Write for FmtWrapper<'a> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let bytes = s.as_bytes(); + let remainder = &mut self.buf[self.offset..]; + let remainder = &mut remainder[..bytes.len()]; + remainder.copy_from_slice(bytes); + self.offset += bytes.len(); + Ok(()) + } + } + + // One flash sector immediately before the firmware. 
+ const ADDR: usize = ::mem::FLASH_BOOT_ADDRESS - spiflash::SECTOR_SIZE; + const SIZE: usize = spiflash::SECTOR_SIZE; + + mod lock { + use core::slice; + use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; + use super::Error; + + static LOCKED: AtomicUsize = ATOMIC_USIZE_INIT; + + pub struct Lock; + + impl Lock { + pub fn take() -> Result { + if LOCKED.swap(1, Ordering::SeqCst) != 0 { + Err(Error::AlreadyLocked) + } else { + Ok(Lock) + } + } + + pub fn data(&self) -> &'static [u8] { + unsafe { slice::from_raw_parts(super::ADDR as *const u8, super::SIZE) } + } + } + + impl Drop for Lock { + fn drop(&mut self) { + LOCKED.store(0, Ordering::SeqCst) + } + } + } + + use self::lock::Lock; + + #[derive(Clone)] + struct Iter<'a> { + data: &'a [u8], + offset: usize + } + + impl<'a> Iter<'a> { + fn new(data: &'a [u8]) -> Iter<'a> { + Iter { data: data, offset: 0 } + } + } + + impl<'a> Iterator for Iter<'a> { + type Item = Result<(&'a [u8], &'a [u8]), Error>; + + fn next(&mut self) -> Option { + let data = &self.data[self.offset..]; + + if data.len() < 4 { + // error!("offset {}: truncated record", self.offset); + return Some(Err(Error::Truncated { offset: self.offset })) + } + + let record_size = BigEndian::read_u32(data) as usize; + if record_size == !0 /* all ones; erased flash */ { + return None + } else if record_size < 4 || record_size > data.len() { + return Some(Err(Error::InvalidSize { offset: self.offset, size: record_size })) + } + + let record_body = &data[4..record_size]; + match record_body.iter().position(|&x| x == 0) { + None => { + return Some(Err(Error::MissingSeparator { offset: self.offset })) + } + Some(pos) => { + self.offset += record_size; + + let (key, zero_and_value) = record_body.split_at(pos); + Some(Ok((key, &zero_and_value[1..]))) + } + } + } + } + + pub fn read) -> R, R>(key: &str, f: F) -> R { + f(Lock::take().and_then(|lock| { + let mut iter = Iter::new(lock.data()); + let mut value = &[][..]; + while let Some(result) = iter.next() { + let (record_key, record_value) = result?; + if key.as_bytes() == record_key { + // last write wins + value = record_value + } + } + Ok(value) + })) + } + + pub fn read_str) -> R, R>(key: &str, f: F) -> R { + read(key, |result| { + f(result.and_then(|value| str::from_utf8(value).map_err(Error::Utf8Error))) + }) + } + + unsafe fn append_at(data: &[u8], mut offset: usize, + key: &[u8], value: &[u8]) -> Result { + let record_size = 4 + key.len() + 1 + value.len(); + if offset + record_size > data.len() { + return Err(Error::SpaceExhausted) + } + + let mut record_size_bytes = [0u8; 4]; + BigEndian::write_u32(&mut record_size_bytes[..], record_size as u32); + + { + let mut write = |payload| { + spiflash::write(data.as_ptr().offset(offset as isize) as usize, payload); + offset += payload.len(); + }; + + write(&record_size_bytes[..]); + write(key); + write(&[0]); + write(value); + cache::flush_l2_cache(); + } + + Ok(offset) + } + + fn compact() -> Result<(), Error> { + let lock = Lock::take()?; + let data = lock.data(); + + static mut OLD_DATA: [u8; SIZE] = [0; SIZE]; + let old_data = unsafe { + OLD_DATA.copy_from_slice(data); + &OLD_DATA[..] + }; + + unsafe { spiflash::erase_sector(data.as_ptr() as usize) }; + + // This is worst-case quadratic, but we're limited by a small SPI flash sector size, + // so it does not really matter. 
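+        // Record layout recap (see append_at above): a big-endian u32 giving
+        // the total record size, the key bytes, a 0x00 separator, then the
+        // value. For a hypothetical key "ip" with value "192.168.1.50" that is
+        // 4 + 2 + 1 + 12 = 19 bytes; an empty value marks the key as removed.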
+ let mut offset = 0; + let mut iter = Iter::new(old_data); + 'iter: while let Some(result) = iter.next() { + let (key, mut value) = result?; + if value.is_empty() { + // This is a removed entry, ignore it. + continue + } + + let mut next_iter = iter.clone(); + while let Some(next_result) = next_iter.next() { + let (next_key, _) = next_result?; + if key == next_key { + // There's another entry that overwrites this one, ignore this one. + continue 'iter + } + } + offset = unsafe { append_at(data, offset, key, value)? }; + } + + Ok(()) + } + + fn append(key: &str, value: &[u8]) -> Result<(), Error> { + let lock = Lock::take()?; + let data = lock.data(); + + let free_offset = { + let mut iter = Iter::new(data); + while let Some(result) = iter.next() { + let _ = result?; + } + iter.offset + }; + + unsafe { append_at(data, free_offset, key.as_bytes(), value)? }; + + Ok(()) + } + + pub fn write(key: &str, value: &[u8]) -> Result<(), Error> { + match append(key, value) { + Err(Error::SpaceExhausted) => { + compact()?; + append(key, value) + } + res => res + } + } + + pub fn write_int(key: &str, value: u32) -> Result<(), Error> { + let mut buf = [0; 16]; + let mut wrapper = FmtWrapper::new(&mut buf); + write!(&mut wrapper, "{}", value).unwrap(); + write(key, wrapper.contents()) + } + + pub fn remove(key: &str) -> Result<(), Error> { + write(key, &[]) + } + + pub fn erase() -> Result<(), Error> { + let lock = Lock::take()?; + let data = lock.data(); + + unsafe { spiflash::erase_sector(data.as_ptr() as usize) }; + cache::flush_l2_cache(); + + Ok(()) + } +} + +#[cfg(not(has_spiflash))] +mod imp { + use super::Error; + + pub fn read) -> R, R>(_key: &str, f: F) -> R { + f(Err(Error::NoFlash)) + } + + pub fn read_str) -> R, R>(_key: &str, f: F) -> R { + f(Err(Error::NoFlash)) + } + + pub fn write(_key: &str, _value: &[u8]) -> Result<(), Error> { + Err(Error::NoFlash) + } + + pub fn remove(_key: &str) -> Result<(), Error> { + Err(Error::NoFlash) + } + + pub fn erase() -> Result<(), Error> { + Err(Error::NoFlash) + } +} + +pub use self::imp::*; diff --git a/artiq/firmware/runtime/ethmac.rs b/artiq/firmware/libboard_misoc/ethmac.rs similarity index 82% rename from artiq/firmware/runtime/ethmac.rs rename to artiq/firmware/libboard_misoc/ethmac.rs index a19a13933..ff585a010 100644 --- a/artiq/firmware/runtime/ethmac.rs +++ b/artiq/firmware/libboard_misoc/ethmac.rs @@ -1,8 +1,10 @@ use core::{slice, fmt}; use smoltcp::Result; +use smoltcp::time::Instant; use smoltcp::phy::{self, DeviceCapabilities, Device}; -use board::{csr, mem}; +use csr; +use mem::ETHMAC_BASE; const RX_SLOTS: usize = csr::ETHMAC_RX_SLOTS as usize; const TX_SLOTS: usize = csr::ETHMAC_TX_SLOTS as usize; @@ -28,14 +30,14 @@ fn next_tx_slot() -> Option { } } -fn rx_buffer(slot: usize) -> *const u8 { +fn rx_buffer(slot: usize) -> *mut u8 { debug_assert!(slot < RX_SLOTS); - (mem::ETHMAC_BASE + SLOT_SIZE * slot) as _ + (ETHMAC_BASE + SLOT_SIZE * slot) as _ } fn tx_buffer(slot: usize) -> *mut u8 { debug_assert!(slot < TX_SLOTS); - (mem::ETHMAC_BASE + SLOT_SIZE * (RX_SLOTS + slot)) as _ + (ETHMAC_BASE + SLOT_SIZE * (RX_SLOTS + slot)) as _ } pub struct EthernetDevice(()); @@ -44,6 +46,23 @@ impl EthernetDevice { pub unsafe fn new() -> EthernetDevice { EthernetDevice(()) } + + #[cfg(has_ethphy)] + pub fn reset_phy(&mut self) { + use clock; + + unsafe { + csr::ethphy::crg_reset_write(1); + clock::spin_us(2_000); + csr::ethphy::crg_reset_write(0); + clock::spin_us(2_000); + } + } + + pub fn reset_phy_if_any(&mut self) { + #[cfg(has_ethphy)] + 
self.reset_phy(); + } } impl<'a> Device<'a> for EthernetDevice { @@ -77,12 +96,12 @@ impl<'a> Device<'a> for EthernetDevice { pub struct EthernetRxSlot(usize); impl phy::RxToken for EthernetRxSlot { - fn consume(self, _timestamp: u64, f: F) -> Result - where F: FnOnce(&[u8]) -> Result + fn consume(self, _timestamp: Instant, f: F) -> Result + where F: FnOnce(&mut [u8]) -> Result { unsafe { let length = csr::ethmac::sram_writer_length_read() as usize; - let result = f(slice::from_raw_parts(rx_buffer(self.0), length)); + let result = f(slice::from_raw_parts_mut(rx_buffer(self.0), length)); csr::ethmac::sram_writer_ev_pending_write(1); result } @@ -92,7 +111,7 @@ impl phy::RxToken for EthernetRxSlot { pub struct EthernetTxSlot(usize); impl phy::TxToken for EthernetTxSlot { - fn consume(self, _timestamp: u64, length: usize, f: F) -> Result + fn consume(self, _timestamp: Instant, length: usize, f: F) -> Result where F: FnOnce(&mut [u8]) -> Result { debug_assert!(length < SLOT_SIZE); diff --git a/artiq/firmware/libboard_misoc/i2c.rs b/artiq/firmware/libboard_misoc/i2c.rs new file mode 100644 index 000000000..19ff3195a --- /dev/null +++ b/artiq/firmware/libboard_misoc/i2c.rs @@ -0,0 +1,216 @@ +#[cfg(has_i2c)] +mod imp { + use super::super::{csr, clock}; + + const INVALID_BUS: &'static str = "Invalid I2C bus"; + + fn half_period() { clock::spin_us(100) } + fn sda_bit(busno: u8) -> u8 { 1 << (2 * busno + 1) } + fn scl_bit(busno: u8) -> u8 { 1 << (2 * busno) } + + fn sda_i(busno: u8) -> bool { + unsafe { + csr::i2c::in_read() & sda_bit(busno) != 0 + } + } + + fn scl_i(busno: u8) -> bool { + unsafe { + csr::i2c::in_read() & scl_bit(busno) != 0 + } + } + + fn sda_oe(busno: u8, oe: bool) { + unsafe { + let reg = csr::i2c::oe_read(); + let reg = if oe { reg | sda_bit(busno) } else { reg & !sda_bit(busno) }; + csr::i2c::oe_write(reg) + } + } + + fn sda_o(busno: u8, o: bool) { + unsafe { + let reg = csr::i2c::out_read(); + let reg = if o { reg | sda_bit(busno) } else { reg & !sda_bit(busno) }; + csr::i2c::out_write(reg) + } + } + + fn scl_oe(busno: u8, oe: bool) { + unsafe { + let reg = csr::i2c::oe_read(); + let reg = if oe { reg | scl_bit(busno) } else { reg & !scl_bit(busno) }; + csr::i2c::oe_write(reg) + } + } + + fn scl_o(busno: u8, o: bool) { + unsafe { + let reg = csr::i2c::out_read(); + let reg = if o { reg | scl_bit(busno) } else { reg & !scl_bit(busno) }; + csr::i2c::out_write(reg) + } + } + + pub fn init() -> Result<(), &'static str> { + for busno in 0..csr::CONFIG_I2C_BUS_COUNT { + let busno = busno as u8; + scl_oe(busno, false); + sda_oe(busno, false); + scl_o(busno, false); + sda_o(busno, false); + + // Check the I2C bus is ready + half_period(); + half_period(); + if !sda_i(busno) { + // Try toggling SCL a few times + for _bit in 0..8 { + scl_oe(busno, true); + half_period(); + scl_oe(busno, false); + half_period(); + } + } + + if !sda_i(busno) { + return Err("SDA is stuck low and doesn't get unstuck"); + } + if !scl_i(busno) { + return Err("SCL is stuck low and doesn't get unstuck"); + } + // postcondition: SCL and SDA high + } + Ok(()) + } + + pub fn start(busno: u8) -> Result<(), &'static str> { + if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { + return Err(INVALID_BUS) + } + // precondition: SCL and SDA high + if !scl_i(busno) { + return Err("SCL is stuck low and doesn't get unstuck"); + } + if !sda_i(busno) { + return Err("SDA arbitration lost"); + } + sda_oe(busno, true); + half_period(); + scl_oe(busno, true); + // postcondition: SCL and SDA low + Ok(()) + } + + pub fn restart(busno: 
u8) -> Result<(), &'static str> { + if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { + return Err(INVALID_BUS) + } + // precondition SCL and SDA low + sda_oe(busno, false); + half_period(); + scl_oe(busno, false); + half_period(); + start(busno)?; + // postcondition: SCL and SDA low + Ok(()) + } + + pub fn stop(busno: u8) -> Result<(), &'static str> { + if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { + return Err(INVALID_BUS) + } + // precondition: SCL and SDA low + half_period(); + scl_oe(busno, false); + half_period(); + sda_oe(busno, false); + half_period(); + if !sda_i(busno) { + return Err("SDA arbitration lost"); + } + // postcondition: SCL and SDA high + Ok(()) + } + + pub fn write(busno: u8, data: u8) -> Result<bool, &'static str> { + if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { + return Err(INVALID_BUS) + } + // precondition: SCL and SDA low + // MSB first + for bit in (0..8).rev() { + sda_oe(busno, data & (1 << bit) == 0); + half_period(); + scl_oe(busno, false); + half_period(); + scl_oe(busno, true); + } + sda_oe(busno, false); + half_period(); + scl_oe(busno, false); + half_period(); + // Read ack/nack + let ack = !sda_i(busno); + scl_oe(busno, true); + sda_oe(busno, true); + // postcondition: SCL and SDA low + + Ok(ack) + } + + pub fn read(busno: u8, ack: bool) -> Result<u8, &'static str> { + if busno as u32 >= csr::CONFIG_I2C_BUS_COUNT { + return Err(INVALID_BUS) + } + // precondition: SCL and SDA low + sda_oe(busno, false); + + let mut data: u8 = 0; + + // MSB first + for bit in (0..8).rev() { + half_period(); + scl_oe(busno, false); + half_period(); + if sda_i(busno) { data |= 1 << bit } + scl_oe(busno, true); + } + // Send ack/nack + sda_oe(busno, ack); + half_period(); + scl_oe(busno, false); + half_period(); + scl_oe(busno, true); + sda_oe(busno, true); + // postcondition: SCL and SDA low + + Ok(data) + } + + pub fn pca9548_select(busno: u8, address: u8, channels: u8) -> Result<(), &'static str> { + start(busno)?; + if !write(busno, address << 1)? { + return Err("PCA9548 failed to ack write address") + } + if !write(busno, channels)? 
{ + return Err("PCA9548 failed to ack control word") + } + stop(busno)?; + Ok(()) + } +} + +#[cfg(not(has_i2c))] +mod imp { + const NO_I2C: &'static str = "No I2C support on this platform"; + pub fn init() -> Result<(), &'static str> { Err(NO_I2C) } + pub fn start(_busno: u8) -> Result<(), &'static str> { Err(NO_I2C) } + pub fn restart(_busno: u8) -> Result<(), &'static str> { Err(NO_I2C) } + pub fn stop(_busno: u8) -> Result<(), &'static str> { Err(NO_I2C) } + pub fn write(_busno: u8, _data: u8) -> Result<bool, &'static str> { Err(NO_I2C) } + pub fn read(_busno: u8, _ack: bool) -> Result<u8, &'static str> { Err(NO_I2C) } + pub fn pca9548_select(_busno: u8, _address: u8, _channels: u8) -> Result<(), &'static str> { Err(NO_I2C) } +} + +pub use self::imp::*; diff --git a/artiq/firmware/libboard_misoc/i2c_eeprom.rs b/artiq/firmware/libboard_misoc/i2c_eeprom.rs new file mode 100644 index 000000000..6f2dee15e --- /dev/null +++ b/artiq/firmware/libboard_misoc/i2c_eeprom.rs @@ -0,0 +1,66 @@ +use i2c; + +/// [Hardware manual](http://ww1.microchip.com/downloads/en/DeviceDoc/24AA02E48-24AA025E48-24AA02E64-24AA025E64-Data-Sheet-20002124H.pdf) +pub struct EEPROM { + busno: u8, + port: u8, + address: u8, +} + +impl EEPROM { + #[cfg(all(soc_platform = "kasli", any(hw_rev = "v1.0", hw_rev = "v1.1")))] + pub fn new() -> Self { + EEPROM { + busno: 0, + /// Same port as Si5324 + port: 11, + address: 0xa0, + } + } + + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + pub fn new() -> Self { + EEPROM { + busno: 0, + /// SHARED I2C bus + port: 11, + address: 0xae, + } + } + + #[cfg(soc_platform = "kasli")] + fn select(&self) -> Result<(), &'static str> { + let mask: u16 = 1 << self.port; + i2c::pca9548_select(self.busno, 0x70, mask as u8)?; + i2c::pca9548_select(self.busno, 0x71, (mask >> 8) as u8)?; + Ok(()) + } + + pub fn read<'a>(&self, addr: u8, buf: &'a mut [u8]) -> Result<(), &'static str> { + self.select()?; + + i2c::start(self.busno)?; + i2c::write(self.busno, self.address)?; + i2c::write(self.busno, addr)?; + + i2c::restart(self.busno)?; + i2c::write(self.busno, self.address | 1)?; + let buf_len = buf.len(); + for (i, byte) in buf.iter_mut().enumerate() { + *byte = i2c::read(self.busno, i < buf_len - 1)?; + } + + i2c::stop(self.busno)?; + + Ok(()) + } + + /// > The 24AA02XEXX is programmed at the factory with a + /// > globally unique node address stored in the upper half + /// > of the array and permanently write-protected. 
+ pub fn read_eui48<'a>(&self) -> Result<[u8; 6], &'static str> { + let mut buffer = [0u8; 6]; + self.read(0xFA, &mut buffer)?; + Ok(buffer) + } +} diff --git a/artiq/firmware/libboard_misoc/ident.rs b/artiq/firmware/libboard_misoc/ident.rs new file mode 100644 index 000000000..76ce2382d --- /dev/null +++ b/artiq/firmware/libboard_misoc/ident.rs @@ -0,0 +1,15 @@ +use core::{cmp, str}; +use csr; + +pub fn read(buf: &mut [u8]) -> &str { + unsafe { + csr::identifier::address_write(0); + let len = csr::identifier::data_read(); + let len = cmp::min(len, buf.len() as u8); + for i in 0..len { + csr::identifier::address_write(1 + i); + buf[i as usize] = csr::identifier::data_read(); + } + str::from_utf8_unchecked(&buf[..len as usize]) + } +} diff --git a/artiq/firmware/libboard_misoc/io_expander.rs b/artiq/firmware/libboard_misoc/io_expander.rs new file mode 100644 index 000000000..d2d2acd2f --- /dev/null +++ b/artiq/firmware/libboard_misoc/io_expander.rs @@ -0,0 +1,117 @@ +use i2c; +use csr; + +pub struct IoExpander { + busno: u8, + port: u8, + address: u8, + virtual_led_mapping: &'static [(u8, u8, u8)], + iodir: [u8; 2], + out_current: [u8; 2], + out_target: [u8; 2], +} + +impl IoExpander { + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + pub fn new(index: u8) -> Self { + const VIRTUAL_LED_MAPPING0: [(u8, u8, u8); 2] = [(0, 0, 6), (1, 1, 6)]; + const VIRTUAL_LED_MAPPING1: [(u8, u8, u8); 2] = [(2, 0, 6), (3, 1, 6)]; + // Both expanders on SHARED I2C bus + match index { + 0 => IoExpander { + busno: 0, + port: 11, + address: 0x40, + virtual_led_mapping: &VIRTUAL_LED_MAPPING0, + iodir: [0xff; 2], + out_current: [0; 2], + out_target: [0; 2], + }, + 1 => IoExpander { + busno: 0, + port: 11, + address: 0x42, + virtual_led_mapping: &VIRTUAL_LED_MAPPING1, + iodir: [0xff; 2], + out_current: [0; 2], + out_target: [0; 2], + }, + _ => panic!("incorrect I/O expander index"), + } + } + + #[cfg(soc_platform = "kasli")] + fn select(&self) -> Result<(), &'static str> { + let mask: u16 = 1 << self.port; + i2c::pca9548_select(self.busno, 0x70, mask as u8)?; + i2c::pca9548_select(self.busno, 0x71, (mask >> 8) as u8)?; + Ok(()) + } + + fn write(&self, addr: u8, value: u8) -> Result<(), &'static str> { + i2c::start(self.busno)?; + i2c::write(self.busno, self.address)?; + i2c::write(self.busno, addr)?; + i2c::write(self.busno, value)?; + i2c::stop(self.busno)?; + Ok(()) + } + + fn update_iodir(&self) -> Result<(), &'static str> { + self.write(0x00, self.iodir[0])?; + self.write(0x01, self.iodir[1])?; + Ok(()) + } + + pub fn init(&mut self) -> Result<(), &'static str> { + self.select()?; + + for (_led, port, bit) in self.virtual_led_mapping.iter() { + self.iodir[*port as usize] &= !(1 << *bit); + } + self.update_iodir()?; + + self.out_current[0] = 0x00; + self.write(0x12, 0x00)?; + self.out_current[1] = 0x00; + self.write(0x13, 0x00)?; + Ok(()) + } + + pub fn set_oe(&mut self, port: u8, outputs: u8) -> Result<(), &'static str> { + self.iodir[port as usize] &= !outputs; + self.update_iodir()?; + Ok(()) + } + + pub fn set(&mut self, port: u8, bit: u8, high: bool) { + if high { + self.out_target[port as usize] |= 1 << bit; + } else { + self.out_target[port as usize] &= !(1 << bit); + } + } + + pub fn service(&mut self) -> Result<(), &'static str> { + for (led, port, bit) in self.virtual_led_mapping.iter() { + let level = unsafe { + (csr::virtual_leds::status_read() >> led) & 1 + }; + self.set(*port, *bit, level != 0); + } + + if self.out_target != self.out_current { + self.select()?; + if self.out_target[0] != 
self.out_current[0] { + self.write(0x12, self.out_target[0])?; + self.out_current[0] = self.out_target[0]; + } + if self.out_target[1] != self.out_current[1] { + self.write(0x13, self.out_target[1])?; + self.out_current[1] = self.out_target[1]; + } + } + + Ok(()) + } +} diff --git a/artiq/firmware/libboard_misoc/lib.rs b/artiq/firmware/libboard_misoc/lib.rs new file mode 100644 index 000000000..5e8a92972 --- /dev/null +++ b/artiq/firmware/libboard_misoc/lib.rs @@ -0,0 +1,45 @@ +#![no_std] +#![feature(asm, try_from)] + +extern crate byteorder; +#[cfg(feature = "log")] +extern crate log; +#[cfg(feature = "smoltcp")] +extern crate smoltcp; + +#[cfg(target_arch = "or1k")] +#[path = "or1k/mod.rs"] +mod arch; + +pub use arch::*; + +include!(concat!(env!("BUILDINC_DIRECTORY"), "/generated/mem.rs")); +include!(concat!(env!("BUILDINC_DIRECTORY"), "/generated/csr.rs")); +#[cfg(has_dfii)] +include!(concat!(env!("BUILDINC_DIRECTORY"), "/generated/sdram_phy.rs")); +#[cfg(has_dfii)] +pub mod sdram; +pub mod ident; +pub mod clock; +#[cfg(has_uart)] +pub mod uart; +#[cfg(has_spiflash)] +pub mod spiflash; +pub mod config; +#[cfg(feature = "uart_console")] +#[macro_use] +pub mod uart_console; +#[cfg(all(feature = "uart_console", feature = "log"))] +#[macro_use] +pub mod uart_logger; +#[cfg(all(has_ethmac, feature = "smoltcp"))] +pub mod ethmac; +pub mod i2c; +#[cfg(soc_platform = "kasli")] +pub mod i2c_eeprom; +#[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] +pub mod io_expander; +#[cfg(all(has_ethmac, feature = "smoltcp"))] +pub mod net_settings; +#[cfg(has_slave_fpga_cfg)] +pub mod slave_fpga; diff --git a/artiq/firmware/libboard_misoc/net_settings.rs b/artiq/firmware/libboard_misoc/net_settings.rs new file mode 100644 index 000000000..2663be5db --- /dev/null +++ b/artiq/firmware/libboard_misoc/net_settings.rs @@ -0,0 +1,84 @@ +use core::fmt; + +use smoltcp::wire::{EthernetAddress, IpAddress}; + +use config; +#[cfg(soc_platform = "kasli")] +use i2c_eeprom; + + +pub struct NetAddresses { + pub hardware_addr: EthernetAddress, + pub ipv4_addr: IpAddress, + pub ipv6_ll_addr: IpAddress, + pub ipv6_addr: Option +} + +impl fmt::Display for NetAddresses { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "MAC={} IPv4={} IPv6-LL={} IPv6=", + self.hardware_addr, self.ipv4_addr, self.ipv6_ll_addr)?; + match self.ipv6_addr { + Some(addr) => write!(f, "{}", addr)?, + None => write!(f, "no configured address")? 
+ } + Ok(()) + } +} + +pub fn get_adresses() -> NetAddresses { + let hardware_addr; + match config::read_str("mac", |r| r.map(|s| s.parse())) { + Ok(Ok(addr)) => hardware_addr = addr, + _ => { + #[cfg(soc_platform = "kasli")] + { + let eeprom = i2c_eeprom::EEPROM::new(); + hardware_addr = + eeprom.read_eui48() + .map(|addr_buf| EthernetAddress(addr_buf)) + .unwrap_or_else(|_e| EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x21])); + } + #[cfg(soc_platform = "sayma_amc")] + { hardware_addr = EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x11]); } + #[cfg(soc_platform = "metlino")] + { hardware_addr = EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x19]); } + #[cfg(soc_platform = "kc705")] + { hardware_addr = EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x01]); } + } + } + + let ipv4_addr; + match config::read_str("ip", |r| r.map(|s| s.parse())) { + Ok(Ok(addr)) => ipv4_addr = addr, + _ => { + #[cfg(soc_platform = "kasli")] + { ipv4_addr = IpAddress::v4(192, 168, 1, 70); } + #[cfg(soc_platform = "sayma_amc")] + { ipv4_addr = IpAddress::v4(192, 168, 1, 60); } + #[cfg(soc_platform = "metlino")] + { ipv4_addr = IpAddress::v4(192, 168, 1, 65); } + #[cfg(soc_platform = "kc705")] + { ipv4_addr = IpAddress::v4(192, 168, 1, 50); } + } + } + + let ipv6_ll_addr = IpAddress::v6( + 0xfe80, 0x0000, 0x0000, 0x0000, + (((hardware_addr.0[0] ^ 0x02) as u16) << 8) | (hardware_addr.0[1] as u16), + ((hardware_addr.0[2] as u16) << 8) | 0x00ff, + 0xfe00 | (hardware_addr.0[3] as u16), + ((hardware_addr.0[4] as u16) << 8) | (hardware_addr.0[5] as u16)); + + let ipv6_addr = match config::read_str("ip6", |r| r.map(|s| s.parse())) { + Ok(Ok(addr)) => Some(addr), + _ => None + }; + + NetAddresses { + hardware_addr: hardware_addr, + ipv4_addr: ipv4_addr, + ipv6_ll_addr: ipv6_ll_addr, + ipv6_addr: ipv6_addr + } +} diff --git a/artiq/firmware/libboard/boot.rs b/artiq/firmware/libboard_misoc/or1k/boot.rs similarity index 51% rename from artiq/firmware/libboard/boot.rs rename to artiq/firmware/libboard_misoc/or1k/boot.rs index bd54177a0..51a368e6e 100644 --- a/artiq/firmware/libboard/boot.rs +++ b/artiq/firmware/libboard_misoc/or1k/boot.rs @@ -1,40 +1,48 @@ -use irq; +use super::{irq, cache}; -pub unsafe fn reboot() -> ! { +pub unsafe fn reset() -> ! { irq::set_ie(false); - #[cfg(target_arch="or1k")] asm!(r#" - l.j _ftext - l.nop + l.j _reset_handler + l.nop "# : : : : "volatile"); loop {} } -pub unsafe fn hotswap(new_code: &[u8]) -> ! { +pub unsafe fn jump(addr: usize) -> ! { + irq::set_ie(false); + cache::flush_cpu_icache(); + asm!(r#" + l.jr $0 + l.nop + "# : : "r"(addr) : : "volatile"); + loop {} +} + +pub unsafe fn hotswap(firmware: &[u8]) -> ! { irq::set_ie(false); - #[cfg(target_arch="or1k")] asm!(r#" # This loop overwrites itself, but it's structured in such a way # that before that happens, it loads itself into I$$ fully. 
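# (Register usage in this copy loop: r3 = source address of the new firmware image,
#  r5 = remaining length in bytes, r4/r7 = copy destination at _reset_handler; once r5
#  reaches zero, execution jumps through r7 into the freshly copied image.)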
- l.movhi r4, hi(_ftext) - l.ori r4, r4, lo(_ftext) + l.movhi r4, hi(_reset_handler) + l.ori r4, r4, lo(_reset_handler) l.or r7, r4, r0 0: l.sfnei r5, 0 l.bf 1f - l.nop + l.nop l.jr r7 - l.nop + l.nop 1: l.lwz r6, 0(r3) l.sw 0(r4), r6 l.addi r3, r3, 4 l.addi r4, r4, 4 l.addi r5, r5, -4 l.bf 0b - l.nop + l.nop "# : - : "{r3}"(new_code.as_ptr() as usize), - "{r5}"(new_code.len()) + : "{r3}"(firmware.as_ptr() as usize), + "{r5}"(firmware.len()) : : "volatile"); loop {} diff --git a/artiq/firmware/libboard/cache.rs b/artiq/firmware/libboard_misoc/or1k/cache.rs similarity index 50% rename from artiq/firmware/libboard/cache.rs rename to artiq/firmware/libboard_misoc/or1k/cache.rs index e90bb1061..9357917c9 100644 --- a/artiq/firmware/libboard/cache.rs +++ b/artiq/firmware/libboard_misoc/or1k/cache.rs @@ -1,19 +1,22 @@ +#[cfg(has_ddrphy)] use core::ptr; -use spr::{self, mfspr, mtspr}; +use super::spr::*; +#[cfg(has_ddrphy)] use csr; +#[cfg(has_ddrphy)] use mem; pub fn flush_cpu_icache() { unsafe { - let iccfgr = mfspr(spr::SPR_ICCFGR); - let ways = 1 << (iccfgr & spr::SPR_ICCFGR_NCW); - let set_size = 1 << ((iccfgr & spr::SPR_ICCFGR_NCS) >> 3); - let block_size = if iccfgr & spr::SPR_ICCFGR_CBS != 0 { 32 } else { 16 }; + let iccfgr = mfspr(SPR_ICCFGR); + let ways = 1 << (iccfgr & SPR_ICCFGR_NCW); + let set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); + let block_size = if iccfgr & SPR_ICCFGR_CBS != 0 { 32 } else { 16 }; let size = set_size * ways * block_size; let mut i = 0; while i < size { - mtspr(spr::SPR_ICBIR, i); + mtspr(SPR_ICBIR, i); i += block_size; } } @@ -21,20 +24,21 @@ pub fn flush_cpu_icache() { pub fn flush_cpu_dcache() { unsafe { - let dccfgr = mfspr(spr::SPR_DCCFGR); - let ways = 1 << (dccfgr & spr::SPR_ICCFGR_NCW); - let set_size = 1 << ((dccfgr & spr::SPR_DCCFGR_NCS) >> 3); - let block_size = if dccfgr & spr::SPR_DCCFGR_CBS != 0 { 32 } else { 16 }; + let dccfgr = mfspr(SPR_DCCFGR); + let ways = 1 << (dccfgr & SPR_ICCFGR_NCW); + let set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); + let block_size = if dccfgr & SPR_DCCFGR_CBS != 0 { 32 } else { 16 }; let size = set_size * ways * block_size; let mut i = 0; while i < size { - mtspr(spr::SPR_DCBIR, i); + mtspr(SPR_DCBIR, i); i += block_size; } } } +#[cfg(has_ddrphy)] pub fn flush_l2_cache() { unsafe { for i in 0..2 * (csr::CONFIG_L2_SIZE as usize) / 4 { diff --git a/artiq/firmware/libboard_misoc/or1k/irq.rs b/artiq/firmware/libboard_misoc/or1k/irq.rs new file mode 100644 index 000000000..bc745ae02 --- /dev/null +++ b/artiq/firmware/libboard_misoc/or1k/irq.rs @@ -0,0 +1,107 @@ +use core::{fmt, convert}; + +use super::spr::*; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum Exception { + Reset = 0x1, + BusError = 0x2, + DataPageFault = 0x3, + InsnPageFault = 0x4, + Tick = 0x5, + Alignment = 0x6, + IllegalInsn = 0x7, + Interrupt = 0x8, + DtlbMiss = 0x9, + ItlbMiss = 0xa, + Range = 0xb, + Syscall = 0xc, + FloatingPoint = 0xd, + Trap = 0xe, +} + +impl fmt::Display for Exception { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Exception::Reset => write!(f, "reset"), + Exception::BusError => write!(f, "bus error"), + Exception::DataPageFault => write!(f, "data page fault"), + Exception::InsnPageFault => write!(f, "instruction page fault"), + Exception::Tick => write!(f, "tick"), + Exception::Alignment => write!(f, "alignment"), + Exception::IllegalInsn => write!(f, "illegal instruction"), + Exception::Interrupt => write!(f, "interrupt"), + Exception::DtlbMiss => write!(f, "D-TLB 
miss"), + Exception::ItlbMiss => write!(f, "I-TLB miss"), + Exception::Range => write!(f, "range"), + Exception::Syscall => write!(f, "system call"), + Exception::FloatingPoint => write!(f, "floating point"), + Exception::Trap => write!(f, "trap"), + } + } +} + +impl convert::TryFrom for Exception { + type Error = (); + + fn try_from(num: u32) -> Result { + match num { + 0x1 => Ok(Exception::Reset), + 0x2 => Ok(Exception::BusError), + 0x3 => Ok(Exception::DataPageFault), + 0x4 => Ok(Exception::InsnPageFault), + 0x5 => Ok(Exception::Tick), + 0x6 => Ok(Exception::Alignment), + 0x7 => Ok(Exception::IllegalInsn), + 0x8 => Ok(Exception::Interrupt), + 0x9 => Ok(Exception::DtlbMiss), + 0xa => Ok(Exception::ItlbMiss), + 0xb => Ok(Exception::Range), + 0xc => Ok(Exception::Syscall), + 0xd => Ok(Exception::FloatingPoint), + 0xe => Ok(Exception::Trap), + _ => Err(()) + } + } +} + +#[inline] +pub fn get_ie() -> bool { + unsafe { mfspr(SPR_SR) & SPR_SR_IEE != 0 } +} + +#[inline] +pub fn set_ie(ie: bool) { + if ie { + unsafe { mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_IEE) } + } else { + unsafe { mtspr(SPR_SR, mfspr(SPR_SR) & !SPR_SR_IEE) } + } +} + +#[inline] +pub fn get_mask() -> u32 { + unsafe { mfspr(SPR_PICMR) } +} + +#[inline] +pub fn set_mask(mask: u32) { + unsafe { mtspr(SPR_PICMR, mask) } +} + +#[inline] +pub fn pending_mask() -> u32 { + unsafe { mfspr(SPR_PICSR) } +} + +pub fn enable(irq: u32) { + set_mask(get_mask() | (1 << irq)) +} + +pub fn disable(irq: u32) { + set_mask(get_mask() & !(1 << irq)) +} + +pub fn is_pending(irq: u32) -> bool { + get_mask() & (1 << irq) != 0 +} diff --git a/artiq/firmware/libboard_misoc/or1k/mod.rs b/artiq/firmware/libboard_misoc/or1k/mod.rs new file mode 100644 index 000000000..52a619d1b --- /dev/null +++ b/artiq/firmware/libboard_misoc/or1k/mod.rs @@ -0,0 +1,4 @@ +pub mod spr; +pub mod irq; +pub mod cache; +pub mod boot; diff --git a/artiq/firmware/libboard/spr.rs b/artiq/firmware/libboard_misoc/or1k/spr.rs similarity index 99% rename from artiq/firmware/libboard/spr.rs rename to artiq/firmware/libboard_misoc/or1k/spr.rs index 8f011a564..14bb5ae3d 100644 --- a/artiq/firmware/libboard/spr.rs +++ b/artiq/firmware/libboard_misoc/or1k/spr.rs @@ -1,9 +1,11 @@ +#[inline(always)] pub unsafe fn mfspr(reg: u32) -> u32 { let value: u32; asm!("l.mfspr $0, $1, 0" : "=r"(value) : "r"(reg) : : "volatile"); value } +#[inline(always)] pub unsafe fn mtspr(reg: u32, value: u32) { asm!("l.mtspr $0, $1, 0" : : "r"(reg), "r"(value) : : "volatile") } diff --git a/artiq/firmware/libboard_misoc/or1k/vectors.S b/artiq/firmware/libboard_misoc/or1k/vectors.S new file mode 100644 index 000000000..708aed9df --- /dev/null +++ b/artiq/firmware/libboard_misoc/or1k/vectors.S @@ -0,0 +1,413 @@ +/* + * (C) Copyright 2012, Stefan Kristiansson + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include + +/* + * OR1K Architecture has a 128 byte "red zone" after the stack that can not be + * touched by exception handlers. GCC uses this red zone for locals and + * temps without needing to change the stack pointer. + */ +#define OR1K_RED_ZONE_SIZE 128 + +/* + * We need 4 bytes (32 bits) * 32 registers space on the stack to save all the + * registers. + */ +#define EXCEPTION_STACK_SIZE ((4*32) + OR1K_RED_ZONE_SIZE) + +#define HANDLE_EXCEPTION ; \ + l.addi r1, r1, -EXCEPTION_STACK_SIZE ; \ + l.sw 0x1c(r1), r9 ; \ + l.jal _exception_handler ; \ + l.nop ; \ + l.lwz r9, 0x1c(r1) ; \ + l.addi r1, r1, EXCEPTION_STACK_SIZE ; \ + l.rfe ; \ + l.nop + + +.section .vectors, "ax", @progbits +.global _reset_handler +_reset_handler: + l.movhi r0, 0 + l.movhi r1, 0 + l.movhi r2, 0 + l.movhi r3, 0 + l.movhi r4, 0 + l.movhi r5, 0 + l.movhi r6, 0 + l.movhi r7, 0 + l.movhi r8, 0 + l.movhi r9, 0 + l.movhi r10, 0 + l.movhi r11, 0 + l.movhi r12, 0 + l.movhi r13, 0 + l.movhi r14, 0 + l.movhi r15, 0 + l.movhi r16, 0 + l.movhi r17, 0 + l.movhi r18, 0 + l.movhi r19, 0 + l.movhi r20, 0 + l.movhi r21, 0 + l.movhi r22, 0 + l.movhi r23, 0 + l.movhi r24, 0 + l.movhi r25, 0 + l.movhi r26, 0 + l.movhi r27, 0 + l.movhi r28, 0 + l.movhi r29, 0 + l.movhi r30, 0 + l.movhi r31, 0 + + l.ori r21, r0, SPR_SR_SM + l.mtspr r0, r21, SPR_SR + l.movhi r21, hi(_reset_handler) + l.ori r21, r21, lo(_reset_handler) + l.mtspr r0, r21, SPR_EVBAR + /* enable caches */ + l.jal _cache_init + l.nop + l.j _crt0 + l.nop + + /* bus error */ + .org 0x200 + HANDLE_EXCEPTION + + /* data page fault */ + .org 0x300 + HANDLE_EXCEPTION + + /* instruction page fault */ + .org 0x400 + HANDLE_EXCEPTION + + /* tick timer */ + .org 0x500 + HANDLE_EXCEPTION + + /* alignment */ + .org 0x600 + HANDLE_EXCEPTION + + /* illegal instruction */ + .org 0x700 + HANDLE_EXCEPTION + + /* external interrupt */ + .org 0x800 + HANDLE_EXCEPTION + + /* D-TLB miss */ + .org 0x900 + HANDLE_EXCEPTION + + /* I-TLB miss */ + .org 0xa00 + HANDLE_EXCEPTION + + /* range */ + .org 0xb00 + HANDLE_EXCEPTION + + /* system call */ + .org 0xc00 + HANDLE_EXCEPTION + + /* floating point */ + .org 0xd00 + HANDLE_EXCEPTION + + /* trap */ + .org 0xe00 + HANDLE_EXCEPTION + + /* reserved */ + .org 0xf00 + HANDLE_EXCEPTION + + .org 0x1000 +_crt0: + /* Setup stack and global pointer */ + l.movhi r1, hi(_fstack) + l.ori r1, r1, lo(_fstack) + + /* Clear BSS */ + l.movhi r21, hi(_fbss) + l.ori r21, r21, lo(_fbss) + l.movhi r3, hi(_ebss) + l.ori r3, r3, lo(_ebss) +.clearBSS: + l.sfeq r21, r3 + l.bf .callMain + l.nop + l.sw 0(r21), r0 + l.addi r21, r21, 4 + l.j .clearBSS + l.nop + +.callMain: + l.j main + l.nop + +_exception_handler: + .cfi_startproc + .cfi_return_column 32 + .cfi_signal_frame + .cfi_def_cfa_offset EXCEPTION_STACK_SIZE + l.sw 0x00(r1), r2 + .cfi_offset 2, 0x00-EXCEPTION_STACK_SIZE + l.sw 0x04(r1), r3 + .cfi_offset 3, 0x04-EXCEPTION_STACK_SIZE + l.sw 0x08(r1), r4 + .cfi_offset 4, 0x08-EXCEPTION_STACK_SIZE + l.sw 0x0c(r1), r5 + .cfi_offset 5, 0x0c-EXCEPTION_STACK_SIZE + l.sw 0x10(r1), r6 + .cfi_offset 6, 0x10-EXCEPTION_STACK_SIZE + l.sw 0x14(r1), r7 + .cfi_offset 7, 0x14-EXCEPTION_STACK_SIZE + l.sw 0x18(r1), r8 + .cfi_offset 8, 0x18-EXCEPTION_STACK_SIZE + /* r9 saved in HANDLE_EXCEPTION */ + .cfi_offset 9, 0x1c-EXCEPTION_STACK_SIZE + l.sw 0x20(r1), r10 + 
.cfi_offset 10, 0x20-EXCEPTION_STACK_SIZE + l.sw 0x24(r1), r11 + .cfi_offset 11, 0x24-EXCEPTION_STACK_SIZE + l.sw 0x28(r1), r12 + .cfi_offset 12, 0x28-EXCEPTION_STACK_SIZE + l.sw 0x2c(r1), r13 + .cfi_offset 13, 0x2c-EXCEPTION_STACK_SIZE + l.sw 0x30(r1), r14 + .cfi_offset 14, 0x30-EXCEPTION_STACK_SIZE + l.sw 0x34(r1), r15 + .cfi_offset 15, 0x34-EXCEPTION_STACK_SIZE + l.sw 0x38(r1), r16 + .cfi_offset 16, 0x38-EXCEPTION_STACK_SIZE + l.sw 0x3c(r1), r17 + .cfi_offset 17, 0x3c-EXCEPTION_STACK_SIZE + l.sw 0x40(r1), r18 + .cfi_offset 18, 0x40-EXCEPTION_STACK_SIZE + l.sw 0x44(r1), r19 + .cfi_offset 19, 0x44-EXCEPTION_STACK_SIZE + l.sw 0x48(r1), r20 + .cfi_offset 20, 0x48-EXCEPTION_STACK_SIZE + l.sw 0x4c(r1), r21 + .cfi_offset 21, 0x4c-EXCEPTION_STACK_SIZE + l.sw 0x50(r1), r22 + .cfi_offset 22, 0x50-EXCEPTION_STACK_SIZE + l.sw 0x54(r1), r23 + .cfi_offset 23, 0x54-EXCEPTION_STACK_SIZE + l.sw 0x58(r1), r24 + .cfi_offset 24, 0x58-EXCEPTION_STACK_SIZE + l.sw 0x5c(r1), r25 + .cfi_offset 25, 0x5c-EXCEPTION_STACK_SIZE + l.sw 0x60(r1), r26 + .cfi_offset 26, 0x60-EXCEPTION_STACK_SIZE + l.sw 0x64(r1), r27 + .cfi_offset 27, 0x64-EXCEPTION_STACK_SIZE + l.sw 0x68(r1), r28 + .cfi_offset 28, 0x68-EXCEPTION_STACK_SIZE + l.sw 0x6c(r1), r29 + .cfi_offset 29, 0x6c-EXCEPTION_STACK_SIZE + l.sw 0x70(r1), r30 + .cfi_offset 30, 0x70-EXCEPTION_STACK_SIZE + l.sw 0x74(r1), r31 + .cfi_offset 31, 0x74-EXCEPTION_STACK_SIZE + + /* Save return address */ + l.or r14, r0, r9 + /* Calculate exception vector from handler address */ + l.andi r3, r9, 0xf00 + l.srli r3, r3, 8 + /* Pass saved register state */ + l.or r4, r0, r1 + /* Extract exception PC */ + l.mfspr r5, r0, SPR_EPCR_BASE + /* Tell exception PC to the unwinder */ + l.sw 0x78(r1), r5 + .cfi_offset 32, 0x78-EXCEPTION_STACK_SIZE + /* Extract exception effective address */ + l.mfspr r6, r0, SPR_EEAR_BASE + /* Extract exception SR */ + l.mfspr r7, r0, SPR_ESR_BASE + /* Call exception handler with the link address as argument */ + l.jal exception + l.nop + + /* Load return address */ + l.or r9, r0, r14 + /* Restore state */ + l.lwz r2, 0x00(r1) + l.lwz r3, 0x04(r1) + l.lwz r4, 0x08(r1) + l.lwz r5, 0x0c(r1) + l.lwz r6, 0x10(r1) + l.lwz r7, 0x14(r1) + l.lwz r8, 0x18(r1) + l.lwz r10, 0x20(r1) + l.lwz r11, 0x24(r1) + l.lwz r12, 0x28(r1) + l.lwz r13, 0x2c(r1) + l.lwz r14, 0x30(r1) + l.lwz r15, 0x34(r1) + l.lwz r16, 0x38(r1) + l.lwz r17, 0x3c(r1) + l.lwz r18, 0x40(r1) + l.lwz r19, 0x44(r1) + l.lwz r20, 0x48(r1) + l.lwz r21, 0x4c(r1) + l.lwz r22, 0x50(r1) + l.lwz r23, 0x54(r1) + l.lwz r24, 0x58(r1) + l.lwz r25, 0x5c(r1) + l.lwz r26, 0x60(r1) + l.lwz r27, 0x64(r1) + l.lwz r28, 0x68(r1) + l.lwz r29, 0x6c(r1) + l.lwz r30, 0x70(r1) + l.lwz r31, 0x74(r1) + l.jr r9 + l.nop + .cfi_endproc + +.global _cache_init +_cache_init: + /* + This function is to be used ONLY during reset, before main() is called. 
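What it does: for each cache it checks presence via SPR_UPR, clears the enable bit,
derives the block size and set count from SPR_ICCFGR/SPR_DCCFGR, invalidates every
block through SPR_ICBIR/SPR_DCBIR, and finally re-enables the cache by setting
SPR_SR_ICE/SPR_SR_DCE.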
+ TODO: Perhaps break into individual enable instruction/data cache + sections functions, and provide disable functions, also, all + callable from C + */ + + /* Instruction cache enable */ + /* Check if IC present and skip enabling otherwise */ +#if 1 +.L6: + l.mfspr r3,r0,SPR_UPR + l.andi r7,r3,SPR_UPR_ICP + l.sfeq r7,r0 + l.bf .L8 + l.nop + + /* Disable IC */ + l.mfspr r6,r0,SPR_SR + l.addi r5,r0,-1 + l.xori r5,r5,SPR_SR_ICE + l.and r5,r6,r5 + l.mtspr r0,r5,SPR_SR + + /* Establish cache block size + If BS=0, 16; + If BS=1, 32; + r14 contain block size + */ + l.mfspr r3,r0,SPR_ICCFGR + l.andi r7,r3,SPR_ICCFGR_CBS + l.srli r8,r7,7 + l.ori r4,r0,16 + l.sll r14,r4,r8 + + /* Establish number of cache sets + r10 contains number of cache sets + r8 contains log(# of cache sets) + */ + l.andi r7,r3,SPR_ICCFGR_NCS + l.srli r8,r7,3 + l.ori r4,r0,1 + l.sll r10,r4,r8 + + /* Invalidate IC */ + l.addi r6,r0,0 + l.sll r5,r14,r8 + +.L7: l.mtspr r0,r6,SPR_ICBIR + l.sfne r6,r5 + l.bf .L7 + l.add r6,r6,r14 + + /* Enable IC */ + l.mfspr r6,r0,SPR_SR + l.ori r6,r6,SPR_SR_ICE + l.mtspr r0,r6,SPR_SR + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + l.nop + /* Data cache enable */ + /* Check if DC present and skip enabling otherwise */ +#endif +.L8: +#if 1 + l.mfspr r3,r0,SPR_UPR + l.andi r7,r3,SPR_UPR_DCP + l.sfeq r7,r0 + l.bf .L10 + l.nop + /* Disable DC */ + l.mfspr r6,r0,SPR_SR + l.addi r5,r0,-1 + l.xori r5,r5,SPR_SR_DCE + l.and r5,r6,r5 + l.mtspr r0,r5,SPR_SR + /* Establish cache block size + If BS=0, 16; + If BS=1, 32; + r14 contain block size + */ + l.mfspr r3,r0,SPR_DCCFGR + l.andi r7,r3,SPR_DCCFGR_CBS + l.srli r8,r7,7 + l.ori r4,r0,16 + l.sll r14,r4,r8 + /* Establish number of cache sets + r10 contains number of cache sets + r8 contains log(# of cache sets) + */ + l.andi r7,r3,SPR_DCCFGR_NCS + l.srli r8,r7,3 + l.ori r4,r0,1 + l.sll r10,r4,r8 + /* Invalidate DC */ + l.addi r6,r0,0 + l.sll r5,r14,r8 + +.L9: + l.mtspr r0,r6,SPR_DCBIR + l.sfne r6,r5 + l.bf .L9 + l.add r6,r6,r14 + /* Enable DC */ + l.mfspr r6,r0,SPR_SR + l.ori r6,r6,SPR_SR_DCE + l.mtspr r0,r6,SPR_SR +#endif +.L10: + /* Return */ + l.jr r9 + l.nop diff --git a/artiq/firmware/libboard_misoc/sdram.rs b/artiq/firmware/libboard_misoc/sdram.rs new file mode 100644 index 000000000..6e3071c4b --- /dev/null +++ b/artiq/firmware/libboard_misoc/sdram.rs @@ -0,0 +1,462 @@ +#[cfg(has_ddrphy)] +mod ddr { + use core::{ptr, fmt}; + use csr::{dfii, ddrphy}; + use sdram_phy::{self, spin_cycles}; + use sdram_phy::{DFII_COMMAND_CS, DFII_COMMAND_WE, DFII_COMMAND_CAS, DFII_COMMAND_RAS, + DFII_COMMAND_WRDATA, DFII_COMMAND_RDDATA}; + use sdram_phy::{DFII_NPHASES, DFII_PIX_DATA_SIZE, DFII_PIX_WRDATA_ADDR, DFII_PIX_RDDATA_ADDR}; + + #[cfg(kusddrphy)] + const DDRPHY_MAX_DELAY: u16 = 512; + #[cfg(not(kusddrphy))] + const DDRPHY_MAX_DELAY: u16 = 32; + + const DQS_SIGNAL_COUNT: usize = DFII_PIX_DATA_SIZE / 2; + + macro_rules! 
log { + ($logger:expr, $( $arg:expr ),+) => ( + if let &mut Some(ref mut f) = $logger { + let _ = write!(f, $( $arg ),+); + } + ) + } + + #[cfg(ddrphy_wlevel)] + unsafe fn enable_write_leveling(enabled: bool) { + dfii::pi0_address_write(sdram_phy::DDR3_MR1 as u16 | ((enabled as u16) << 7)); + dfii::pi0_baddress_write(1); + sdram_phy::command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS); + ddrphy::wlevel_en_write(enabled as u8); + } + + #[cfg(ddrphy_wlevel)] + unsafe fn write_level_scan(logger: &mut Option<&mut fmt::Write>) { + #[cfg(kusddrphy)] + log!(logger, "DQS initial delay: {} taps\n", ddrphy::wdly_dqs_taps_read()); + log!(logger, "Write leveling scan:\n"); + + enable_write_leveling(true); + spin_cycles(100); + + #[cfg(not(kusddrphy))] + let ddrphy_max_delay : u16 = DDRPHY_MAX_DELAY; + #[cfg(kusddrphy)] + let ddrphy_max_delay : u16 = DDRPHY_MAX_DELAY - ddrphy::wdly_dqs_taps_read(); + + for n in 0..DQS_SIGNAL_COUNT { + let dq_addr = dfii::PI0_RDDATA_ADDR + .offset((DQS_SIGNAL_COUNT - 1 - n) as isize); + + log!(logger, "Module {}:\n", DQS_SIGNAL_COUNT - 1 - n); + + ddrphy::dly_sel_write(1 << n); + + ddrphy::wdly_dq_rst_write(1); + ddrphy::wdly_dqs_rst_write(1); + #[cfg(kusddrphy)] + for _ in 0..ddrphy::wdly_dqs_taps_read() { + ddrphy::wdly_dqs_inc_write(1); + } + + let mut dq; + for _ in 0..ddrphy_max_delay { + ddrphy::wlevel_strobe_write(1); + spin_cycles(10); + dq = ptr::read_volatile(dq_addr); + if dq != 0 { + log!(logger, "1"); + } + else { + log!(logger, "0"); + } + + ddrphy::wdly_dq_inc_write(1); + ddrphy::wdly_dqs_inc_write(1); + } + + log!(logger, "\n"); + } + + enable_write_leveling(false); + } + + #[cfg(ddrphy_wlevel)] + unsafe fn write_level(logger: &mut Option<&mut fmt::Write>, + delay: &mut [u16; DQS_SIGNAL_COUNT], + high_skew: &mut [bool; DQS_SIGNAL_COUNT]) -> bool { + #[cfg(kusddrphy)] + log!(logger, "DQS initial delay: {} taps\n", ddrphy::wdly_dqs_taps_read()); + log!(logger, "Write leveling: "); + + enable_write_leveling(true); + spin_cycles(100); + + #[cfg(not(kusddrphy))] + let ddrphy_max_delay : u16 = DDRPHY_MAX_DELAY; + #[cfg(kusddrphy)] + let ddrphy_max_delay : u16 = DDRPHY_MAX_DELAY - ddrphy::wdly_dqs_taps_read(); + + let mut failed = false; + for n in 0..DQS_SIGNAL_COUNT { + let dq_addr = dfii::PI0_RDDATA_ADDR + .offset((DQS_SIGNAL_COUNT - 1 - n) as isize); + + delay[n] = 0; + high_skew[n] = false; + + ddrphy::dly_sel_write(1 << n); + + ddrphy::wdly_dq_rst_write(1); + ddrphy::wdly_dqs_rst_write(1); + #[cfg(kusddrphy)] + for _ in 0..ddrphy::wdly_dqs_taps_read() { + ddrphy::wdly_dqs_inc_write(1); + } + ddrphy::wlevel_strobe_write(1); + spin_cycles(10); + + let mut incr_delay = || { + delay[n] += 1; + if delay[n] >= ddrphy_max_delay { + failed = true; + return false + } + + ddrphy::wdly_dq_inc_write(1); + ddrphy::wdly_dqs_inc_write(1); + ddrphy::wlevel_strobe_write(1); + spin_cycles(10); + + true + }; + + let mut dq = ptr::read_volatile(dq_addr); + + if dq != 0 { + // Assume this DQ group has between 1 and 2 bit times of skew. + // Bring DQS into the CK=0 zone before continuing leveling. 
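// (The loop below walks the DQ/DQS write delay forward while the sampled DQ stays 1,
//  i.e. until DQS has crossed into the CK=0 half-period; the later `while dq == 0`
//  loop then continues until DQ returns to 1, locating the 0->1 transition used as
//  the leveling point for this group.)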
+ high_skew[n] = true; + + while dq != 0 { + if !incr_delay() { break } + dq = ptr::read_volatile(dq_addr); + } + + // Get a bit further into the 0 zone + #[cfg(kusddrphy)] + for _ in 0..32 { + incr_delay(); + } + } + + while dq == 0 { + if !incr_delay() { break } + dq = ptr::read_volatile(dq_addr); + } + } + + enable_write_leveling(false); + + for n in (0..DQS_SIGNAL_COUNT).rev() { + log!(logger, "{}{} ", delay[n], if high_skew[n] { "*" } else { "" }); + } + + if !failed { + log!(logger, "done\n") + } else { + log!(logger, "failed\n") + } + + !failed + } + + #[cfg(ddrphy_wlevel)] + unsafe fn read_bitslip(logger: &mut Option<&mut fmt::Write>, + delay: &[u16; DQS_SIGNAL_COUNT], + high_skew: &[bool; DQS_SIGNAL_COUNT]) { + let threshold_opt = delay.iter().zip(high_skew.iter()) + .filter_map(|(&delay, &high_skew)| + if high_skew { Some(delay) } else { None }) + .min() + .map(|threshold| threshold / 2); + + if let Some(threshold) = threshold_opt { + log!(logger, "Read bitslip: "); + + for n in (0..DQS_SIGNAL_COUNT).rev() { + if delay[n] > threshold { + ddrphy::dly_sel_write(1 << n); + + #[cfg(kusddrphy)] + ddrphy::rdly_dq_bitslip_write(1); + #[cfg(not(kusddrphy))] + for _ in 0..3 { + ddrphy::rdly_dq_bitslip_write(1); + } + + log!(logger, "{} ", n); + } + } + + log!(logger, "\n"); + } + } + + unsafe fn read_level_scan(logger: &mut Option<&mut fmt::Write>) { + log!(logger, "Read leveling scan:\n"); + + // Generate pseudo-random sequence + let mut prs = [0; DFII_NPHASES * DFII_PIX_DATA_SIZE]; + let mut prv = 42; + for b in prs.iter_mut() { + prv = 1664525 * prv + 1013904223; + *b = prv as u8; + } + + // Activate + dfii::pi0_address_write(0); + dfii::pi0_baddress_write(0); + sdram_phy::command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CS); + spin_cycles(15); + + // Write test pattern + for p in 0..DFII_NPHASES { + for offset in 0..DFII_PIX_DATA_SIZE { + let addr = DFII_PIX_WRDATA_ADDR[p].offset(offset as isize); + let data = prs[DFII_PIX_DATA_SIZE * p + offset]; + ptr::write_volatile(addr, data as u32); + } + } + sdram_phy::dfii_piwr_address_write(0); + sdram_phy::dfii_piwr_baddress_write(0); + sdram_phy::command_pwr(DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS| + DFII_COMMAND_WRDATA); + + // Calibrate each DQ in turn + sdram_phy::dfii_pird_address_write(0); + sdram_phy::dfii_pird_baddress_write(0); + for n in 0..DQS_SIGNAL_COUNT { + log!(logger, "Module {}:\n", DQS_SIGNAL_COUNT - n - 1); + + ddrphy::dly_sel_write(1 << (DQS_SIGNAL_COUNT - n - 1)); + + ddrphy::rdly_dq_rst_write(1); + #[cfg(soc_platform = "kasli")] + { + for _ in 0..3 { + ddrphy::rdly_dq_bitslip_write(1); + } + } + + for _ in 0..DDRPHY_MAX_DELAY { + let mut working = true; + for _ in 0..256 { + sdram_phy::command_prd(DFII_COMMAND_CAS|DFII_COMMAND_CS| + DFII_COMMAND_RDDATA); + spin_cycles(15); + + for p in 0..DFII_NPHASES { + for &offset in [n, n + DQS_SIGNAL_COUNT].iter() { + let addr = DFII_PIX_RDDATA_ADDR[p].offset(offset as isize); + let data = prs[DFII_PIX_DATA_SIZE * p + offset]; + if ptr::read_volatile(addr) as u8 != data { + working = false; + } + } + } + } + if working { + log!(logger, "1"); + } + else { + log!(logger, "0"); + } + ddrphy::rdly_dq_inc_write(1); + } + + log!(logger, "\n"); + + } + + // Precharge + dfii::pi0_address_write(0); + dfii::pi0_baddress_write(0); + sdram_phy::command_p0(DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS); + spin_cycles(15); + } + + unsafe fn read_level(logger: &mut Option<&mut fmt::Write>) -> bool { + log!(logger, "Read leveling: "); + + // Generate pseudo-random sequence + let mut prs = [0; 
DFII_NPHASES * DFII_PIX_DATA_SIZE]; + let mut prv = 42; + for b in prs.iter_mut() { + prv = 1664525 * prv + 1013904223; + *b = prv as u8; + } + + // Activate + dfii::pi0_address_write(0); + dfii::pi0_baddress_write(0); + sdram_phy::command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CS); + spin_cycles(15); + + // Write test pattern + for p in 0..DFII_NPHASES { + for offset in 0..DFII_PIX_DATA_SIZE { + let addr = DFII_PIX_WRDATA_ADDR[p].offset(offset as isize); + let data = prs[DFII_PIX_DATA_SIZE * p + offset]; + ptr::write_volatile(addr, data as u32); + } + } + sdram_phy::dfii_piwr_address_write(0); + sdram_phy::dfii_piwr_baddress_write(0); + sdram_phy::command_pwr(DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS| + DFII_COMMAND_WRDATA); + + // Calibrate each DQ in turn + sdram_phy::dfii_pird_address_write(0); + sdram_phy::dfii_pird_baddress_write(0); + for n in 0..DQS_SIGNAL_COUNT { + ddrphy::dly_sel_write(1 << (DQS_SIGNAL_COUNT - n - 1)); + + // Find the first (min_delay) and last (max_delay) tap that bracket + // the largest tap interval of correct reads. + let mut min_delay = 0; + let mut max_delay = 0; + + let mut first_valid = 0; + let mut seen_valid = 0; + let mut seen_invalid = 0; + let mut max_seen_valid = 0; + + ddrphy::rdly_dq_rst_write(1); + #[cfg(soc_platform = "kasli")] + { + for _ in 0..3 { + ddrphy::rdly_dq_bitslip_write(1); + } + } + + for delay in 0..DDRPHY_MAX_DELAY { + let mut valid = true; + for _ in 0..256 { + sdram_phy::command_prd(DFII_COMMAND_CAS|DFII_COMMAND_CS| + DFII_COMMAND_RDDATA); + spin_cycles(15); + + for p in 0..DFII_NPHASES { + for &offset in [n, n + DQS_SIGNAL_COUNT].iter() { + let addr = DFII_PIX_RDDATA_ADDR[p].offset(offset as isize); + let data = prs[DFII_PIX_DATA_SIZE * p + offset]; + if ptr::read_volatile(addr) as u8 != data { + valid = false; + } + } + } + } + + if valid { + if seen_valid == 0 { + first_valid = delay; + } + seen_valid += 1; + seen_invalid = 0; + if seen_valid > max_seen_valid { + min_delay = first_valid; + max_delay = delay; + max_seen_valid = seen_valid; + } + } else { + seen_invalid += 1; + if seen_invalid >= DDRPHY_MAX_DELAY / 8 { + seen_valid = 0; + } + } + ddrphy::rdly_dq_inc_write(1); + } + + if max_delay <= min_delay { + log!(logger, "Zero window: {}: {}-{} ({})\n", + DQS_SIGNAL_COUNT - n - 1, min_delay, max_delay, + max_seen_valid); + return false + } + if max_seen_valid <= 5 { + log!(logger, "Small window: {}: {}-{} ({})\n", + DQS_SIGNAL_COUNT - n - 1, min_delay, max_delay, + max_seen_valid); + return false + } + + let mean_delay = (min_delay + max_delay) / 2; + log!(logger, "{}+-{} ", mean_delay, max_seen_valid / 2); + + // Set delay to the middle + ddrphy::rdly_dq_rst_write(1); + #[cfg(soc_platform = "kasli")] + { + for _ in 0..3 { + ddrphy::rdly_dq_bitslip_write(1); + } + } + for _ in 0..mean_delay { + ddrphy::rdly_dq_inc_write(1); + } + } + + // Precharge + dfii::pi0_address_write(0); + dfii::pi0_baddress_write(0); + sdram_phy::command_p0(DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS); + spin_cycles(15); + + log!(logger, "done\n"); + true + } + + pub unsafe fn level(logger: &mut Option<&mut fmt::Write>) -> bool { + #[cfg(ddrphy_wlevel)] + { + let mut delay = [0; DQS_SIGNAL_COUNT]; + let mut high_skew = [false; DQS_SIGNAL_COUNT]; + write_level_scan(logger); + if !write_level(logger, &mut delay, &mut high_skew) { + return false + } + read_bitslip(logger, &delay, &high_skew); + } + + read_level_scan(logger); + if !read_level(logger) { + return false + } + + true + } +} + +use core::fmt; +use csr; +use sdram_phy; + +pub unsafe fn 
init(mut _logger: Option<&mut fmt::Write>) -> bool { + sdram_phy::initialize(); + + #[cfg(has_ddrphy)] + { + #[cfg(kusddrphy)] + csr::ddrphy::en_vtc_write(0); + if !ddr::level(&mut _logger) { + return false + } + #[cfg(kusddrphy)] + csr::ddrphy::en_vtc_write(1); + } + + csr::dfii::control_write(sdram_phy::DFII_CONTROL_SEL); + + true +} diff --git a/artiq/firmware/libboard_misoc/slave_fpga.rs b/artiq/firmware/libboard_misoc/slave_fpga.rs new file mode 100644 index 000000000..bafe1d7fe --- /dev/null +++ b/artiq/firmware/libboard_misoc/slave_fpga.rs @@ -0,0 +1,76 @@ +use super::{csr, clock}; + +const CCLK_BIT: u8 = 1 << 0; +const DIN_BIT: u8 = 1 << 1; +const DONE_BIT: u8 = 1 << 2; +const INIT_B_BIT: u8 = 1 << 3; +const PROGRAM_B_BIT: u8 = 1 << 4; + +unsafe fn shift_u8(data: u8) { + for i in 0..8 { + let mut bits: u8 = PROGRAM_B_BIT; + if data & (0x80 >> i) != 0 { + bits |= DIN_BIT; + } + // Without delays, this is about 6 MHz CCLK which is fine. + csr::slave_fpga_cfg::out_write(bits); + // clock::spin_us(1); + csr::slave_fpga_cfg::out_write(bits | CCLK_BIT); + // clock::spin_us(1); + } +} + +pub fn prepare() -> Result<(), &'static str> { + unsafe { + if csr::slave_fpga_cfg::in_read() & DONE_BIT != 0 { + println!(" DONE before loading"); + } + if csr::slave_fpga_cfg::in_read() & INIT_B_BIT == 0 { + println!(" INIT asserted before loading"); + } + + csr::slave_fpga_cfg::out_write(0); + csr::slave_fpga_cfg::oe_write(CCLK_BIT | DIN_BIT | PROGRAM_B_BIT); + clock::spin_us(1_000); // TPROGRAM=250ns min, be_generous + if csr::slave_fpga_cfg::in_read() & INIT_B_BIT != 0 { + return Err("Did not assert INIT in reaction to PROGRAM"); + } + csr::slave_fpga_cfg::out_write(PROGRAM_B_BIT); + clock::spin_us(10_000); // TPL=5ms max + if csr::slave_fpga_cfg::in_read() & INIT_B_BIT == 0 { + return Err("Did not exit INIT after releasing PROGRAM"); + } + if csr::slave_fpga_cfg::in_read() & DONE_BIT != 0 { + return Err("DONE high despite PROGRAM"); + } + } + Ok(()) +} + +pub fn input(data: &[u8]) -> Result<(), &'static str> { + unsafe { + for i in data { + shift_u8(*i); + if csr::slave_fpga_cfg::in_read() & INIT_B_BIT == 0 { + return Err("INIT asserted during load"); + } + } + } + Ok(()) +} + +pub fn startup() -> Result<(), &'static str> { + unsafe { + let t = clock::get_ms(); + while csr::slave_fpga_cfg::in_read() & DONE_BIT == 0 { + if clock::get_ms() > t + 100 { + return Err("Timeout wating for DONE after loading"); + } + shift_u8(0xff); + } + shift_u8(0xff); // "Compensate for Special Startup Conditions" + csr::slave_fpga_cfg::out_write(PROGRAM_B_BIT); + csr::slave_fpga_cfg::oe_write(PROGRAM_B_BIT); + } + Ok(()) +} diff --git a/artiq/firmware/libboard_misoc/spiflash.rs b/artiq/firmware/libboard_misoc/spiflash.rs new file mode 100644 index 000000000..3a4e7cb59 --- /dev/null +++ b/artiq/firmware/libboard_misoc/spiflash.rs @@ -0,0 +1,115 @@ +use core::cmp; +use csr; + +pub const SECTOR_SIZE: usize = csr::CONFIG_SPIFLASH_SECTOR_SIZE as usize; +pub const PAGE_SIZE: usize = csr::CONFIG_SPIFLASH_PAGE_SIZE as usize; + +const PAGE_MASK: usize = PAGE_SIZE - 1; + +const CMD_PP: u8 = 0x02; +// const CMD_WRDI: u8 = 0x04; +const CMD_RDSR: u8 = 0x05; +const CMD_WREN: u8 = 0x06; +const CMD_SE: u8 = 0xd8; + +const PIN_CLK: u8 = 1 << 1; +const PIN_CS_N: u8 = 1 << 2; +const PIN_DQ_I: u8 = 1 << 3; + +const SR_WIP: u8 = 1; + +unsafe fn write_byte(mut byte: u8) { + csr::spiflash::bitbang_write(0); + for _ in 0..8 { + csr::spiflash::bitbang_write((byte & 0x80) >> 7); + csr::spiflash::bitbang_write((byte & 0x80) >> 7 | PIN_CLK); + 
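// (The data bit is first driven with the clock low, then re-driven with PIN_CLK set so
//  the flash samples it on the rising edge; the shift below moves the next bit, MSB
//  first, into position for the following iteration.)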
byte <<= 1; + } + csr::spiflash::bitbang_write(0); +} + +unsafe fn write_addr(mut addr: usize) { + csr::spiflash::bitbang_write(0); + for _ in 0..24 { + csr::spiflash::bitbang_write(((addr & 0x800000) >> 23) as u8); + csr::spiflash::bitbang_write(((addr & 0x800000) >> 23) as u8 | PIN_CLK); + addr <<= 1; + } + csr::spiflash::bitbang_write(0); +} + +fn wait_until_ready() { + unsafe { + loop { + let mut sr = 0; + write_byte(CMD_RDSR); + for _ in 0..8 { + sr <<= 1; + csr::spiflash::bitbang_write(PIN_DQ_I | PIN_CLK); + sr |= csr::spiflash::miso_read(); + csr::spiflash::bitbang_write(PIN_DQ_I); + } + csr::spiflash::bitbang_write(0); + csr::spiflash::bitbang_write(PIN_CS_N); + if sr & SR_WIP == 0 { + return + } + } + } +} + +pub unsafe fn erase_sector(addr: usize) { + let sector_addr = addr & !(csr::CONFIG_SPIFLASH_SECTOR_SIZE as usize - 1); + + csr::spiflash::bitbang_en_write(1); + + wait_until_ready(); + + write_byte(CMD_WREN); + csr::spiflash::bitbang_write(PIN_CS_N); + + write_byte(CMD_SE); + write_addr(sector_addr); + csr::spiflash::bitbang_write(PIN_CS_N); + + wait_until_ready(); + + csr::spiflash::bitbang_en_write(0); +} + +unsafe fn write_page(addr: usize, data: &[u8]) { + csr::spiflash::bitbang_en_write(1); + + wait_until_ready(); + + write_byte(CMD_WREN); + csr::spiflash::bitbang_write(PIN_CS_N); + write_byte(CMD_PP); + write_addr(addr); + for &byte in data { + write_byte(byte) + } + + csr::spiflash::bitbang_write(PIN_CS_N); + csr::spiflash::bitbang_write(0); + + wait_until_ready(); + + csr::spiflash::bitbang_en_write(0); +} + +pub unsafe fn write(mut addr: usize, mut data: &[u8]) { + if addr & PAGE_MASK != 0 { + let size = cmp::min((PAGE_SIZE - (addr & PAGE_MASK)) as usize, data.len()); + write_page(addr, &data[..size]); + addr += size; + data = &data[size..]; + } + + while data.len() > 0 { + let size = cmp::min(PAGE_SIZE as usize, data.len()); + write_page(addr, &data[..size]); + addr += size; + data = &data[size..]; + } +} diff --git a/artiq/firmware/libboard/uart.rs b/artiq/firmware/libboard_misoc/uart.rs similarity index 100% rename from artiq/firmware/libboard/uart.rs rename to artiq/firmware/libboard_misoc/uart.rs diff --git a/artiq/firmware/libboard/uart_console.rs b/artiq/firmware/libboard_misoc/uart_console.rs similarity index 80% rename from artiq/firmware/libboard/uart_console.rs rename to artiq/firmware/libboard_misoc/uart_console.rs index 5a1c82f7c..a63adeea1 100644 --- a/artiq/firmware/libboard/uart_console.rs +++ b/artiq/firmware/libboard_misoc/uart_console.rs @@ -1,16 +1,24 @@ use core::fmt; -use csr; pub struct Console; impl fmt::Write for Console { + #[cfg(has_uart)] fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { + use csr; + for c in s.bytes() { unsafe { while csr::uart::txfull_read() != 0 {} csr::uart::rxtx_write(c) } } + + Ok(()) + } + + #[cfg(not(has_uart))] + fn write_str(&mut self, _s: &str) -> Result<(), fmt::Error> { Ok(()) } } diff --git a/artiq/firmware/libboard_misoc/uart_logger.rs b/artiq/firmware/libboard_misoc/uart_logger.rs new file mode 100644 index 000000000..e3e26f6cf --- /dev/null +++ b/artiq/firmware/libboard_misoc/uart_logger.rs @@ -0,0 +1,36 @@ +use core::fmt::Write; +use log::{Log, LevelFilter, Metadata, Record, set_logger, set_max_level}; + +use clock; +use uart_console::Console; + +pub struct ConsoleLogger; + +impl ConsoleLogger { + pub fn register() { + static LOGGER: ConsoleLogger = ConsoleLogger; + set_logger(&LOGGER).expect("global logger can only be initialized once"); + set_max_level(LevelFilter::Trace); + } +} + +impl Log 
for ConsoleLogger { + fn enabled(&self, _metadata: &Metadata) -> bool { + true + } + + fn log(&self, record: &Record) { + if self.enabled(record.metadata()) { + let timestamp = clock::get_us(); + let seconds = timestamp / 1_000_000; + let micros = timestamp % 1_000_000; + + let _ = writeln!(Console, "[{:6}.{:06}s] {:>5}({}): {}", + seconds, micros, record.level(), record.target(), record.args()); + } + } + + fn flush(&self) { + } +} + diff --git a/artiq/firmware/libbuild_artiq/lib.rs b/artiq/firmware/libbuild_artiq/lib.rs deleted file mode 100644 index 2eb27a040..000000000 --- a/artiq/firmware/libbuild_artiq/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -extern crate walkdir; - -use std::env; -use std::fs::File; -use std::io::{Write, BufRead, BufReader}; -use std::path::{Path, PathBuf}; -use std::process::Command; - -use walkdir::WalkDir; - -pub fn git_describe() { - let git_dir = Path::new("../../../.git"); - - println!("cargo:rerun-if-changed={}", git_dir.join("HEAD").display()); - for entry in WalkDir::new(git_dir.join("refs")) { - let entry = entry.unwrap(); - println!("cargo:rerun-if-changed={}", entry.path().display()); - } - - let version; - if git_dir.exists() { - let git_describe = - Command::new("git") - .arg("describe") - .arg("--tags") - .arg("--dirty") - .arg("--always") - .arg("--long") - .arg("--abbrev=8") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok()) - .map(|mut s| { - let len = s.trim_right().len(); - s.truncate(len); - s - }) - .unwrap(); - let parts = git_describe.split("-").collect::>(); - version = format!("{}+{}.{}", parts[0], parts[1], parts[2]); - } else { - version = "unknown".to_owned(); - } - - let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); - let mut f = File::create(out_dir.join("git-describe")).unwrap(); - write!(f, "{}", version).unwrap(); -} - -pub fn misoc_cfg() { - let out_dir = env::var("BUILDINC_DIRECTORY").unwrap(); - let cfg_path = Path::new(&out_dir).join("generated").join("rust-cfg"); - println!("cargo:rerun-if-changed={}", cfg_path.to_str().unwrap()); - - let f = BufReader::new(File::open(&cfg_path).unwrap()); - for line in f.lines() { - println!("cargo:rustc-cfg={}", line.unwrap()); - } -} diff --git a/artiq/firmware/liballoc_stub/Cargo.toml b/artiq/firmware/libbuild_misoc/Cargo.toml similarity index 63% rename from artiq/firmware/liballoc_stub/Cargo.toml rename to artiq/firmware/libbuild_misoc/Cargo.toml index 1503c9005..400e5fbe1 100644 --- a/artiq/firmware/liballoc_stub/Cargo.toml +++ b/artiq/firmware/libbuild_misoc/Cargo.toml @@ -1,8 +1,8 @@ [package] authors = ["M-Labs"] -name = "alloc_stub" +name = "build_misoc" version = "0.0.0" [lib] -name = "alloc_stub" +name = "build_misoc" path = "lib.rs" diff --git a/artiq/firmware/libdrtioaux/build.rs b/artiq/firmware/libbuild_misoc/lib.rs similarity index 96% rename from artiq/firmware/libdrtioaux/build.rs rename to artiq/firmware/libbuild_misoc/lib.rs index a7b0335e5..36f354fa1 100644 --- a/artiq/firmware/libdrtioaux/build.rs +++ b/artiq/firmware/libbuild_misoc/lib.rs @@ -1,9 +1,9 @@ use std::env; -use std::path::Path; -use std::io::{BufRead, BufReader}; use std::fs::File; +use std::io::{BufRead, BufReader}; +use std::path::Path; -fn main() { +pub fn cfg() { let out_dir = env::var("BUILDINC_DIRECTORY").unwrap(); let cfg_path = Path::new(&out_dir).join("generated").join("rust-cfg"); println!("cargo:rerun-if-changed={}", cfg_path.to_str().unwrap()); diff --git a/artiq/firmware/libdrtioaux/Cargo.toml b/artiq/firmware/libdrtioaux/Cargo.toml deleted file mode 100644 index 
682d97316..000000000 --- a/artiq/firmware/libdrtioaux/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -authors = ["M-Labs"] -name = "drtioaux" -version = "0.0.0" -build = "build.rs" - -[lib] -name = "drtioaux" -path = "lib.rs" - -[dependencies] -log = { version = "0.3", default-features = false } -std_artiq = { path = "../libstd_artiq", features = ["alloc"] } -board = { path = "../libboard" } -byteorder = { version = "1.0", default-features = false } diff --git a/artiq/firmware/libdrtioaux/crc32.rs b/artiq/firmware/libdrtioaux/crc32.rs deleted file mode 100644 index 9cbaa56de..000000000 --- a/artiq/firmware/libdrtioaux/crc32.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Based on crc 1.4.0 by mrhooray - -static IEEE_TABLE: [u32; 256] = [ - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 
0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, -]; - -pub fn update(mut value: u32, table: &[u32; 256], bytes: &[u8]) -> u32 { - value = !value; - for &i in bytes.iter() { - value = table[((value as u8) ^ i) as usize] ^ (value >> 8) - } - !value -} - -pub fn checksum_ieee(bytes: &[u8]) -> u32 { - return update(0, &IEEE_TABLE, bytes); -} - diff --git a/artiq/firmware/libdrtioaux/lib.rs b/artiq/firmware/libdrtioaux/lib.rs deleted file mode 100644 index ece63f37e..000000000 --- a/artiq/firmware/libdrtioaux/lib.rs +++ /dev/null @@ -1,399 +0,0 @@ -#![no_std] - -#[macro_use] -extern crate std_artiq as std; -extern crate board; -extern crate byteorder; - -mod proto; -#[cfg(has_drtio)] -mod crc32; - -use std::io::{self, Read, Write}; -#[cfg(has_drtio)] -use core::slice; -use proto::*; - -#[derive(Debug)] -pub enum Packet { - EchoRequest, - EchoReply, - - RtioErrorRequest, - RtioNoErrorReply, - RtioErrorCollisionReply, - RtioErrorBusyReply, - - MonitorRequest { channel: u16, probe: u8 }, - MonitorReply { value: u32 }, - InjectionRequest { channel: u16, overrd: u8, value: u8 }, - InjectionStatusRequest { channel: u16, overrd: u8 }, - InjectionStatusReply { value: u8 }, - - I2cStartRequest { busno: u8 }, - I2cRestartRequest { busno: u8 }, - I2cStopRequest { busno: u8 }, - I2cWriteRequest { busno: u8, data: u8 }, - I2cWriteReply { succeeded: bool, ack: bool }, - I2cReadRequest { busno: u8, ack: bool }, - I2cReadReply { succeeded: bool, data: u8 }, - I2cBasicReply { succeeded: bool }, - - SpiSetConfigRequest { busno: u8, flags: u8, write_div: u8, read_div: u8 }, - SpiSetXferRequest { busno: u8, chip_select: u16, write_length: u8, read_length: u8 }, - SpiWriteRequest { busno: u8, data: u32 }, - SpiReadRequest { busno: u8 }, - SpiReadReply { succeeded: bool, data: u32 }, - SpiBasicReply { succeeded: bool }, -} - -impl Packet { - pub fn read_from(reader: &mut Read) -> io::Result { - Ok(match read_u8(reader)? { - 0x00 => Packet::EchoRequest, - 0x01 => Packet::EchoReply, - - 0x20 => Packet::RtioErrorRequest, - 0x21 => Packet::RtioNoErrorReply, - 0x22 => Packet::RtioErrorCollisionReply, - 0x23 => Packet::RtioErrorBusyReply, - - 0x40 => Packet::MonitorRequest { - channel: read_u16(reader)?, - probe: read_u8(reader)? - }, - 0x41 => Packet::MonitorReply { - value: read_u32(reader)? - }, - 0x50 => Packet::InjectionRequest { - channel: read_u16(reader)?, - overrd: read_u8(reader)?, - value: read_u8(reader)? - }, - 0x51 => Packet::InjectionStatusRequest { - channel: read_u16(reader)?, - overrd: read_u8(reader)? - }, - 0x52 => Packet::InjectionStatusReply { - value: read_u8(reader)? - }, - - 0x80 => Packet::I2cStartRequest { - busno: read_u8(reader)? - }, - 0x81 => Packet::I2cRestartRequest { - busno: read_u8(reader)? - }, - 0x82 => Packet::I2cStopRequest { - busno: read_u8(reader)? - }, - 0x83 => Packet::I2cWriteRequest { - busno: read_u8(reader)?, - data: read_u8(reader)? - }, - 0x84 => Packet::I2cWriteReply { - succeeded: read_bool(reader)?, - ack: read_bool(reader)? - }, - 0x85 => Packet::I2cReadRequest { - busno: read_u8(reader)?, - ack: read_bool(reader)? - }, - 0x86 => Packet::I2cReadReply { - succeeded: read_bool(reader)?, - data: read_u8(reader)? - }, - 0x87 => Packet::I2cBasicReply { - succeeded: read_bool(reader)? 
- }, - - 0x90 => Packet::SpiSetConfigRequest { - busno: read_u8(reader)?, - flags: read_u8(reader)?, - write_div: read_u8(reader)?, - read_div: read_u8(reader)? - }, - 0x91 => Packet::SpiSetXferRequest { - busno: read_u8(reader)?, - chip_select: read_u16(reader)?, - write_length: read_u8(reader)?, - read_length: read_u8(reader)? - }, - 0x92 => Packet::SpiWriteRequest { - busno: read_u8(reader)?, - data: read_u32(reader)? - }, - 0x93 => Packet::SpiReadRequest { - busno: read_u8(reader)? - }, - 0x94 => Packet::SpiReadReply { - succeeded: read_bool(reader)?, - data: read_u32(reader)? - }, - 0x95 => Packet::SpiBasicReply { - succeeded: read_bool(reader)? - }, - - _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown packet type")) - }) - } - - pub fn write_to(&self, writer: &mut Write) -> io::Result<()> { - match *self { - Packet::EchoRequest => write_u8(writer, 0x00)?, - Packet::EchoReply => write_u8(writer, 0x01)?, - - Packet::RtioErrorRequest => write_u8(writer, 0x20)?, - Packet::RtioNoErrorReply => write_u8(writer, 0x21)?, - Packet::RtioErrorCollisionReply => write_u8(writer, 0x22)?, - Packet::RtioErrorBusyReply => write_u8(writer, 0x23)?, - - Packet::MonitorRequest { channel, probe } => { - write_u8(writer, 0x40)?; - write_u16(writer, channel)?; - write_u8(writer, probe)?; - }, - Packet::MonitorReply { value } => { - write_u8(writer, 0x41)?; - write_u32(writer, value)?; - }, - Packet::InjectionRequest { channel, overrd, value } => { - write_u8(writer, 0x50)?; - write_u16(writer, channel)?; - write_u8(writer, overrd)?; - write_u8(writer, value)?; - }, - Packet::InjectionStatusRequest { channel, overrd } => { - write_u8(writer, 0x51)?; - write_u16(writer, channel)?; - write_u8(writer, overrd)?; - }, - Packet::InjectionStatusReply { value } => { - write_u8(writer, 0x52)?; - write_u8(writer, value)?; - }, - - Packet::I2cStartRequest { busno } => { - write_u8(writer, 0x80)?; - write_u8(writer, busno)?; - }, - Packet::I2cRestartRequest { busno } => { - write_u8(writer, 0x81)?; - write_u8(writer, busno)?; - }, - Packet::I2cStopRequest { busno } => { - write_u8(writer, 0x82)?; - write_u8(writer, busno)?; - }, - Packet::I2cWriteRequest { busno, data } => { - write_u8(writer, 0x83)?; - write_u8(writer, busno)?; - write_u8(writer, data)?; - }, - Packet::I2cWriteReply { succeeded, ack } => { - write_u8(writer, 0x84)?; - write_bool(writer, succeeded)?; - write_bool(writer, ack)?; - }, - Packet::I2cReadRequest { busno, ack } => { - write_u8(writer, 0x85)?; - write_u8(writer, busno)?; - write_bool(writer, ack)?; - }, - Packet::I2cReadReply { succeeded, data } => { - write_u8(writer, 0x86)?; - write_bool(writer, succeeded)?; - write_u8(writer, data)?; - }, - Packet::I2cBasicReply { succeeded } => { - write_u8(writer, 0x87)?; - write_bool(writer, succeeded)?; - }, - - Packet::SpiSetConfigRequest { busno, flags, write_div, read_div } => { - write_u8(writer, 0x90)?; - write_u8(writer, busno)?; - write_u8(writer, flags)?; - write_u8(writer, write_div)?; - write_u8(writer, read_div)?; - }, - Packet::SpiSetXferRequest { busno, chip_select, write_length, read_length } => { - write_u8(writer, 0x91)?; - write_u8(writer, busno)?; - write_u16(writer, chip_select)?; - write_u8(writer, write_length)?; - write_u8(writer, read_length)?; - }, - Packet::SpiWriteRequest { busno, data } => { - write_u8(writer, 0x92)?; - write_u8(writer, busno)?; - write_u32(writer, data)?; - }, - Packet::SpiReadRequest { busno } => { - write_u8(writer, 0x93)?; - write_u8(writer, busno)?; - }, - Packet::SpiReadReply { 
succeeded, data } => { - write_u8(writer, 0x94)?; - write_bool(writer, succeeded)?; - write_u32(writer, data)?; - }, - Packet::SpiBasicReply { succeeded } => { - write_u8(writer, 0x95)?; - write_bool(writer, succeeded)?; - }, - } - Ok(()) - } -} - -#[cfg(has_drtio)] -pub mod hw { - use super::*; - use std::io::Cursor; - - fn rx_has_error(linkno: u8) -> bool { - let linkno = linkno as usize; - unsafe { - let error = (board::csr::DRTIO[linkno].aux_rx_error_read)() != 0; - if error { - (board::csr::DRTIO[linkno].aux_rx_error_write)(1) - } - error - } - } - - struct RxBuffer(u8, &'static [u8]); - - impl Drop for RxBuffer { - fn drop(&mut self) { - unsafe { - (board::csr::DRTIO[self.0 as usize].aux_rx_present_write)(1); - } - } - } - - fn rx_get_buffer(linkno: u8) -> Option { - let linkidx = linkno as usize; - unsafe { - if (board::csr::DRTIO[linkidx].aux_rx_present_read)() == 1 { - let length = (board::csr::DRTIO[linkidx].aux_rx_length_read)(); - let base = board::mem::DRTIO_AUX[linkidx].base + board::mem::DRTIO_AUX[linkidx].size/2; - let sl = slice::from_raw_parts(base as *mut u8, length as usize); - Some(RxBuffer(linkno, sl)) - } else { - None - } - } - } - - pub fn recv_link(linkno: u8) -> io::Result> { - if rx_has_error(linkno) { - return Err(io::Error::new(io::ErrorKind::Other, "gateware reported error")) - } - let buffer = rx_get_buffer(linkno); - match buffer { - Some(rxb) => { - let slice = rxb.1; - let mut reader = Cursor::new(slice); - - let len = slice.len(); - if len < 8 { - return Err(io::Error::new(io::ErrorKind::InvalidData, "packet too short")) - } - let computed_crc = crc32::checksum_ieee(&reader.get_ref()[0..len-4]); - reader.set_position((len-4) as u64); - let crc = read_u32(&mut reader)?; - if crc != computed_crc { - return Err(io::Error::new(io::ErrorKind::InvalidData, "packet CRC failed")) - } - reader.set_position(0); - - let packet_r = Packet::read_from(&mut reader); - match packet_r { - Ok(packet) => Ok(Some(packet)), - Err(e) => Err(e) - } - } - None => Ok(None) - } - } - - pub fn recv_timeout_link(linkno: u8, timeout_ms: Option) -> io::Result { - let timeout_ms = timeout_ms.unwrap_or(10); - let limit = board::clock::get_ms() + timeout_ms; - while board::clock::get_ms() < limit { - match recv_link(linkno) { - Ok(None) => (), - Ok(Some(packet)) => return Ok(packet), - Err(e) => return Err(e) - } - } - return Err(io::Error::new(io::ErrorKind::TimedOut, "timed out waiting for data")) - } - - fn tx_get_buffer(linkno: u8) -> &'static mut [u8] { - let linkno = linkno as usize; - unsafe { - while (board::csr::DRTIO[linkno].aux_tx_read)() != 0 {} - let base = board::mem::DRTIO_AUX[linkno].base; - let size = board::mem::DRTIO_AUX[linkno].size/2; - slice::from_raw_parts_mut(base as *mut u8, size) - } - } - - fn tx_ack_buffer(linkno: u8, length: u16) { - let linkno = linkno as usize; - unsafe { - (board::csr::DRTIO[linkno].aux_tx_length_write)(length); - (board::csr::DRTIO[linkno].aux_tx_write)(1) - } - } - - pub fn send_link(linkno: u8, packet: &Packet) -> io::Result<()> { - let sl = tx_get_buffer(linkno); - - let mut writer = Cursor::new(sl); - packet.write_to(&mut writer)?; - let mut len = writer.position(); - - let padding = 4 - (len % 4); - if padding != 4 { - for _ in 0..padding { - write_u8(&mut writer, 0)?; - } - len += padding; - } - - let crc = crc32::checksum_ieee(&writer.get_ref()[0..len as usize]); - write_u32(&mut writer, crc)?; - len += 4; - - tx_ack_buffer(linkno, len as u16); - - Ok(()) - } - - // TODO: routing - fn get_linkno(nodeno: u8) -> io::Result { - if 
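send_link pads the serialized packet with zeros to a four-byte boundary and appends a CRC-32 computed over everything before it, while recv_link rejects frames shorter than eight bytes and re-verifies that trailer before parsing. A host-side sketch of the same framing, assuming the payload is already serialized; helper names are made up, and the CRC helper repeats the bitwise form shown above so the example stands alone.

```rust
// Same bitwise CRC-32/IEEE as in the previous sketch, repeated for self-containment.
fn crc32_ieee(bytes: &[u8]) -> u32 {
    let mut crc: u32 = 0xFFFF_FFFF;
    for &byte in bytes {
        crc ^= byte as u32;
        for _ in 0..8 {
            crc = if crc & 1 != 0 { (crc >> 1) ^ 0xEDB8_8320 } else { crc >> 1 };
        }
    }
    !crc
}

/// Pad `payload` to a 4-byte boundary, then append the CRC of everything so far,
/// mirroring the zero padding and trailer written by `send_link`.
fn frame_aux_packet(payload: &[u8]) -> Vec<u8> {
    let mut frame = payload.to_vec();
    while frame.len() % 4 != 0 {
        frame.push(0); // padding bytes are zero, as in send_link
    }
    let crc = crc32_ieee(&frame);
    frame.extend_from_slice(&crc.to_be_bytes()); // network byte order, as written by write_u32
    frame
}

/// Receiver-side check corresponding to recv_link: minimum length, then CRC over
/// everything except the 4-byte trailer.
fn check_aux_frame(frame: &[u8]) -> Result<&[u8], &'static str> {
    if frame.len() < 8 {
        return Err("packet too short");
    }
    let (body, trailer) = frame.split_at(frame.len() - 4);
    let mut crc_bytes = [0u8; 4];
    crc_bytes.copy_from_slice(trailer);
    if crc32_ieee(body) != u32::from_be_bytes(crc_bytes) {
        return Err("packet CRC failed");
    }
    Ok(body)
}

fn main() {
    let frame = frame_aux_packet(&[0x00]); // e.g. a serialized EchoRequest
    assert!(check_aux_frame(&frame).is_ok());
}
```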
nodeno == 0 || nodeno as usize > board::csr::DRTIO.len() { - return Err(io::Error::new(io::ErrorKind::NotFound, "invalid node number")) - } - Ok(nodeno - 1) - } - - pub fn recv(nodeno: u8) -> io::Result> { - let linkno = get_linkno(nodeno)?; - recv_link(linkno) - } - - pub fn recv_timeout(nodeno: u8, timeout_ms: Option) -> io::Result { - let linkno = get_linkno(nodeno)?; - recv_timeout_link(linkno, timeout_ms) - } - - pub fn send(nodeno: u8, packet: &Packet) -> io::Result<()> { - let linkno = get_linkno(nodeno)?; - send_link(linkno, packet) - } -} diff --git a/artiq/firmware/libdrtioaux/proto.rs b/artiq/firmware/libdrtioaux/proto.rs deleted file mode 100644 index ca3101ba6..000000000 --- a/artiq/firmware/libdrtioaux/proto.rs +++ /dev/null @@ -1,92 +0,0 @@ -#![allow(dead_code)] - -use std::io::{self, Read, Write}; -use std::vec::Vec; -use std::string::String; -use byteorder::{ByteOrder, NetworkEndian}; - -// FIXME: replace these with byteorder core io traits once those are in -pub fn read_u8(reader: &mut Read) -> io::Result { - let mut bytes = [0; 1]; - reader.read_exact(&mut bytes)?; - Ok(bytes[0]) -} - -pub fn write_u8(writer: &mut Write, value: u8) -> io::Result<()> { - let bytes = [value; 1]; - writer.write_all(&bytes) -} - -pub fn read_bool(reader: &mut Read) -> io::Result { - if read_u8(reader)? == 0 { - Ok(false) - } else { - Ok(true) - } -} - -pub fn write_bool(writer: &mut Write, value: bool) -> io::Result<()> { - if value { - write_u8(writer, 1) - } else { - write_u8(writer, 0) - } -} - -pub fn read_u16(reader: &mut Read) -> io::Result { - let mut bytes = [0; 2]; - reader.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u16(&bytes)) -} - -pub fn write_u16(writer: &mut Write, value: u16) -> io::Result<()> { - let mut bytes = [0; 2]; - NetworkEndian::write_u16(&mut bytes, value); - writer.write_all(&bytes) -} - -pub fn read_u32(reader: &mut Read) -> io::Result { - let mut bytes = [0; 4]; - reader.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u32(&bytes)) -} - -pub fn write_u32(writer: &mut Write, value: u32) -> io::Result<()> { - let mut bytes = [0; 4]; - NetworkEndian::write_u32(&mut bytes, value); - writer.write_all(&bytes) -} - -pub fn read_u64(reader: &mut Read) -> io::Result { - let mut bytes = [0; 8]; - reader.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u64(&bytes)) -} - -pub fn write_u64(writer: &mut Write, value: u64) -> io::Result<()> { - let mut bytes = [0; 8]; - NetworkEndian::write_u64(&mut bytes, value); - writer.write_all(&bytes) -} - -pub fn read_bytes(reader: &mut Read) -> io::Result> { - let length = read_u32(reader)?; - let mut value = vec![0; length as usize]; - reader.read_exact(&mut value)?; - Ok(value) -} - -pub fn write_bytes(writer: &mut Write, value: &[u8]) -> io::Result<()> { - write_u32(writer, value.len() as u32)?; - writer.write_all(value) -} - -pub fn read_string(reader: &mut Read) -> io::Result { - let bytes = read_bytes(reader)?; - String::from_utf8(bytes) - .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid UTF-8")) -} - -pub fn write_string(writer: &mut Write, value: &str) -> io::Result<()> { - write_bytes(writer, value.as_bytes()) -} diff --git a/artiq/firmware/libdyld/lib.rs b/artiq/firmware/libdyld/lib.rs index c5d028226..bbf4f59c5 100644 --- a/artiq/firmware/libdyld/lib.rs +++ b/artiq/firmware/libdyld/lib.rs @@ -1,5 +1,4 @@ #![no_std] -#![feature(untagged_unions)] use core::{mem, ptr, fmt, slice, str, convert}; use elf::*; @@ -198,6 +197,8 @@ impl<'a> Library<'a> { pub fn load(data: &[u8], image: &'a mut [u8], resolve: 
&Fn(&[u8]) -> Option) -> Result, Error<'a>> { + #![allow(unused_assignments)] + let ehdr = read_unaligned::(data, 0) .map_err(|()| "cannot read ELF header")?; @@ -288,7 +289,7 @@ impl<'a> Library<'a> { if sym_ent != mem::size_of::() { return Err("incorrect symbol entry size")? } - if rela_ent != mem::size_of::() { + if rela_ent != 0 && rela_ent != mem::size_of::() { return Err("incorrect relocation entry size")? } diff --git a/artiq/firmware/libbuild_artiq/Cargo.toml b/artiq/firmware/libeh/Cargo.toml similarity index 60% rename from artiq/firmware/libbuild_artiq/Cargo.toml rename to artiq/firmware/libeh/Cargo.toml index 19e810e99..c1bea6f4f 100644 --- a/artiq/firmware/libbuild_artiq/Cargo.toml +++ b/artiq/firmware/libeh/Cargo.toml @@ -1,11 +1,11 @@ [package] authors = ["M-Labs"] -name = "build_artiq" +name = "eh" version = "0.0.0" [lib] -name = "build_artiq" +name = "eh" path = "lib.rs" [dependencies] -walkdir = "1.0" +cslice = { version = "0.3" } diff --git a/artiq/firmware/libeh/dwarf.rs b/artiq/firmware/libeh/dwarf.rs new file mode 100644 index 000000000..0956dc267 --- /dev/null +++ b/artiq/firmware/libeh/dwarf.rs @@ -0,0 +1,243 @@ +#![allow(non_upper_case_globals, dead_code)] + +use core::{ptr, mem}; +use cslice::CSlice; + +const DW_EH_PE_omit: u8 = 0xFF; +const DW_EH_PE_absptr: u8 = 0x00; + +const DW_EH_PE_uleb128: u8 = 0x01; +const DW_EH_PE_udata2: u8 = 0x02; +const DW_EH_PE_udata4: u8 = 0x03; +const DW_EH_PE_udata8: u8 = 0x04; +const DW_EH_PE_sleb128: u8 = 0x09; +const DW_EH_PE_sdata2: u8 = 0x0A; +const DW_EH_PE_sdata4: u8 = 0x0B; +const DW_EH_PE_sdata8: u8 = 0x0C; + +const DW_EH_PE_pcrel: u8 = 0x10; +const DW_EH_PE_textrel: u8 = 0x20; +const DW_EH_PE_datarel: u8 = 0x30; +const DW_EH_PE_funcrel: u8 = 0x40; +const DW_EH_PE_aligned: u8 = 0x50; + +const DW_EH_PE_indirect: u8 = 0x80; + +#[derive(Clone)] +struct DwarfReader { + pub ptr: *const u8, +} + +impl DwarfReader { + fn new(ptr: *const u8) -> DwarfReader { + DwarfReader { ptr: ptr } + } + + // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned + // on a 4-byte boundary. This may cause problems on platforms with strict + // alignment requirements. By wrapping data in a "packed" struct, we are + // telling the backend to generate "misalignment-safe" code. + unsafe fn read(&mut self) -> T { + let result = ptr::read_unaligned(self.ptr as *const T); + self.ptr = self.ptr.offset(mem::size_of::() as isize); + result + } + + // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable + // Length Data". 
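read_uleb128 and read_sleb128 below decode the variable-length integers used throughout the LSDA: seven payload bits per byte, least-significant group first, the high bit as a continuation flag, and sign extension from bit 6 of the last byte for the signed form. A safe, slice-based sketch of the same decoding, checked against examples from the DWARF specification (function names are illustrative):

```rust
/// Decode an unsigned LEB128 value, returning (value, bytes consumed).
fn read_uleb128(bytes: &[u8]) -> (u64, usize) {
    let (mut result, mut shift, mut len) = (0u64, 0u32, 0usize);
    for &byte in bytes {
        result |= ((byte & 0x7F) as u64) << shift;
        shift += 7;
        len += 1;
        if byte & 0x80 == 0 {
            break;
        }
    }
    (result, len)
}

/// Decode a signed LEB128 value: same loop, then sign-extend from the last group.
fn read_sleb128(bytes: &[u8]) -> (i64, usize) {
    let (mut result, mut shift, mut len) = (0u64, 0u32, 0usize);
    let mut byte = 0u8;
    for &b in bytes {
        byte = b;
        result |= ((byte & 0x7F) as u64) << shift;
        shift += 7;
        len += 1;
        if byte & 0x80 == 0 {
            break;
        }
    }
    if shift < 64 && byte & 0x40 != 0 {
        result |= !0u64 << shift; // sign extension
    }
    (result as i64, len)
}

fn main() {
    // Examples from the DWARF specification.
    assert_eq!(read_uleb128(&[0xE5, 0x8E, 0x26]), (624_485, 3));
    assert_eq!(read_sleb128(&[0x7F]), (-1, 1));
    assert_eq!(read_sleb128(&[0x80, 0x7F]), (-128, 2));
}
```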
+ unsafe fn read_uleb128(&mut self) -> u64 { + let mut shift: usize = 0; + let mut result: u64 = 0; + let mut byte: u8; + loop { + byte = self.read::(); + result |= ((byte & 0x7F) as u64) << shift; + shift += 7; + if byte & 0x80 == 0 { + break; + } + } + result + } + + unsafe fn read_sleb128(&mut self) -> i64 { + let mut shift: usize = 0; + let mut result: u64 = 0; + let mut byte: u8; + loop { + byte = self.read::(); + result |= ((byte & 0x7F) as u64) << shift; + shift += 7; + if byte & 0x80 == 0 { + break; + } + } + // sign-extend + if shift < 8 * mem::size_of::() && (byte & 0x40) != 0 { + result |= (!0 as u64) << shift; + } + result as i64 + } + + unsafe fn read_encoded_pointer(&mut self, encoding: u8) -> usize { + fn round_up(unrounded: usize, align: usize) -> usize { + debug_assert!(align.is_power_of_two()); + (unrounded + align - 1) & !(align - 1) + } + + debug_assert!(encoding != DW_EH_PE_omit); + + // DW_EH_PE_aligned implies it's an absolute pointer value + if encoding == DW_EH_PE_aligned { + self.ptr = round_up(self.ptr as usize, mem::size_of::()) as *const u8; + return self.read::() + } + + let value_ptr = self.ptr; + let mut result = match encoding & 0x0F { + DW_EH_PE_absptr => self.read::(), + DW_EH_PE_uleb128 => self.read_uleb128() as usize, + DW_EH_PE_udata2 => self.read::() as usize, + DW_EH_PE_udata4 => self.read::() as usize, + DW_EH_PE_udata8 => self.read::() as usize, + DW_EH_PE_sleb128 => self.read_sleb128() as usize, + DW_EH_PE_sdata2 => self.read::() as usize, + DW_EH_PE_sdata4 => self.read::() as usize, + DW_EH_PE_sdata8 => self.read::() as usize, + _ => panic!(), + }; + + result += match encoding & 0x70 { + DW_EH_PE_absptr => 0, + // relative to address of the encoded value, despite the name + DW_EH_PE_pcrel => value_ptr as usize, + _ => panic!(), + }; + + if encoding & DW_EH_PE_indirect != 0 { + result = *(result as *const usize); + } + + result + } +} + +fn encoding_size(encoding: u8) -> usize { + if encoding == DW_EH_PE_omit { + return 0 + } + + match encoding & 0x0F { + DW_EH_PE_absptr => mem::size_of::(), + DW_EH_PE_udata2 => 2, + DW_EH_PE_udata4 => 4, + DW_EH_PE_udata8 => 8, + DW_EH_PE_sdata2 => 2, + DW_EH_PE_sdata4 => 4, + DW_EH_PE_sdata8 => 8, + _ => panic!() + } +} + +pub enum EHAction { + None, + Cleanup(usize), + Catch(usize), + Terminate, +} + +pub unsafe fn find_eh_action(lsda: *const u8, func_start: usize, ip: usize, + exn_name: CSlice) -> EHAction { + if lsda.is_null() { + return EHAction::None + } + + let mut reader = DwarfReader::new(lsda); + + let start_encoding = reader.read::(); + // base address for landing pad offsets + let lpad_base = if start_encoding != DW_EH_PE_omit { + reader.read_encoded_pointer(start_encoding) + } else { + func_start + }; + + let ttype_encoding = reader.read::(); + let ttype_encoding_size = encoding_size(ttype_encoding) as isize; + + let class_info; + if ttype_encoding != DW_EH_PE_omit { + let class_info_offset = reader.read_uleb128(); + class_info = reader.ptr.offset(class_info_offset as isize); + } else { + class_info = ptr::null(); + } + assert!(!class_info.is_null()); + + let call_site_encoding = reader.read::(); + let call_site_table_length = reader.read_uleb128(); + let action_table = reader.ptr.offset(call_site_table_length as isize); + + while reader.ptr < action_table { + let cs_start = reader.read_encoded_pointer(call_site_encoding); + let cs_len = reader.read_encoded_pointer(call_site_encoding); + let cs_lpad = reader.read_encoded_pointer(call_site_encoding); + let cs_action = reader.read_uleb128(); + + if ip 
< func_start + cs_start { + // Callsite table is sorted by cs_start, so if we've passed the ip, we + // may stop searching. + break + } + if ip > func_start + cs_start + cs_len { + continue + } + + if cs_lpad == 0 { + return EHAction::None + } + + let lpad = lpad_base + cs_lpad; + if cs_action == 0 { + return EHAction::Cleanup(lpad) + } + + let action_entry = action_table.offset((cs_action - 1) as isize); + let mut action_reader = DwarfReader::new(action_entry); + loop { + let type_info_offset = action_reader.read_sleb128() as isize; + let action_offset = action_reader.clone().read_sleb128() as isize; + assert!(type_info_offset >= 0); + + if type_info_offset > 0 { + let type_info_ptr_ptr = class_info.offset(-type_info_offset * ttype_encoding_size); + let type_info_ptr = DwarfReader::new(type_info_ptr_ptr) + .read_encoded_pointer(ttype_encoding); + let type_info = *(type_info_ptr as *const CSlice); + + if type_info.as_ref() == exn_name.as_ref() { + return EHAction::Catch(lpad) + } + + if type_info.len() == 0 { + // This is a catch-all clause. We don't compare type_info_ptr with null here + // because, in PIC mode, the OR1K LLVM backend emits a literal zero + // encoded with DW_EH_PE_pcrel, which of course doesn't result in + // a proper null pointer. + return EHAction::Catch(lpad) + } + } + + if action_offset == 0 { + break + } else { + action_reader.ptr = action_reader.ptr.offset(action_offset) + } + } + + return EHAction::None + } + + // the function has a personality but no landing pads; this is fine + EHAction::None +} diff --git a/artiq/firmware/libeh/eh_rust.rs b/artiq/firmware/libeh/eh_rust.rs new file mode 100644 index 000000000..7fb14193d --- /dev/null +++ b/artiq/firmware/libeh/eh_rust.rs @@ -0,0 +1,88 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This is the Rust personality function, adapted for use in ARTIQ. We never actually panic +// from Rust or recover from Rust exceptions (there's nothing to catch the panics), but we +// need a personality function to step back through Rust frames in order to make a backtrace. +// +// By design, this personality function is only ever called in the search phase, although +// to keep things simple and close to upstream, it is not modified +#![allow(private_no_mangle_fns)] + +use unwind as uw; +use libc::{c_int, uintptr_t}; +use cslice::AsCSlice; + +use dwarf::{self, EHAction}; + +// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister() +// and TargetLowering::getExceptionSelectorRegister() for each architecture, +// then mapped to DWARF register numbers via register definition tables +// (typically RegisterInfo.td, search for "DwarfRegNum"). +// See also http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register. + +#[cfg(target_arch = "x86")] +const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX + +#[cfg(target_arch = "x86_64")] +const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX + +#[cfg(any(target_arch = "or1k"))] +const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 + +// The following code is based on GCC's C and C++ personality routines. 
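find_eh_action walks the call-site table in order: records are sorted by start offset, so the search stops as soon as ip falls before the current record, skips records that end before ip, and otherwise returns a cleanup or catch landing pad. A simplified in-memory model of that walk, with the records already decoded and the action-table matching reduced to a flag (the real code reads encoded pointers and LEB128 offsets straight from the LSDA and compares exception names):

```rust
/// One decoded call-site record; offsets are relative to the function start.
struct CallSite {
    start: usize,  // cs_start
    len: usize,    // cs_len
    lpad: usize,   // cs_lpad, 0 means "no landing pad"
    cleanup: bool, // stands in for cs_action == 0 (cleanup-only)
}

enum EhAction {
    None,
    Cleanup(usize),
    Catch(usize),
}

/// Sketch of the call-site search in `find_eh_action`, minus the action-table
/// walk that matches the exception name against each catch clause.
fn find_call_site(sites: &[CallSite], func_start: usize, ip: usize) -> EhAction {
    for cs in sites {
        if ip < func_start + cs.start {
            // Sorted by start offset: everything after this also starts past ip.
            break;
        }
        if ip > func_start + cs.start + cs.len {
            continue; // ip is past this record
        }
        if cs.lpad == 0 {
            return EhAction::None;
        }
        let lpad = func_start + cs.lpad; // lpad_base defaults to func_start
        return if cs.cleanup { EhAction::Cleanup(lpad) } else { EhAction::Catch(lpad) };
    }
    EhAction::None // personality present but no landing pad covers ip
}

fn main() {
    let sites = [CallSite { start: 0x10, len: 0x20, lpad: 0x80, cleanup: true }];
    assert!(matches!(find_call_site(&sites, 0x1000, 0x1018), EhAction::Cleanup(0x1080)));
    assert!(matches!(find_call_site(&sites, 0x1000, 0x1500), EhAction::None));
}
```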
For reference, see: +// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc +// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c +#[lang = "eh_personality"] +#[no_mangle] +#[allow(unused)] +unsafe extern "C" fn rust_eh_personality(version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + if version != 1 { + return uw::_URC_FATAL_PHASE1_ERROR; + } + let eh_action = match find_eh_action(context) { + Ok(action) => action, + Err(_) => return uw::_URC_FATAL_PHASE1_ERROR, + }; + if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 { + match eh_action { + EHAction::None | + EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND, + EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND, + EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR, + } + } else { + match eh_action { + EHAction::None => return uw::_URC_CONTINUE_UNWIND, + EHAction::Cleanup(lpad) | + EHAction::Catch(lpad) => { + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t); + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0); + uw::_Unwind_SetIP(context, lpad); + return uw::_URC_INSTALL_CONTEXT; + } + EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR, + } + } +} + +unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) + -> Result +{ + let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8; + let func = uw::_Unwind_GetRegionStart(context); + let ip = uw::_Unwind_GetIP(context); + Ok(dwarf::find_eh_action(lsda, func, ip, [].as_c_slice())) +} diff --git a/artiq/firmware/libeh/lib.rs b/artiq/firmware/libeh/lib.rs new file mode 100644 index 000000000..da209ea25 --- /dev/null +++ b/artiq/firmware/libeh/lib.rs @@ -0,0 +1,9 @@ +#![feature(lang_items, panic_unwind, libc, unwind_attributes)] +#![no_std] + +extern crate cslice; +extern crate unwind; +extern crate libc; + +pub mod dwarf; +pub mod eh_rust; diff --git a/artiq/firmware/libio/Cargo.toml b/artiq/firmware/libio/Cargo.toml new file mode 100644 index 000000000..6aacf0c47 --- /dev/null +++ b/artiq/firmware/libio/Cargo.toml @@ -0,0 +1,16 @@ +[package] +authors = ["M-Labs"] +name = "io" +version = "0.0.0" + +[lib] +name = "io" +path = "lib.rs" + +[dependencies] +failure = { version = "0.1", default-features = false } +failure_derive = { version = "0.1", default-features = false } +byteorder = { version = "1.0", default-features = false, optional = true } + +[features] +alloc = [] diff --git a/artiq/firmware/libio/cursor.rs b/artiq/firmware/libio/cursor.rs new file mode 100644 index 000000000..b820bdddc --- /dev/null +++ b/artiq/firmware/libio/cursor.rs @@ -0,0 +1,86 @@ +use {Read, Write}; + +#[derive(Debug, Clone)] +pub struct Cursor { + inner: T, + pos: usize +} + +impl Cursor { + #[inline] + pub fn new(inner: T) -> Cursor { + Cursor { inner, pos: 0 } + } + + #[inline] + pub fn into_inner(self) -> T { + self.inner + } + + #[inline] + pub fn get_ref(&self) -> &T { + &self.inner + } + + #[inline] + pub fn get_mut(&mut self) -> &mut T { + &mut self.inner + } + + #[inline] + pub fn position(&self) -> usize { + self.pos + } + + #[inline] + pub fn set_position(&mut self, pos: usize) { + self.pos = pos + } +} + +impl> Read for Cursor { + type ReadError = !; + + fn read(&mut self, buf: &mut [u8]) -> Result { + let data = &self.inner.as_ref()[self.pos..]; + let len = buf.len().min(data.len()); + buf[..len].copy_from_slice(&data[..len]); + 
self.pos += len; + Ok(len) + } +} + +impl<'a> Write for Cursor<&'a mut [u8]> { + type WriteError = !; + type FlushError = !; + + fn write(&mut self, buf: &[u8]) -> Result { + let data = &mut self.inner[self.pos..]; + let len = buf.len().min(data.len()); + data[..len].copy_from_slice(&buf[..len]); + self.pos += len; + Ok(len) + } + + #[inline] + fn flush(&mut self) -> Result<(), Self::FlushError> { + Ok(()) + } +} + +#[cfg(feature = "alloc")] +impl Write for Cursor<::alloc::Vec> { + type WriteError = !; + type FlushError = !; + + #[inline] + fn write(&mut self, buf: &[u8]) -> Result { + self.inner.extend_from_slice(buf); + Ok(buf.len()) + } + + #[inline] + fn flush(&mut self) -> Result<(), Self::FlushError> { + Ok(()) + } +} diff --git a/artiq/firmware/libio/lib.rs b/artiq/firmware/libio/lib.rs new file mode 100644 index 000000000..b10c41345 --- /dev/null +++ b/artiq/firmware/libio/lib.rs @@ -0,0 +1,147 @@ +#![no_std] +#![feature(never_type)] +#![cfg_attr(feature = "alloc", feature(alloc))] + +extern crate failure; +#[macro_use] +extern crate failure_derive; +#[cfg(feature = "alloc")] +#[macro_use] +extern crate alloc; +#[cfg(feature = "byteorder")] +extern crate byteorder; + +mod cursor; +#[cfg(feature = "byteorder")] +mod proto; + +pub use cursor::Cursor; +#[cfg(feature = "byteorder")] +pub use proto::{ProtoRead, ProtoWrite}; +#[cfg(all(feature = "byteorder", feature = "alloc"))] +pub use proto::ReadStringError; + +#[derive(Fail, Debug, Clone, PartialEq)] +pub enum Error { + #[fail(display = "unexpected end of stream")] + UnexpectedEnd, + #[fail(display = "{}", _0)] + Other(#[cause] T) +} + +impl From for Error { + fn from(value: T) -> Error { + Error::Other(value) + } +} + +pub trait Read { + type ReadError; + + /// Pull some bytes from this source into the specified buffer, returning + /// how many bytes were read. + fn read(&mut self, buf: &mut [u8]) -> Result; + + /// Read the exact number of bytes required to fill `buf`. + fn read_exact(&mut self, mut buf: &mut [u8]) -> Result<(), Error> { + while !buf.is_empty() { + let read_bytes = self.read(buf)?; + if read_bytes == 0 { + return Err(Error::UnexpectedEnd) + } + + buf = &mut { buf }[read_bytes..]; + } + + Ok(()) + } +} + +impl<'a, T: Read> Read for &'a mut T { + type ReadError = T::ReadError; + + fn read(&mut self, buf: &mut [u8]) -> Result { + T::read(self, buf) + } +} + +pub trait Write { + type WriteError; + type FlushError; + + /// Write a buffer into this object, returning how many bytes were written. + fn write(&mut self, buf: &[u8]) -> Result; + + /// Flush this output stream, ensuring that all intermediately buffered contents + /// reach their destination. + fn flush(&mut self) -> Result<(), Self::FlushError>; + + /// Attempts to write an entire buffer into `self`. + fn write_all(&mut self, mut buf: &[u8]) -> Result<(), Error> { + while buf.len() > 0 { + let written_bytes = self.write(buf)?; + if written_bytes == 0 { + return Err(Error::UnexpectedEnd) + } + + buf = &buf[written_bytes..]; + } + + Ok(()) + } + + /// Hints the writer how much bytes will be written after call to this function. + /// + /// At least `min` bytes should be written after the call to this function and + /// if `max` is `Some(x)` than at most `x` bytes should be written. 
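The new libio Cursor is a no_std stand-in for std::io::Cursor: reads copy out of the backing slice, writes into a &mut [u8] clamp to the space left, and position() tells the caller how many bytes a serializer produced. A self-contained sketch of that usage pattern with a stripped-down cursor of the same shape (this is a model, not the actual crate):

```rust
// Tiny stand-in for io::Cursor over a fixed buffer, as used when serializing
// packets into a preallocated transmit buffer: write, then take position() as
// the frame length.
struct Cursor<T> {
    inner: T,
    pos: usize,
}

impl<'a> Cursor<&'a mut [u8]> {
    fn new(inner: &'a mut [u8]) -> Self {
        Cursor { inner, pos: 0 }
    }

    fn position(&self) -> usize {
        self.pos
    }

    /// Copy as much of `buf` as fits past the current position; like the libio
    /// impl, a full buffer results in a short write rather than an error.
    fn write(&mut self, buf: &[u8]) -> usize {
        let data = &mut self.inner[self.pos..];
        let len = buf.len().min(data.len());
        data[..len].copy_from_slice(&buf[..len]);
        self.pos += len;
        len
    }
}

fn main() {
    let mut storage = [0u8; 8];
    let mut cursor = Cursor::new(&mut storage[..]);
    cursor.write(&[0x00]);                  // packet type
    cursor.write(&0x1234u16.to_be_bytes()); // a 16-bit field, network order
    assert_eq!(cursor.position(), 3);       // bytes written so far
    assert_eq!(&storage[..3], &[0x00, 0x12, 0x34]);
}
```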
+ fn size_hint(&mut self, _min: usize, _max: Option) {} +} + +impl<'a, T: Write> Write for &'a mut T { + type WriteError = T::WriteError; + type FlushError = T::FlushError; + + fn write(&mut self, buf: &[u8]) -> Result { + T::write(self, buf) + } + + fn flush(&mut self) -> Result<(), Self::FlushError> { + T::flush(self) + } + + fn size_hint(&mut self, min: usize, max: Option) { + T::size_hint(self, min, max) + } +} + +impl<'a> Write for &'a mut [u8] { + type WriteError = !; + type FlushError = !; + + fn write(&mut self, buf: &[u8]) -> Result { + let len = buf.len().min(self.len()); + self[..len].copy_from_slice(&buf[..len]); + Ok(len) + } + + #[inline] + fn flush(&mut self) -> Result<(), Self::FlushError> { + Ok(()) + } +} + +#[cfg(feature = "alloc")] +impl<'a> Write for alloc::Vec { + type WriteError = !; + type FlushError = !; + + fn write(&mut self, buf: &[u8]) -> Result { + self.extend_from_slice(buf); + Ok(buf.len()) + } + + #[inline] + fn flush(&mut self) -> Result<(), Self::FlushError> { + Ok(()) + } +} diff --git a/artiq/firmware/libio/proto.rs b/artiq/firmware/libio/proto.rs new file mode 100644 index 000000000..3df4a04aa --- /dev/null +++ b/artiq/firmware/libio/proto.rs @@ -0,0 +1,161 @@ +#[cfg(feature = "alloc")] +use {core::str::Utf8Error, alloc::String}; +use byteorder::{ByteOrder, NetworkEndian}; + +use ::{Read, Write, Error as IoError}; + +#[cfg(feature = "alloc")] +#[derive(Fail, Debug, Clone, PartialEq)] +pub enum ReadStringError { + #[fail(display = "invalid UTF-8: {}", _0)] + Utf8(Utf8Error), + #[fail(display = "{}", _0)] + Other(T) +} + +pub trait ProtoRead { + type ReadError; + + fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), Self::ReadError>; + + #[inline] + fn read_u8(&mut self) -> Result { + let mut bytes = [0; 1]; + self.read_exact(&mut bytes)?; + Ok(bytes[0]) + } + + #[inline] + fn read_u16(&mut self) -> Result { + let mut bytes = [0; 2]; + self.read_exact(&mut bytes)?; + Ok(NetworkEndian::read_u16(&bytes)) + } + + #[inline] + fn read_u32(&mut self) -> Result { + let mut bytes = [0; 4]; + self.read_exact(&mut bytes)?; + Ok(NetworkEndian::read_u32(&bytes)) + } + + #[inline] + fn read_u64(&mut self) -> Result { + let mut bytes = [0; 8]; + self.read_exact(&mut bytes)?; + Ok(NetworkEndian::read_u64(&bytes)) + } + + #[inline] + fn read_bool(&mut self) -> Result { + Ok(self.read_u8()? 
!= 0) + } + + #[cfg(feature = "alloc")] + #[inline] + fn read_bytes(&mut self) -> Result<::alloc::Vec, Self::ReadError> { + let length = self.read_u32()?; + let mut value = vec![0; length as usize]; + self.read_exact(&mut value)?; + Ok(value) + } + + #[cfg(feature = "alloc")] + #[inline] + fn read_string(&mut self) -> Result<::alloc::String, ReadStringError> { + let bytes = self.read_bytes().map_err(ReadStringError::Other)?; + String::from_utf8(bytes).map_err(|err| ReadStringError::Utf8(err.utf8_error())) + } +} + +pub trait ProtoWrite { + type WriteError; + + fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::WriteError>; + + #[inline] + fn write_u8(&mut self, value: u8) -> Result<(), Self::WriteError> { + let bytes = [value; 1]; + self.write_all(&bytes) + } + + #[inline] + fn write_i8(&mut self, value: i8) -> Result<(), Self::WriteError> { + let bytes = [value as u8; 1]; + self.write_all(&bytes) + } + + #[inline] + fn write_u16(&mut self, value: u16) -> Result<(), Self::WriteError> { + let mut bytes = [0; 2]; + NetworkEndian::write_u16(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_i16(&mut self, value: i16) -> Result<(), Self::WriteError> { + let mut bytes = [0; 2]; + NetworkEndian::write_i16(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_u32(&mut self, value: u32) -> Result<(), Self::WriteError> { + let mut bytes = [0; 4]; + NetworkEndian::write_u32(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_i32(&mut self, value: i32) -> Result<(), Self::WriteError> { + let mut bytes = [0; 4]; + NetworkEndian::write_i32(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_u64(&mut self, value: u64) -> Result<(), Self::WriteError> { + let mut bytes = [0; 8]; + NetworkEndian::write_u64(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_i64(&mut self, value: i64) -> Result<(), Self::WriteError> { + let mut bytes = [0; 8]; + NetworkEndian::write_i64(&mut bytes, value); + self.write_all(&bytes) + } + + #[inline] + fn write_bool(&mut self, value: bool) -> Result<(), Self::WriteError> { + self.write_u8(value as u8) + } + + #[inline] + fn write_bytes(&mut self, value: &[u8]) -> Result<(), Self::WriteError> { + self.write_u32(value.len() as u32)?; + self.write_all(value) + } + + #[inline] + fn write_string(&mut self, value: &str) -> Result<(), Self::WriteError> { + self.write_bytes(value.as_bytes()) + } +} + +impl ProtoRead for T where T: Read + ?Sized { + type ReadError = IoError; + + fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), Self::ReadError> { + T::read_exact(self, buf) + } +} + +impl ProtoWrite for T where T: Write + ?Sized { + type WriteError = IoError; + + fn write_all(&mut self, buf: &[u8]) -> Result<(), Self::WriteError> { + T::write_all(self, buf) + } +} diff --git a/artiq/firmware/liblogger_artiq/Cargo.toml b/artiq/firmware/liblogger_artiq/Cargo.toml index d9e6e2299..4eaa98f09 100644 --- a/artiq/firmware/liblogger_artiq/Cargo.toml +++ b/artiq/firmware/liblogger_artiq/Cargo.toml @@ -8,6 +8,6 @@ name = "logger_artiq" path = "lib.rs" [dependencies] -log = { version = "0.3", default-features = false, features = [] } -log_buffer = { version = "1.0" } -board = { path = "../libboard" } +log = { version = "0.4", default-features = false } +log_buffer = { version = "1.2" } +board_misoc = { path = "../libboard_misoc" } diff --git a/artiq/firmware/liblogger_artiq/lib.rs b/artiq/firmware/liblogger_artiq/lib.rs index 67b2c36f5..d0b72c6cd 100644 --- 
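ProtoRead and ProtoWrite frame every integer in network byte order via byteorder's NetworkEndian, and byte strings are written as a big-endian u32 length prefix followed by the raw bytes. The same layout can be reproduced with the standard library's to_be_bytes/from_be_bytes, which is what this sketch does (helper names are made up):

```rust
/// Length-prefixed byte string, as produced by `ProtoWrite::write_bytes`:
/// a big-endian u32 length followed by the payload.
fn write_bytes(out: &mut Vec<u8>, value: &[u8]) {
    out.extend_from_slice(&(value.len() as u32).to_be_bytes());
    out.extend_from_slice(value);
}

/// Inverse of `write_bytes`; returns the payload and the number of bytes consumed.
fn read_bytes(input: &[u8]) -> Option<(&[u8], usize)> {
    if input.len() < 4 {
        return None;
    }
    let mut len_bytes = [0u8; 4];
    len_bytes.copy_from_slice(&input[..4]);
    let len = u32::from_be_bytes(len_bytes) as usize;
    let payload = input.get(4..4 + len)?;
    Some((payload, 4 + len))
}

fn main() {
    let mut buf = Vec::new();
    write_bytes(&mut buf, b"ARTIQ");
    assert_eq!(&buf[..4], &[0u8, 0, 0, 5][..]); // length prefix, big-endian
    assert_eq!(read_bytes(&buf), Some((&b"ARTIQ"[..], 9)));
}
```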
a/artiq/firmware/liblogger_artiq/lib.rs +++ b/artiq/firmware/liblogger_artiq/lib.rs @@ -2,18 +2,49 @@ extern crate log; extern crate log_buffer; -extern crate board; +#[macro_use] +extern crate board_misoc; -use core::cell::{Cell, RefCell}; +use core::cell::{Cell, RefCell, RefMut}; use core::fmt::Write; -use log::{Log, LogMetadata, LogRecord, LogLevelFilter, MaxLogLevelFilter}; +use log::{Log, LevelFilter}; use log_buffer::LogBuffer; -use board::{Console, clock}; +use board_misoc::clock; + +pub struct LogBufferRef<'a> { + buffer: RefMut<'a, LogBuffer<&'static mut [u8]>>, + old_log_level: LevelFilter +} + +impl<'a> LogBufferRef<'a> { + fn new(buffer: RefMut<'a, LogBuffer<&'static mut [u8]>>) -> LogBufferRef<'a> { + let old_log_level = log::max_level(); + log::set_max_level(LevelFilter::Off); + LogBufferRef { buffer, old_log_level } + } + + pub fn is_empty(&self) -> bool { + self.buffer.is_empty() + } + + pub fn clear(&mut self) { + self.buffer.clear() + } + + pub fn extract(&mut self) -> &str { + self.buffer.extract() + } +} + +impl<'a> Drop for LogBufferRef<'a> { + fn drop(&mut self) { + log::set_max_level(self.old_log_level) + } +} pub struct BufferLogger { buffer: RefCell>, - filter: RefCell>, - uart_filter: Cell + uart_filter: Cell } static mut LOGGER: *const BufferLogger = 0 as *const _; @@ -22,71 +53,36 @@ impl BufferLogger { pub fn new(buffer: &'static mut [u8]) -> BufferLogger { BufferLogger { buffer: RefCell::new(LogBuffer::new(buffer)), - filter: RefCell::new(None), - uart_filter: Cell::new(LogLevelFilter::Info), + uart_filter: Cell::new(LevelFilter::Info), } } pub fn register(&self, f: F) { - // log::set_logger_raw captures a pointer to ourselves, so we must prevent - // ourselves from being moved or dropped after that function is called (and - // before log::shutdown_logger_raw is called). 
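The new LogBufferRef is a small RAII guard: taking the buffer saves the current global filter, switches logging off so that draining the buffer cannot recurse into the logger, and Drop restores the previous level. A generic sketch of that save-and-restore-on-drop pattern (the real guard additionally wraps a RefMut over the log buffer and uses the log crate's set_max_level):

```rust
use std::cell::Cell;

/// Save a value on construction, restore it when the guard is dropped.
struct Restore<'a, T: Copy> {
    slot: &'a Cell<T>,
    saved: T,
}

impl<'a, T: Copy> Restore<'a, T> {
    fn new(slot: &'a Cell<T>, temporary: T) -> Self {
        let saved = slot.get();
        slot.set(temporary);
        Restore { slot, saved }
    }
}

impl<'a, T: Copy> Drop for Restore<'a, T> {
    fn drop(&mut self) {
        self.slot.set(self.saved);
    }
}

#[derive(Copy, Clone, PartialEq, Debug)]
enum Level { Off, Info }

fn main() {
    let level = Cell::new(Level::Info);
    {
        // While the guard is alive, logging is "off", as LogBufferRef does
        // with log::set_max_level(LevelFilter::Off).
        let _guard = Restore::new(&level, Level::Off);
        assert_eq!(level.get(), Level::Off);
    }
    assert_eq!(level.get(), Level::Info); // restored on drop
}
```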
unsafe { - log::set_logger_raw(|max_log_level| { - max_log_level.set(LogLevelFilter::Info); - *self.filter.borrow_mut() = Some(max_log_level); - self as *const Log - }).expect("global logger can only be initialized once"); LOGGER = self; + log::set_logger(&*LOGGER) + .expect("global logger can only be initialized once"); } + log::set_max_level(LevelFilter::Info); f(); - log::shutdown_logger_raw().unwrap(); - unsafe { - LOGGER = 0 as *const _; - } } pub fn with R>(f: F) -> R { f(unsafe { &*LOGGER }) } - pub fn clear(&self) { - self.buffer.borrow_mut().clear() + pub fn buffer<'a>(&'a self) -> Result, ()> { + self.buffer + .try_borrow_mut() + .map(LogBufferRef::new) + .map_err(|_| ()) } - pub fn is_empty(&self) -> bool { - self.buffer.borrow_mut().extract().len() == 0 - } - - pub fn extract R>(&self, f: F) -> R { - let old_log_level = self.max_log_level(); - self.set_max_log_level(LogLevelFilter::Off); - let result = f(self.buffer.borrow_mut().extract()); - self.set_max_log_level(old_log_level); - result - } - - pub fn max_log_level(&self) -> LogLevelFilter { - self.filter - .borrow() - .as_ref() - .expect("register the logger before touching maximum log level") - .get() - } - - pub fn set_max_log_level(&self, max_level: LogLevelFilter) { - self.filter - .borrow() - .as_ref() - .expect("register the logger before touching maximum log level") - .set(max_level) - } - - pub fn uart_log_level(&self) -> LogLevelFilter { + pub fn uart_log_level(&self) -> LevelFilter { self.uart_filter.get() } - pub fn set_uart_log_level(&self, max_level: LogLevelFilter) { + pub fn set_uart_log_level(&self, max_level: LevelFilter) { self.uart_filter.set(max_level) } } @@ -95,25 +91,28 @@ impl BufferLogger { unsafe impl Sync for BufferLogger {} impl Log for BufferLogger { - fn enabled(&self, _metadata: &LogMetadata) -> bool { + fn enabled(&self, _metadata: &log::Metadata) -> bool { true } - fn log(&self, record: &LogRecord) { + fn log(&self, record: &log::Record) { if self.enabled(record.metadata()) { let timestamp = clock::get_us(); let seconds = timestamp / 1_000_000; let micros = timestamp % 1_000_000; - writeln!(self.buffer.borrow_mut(), - "[{:6}.{:06}s] {:>5}({}): {}", seconds, micros, - record.level(), record.target(), record.args()).unwrap(); + if let Ok(mut buffer) = self.buffer.try_borrow_mut() { + writeln!(buffer, "[{:6}.{:06}s] {:>5}({}): {}", seconds, micros, + record.level(), record.target(), record.args()).unwrap(); + } if record.level() <= self.uart_filter.get() { - writeln!(Console, - "[{:6}.{:06}s] {:>5}({}): {}", seconds, micros, - record.level(), record.target(), record.args()).unwrap(); + println!("[{:6}.{:06}s] {:>5}({}): {}", seconds, micros, + record.level(), record.target(), record.args()); } } } + + fn flush(&self) { + } } diff --git a/artiq/firmware/libproto/Cargo.toml b/artiq/firmware/libproto/Cargo.toml deleted file mode 100644 index 487ae483b..000000000 --- a/artiq/firmware/libproto/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -authors = ["M-Labs"] -name = "proto" -version = "0.0.0" - -[lib] -name = "proto" -path = "lib.rs" - -[dependencies] -byteorder = { version = "1.0", default-features = false } -cslice = { version = "0.3" } -log = { version = "0.3", default-features = false, optional = true } -std_artiq = { path = "../libstd_artiq", features = ["alloc"] } -dyld = { path = "../libdyld" } diff --git a/artiq/firmware/libproto/lib.rs b/artiq/firmware/libproto/lib.rs deleted file mode 100644 index d3422b5ee..000000000 --- a/artiq/firmware/libproto/lib.rs +++ /dev/null @@ -1,103 
+0,0 @@ -#![no_std] - -extern crate byteorder; -extern crate cslice; -#[cfg(feature = "log")] -#[macro_use] -extern crate log; - -extern crate dyld; -extern crate std_artiq as std; - -// Internal protocols. -pub mod kernel_proto; - -// External protocols. -pub mod mgmt_proto; -pub mod analyzer_proto; -pub mod moninj_proto; -pub mod session_proto; -pub mod rpc_proto; - -use std::io::{Read, Write, Result, Error, ErrorKind}; -use std::vec::Vec; -use std::string::String; -use byteorder::{ByteOrder, NetworkEndian}; - -pub trait ReadExt: Read { - fn read_u8(&mut self) -> Result { - let mut bytes = [0; 1]; - self.read_exact(&mut bytes)?; - Ok(bytes[0]) - } - - fn read_u16(&mut self) -> Result { - let mut bytes = [0; 2]; - self.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u16(&bytes)) - } - - fn read_u32(&mut self) -> Result { - let mut bytes = [0; 4]; - self.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u32(&bytes)) - } - - fn read_u64(&mut self) -> Result { - let mut bytes = [0; 8]; - self.read_exact(&mut bytes)?; - Ok(NetworkEndian::read_u64(&bytes)) - } - - fn read_bytes(&mut self) -> Result> { - let length = self.read_u32()?; - let mut value = Vec::new(); - value.resize(length as usize, 0); - self.read_exact(&mut value)?; - Ok(value) - } - - fn read_string(&mut self) -> Result { - let bytes = self.read_bytes()?; - String::from_utf8(bytes) - .map_err(|_| Error::new(ErrorKind::InvalidData, "invalid UTF-8")) - } -} - -impl ReadExt for R {} - -pub trait WriteExt: Write { - fn write_u8(&mut self, value: u8) -> Result<()> { - let bytes = [value; 1]; - self.write_all(&bytes) - } - - fn write_u16(&mut self, value: u16) -> Result<()> { - let mut bytes = [0; 2]; - NetworkEndian::write_u16(&mut bytes, value); - self.write_all(&bytes) - } - - fn write_u32(&mut self, value: u32) -> Result<()> { - let mut bytes = [0; 4]; - NetworkEndian::write_u32(&mut bytes, value); - self.write_all(&bytes) - } - - fn write_u64(&mut self, value: u64) -> Result<()> { - let mut bytes = [0; 8]; - NetworkEndian::write_u64(&mut bytes, value); - self.write_all(&bytes) - } - - fn write_bytes(&mut self, value: &[u8]) -> Result<()> { - self.write_u32(value.len() as u32)?; - self.write_all(value) - } - - fn write_string(&mut self, value: &str) -> Result<()> { - self.write_bytes(value.as_bytes()) - } -} - -impl WriteExt for W {} diff --git a/artiq/firmware/libproto/mgmt_proto.rs b/artiq/firmware/libproto/mgmt_proto.rs deleted file mode 100644 index 123af8915..000000000 --- a/artiq/firmware/libproto/mgmt_proto.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::vec::Vec; -use std::io::{self, Read, Write}; -use {ReadExt, WriteExt}; -#[cfg(feature = "log")] -use log::LogLevelFilter; - -#[derive(Debug)] -pub enum Request { - GetLog, - ClearLog, - PullLog, - #[cfg(feature = "log")] - SetLogFilter(LogLevelFilter), - #[cfg(feature = "log")] - SetUartLogFilter(LogLevelFilter), - - Hotswap(Vec), - Reboot, -} - -pub enum Reply<'a> { - Success, - - LogContent(&'a str), - - RebootImminent, -} - -impl Request { - pub fn read_from(reader: &mut Read) -> io::Result { - #[cfg(feature = "log")] - fn read_log_level_filter(reader: &mut Read) -> io::Result { - Ok(match reader.read_u8()? { - 0 => LogLevelFilter::Off, - 1 => LogLevelFilter::Error, - 2 => LogLevelFilter::Warn, - 3 => LogLevelFilter::Info, - 4 => LogLevelFilter::Debug, - 5 => LogLevelFilter::Trace, - _ => return Err(io::Error::new(io::ErrorKind::InvalidData, - "invalid log level")) - }) - } - - Ok(match reader.read_u8()? 
{ - 1 => Request::GetLog, - 2 => Request::ClearLog, - 7 => Request::PullLog, - #[cfg(feature = "log")] - 3 => Request::SetLogFilter(read_log_level_filter(reader)?), - #[cfg(feature = "log")] - 6 => Request::SetUartLogFilter(read_log_level_filter(reader)?), - 4 => Request::Hotswap(reader.read_bytes()?), - 5 => Request::Reboot, - _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown request type")) - }) - } -} - -impl<'a> Reply<'a> { - pub fn write_to(&self, writer: &mut Write) -> io::Result<()> { - match *self { - Reply::Success => { - writer.write_u8(1)?; - }, - - Reply::LogContent(ref log) => { - writer.write_u8(2)?; - writer.write_string(log)?; - }, - - Reply::RebootImminent => { - writer.write_u8(3)?; - }, - } - Ok(()) - } -} diff --git a/artiq/firmware/libproto_artiq/Cargo.toml b/artiq/firmware/libproto_artiq/Cargo.toml new file mode 100644 index 000000000..92d5e0d0f --- /dev/null +++ b/artiq/firmware/libproto_artiq/Cargo.toml @@ -0,0 +1,20 @@ +[package] +authors = ["M-Labs"] +name = "proto_artiq" +version = "0.0.0" + +[lib] +name = "proto_artiq" +path = "lib.rs" + +[dependencies] +failure = { version = "0.1", default-features = false } +failure_derive = { version = "0.1", default-features = false } +byteorder = { version = "1.0", default-features = false } +cslice = { version = "0.3" } +log = { version = "0.4", default-features = false, optional = true } +io = { path = "../libio", features = ["byteorder"] } +dyld = { path = "../libdyld" } + +[features] +alloc = ["io/alloc"] diff --git a/artiq/firmware/libproto/analyzer_proto.rs b/artiq/firmware/libproto_artiq/analyzer_proto.rs similarity index 73% rename from artiq/firmware/libproto/analyzer_proto.rs rename to artiq/firmware/libproto_artiq/analyzer_proto.rs index 45153fdd4..a35f8de39 100644 --- a/artiq/firmware/libproto/analyzer_proto.rs +++ b/artiq/firmware/libproto_artiq/analyzer_proto.rs @@ -1,5 +1,4 @@ -use std::io::{self, Write}; -use WriteExt; +use io::{Write, ProtoWrite, Error as IoError}; #[derive(Debug)] pub struct Header { @@ -11,7 +10,9 @@ pub struct Header { } impl Header { - pub fn write_to(&self, writer: &mut Write) -> io::Result<()> { + pub fn write_to(&self, writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized + { writer.write_u32(self.sent_bytes)?; writer.write_u64(self.total_byte_count)?; writer.write_u8(self.overflow_occurred as u8)?; diff --git a/artiq/firmware/libproto_artiq/drtioaux_proto.rs b/artiq/firmware/libproto_artiq/drtioaux_proto.rs new file mode 100644 index 000000000..bd4875655 --- /dev/null +++ b/artiq/firmware/libproto_artiq/drtioaux_proto.rs @@ -0,0 +1,362 @@ +use io::{Read, ProtoRead, Write, ProtoWrite, Error as IoError}; + +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "unknown packet {:#02x}", _0)] + UnknownPacket(u8), + #[fail(display = "{}", _0)] + Io(#[cause] IoError) +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Io(value) + } +} + +#[derive(PartialEq, Debug)] +pub enum Packet { + EchoRequest, + EchoReply, + ResetRequest, + ResetAck, + TSCAck, + + DestinationStatusRequest { destination: u8 }, + DestinationDownReply, + DestinationOkReply, + DestinationSequenceErrorReply { channel: u16 }, + DestinationCollisionReply { channel: u16 }, + DestinationBusyReply { channel: u16 }, + + RoutingSetPath { destination: u8, hops: [u8; 32] }, + RoutingSetRank { rank: u8 }, + RoutingAck, + + MonitorRequest { destination: u8, channel: u16, probe: u8 }, + MonitorReply { value: u32 }, + InjectionRequest { destination: u8, channel: u16, overrd: 
u8, value: u8 }, + InjectionStatusRequest { destination: u8, channel: u16, overrd: u8 }, + InjectionStatusReply { value: u8 }, + + I2cStartRequest { destination: u8, busno: u8 }, + I2cRestartRequest { destination: u8, busno: u8 }, + I2cStopRequest { destination: u8, busno: u8 }, + I2cWriteRequest { destination: u8, busno: u8, data: u8 }, + I2cWriteReply { succeeded: bool, ack: bool }, + I2cReadRequest { destination: u8, busno: u8, ack: bool }, + I2cReadReply { succeeded: bool, data: u8 }, + I2cBasicReply { succeeded: bool }, + + SpiSetConfigRequest { destination: u8, busno: u8, flags: u8, length: u8, div: u8, cs: u8 }, + SpiWriteRequest { destination: u8, busno: u8, data: u32 }, + SpiReadRequest { destination: u8, busno: u8 }, + SpiReadReply { succeeded: bool, data: u32 }, + SpiBasicReply { succeeded: bool }, + + JdacBasicRequest { destination: u8, dacno: u8, reqno: u8, param: u8 }, + JdacBasicReply { succeeded: bool, retval: u8 }, +} + +impl Packet { + pub fn read_from(reader: &mut R) -> Result> + where R: Read + ?Sized + { + Ok(match reader.read_u8()? { + 0x00 => Packet::EchoRequest, + 0x01 => Packet::EchoReply, + 0x02 => Packet::ResetRequest, + 0x03 => Packet::ResetAck, + 0x04 => Packet::TSCAck, + + 0x20 => Packet::DestinationStatusRequest { + destination: reader.read_u8()? + }, + 0x21 => Packet::DestinationDownReply, + 0x22 => Packet::DestinationOkReply, + 0x23 => Packet::DestinationSequenceErrorReply { + channel: reader.read_u16()? + }, + 0x24 => Packet::DestinationCollisionReply { + channel: reader.read_u16()? + }, + 0x25 => Packet::DestinationBusyReply { + channel: reader.read_u16()? + }, + + 0x30 => { + let destination = reader.read_u8()?; + let mut hops = [0; 32]; + reader.read_exact(&mut hops)?; + Packet::RoutingSetPath { + destination: destination, + hops: hops + } + }, + 0x31 => Packet::RoutingSetRank { + rank: reader.read_u8()? + }, + 0x32 => Packet::RoutingAck, + + 0x40 => Packet::MonitorRequest { + destination: reader.read_u8()?, + channel: reader.read_u16()?, + probe: reader.read_u8()? + }, + 0x41 => Packet::MonitorReply { + value: reader.read_u32()? + }, + 0x50 => Packet::InjectionRequest { + destination: reader.read_u8()?, + channel: reader.read_u16()?, + overrd: reader.read_u8()?, + value: reader.read_u8()? + }, + 0x51 => Packet::InjectionStatusRequest { + destination: reader.read_u8()?, + channel: reader.read_u16()?, + overrd: reader.read_u8()? + }, + 0x52 => Packet::InjectionStatusReply { + value: reader.read_u8()? + }, + + 0x80 => Packet::I2cStartRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()? + }, + 0x81 => Packet::I2cRestartRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()? + }, + 0x82 => Packet::I2cStopRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()? + }, + 0x83 => Packet::I2cWriteRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()?, + data: reader.read_u8()? + }, + 0x84 => Packet::I2cWriteReply { + succeeded: reader.read_bool()?, + ack: reader.read_bool()? + }, + 0x85 => Packet::I2cReadRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()?, + ack: reader.read_bool()? + }, + 0x86 => Packet::I2cReadReply { + succeeded: reader.read_bool()?, + data: reader.read_u8()? + }, + 0x87 => Packet::I2cBasicReply { + succeeded: reader.read_bool()? + }, + + 0x90 => Packet::SpiSetConfigRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()?, + flags: reader.read_u8()?, + length: reader.read_u8()?, + div: reader.read_u8()?, + cs: reader.read_u8()? 
+ }, + /* 0x91: was Packet::SpiSetXferRequest */ + 0x92 => Packet::SpiWriteRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()?, + data: reader.read_u32()? + }, + 0x93 => Packet::SpiReadRequest { + destination: reader.read_u8()?, + busno: reader.read_u8()? + }, + 0x94 => Packet::SpiReadReply { + succeeded: reader.read_bool()?, + data: reader.read_u32()? + }, + 0x95 => Packet::SpiBasicReply { + succeeded: reader.read_bool()? + }, + + 0xa0 => Packet::JdacBasicRequest { + destination: reader.read_u8()?, + dacno: reader.read_u8()?, + reqno: reader.read_u8()?, + param: reader.read_u8()?, + }, + 0xa1 => Packet::JdacBasicReply { + succeeded: reader.read_bool()?, + retval: reader.read_u8()? + }, + + ty => return Err(Error::UnknownPacket(ty)) + }) + } + + pub fn write_to(&self, writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized + { + match *self { + Packet::EchoRequest => + writer.write_u8(0x00)?, + Packet::EchoReply => + writer.write_u8(0x01)?, + Packet::ResetRequest => + writer.write_u8(0x02)?, + Packet::ResetAck => + writer.write_u8(0x03)?, + Packet::TSCAck => + writer.write_u8(0x04)?, + + Packet::DestinationStatusRequest { destination } => { + writer.write_u8(0x20)?; + writer.write_u8(destination)?; + }, + Packet::DestinationDownReply => + writer.write_u8(0x21)?, + Packet::DestinationOkReply => + writer.write_u8(0x22)?, + Packet::DestinationSequenceErrorReply { channel } => { + writer.write_u8(0x23)?; + writer.write_u16(channel)?; + }, + Packet::DestinationCollisionReply { channel } => { + writer.write_u8(0x24)?; + writer.write_u16(channel)?; + }, + Packet::DestinationBusyReply { channel } => { + writer.write_u8(0x25)?; + writer.write_u16(channel)?; + }, + + Packet::RoutingSetPath { destination, hops } => { + writer.write_u8(0x30)?; + writer.write_u8(destination)?; + writer.write_all(&hops)?; + }, + Packet::RoutingSetRank { rank } => { + writer.write_u8(0x31)?; + writer.write_u8(rank)?; + }, + Packet::RoutingAck => + writer.write_u8(0x32)?, + + Packet::MonitorRequest { destination, channel, probe } => { + writer.write_u8(0x40)?; + writer.write_u8(destination)?; + writer.write_u16(channel)?; + writer.write_u8(probe)?; + }, + Packet::MonitorReply { value } => { + writer.write_u8(0x41)?; + writer.write_u32(value)?; + }, + Packet::InjectionRequest { destination, channel, overrd, value } => { + writer.write_u8(0x50)?; + writer.write_u8(destination)?; + writer.write_u16(channel)?; + writer.write_u8(overrd)?; + writer.write_u8(value)?; + }, + Packet::InjectionStatusRequest { destination, channel, overrd } => { + writer.write_u8(0x51)?; + writer.write_u8(destination)?; + writer.write_u16(channel)?; + writer.write_u8(overrd)?; + }, + Packet::InjectionStatusReply { value } => { + writer.write_u8(0x52)?; + writer.write_u8(value)?; + }, + + Packet::I2cStartRequest { destination, busno } => { + writer.write_u8(0x80)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + }, + Packet::I2cRestartRequest { destination, busno } => { + writer.write_u8(0x81)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + }, + Packet::I2cStopRequest { destination, busno } => { + writer.write_u8(0x82)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + }, + Packet::I2cWriteRequest { destination, busno, data } => { + writer.write_u8(0x83)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + writer.write_u8(data)?; + }, + Packet::I2cWriteReply { succeeded, ack } => { + writer.write_u8(0x84)?; + writer.write_bool(succeeded)?; + writer.write_bool(ack)?; + 
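Every aux packet is a one-byte type code followed by its fields in a fixed order, and read_from and write_to are exact mirrors of each other, which is what makes an encode-then-decode round trip (possible now that Packet derives PartialEq) a cheap consistency check. A sketch with a two-variant stand-in whose opcodes mirror EchoRequest and MonitorReply; everything else about the types and helpers here is illustrative:

```rust
/// A toy subset of the aux protocol: a type byte, then fixed-order fields.
#[derive(Debug, PartialEq)]
enum Packet {
    EchoRequest,                 // 0x00
    MonitorReply { value: u32 }, // 0x41
}

impl Packet {
    fn write_to(&self, out: &mut Vec<u8>) {
        match *self {
            Packet::EchoRequest => out.push(0x00),
            Packet::MonitorReply { value } => {
                out.push(0x41);
                out.extend_from_slice(&value.to_be_bytes()); // network order
            }
        }
    }

    fn read_from(input: &[u8]) -> Result<Packet, &'static str> {
        match input.first().copied() {
            Some(0x00) => Ok(Packet::EchoRequest),
            Some(0x41) => {
                let mut bytes = [0u8; 4];
                bytes.copy_from_slice(input.get(1..5).ok_or("truncated packet")?);
                Ok(Packet::MonitorReply { value: u32::from_be_bytes(bytes) })
            }
            Some(_) => Err("unknown packet"),
            None => Err("empty packet"),
        }
    }
}

fn main() {
    let packet = Packet::MonitorReply { value: 0xDEAD_BEEF };
    let mut wire = Vec::new();
    packet.write_to(&mut wire);
    assert_eq!(Packet::read_from(&wire), Ok(packet)); // round trip
}
```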
}, + Packet::I2cReadRequest { destination, busno, ack } => { + writer.write_u8(0x85)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + writer.write_bool(ack)?; + }, + Packet::I2cReadReply { succeeded, data } => { + writer.write_u8(0x86)?; + writer.write_bool(succeeded)?; + writer.write_u8(data)?; + }, + Packet::I2cBasicReply { succeeded } => { + writer.write_u8(0x87)?; + writer.write_bool(succeeded)?; + }, + + Packet::SpiSetConfigRequest { destination, busno, flags, length, div, cs } => { + writer.write_u8(0x90)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + writer.write_u8(flags)?; + writer.write_u8(length)?; + writer.write_u8(div)?; + writer.write_u8(cs)?; + }, + Packet::SpiWriteRequest { destination, busno, data } => { + writer.write_u8(0x92)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + writer.write_u32(data)?; + }, + Packet::SpiReadRequest { destination, busno } => { + writer.write_u8(0x93)?; + writer.write_u8(destination)?; + writer.write_u8(busno)?; + }, + Packet::SpiReadReply { succeeded, data } => { + writer.write_u8(0x94)?; + writer.write_bool(succeeded)?; + writer.write_u32(data)?; + }, + Packet::SpiBasicReply { succeeded } => { + writer.write_u8(0x95)?; + writer.write_bool(succeeded)?; + }, + + Packet::JdacBasicRequest { destination, dacno, reqno, param } => { + writer.write_u8(0xa0)?; + writer.write_u8(destination)?; + writer.write_u8(dacno)?; + writer.write_u8(reqno)?; + writer.write_u8(param)?; + } + Packet::JdacBasicReply { succeeded, retval } => { + writer.write_u8(0xa1)?; + writer.write_bool(succeeded)?; + writer.write_u8(retval)?; + }, + } + Ok(()) + } +} diff --git a/artiq/firmware/libproto/kernel_proto.rs b/artiq/firmware/libproto_artiq/kernel_proto.rs similarity index 70% rename from artiq/firmware/libproto/kernel_proto.rs rename to artiq/firmware/libproto_artiq/kernel_proto.rs index c59306cb8..3ca55426d 100644 --- a/artiq/firmware/libproto/kernel_proto.rs +++ b/artiq/firmware/libproto_artiq/kernel_proto.rs @@ -2,7 +2,7 @@ use core::fmt; use dyld; pub const KERNELCPU_EXEC_ADDRESS: usize = 0x40800000; -pub const KERNELCPU_PAYLOAD_ADDRESS: usize = 0x40840000; +pub const KERNELCPU_PAYLOAD_ADDRESS: usize = 0x40860000; pub const KERNELCPU_LAST_ADDRESS: usize = 0x4fffffff; pub const KSUPPORT_HEADER_SIZE: usize = 0x80; @@ -22,12 +22,11 @@ pub enum Message<'a> { LoadRequest(&'a [u8]), LoadReply(Result<(), dyld::Error<'a>>), - NowInitRequest, - NowInitReply(u64), - NowSave(u64), - RtioInitRequest, + RtioDestinationStatusRequest { destination: u8 }, + RtioDestinationStatusReply { up: bool }, + DmaRecordStart(&'a str), DmaRecordAppend(&'a [u8]), DmaRecordStop { @@ -46,15 +45,6 @@ pub enum Message<'a> { duration: u64 }, - DrtioChannelStateRequest { channel: u32 }, - DrtioChannelStateReply { fifo_space: u16, last_timestamp: u64 }, - DrtioResetChannelStateRequest { channel: u32 }, - DrtioGetFifoSpaceRequest { channel: u32 }, - DrtioPacketCountRequest { linkno: u8 }, - DrtioPacketCountReply { tx_cnt: u32, rx_cnt: u32 }, - DrtioFifoSpaceReqCountRequest { linkno: u8 }, - DrtioFifoSpaceReqCountReply { cnt: u32 }, - RunFinished, RunException { exception: Exception<'a>, @@ -62,10 +52,6 @@ pub enum Message<'a> { }, RunAborted, - WatchdogSetRequest { ms: u64 }, - WatchdogSetReply { id: usize }, - WatchdogClear { id: usize }, - RpcSend { async: bool, service: u32, @@ -74,6 +60,7 @@ pub enum Message<'a> { }, RpcRecvRequest(*mut ()), RpcRecvReply(Result>), + RpcFlush, CacheGetRequest { key: &'a str }, CacheGetReply { value: &'static [i32] }, @@ 
-89,8 +76,7 @@ pub enum Message<'a> { I2cReadReply { succeeded: bool, data: u8 }, I2cBasicReply { succeeded: bool }, - SpiSetConfigRequest { busno: u32, flags: u8, write_div: u8, read_div: u8 }, - SpiSetXferRequest { busno: u32, chip_select: u16, write_length: u8, read_length: u8 }, + SpiSetConfigRequest { busno: u32, flags: u8, length: u8, div: u8, cs: u8 }, SpiWriteRequest { busno: u32, data: u32 }, SpiReadRequest { busno: u32 }, SpiReadReply { succeeded: bool, data: u32 }, diff --git a/artiq/firmware/libproto_artiq/lib.rs b/artiq/firmware/libproto_artiq/lib.rs new file mode 100644 index 000000000..66c04d5e6 --- /dev/null +++ b/artiq/firmware/libproto_artiq/lib.rs @@ -0,0 +1,31 @@ +#![no_std] +#![cfg_attr(feature = "alloc", feature(alloc))] + +extern crate failure; +#[macro_use] +extern crate failure_derive; +#[cfg(feature = "alloc")] +extern crate alloc; +extern crate cslice; +#[cfg(feature = "log")] +#[macro_use] +extern crate log; + +extern crate byteorder; +extern crate io; +extern crate dyld; + +// Internal protocols. +pub mod kernel_proto; +pub mod drtioaux_proto; + +// External protocols. +#[cfg(feature = "alloc")] +pub mod mgmt_proto; +#[cfg(feature = "alloc")] +pub mod analyzer_proto; +#[cfg(feature = "alloc")] +pub mod moninj_proto; +#[cfg(feature = "alloc")] +pub mod session_proto; +pub mod rpc_proto; diff --git a/artiq/firmware/libproto_artiq/mgmt_proto.rs b/artiq/firmware/libproto_artiq/mgmt_proto.rs new file mode 100644 index 000000000..ffa2a9763 --- /dev/null +++ b/artiq/firmware/libproto_artiq/mgmt_proto.rs @@ -0,0 +1,188 @@ +use core::str::Utf8Error; +use alloc::{Vec, String}; +#[cfg(feature = "log")] +use log; + +use io::{Read, ProtoRead, Write, ProtoWrite, Error as IoError, ReadStringError}; + +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "incorrect magic")] + WrongMagic, + #[fail(display = "unknown packet {:#02x}", _0)] + UnknownPacket(u8), + #[fail(display = "unknown log level {}", _0)] + UnknownLogLevel(u8), + #[fail(display = "invalid UTF-8: {}", _0)] + Utf8(Utf8Error), + #[fail(display = "{}", _0)] + Io(#[cause] IoError) +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Io(value) + } +} + +impl From>> for Error { + fn from(value: ReadStringError>) -> Error { + match value { + ReadStringError::Utf8(err) => Error::Utf8(err), + ReadStringError::Other(err) => Error::Io(err) + } + } +} + +pub fn read_magic(reader: &mut R) -> Result<(), Error> + where R: Read + ?Sized +{ + const MAGIC: &'static [u8] = b"ARTIQ management\n"; + + let mut magic: [u8; 17] = [0; 17]; + reader.read_exact(&mut magic)?; + if magic != MAGIC { + Err(Error::WrongMagic) + } else { + Ok(()) + } +} + +#[derive(Debug)] +pub enum Request { + GetLog, + ClearLog, + PullLog, + #[cfg(feature = "log")] + SetLogFilter(log::LevelFilter), + #[cfg(feature = "log")] + SetUartLogFilter(log::LevelFilter), + + ConfigRead { key: String }, + ConfigWrite { key: String, value: Vec }, + ConfigRemove { key: String }, + ConfigErase, + + StartProfiler { + interval_us: u32, + hits_size: u32, + edges_size: u32, + }, + StopProfiler, + GetProfile, + + Hotswap(Vec), + Reboot, + + DebugAllocator, +} + +pub enum Reply<'a> { + Success, + Error, + Unavailable, + + LogContent(&'a str), + + ConfigData(&'a [u8]), + + Profile, + + RebootImminent, +} + +impl Request { + pub fn read_from(reader: &mut R) -> Result> + where R: Read + ?Sized + { + #[cfg(feature = "log")] + fn read_log_level_filter(reader: &mut T) -> + Result> { + Ok(match reader.read_u8()? 
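Both management and moninj sessions begin with a fixed ASCII magic line ("ARTIQ management\n" here, "ARTIQ moninj\n" for moninj) that read_magic consumes in full and compares byte for byte before any request is parsed. A sketch of that handshake over an in-memory stream, using std::io::Read as a stand-in for the no_std Read trait:

```rust
use std::io::{self, Read};

const MAGIC: &[u8] = b"ARTIQ management\n"; // 17 bytes, as in mgmt_proto

/// Read exactly `MAGIC.len()` bytes and require an exact match, like `read_magic`.
fn check_magic<R: Read>(reader: &mut R) -> io::Result<()> {
    let mut received = vec![0u8; MAGIC.len()];
    reader.read_exact(&mut received)?;
    if received != MAGIC {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "incorrect magic"));
    }
    Ok(())
}

fn main() {
    let mut good = io::Cursor::new(b"ARTIQ management\n1".to_vec());
    assert!(check_magic(&mut good).is_ok()); // the request byte is left unread

    let mut bad = io::Cursor::new(b"ARTIQ moninj\nxxxx".to_vec());
    assert!(check_magic(&mut bad).is_err());
}
```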
{ + 0 => log::LevelFilter::Off, + 1 => log::LevelFilter::Error, + 2 => log::LevelFilter::Warn, + 3 => log::LevelFilter::Info, + 4 => log::LevelFilter::Debug, + 5 => log::LevelFilter::Trace, + lv => return Err(Error::UnknownLogLevel(lv)) + }) + } + + Ok(match reader.read_u8()? { + 1 => Request::GetLog, + 2 => Request::ClearLog, + 7 => Request::PullLog, + #[cfg(feature = "log")] + 3 => Request::SetLogFilter(read_log_level_filter(reader)?), + #[cfg(feature = "log")] + 6 => Request::SetUartLogFilter(read_log_level_filter(reader)?), + + 12 => Request::ConfigRead { + key: reader.read_string()? + }, + 13 => Request::ConfigWrite { + key: reader.read_string()?, + value: reader.read_bytes()? + }, + 14 => Request::ConfigRemove { + key: reader.read_string()? + }, + 15 => Request::ConfigErase, + + 9 => Request::StartProfiler { + interval_us: reader.read_u32()?, + hits_size: reader.read_u32()?, + edges_size: reader.read_u32()?, + }, + 10 => Request::StopProfiler, + 11 => Request::GetProfile, + + 4 => Request::Hotswap(reader.read_bytes()?), + 5 => Request::Reboot, + + 8 => Request::DebugAllocator, + + ty => return Err(Error::UnknownPacket(ty)) + }) + } +} + +impl<'a> Reply<'a> { + pub fn write_to(&self, writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized + { + match *self { + Reply::Success => { + writer.write_u8(1)?; + } + Reply::Error => { + writer.write_u8(6)?; + } + + Reply::Unavailable => { + writer.write_u8(4)?; + } + + Reply::LogContent(ref log) => { + writer.write_u8(2)?; + writer.write_string(log)?; + } + + Reply::ConfigData(ref bytes) => { + writer.write_u8(7)?; + writer.write_bytes(bytes)?; + }, + + Reply::Profile => { + writer.write_u8(5)?; + // profile data follows + } + + Reply::RebootImminent => { + writer.write_u8(3)?; + } + } + Ok(()) + } +} diff --git a/artiq/firmware/libproto/moninj_proto.rs b/artiq/firmware/libproto_artiq/moninj_proto.rs similarity index 50% rename from artiq/firmware/libproto/moninj_proto.rs rename to artiq/firmware/libproto_artiq/moninj_proto.rs index de233d3c0..dba2f84bc 100644 --- a/artiq/firmware/libproto/moninj_proto.rs +++ b/artiq/firmware/libproto_artiq/moninj_proto.rs @@ -1,9 +1,39 @@ -use std::io::{self, Read, Write}; -use {ReadExt, WriteExt}; +use io::{Read, ProtoRead, Write, ProtoWrite, Error as IoError}; + +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "incorrect magic")] + WrongMagic, + #[fail(display = "unknown packet {:#02x}", _0)] + UnknownPacket(u8), + #[fail(display = "{}", _0)] + Io(#[cause] IoError) +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Io(value) + } +} + +pub fn read_magic(reader: &mut R) -> Result<(), Error> + where R: Read + ?Sized +{ + const MAGIC: &'static [u8] = b"ARTIQ moninj\n"; + + let mut magic: [u8; 13] = [0; 13]; + reader.read_exact(&mut magic)?; + if magic != MAGIC { + Err(Error::WrongMagic) + } else { + Ok(()) + } +} #[derive(Debug)] pub enum HostMessage { - Monitor { enable: bool, channel: u32, probe: u8 }, + MonitorProbe { enable: bool, channel: u32, probe: u8 }, + MonitorInjection { enable: bool, channel: u32, overrd: u8 }, Inject { channel: u32, overrd: u8, value: u8 }, GetInjectionStatus { channel: u32, overrd: u8 } } @@ -15,9 +45,11 @@ pub enum DeviceMessage { } impl HostMessage { - pub fn read_from(reader: &mut Read) -> io::Result { + pub fn read_from(reader: &mut R) -> Result> + where R: Read + ?Sized + { Ok(match reader.read_u8()? { - 0 => HostMessage::Monitor { + 0 => HostMessage::MonitorProbe { enable: if reader.read_u8()? 
== 0 { false } else { true }, channel: reader.read_u32()?, probe: reader.read_u8()? @@ -31,13 +63,20 @@ impl HostMessage { channel: reader.read_u32()?, overrd: reader.read_u8()? }, - _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown packet type")) + 3 => HostMessage::MonitorInjection { + enable: if reader.read_u8()? == 0 { false } else { true }, + channel: reader.read_u32()?, + overrd: reader.read_u8()? + }, + ty => return Err(Error::UnknownPacket(ty)) }) } } impl DeviceMessage { - pub fn write_to(&self, writer: &mut Write) -> io::Result<()> { + pub fn write_to(&self, writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized + { match *self { DeviceMessage::MonitorStatus { channel, probe, value } => { writer.write_u8(0)?; diff --git a/artiq/firmware/libproto/rpc_proto.rs b/artiq/firmware/libproto_artiq/rpc_proto.rs similarity index 59% rename from artiq/firmware/libproto/rpc_proto.rs rename to artiq/firmware/libproto_artiq/rpc_proto.rs index 8ff7693a9..84296a62c 100644 --- a/artiq/firmware/libproto/rpc_proto.rs +++ b/artiq/firmware/libproto_artiq/rpc_proto.rs @@ -1,11 +1,16 @@ -use std::io::{self, Read, Write}; -use std::str; +use core::str; +use core::slice; use cslice::{CSlice, CMutSlice}; -use {ReadExt, WriteExt}; +use byteorder::{NetworkEndian, ByteOrder}; +use io::{ProtoRead, Read, Write, ProtoWrite, Error}; use self::tag::{Tag, TagIterator, split_tag}; -unsafe fn recv_value(reader: &mut Read, tag: Tag, data: &mut *mut (), - alloc: &Fn(usize) -> io::Result<*mut ()>) -> io::Result<()> { +unsafe fn recv_value(reader: &mut R, tag: Tag, data: &mut *mut (), + alloc: &Fn(usize) -> Result<*mut (), E>) + -> Result<(), E> + where R: Read + ?Sized, + E: From> +{ macro_rules! consume_value { ($ty:ty, |$ptr:ident| $map:expr) => ({ let $ptr = (*data) as *mut $ty; @@ -44,17 +49,79 @@ unsafe fn recv_value(reader: &mut Read, tag: Tag, data: &mut *mut (), } Ok(()) } - Tag::List(it) | Tag::Array(it) => { + Tag::List(it) => { + #[repr(C)] struct List { elements: *mut (), length: u32 }; consume_value!(List, |ptr| { (*ptr).length = reader.read_u32()?; + let length = (*ptr).length as usize; let tag = it.clone().next().expect("truncated tag"); (*ptr).elements = alloc(tag.size() * (*ptr).length as usize)?; let mut data = (*ptr).elements; - for _ in 0..(*ptr).length as usize { - recv_value(reader, tag, &mut data, alloc)? + match tag { + Tag::Bool => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length); + reader.read_exact(dest)?; + }, + Tag::Int32 => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length * 4); + reader.read_exact(dest)?; + let dest = slice::from_raw_parts_mut(data as *mut i32, length); + NetworkEndian::from_slice_i32(dest); + }, + Tag::Int64 | Tag::Float64 => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length * 8); + reader.read_exact(dest)?; + let dest = slice::from_raw_parts_mut(data as *mut i64, length); + NetworkEndian::from_slice_i64(dest); + }, + _ => { + for _ in 0..length { + recv_value(reader, tag, &mut data, alloc)? 
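
The management and moninj protocols rewritten above share the same framing style: the client sends a fixed magic string once per connection (`b"ARTIQ management\n"`, `b"ARTIQ moninj\n"`), and every packet then starts with a one-byte code, e.g. 1 = `Request::GetLog` on the management port, 0 = `MonitorProbe` and 3 = `MonitorInjection` on the moninj port. The sketch below is not code from this diff; it is a minimal host-side illustration using only `std::io`, and the big-endian `u32` channel field is an assumption based on the `NetworkEndian` usage elsewhere in the firmware.

```rust
use std::io::{self, Cursor, Write};

/// Sketch: frame a management-port GetLog request (packet code 1 above).
fn mgmt_get_log<W: Write>(conn: &mut W) -> io::Result<()> {
    conn.write_all(b"ARTIQ management\n")?; // 17-byte magic checked by read_magic()
    conn.write_all(&[1u8])?;                // 1 = Request::GetLog
    Ok(())
}

/// Sketch: frame a moninj MonitorProbe request (packet code 0 above).
fn moninj_monitor_probe<W: Write>(conn: &mut W, enable: bool,
                                  channel: u32, probe: u8) -> io::Result<()> {
    conn.write_all(&[0u8])?;                 // 0 = HostMessage::MonitorProbe
    conn.write_all(&[enable as u8])?;        // any non-zero byte is treated as true
    conn.write_all(&channel.to_be_bytes())?; // network byte order assumed
    conn.write_all(&[probe])?;
    Ok(())
}

fn main() -> io::Result<()> {
    let mut mgmt = Cursor::new(Vec::new());
    mgmt_get_log(&mut mgmt)?;
    assert_eq!(mgmt.get_ref()[17], 1);       // code byte follows the magic

    let mut moninj = Cursor::new(Vec::new());
    moninj.write_all(b"ARTIQ moninj\n")?;    // 13-byte magic, sent once per connection
    moninj_monitor_probe(&mut moninj, true, 7, 0)?;
    println!("framed {} + {} bytes", mgmt.get_ref().len(), moninj.get_ref().len());
    Ok(())
}
```

The device side is the mirror image: `read_magic()` followed by a dispatch on `read_u8()`, as in the `read_from` implementations above.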
+ } + } + } + Ok(()) + }) + } + Tag::Array(it, num_dims) => { + consume_value!(*mut (), |buffer| { + let mut total_len: u32 = 1; + for _ in 0..num_dims { + let len = reader.read_u32()?; + total_len *= len; + consume_value!(u32, |ptr| *ptr = len ) + } + let length = total_len as usize; + + let elt_tag = it.clone().next().expect("truncated tag"); + *buffer = alloc(elt_tag.size() * total_len as usize)?; + + let mut data = *buffer; + match elt_tag { + Tag::Bool => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length); + reader.read_exact(dest)?; + }, + Tag::Int32 => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length * 4); + reader.read_exact(dest)?; + let dest = slice::from_raw_parts_mut(data as *mut i32, length); + NetworkEndian::from_slice_i32(dest); + }, + Tag::Int64 | Tag::Float64 => { + let dest = slice::from_raw_parts_mut(data as *mut u8, length * 8); + reader.read_exact(dest)?; + let dest = slice::from_raw_parts_mut(data as *mut i64, length); + NetworkEndian::from_slice_i64(dest); + }, + _ => { + for _ in 0..length { + recv_value(reader, elt_tag, &mut data, alloc)? + } + } } Ok(()) }) @@ -71,8 +138,12 @@ unsafe fn recv_value(reader: &mut Read, tag: Tag, data: &mut *mut (), } } -pub fn recv_return(reader: &mut Read, tag_bytes: &[u8], data: *mut (), - alloc: &Fn(usize) -> io::Result<*mut ()>) -> io::Result<()> { +pub fn recv_return(reader: &mut R, tag_bytes: &[u8], data: *mut (), + alloc: &Fn(usize) -> Result<*mut (), E>) + -> Result<(), E> + where R: Read + ?Sized, + E: From> +{ let mut it = TagIterator::new(tag_bytes); #[cfg(feature = "log")] debug!("recv ...->{}", it); @@ -84,7 +155,10 @@ pub fn recv_return(reader: &mut Read, tag_bytes: &[u8], data: *mut (), Ok(()) } -unsafe fn send_value(writer: &mut Write, tag: Tag, data: &mut *const ()) -> io::Result<()> { +unsafe fn send_value(writer: &mut W, tag: Tag, data: &mut *const ()) + -> Result<(), Error> + where W: Write + ?Sized +{ macro_rules! 
consume_value { ($ty:ty, |$ptr:ident| $map:expr) => ({ let $ptr = (*data) as *const $ty; @@ -120,14 +194,78 @@ unsafe fn send_value(writer: &mut Write, tag: Tag, data: &mut *const ()) -> io:: } Ok(()) } - Tag::List(it) | Tag::Array(it) => { + Tag::List(it) => { + #[repr(C)] struct List { elements: *const (), length: u32 }; consume_value!(List, |ptr| { + let length = (*ptr).length as usize; writer.write_u32((*ptr).length)?; let tag = it.clone().next().expect("truncated tag"); let mut data = (*ptr).elements; - for _ in 0..(*ptr).length as usize { - send_value(writer, tag, &mut data)?; + writer.write_u8(tag.as_u8())?; + match tag { + Tag::Bool => { + let slice = slice::from_raw_parts(data as *const u8, length); + writer.write_all(slice)?; + }, + Tag::Int32 => { + let slice = slice::from_raw_parts(data as *const u32, length); + for v in slice.iter() { + writer.write_u32(*v)?; + } + }, + Tag::Int64 | Tag::Float64 => { + let slice = slice::from_raw_parts(data as *const u64, length); + for v in slice.iter() { + writer.write_u64(*v)?; + } + }, + _ => { + for _ in 0..length { + send_value(writer, tag, &mut data)?; + } + } + } + Ok(()) + }) + } + Tag::Array(it, num_dims) => { + writer.write_u8(num_dims)?; + consume_value!(*const(), |buffer| { + let elt_tag = it.clone().next().expect("truncated tag"); + + let mut total_len = 1; + for _ in 0..num_dims { + consume_value!(u32, |len| { + writer.write_u32(*len)?; + total_len *= *len; + }) + } + let length = total_len as usize; + let mut data = *buffer; + writer.write_u8(elt_tag.as_u8())?; + match elt_tag { + Tag::Bool => { + let slice = slice::from_raw_parts(data as *const u8, length); + writer.write_all(slice)?; + }, + Tag::Int32 => { + let slice = slice::from_raw_parts(data as *const u32, length); + for v in slice.iter() { + writer.write_u32(*v)?; + } + }, + Tag::Int64 | Tag::Float64 => { + let slice = slice::from_raw_parts(data as *const u64, length); + for v in slice.iter() { + writer.write_u64(*v)?; + } + }, + _ => { + for _ in 0..length { + send_value(writer, elt_tag, &mut data)?; + } + } } Ok(()) }) @@ -140,17 +278,19 @@ unsafe fn send_value(writer: &mut Write, tag: Tag, data: &mut *const ()) -> io:: Ok(()) } Tag::Keyword(it) => { - struct Keyword<'a> { name: CSlice<'a, u8>, contents: () }; + #[repr(C)] + struct Keyword<'a> { name: CSlice<'a, u8> }; consume_value!(Keyword, |ptr| { writer.write_string(str::from_utf8((*ptr).name.as_ref()).unwrap())?; let tag = it.clone().next().expect("truncated tag"); - let mut data = &(*ptr).contents as *const (); + let mut data = ptr.offset(1) as *const (); send_value(writer, tag, &mut data) }) // Tag::Keyword never appears in composite types, so we don't have // to accurately advance data. 
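
Within rpc_proto, the main functional change in `recv_value`/`send_value` is that lists and arrays of Bool/Int32/Int64/Float64 are no longer transferred element by element: the receive path does a single `read_exact` straight into the destination buffer and then fixes the byte order in place (byteorder's `NetworkEndian::from_slice_i32`/`_i64`), and for the new multi-dimensional array case the element count is the product of the per-dimension lengths read first. A rough std-only sketch of that receive fast path for i32 data, shown purely for illustration:

```rust
use std::io::{self, Read};

/// Std-only sketch of the bulk path: read `len` big-endian i32s in one
/// read_exact, then fix the byte order (what from_slice_i32 does in place).
fn read_i32_array<R: Read>(reader: &mut R, len: usize) -> io::Result<Vec<i32>> {
    let mut raw = vec![0u8; len * 4];
    reader.read_exact(&mut raw)?; // one bulk read instead of `len` small reads
    let mut out = Vec::with_capacity(len);
    for chunk in raw.chunks_exact(4) {
        out.push(i32::from_be_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]));
    }
    Ok(out)
}

fn main() -> io::Result<()> {
    // Two big-endian i32s on the "wire": 1 and -2.
    let wire: [u8; 8] = [0, 0, 0, 1, 0xff, 0xff, 0xff, 0xfe];
    let values = read_i32_array(&mut &wire[..], 2)?;
    assert_eq!(values, [1, -2]);
    println!("{:?}", values);
    Ok(())
}
```

The send path in the hunk above is symmetrical: one `write_all` for Bool data, and a per-element `write_u32`/`write_u64` loop for the wider types.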
} Tag::Object => { + #[repr(C)] struct Object { id: u32 }; consume_value!(*const Object, |ptr| writer.write_u32((**ptr).id)) @@ -158,8 +298,10 @@ unsafe fn send_value(writer: &mut Write, tag: Tag, data: &mut *const ()) -> io:: } } -pub fn send_args(writer: &mut Write, service: u32, tag_bytes: &[u8], - data: *const *const ()) -> io::Result<()> { +pub fn send_args(writer: &mut W, service: u32, tag_bytes: &[u8], data: *const *const ()) + -> Result<(), Error> + where W: Write + ?Sized +{ let (arg_tags_bytes, return_tag_bytes) = split_tag(tag_bytes); let mut args_it = TagIterator::new(arg_tags_bytes); @@ -209,7 +351,7 @@ mod tag { ByteArray, Tuple(TagIterator<'a>, u8), List(TagIterator<'a>), - Array(TagIterator<'a>), + Array(TagIterator<'a>, u8), Range(TagIterator<'a>), Keyword(TagIterator<'a>), Object @@ -228,7 +370,7 @@ mod tag { Tag::ByteArray => b'A', Tag::Tuple(_, _) => b't', Tag::List(_) => b'l', - Tag::Array(_) => b'a', + Tag::Array(_, _) => b'a', Tag::Range(_) => b'r', Tag::Keyword(_) => b'k', Tag::Object => b'O', @@ -242,19 +384,20 @@ mod tag { Tag::Int32 => 4, Tag::Int64 => 8, Tag::Float64 => 8, - Tag::String => 4, - Tag::Bytes => 4, - Tag::ByteArray => 4, + Tag::String => 8, + Tag::Bytes => 8, + Tag::ByteArray => 8, Tag::Tuple(it, arity) => { let mut size = 0; + let mut it = it.clone(); for _ in 0..arity { - let tag = it.clone().next().expect("truncated tag"); + let tag = it.next().expect("truncated tag"); size += tag.size(); } size } Tag::List(_) => 8, - Tag::Array(_) => 8, + Tag::Array(_, num_dims) => 4 * (1 + num_dims as usize), Tag::Range(it) => { let tag = it.clone().next().expect("truncated tag"); tag.size() * 3 @@ -297,7 +440,11 @@ mod tag { Tag::Tuple(self.sub(count), count) } b'l' => Tag::List(self.sub(1)), - b'a' => Tag::Array(self.sub(1)), + b'a' => { + let count = self.data[0]; + self.data = &self.data[1..]; + Tag::Array(self.sub(1), count) + } b'r' => Tag::Range(self.sub(1)), b'k' => Tag::Keyword(self.sub(1)), b'O' => Tag::Object, @@ -352,10 +499,10 @@ mod tag { it.fmt(f)?; write!(f, ")")?; } - Tag::Array(it) => { + Tag::Array(it, num_dims) => { write!(f, "Array(")?; it.fmt(f)?; - write!(f, ")")?; + write!(f, ", {})", num_dims)?; } Tag::Range(it) => { write!(f, "Range(")?; diff --git a/artiq/firmware/libproto/session_proto.rs b/artiq/firmware/libproto_artiq/session_proto.rs similarity index 67% rename from artiq/firmware/libproto/session_proto.rs rename to artiq/firmware/libproto_artiq/session_proto.rs index c852f3918..db353e874 100644 --- a/artiq/firmware/libproto/session_proto.rs +++ b/artiq/firmware/libproto_artiq/session_proto.rs @@ -1,9 +1,52 @@ -use std::io::{self, Read, Write}; -use std::vec::Vec; -use std::string::String; -use {ReadExt, WriteExt}; +use core::str::Utf8Error; +use alloc::{Vec, String}; -fn read_sync(reader: &mut Read) -> io::Result<()> { +use io::{Read, ProtoRead, Write, ProtoWrite, Error as IoError, ReadStringError}; + +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "incorrect magic")] + WrongMagic, + #[fail(display = "unknown packet {:#02x}", _0)] + UnknownPacket(u8), + #[fail(display = "invalid UTF-8: {}", _0)] + Utf8(Utf8Error), + #[fail(display = "{}", _0)] + Io(#[cause] IoError) +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Io(value) + } +} + +impl From>> for Error { + fn from(value: ReadStringError>) -> Error { + match value { + ReadStringError::Utf8(err) => Error::Utf8(err), + ReadStringError::Other(err) => Error::Io(err) + } + } +} + +pub fn read_magic(reader: &mut R) -> Result<(), Error> + where 
R: Read + ?Sized +{ + const MAGIC: &'static [u8] = b"ARTIQ coredev\n"; + + let mut magic: [u8; 14] = [0; 14]; + reader.read_exact(&mut magic)?; + if magic != MAGIC { + Err(Error::WrongMagic) + } else { + Ok(()) + } +} + +fn read_sync(reader: &mut R) -> Result<(), IoError> + where R: Read + ?Sized +{ let mut sync = [0; 4]; for i in 0.. { sync[i % 4] = reader.read_u8()?; @@ -12,14 +55,15 @@ fn read_sync(reader: &mut Read) -> io::Result<()> { Ok(()) } -fn write_sync(writer: &mut Write) -> io::Result<()> { +fn write_sync(writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized +{ writer.write_all(&[0x5a; 4]) } #[derive(Debug)] pub enum Request { SystemInfo, - SwitchClock(u8), LoadKernel(Vec), RunKernel, @@ -34,49 +78,6 @@ pub enum Request { column: u32, function: String, }, - - FlashRead { key: String }, - FlashWrite { key: String, value: Vec }, - FlashRemove { key: String }, - FlashErase, -} - -impl Request { - pub fn read_from(reader: &mut Read) -> io::Result { - read_sync(reader)?; - Ok(match reader.read_u8()? { - 3 => Request::SystemInfo, - 4 => Request::SwitchClock(reader.read_u8()?), - 5 => Request::LoadKernel(reader.read_bytes()?), - 6 => Request::RunKernel, - 7 => Request::RpcReply { - tag: reader.read_bytes()? - }, - 8 => Request::RpcException { - name: reader.read_string()?, - message: reader.read_string()?, - param: [reader.read_u64()? as i64, - reader.read_u64()? as i64, - reader.read_u64()? as i64], - file: reader.read_string()?, - line: reader.read_u32()?, - column: reader.read_u32()?, - function: reader.read_string()? - }, - 9 => Request::FlashRead { - key: reader.read_string()? - }, - 10 => Request::FlashWrite { - key: reader.read_string()?, - value: reader.read_bytes()? - }, - 11 => Request::FlashErase, - 12 => Request::FlashRemove { - key: reader.read_string()? - }, - _ => return Err(io::Error::new(io::ErrorKind::InvalidData, "unknown request type")) - }) - } } #[derive(Debug)] @@ -85,8 +86,6 @@ pub enum Reply<'a> { ident: &'a str, finished_cleanly: bool }, - ClockSwitchCompleted, - ClockSwitchFailed, LoadCompleted, LoadFailed(&'a str), @@ -106,16 +105,44 @@ pub enum Reply<'a> { RpcRequest { async: bool }, - FlashRead(&'a [u8]), - FlashOk, - FlashError, - - WatchdogExpired, ClockFailure, } +impl Request { + pub fn read_from(reader: &mut R) -> Result> + where R: Read + ?Sized + { + read_sync(reader)?; + Ok(match reader.read_u8()? { + 3 => Request::SystemInfo, + + 5 => Request::LoadKernel(reader.read_bytes()?), + 6 => Request::RunKernel, + + 7 => Request::RpcReply { + tag: reader.read_bytes()? + }, + 8 => Request::RpcException { + name: reader.read_string()?, + message: reader.read_string()?, + param: [reader.read_u64()? as i64, + reader.read_u64()? as i64, + reader.read_u64()? as i64], + file: reader.read_string()?, + line: reader.read_u32()?, + column: reader.read_u32()?, + function: reader.read_string()? 
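
Returning briefly to the rpc_proto tag grammar changed above: each type is a single ASCII byte (`'t'` tuple, `'l'` list, `'a'` array, `'r'` range, `'k'` keyword, `'O'` object, ...) followed by its sub-tags, and the `'a'` tag now carries an explicit dimension-count byte before the element tag; the slot reserved for an array argument becomes `4 * (1 + num_dims)` bytes (a buffer pointer plus one u32 length per dimension on this 32-bit target). The stand-alone helpers below are hypothetical and exist only to make that byte layout concrete; the `b'i'` element tag byte is an assumption for illustration, since the per-type tag characters live in the unchanged part of the tag module.

```rust
/// Hypothetical parser mirroring the new rule for 'a': the byte after 'a'
/// is the number of dimensions, then the element tag follows.
fn parse_array_tag(tag: &[u8]) -> Option<(u8, u8)> {
    match tag {
        [b'a', num_dims, elem, ..] => Some((*num_dims, *elem)),
        _ => None,
    }
}

/// Size of the on-stack slot for an array argument, as in the diff:
/// one 32-bit buffer pointer plus one u32 length per dimension.
fn array_slot_size(num_dims: u8) -> usize {
    4 * (1 + num_dims as usize)
}

fn main() {
    // 'a' + 2 dimensions + element tag (assumed here to denote int32).
    let tag = [b'a', 2u8, b'i'];
    let (dims, elem) = parse_array_tag(&tag).unwrap();
    assert_eq!((dims, elem), (2, b'i'));
    assert_eq!(array_slot_size(dims), 12); // pointer + two u32 lengths
    println!("dims = {}, element tag = {}", dims, elem as char);
}
```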
+ }, + + ty => return Err(Error::UnknownPacket(ty)) + }) + } +} + impl<'a> Reply<'a> { - pub fn write_to(&self, writer: &mut Write) -> io::Result<()> { + pub fn write_to(&self, writer: &mut W) -> Result<(), IoError> + where W: Write + ?Sized + { write_sync(writer)?; match *self { Reply::SystemInfo { ident, finished_cleanly } => { @@ -124,12 +151,6 @@ impl<'a> Reply<'a> { writer.write_string(ident)?; writer.write_u8(finished_cleanly as u8)?; }, - Reply::ClockSwitchCompleted => { - writer.write_u8(3)?; - }, - Reply::ClockSwitchFailed => { - writer.write_u8(4)?; - }, Reply::LoadCompleted => { writer.write_u8(5)?; @@ -169,20 +190,6 @@ impl<'a> Reply<'a> { writer.write_u8(async as u8)?; }, - Reply::FlashRead(ref bytes) => { - writer.write_u8(11)?; - writer.write_bytes(bytes)?; - }, - Reply::FlashOk => { - writer.write_u8(12)?; - }, - Reply::FlashError => { - writer.write_u8(13)?; - }, - - Reply::WatchdogExpired => { - writer.write_u8(14)?; - }, Reply::ClockFailure => { writer.write_u8(15)?; }, diff --git a/artiq/firmware/libstd_artiq/Cargo.toml b/artiq/firmware/libstd_artiq/Cargo.toml deleted file mode 100644 index bc7034f51..000000000 --- a/artiq/firmware/libstd_artiq/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -authors = ["M-Labs"] -name = "std_artiq" -version = "0.0.0" - -[lib] -name = "std_artiq" -path = "lib.rs" - -[features] -alloc = [] -io_error_alloc = [] diff --git a/artiq/firmware/libstd_artiq/error.rs b/artiq/firmware/libstd_artiq/error.rs deleted file mode 100644 index 5e4e0ead1..000000000 --- a/artiq/firmware/libstd_artiq/error.rs +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Traits for working with Errors. -//! -//! # The `Error` trait -//! -//! `Error` is a trait representing the basic expectations for error values, -//! i.e. values of type `E` in `Result`. At a minimum, errors must provide -//! a description, but they may optionally provide additional detail (via -//! `Display`) and cause chain information: -//! -//! ``` -//! use std::fmt::Display; -//! -//! trait Error: Display { -//! fn description(&self) -> &str; -//! -//! fn cause(&self) -> Option<&Error> { None } -//! } -//! ``` -//! -//! The `cause` method is generally used when errors cross "abstraction -//! boundaries", i.e. when a one module must report an error that is "caused" -//! by an error from a lower-level module. This setup makes it possible for the -//! high-level module to provide its own errors that do not commit to any -//! particular implementation, but also reveal some of its implementation for -//! debugging via `cause` chains. - -// A note about crates and the facade: -// -// Originally, the `Error` trait was defined in libcore, and the impls -// were scattered about. However, coherence objected to this -// arrangement, because to create the blanket impls for `Box` required -// knowing that `&str: !Error`, and we have no means to deal with that -// sort of conflict just now. Therefore, for the time being, we have -// moved the `Error` trait into libstd. As we evolve a sol'n to the -// coherence challenge (e.g., specialization, neg impls, etc) we can -// reconsider what crate these items belong in. 
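
To summarize the session protocol after this cleanup: the host sends the 14-byte magic `b"ARTIQ coredev\n"` once, each subsequent packet is preceded by the `0x5a 0x5a 0x5a 0x5a` sync marker that `read_sync` scans for and `write_sync` emits, and the clock-switch, flash and watchdog packets are gone, leaving SystemInfo (3), LoadKernel (5), RunKernel (6), RpcReply (7) and RpcException (8). The host-side framing sketch below is purely illustrative and std-only; the u32 length prefix in front of the kernel image is an assumption about `read_bytes()`, not something visible in this hunk.

```rust
use std::io::{self, Cursor, Write};

const SYNC: [u8; 4] = [0x5a; 4]; // emitted by write_sync(), scanned for by read_sync()

/// Hypothetical host-side framing: magic once, then sync + code byte per packet.
fn run_kernel<W: Write>(conn: &mut W, kernel_image: &[u8]) -> io::Result<()> {
    conn.write_all(b"ARTIQ coredev\n")?;                          // 14-byte magic
    conn.write_all(&SYNC)?;
    conn.write_all(&[5u8])?;                                      // 5 = Request::LoadKernel
    conn.write_all(&(kernel_image.len() as u32).to_be_bytes())?;  // length prefix assumed for read_bytes()
    conn.write_all(kernel_image)?;
    conn.write_all(&SYNC)?;
    conn.write_all(&[6u8])?;                                      // 6 = Request::RunKernel
    Ok(())
}

fn main() -> io::Result<()> {
    let mut conn = Cursor::new(Vec::new());
    run_kernel(&mut conn, b"\x7fELF...")?;                        // placeholder payload
    println!("framed {} bytes", conn.into_inner().len());
    Ok(())
}
```

On the device, `Request::read_from` above consumes the sync marker, reads the code byte and dispatches; replies are framed the same way by `Reply::write_to`.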
- -use any::TypeId; -use boxed::Box; -use cell; -use fmt::{self, Debug, Display}; -use marker::{Send, Sync}; -use mem::transmute; -use num; -use core::raw::TraitObject; -use str; -use string::{self, String}; - -/// Base functionality for all errors in Rust. -pub trait Error: Debug + Display { - /// A short description of the error. - /// - /// The description should not contain newlines or sentence-ending - /// punctuation, to facilitate embedding in larger user-facing - /// strings. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// - /// match "xc".parse::() { - /// Err(e) => { - /// println!("Error: {}", e.description()); - /// } - /// _ => println!("No error"), - /// } - /// ``` - fn description(&self) -> &str; - - /// The lower-level cause of this error, if any. - /// - /// # Examples - /// - /// ``` - /// use std::error::Error; - /// use std::fmt; - /// - /// #[derive(Debug)] - /// struct SuperError { - /// side: SuperErrorSideKick, - /// } - /// - /// impl fmt::Display for SuperError { - /// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - /// write!(f, "SuperError is here!") - /// } - /// } - /// - /// impl Error for SuperError { - /// fn description(&self) -> &str { - /// "I'm the superhero of errors!" - /// } - /// - /// fn cause(&self) -> Option<&Error> { - /// Some(&self.side) - /// } - /// } - /// - /// #[derive(Debug)] - /// struct SuperErrorSideKick; - /// - /// impl fmt::Display for SuperErrorSideKick { - /// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - /// write!(f, "SuperErrorSideKick is here!") - /// } - /// } - /// - /// impl Error for SuperErrorSideKick { - /// fn description(&self) -> &str { - /// "I'm SuperError side kick!" - /// } - /// } - /// - /// fn get_super_error() -> Result<(), SuperError> { - /// Err(SuperError { side: SuperErrorSideKick }) - /// } - /// - /// fn main() { - /// match get_super_error() { - /// Err(e) => { - /// println!("Error: {}", e.description()); - /// println!("Caused by: {}", e.cause().unwrap()); - /// } - /// _ => println!("No error"), - /// } - /// } - /// ``` - fn cause(&self) -> Option<&Error> { None } - - /// Get the `TypeId` of `self` - #[doc(hidden)] - fn type_id(&self) -> TypeId where Self: 'static { - TypeId::of::() - } -} - -impl<'a, E: Error + 'a> From for Box { - fn from(err: E) -> Box { - Box::new(err) - } -} - -impl<'a, E: Error + Send + Sync + 'a> From for Box { - fn from(err: E) -> Box { - Box::new(err) - } -} - -impl From for Box { - fn from(err: String) -> Box { - #[derive(Debug)] - struct StringError(String); - - impl Error for StringError { - fn description(&self) -> &str { &self.0 } - } - - impl Display for StringError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.0, f) - } - } - - Box::new(StringError(err)) - } -} - -impl From for Box { - fn from(str_err: String) -> Box { - let err1: Box = From::from(str_err); - let err2: Box = err1; - err2 - } -} - -impl<'a, 'b> From<&'b str> for Box { - fn from(err: &'b str) -> Box { - From::from(String::from(err)) - } -} - -impl<'a> From<&'a str> for Box { - fn from(err: &'a str) -> Box { - From::from(String::from(err)) - } -} - -impl Error for str::ParseBoolError { - fn description(&self) -> &str { "failed to parse bool" } -} - -impl Error for str::Utf8Error { - fn description(&self) -> &str { - "invalid utf-8: corrupt contents" - } -} - -impl Error for num::ParseIntError { - fn description(&self) -> &str { - self.__description() - } -} - -impl Error for num::TryFromIntError { - fn description(&self) 
-> &str { - self.__description() - } -} - -impl Error for num::ParseFloatError { - fn description(&self) -> &str { - self.__description() - } -} - -impl Error for string::FromUtf8Error { - fn description(&self) -> &str { - "invalid utf-8" - } -} - -impl Error for string::FromUtf16Error { - fn description(&self) -> &str { - "invalid utf-16" - } -} - -impl Error for string::ParseError { - fn description(&self) -> &str { - match *self {} - } -} - -impl Error for Box { - fn description(&self) -> &str { - Error::description(&**self) - } - - fn cause(&self) -> Option<&Error> { - Error::cause(&**self) - } -} - -impl Error for fmt::Error { - fn description(&self) -> &str { - "an error occurred when formatting an argument" - } -} - -impl Error for cell::BorrowError { - fn description(&self) -> &str { - "already mutably borrowed" - } -} - -impl Error for cell::BorrowMutError { - fn description(&self) -> &str { - "already borrowed" - } -} - -// copied from any.rs -impl Error + 'static { - /// Returns true if the boxed type is the same as `T` - #[inline] - pub fn is(&self) -> bool { - // Get TypeId of the type this function is instantiated with - let t = TypeId::of::(); - - // Get TypeId of the type in the trait object - let boxed = self.type_id(); - - // Compare both TypeIds on equality - t == boxed - } - - /// Returns some reference to the boxed value if it is of type `T`, or - /// `None` if it isn't. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - if self.is::() { - unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&*(to.data as *const T)) - } - } else { - None - } - } - - /// Returns some mutable reference to the boxed value if it is of type `T`, or - /// `None` if it isn't. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - if self.is::() { - unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&mut *(to.data as *const T as *mut T)) - } - } else { - None - } - } -} - -impl Error + 'static + Send { - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn is(&self) -> bool { - ::is::(self) - } - - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - ::downcast_ref::(self) - } - - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - ::downcast_mut::(self) - } -} - -impl Error + 'static + Send + Sync { - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn is(&self) -> bool { - ::is::(self) - } - - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn downcast_ref(&self) -> Option<&T> { - ::downcast_ref::(self) - } - - /// Forwards to the method defined on the type `Any`. - #[inline] - pub fn downcast_mut(&mut self) -> Option<&mut T> { - ::downcast_mut::(self) - } -} - -impl Error { - #[inline] - /// Attempt to downcast the box to a concrete type. - pub fn downcast(self: Box) -> Result, Box> { - if self.is::() { - unsafe { - // Get the raw representation of the trait object - let raw = Box::into_raw(self); - let to: TraitObject = - transmute::<*mut Error, TraitObject>(raw); - - // Extract the data pointer - Ok(Box::from_raw(to.data as *mut T)) - } - } else { - Err(self) - } - } -} - -impl Error + Send { - #[inline] - /// Attempt to downcast the box to a concrete type. 
- pub fn downcast(self: Box) - -> Result, Box> { - let err: Box = self; - ::downcast(err).map_err(|s| unsafe { - // reapply the Send marker - transmute::, Box>(s) - }) - } -} - -impl Error + Send + Sync { - #[inline] - /// Attempt to downcast the box to a concrete type. - pub fn downcast(self: Box) - -> Result, Box> { - let err: Box = self; - ::downcast(err).map_err(|s| unsafe { - // reapply the Send+Sync marker - transmute::, Box>(s) - }) - } -} - -#[cfg(test)] -mod tests { - use prelude::v1::*; - use super::Error; - use fmt; - - #[derive(Debug, PartialEq)] - struct A; - #[derive(Debug, PartialEq)] - struct B; - - impl fmt::Display for A { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "A") - } - } - impl fmt::Display for B { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "B") - } - } - - impl Error for A { - fn description(&self) -> &str { "A-desc" } - } - impl Error for B { - fn description(&self) -> &str { "A-desc" } - } - - #[test] - fn downcasting() { - let mut a = A; - let mut a = &mut a as &mut (Error + 'static); - assert_eq!(a.downcast_ref::(), Some(&A)); - assert_eq!(a.downcast_ref::(), None); - assert_eq!(a.downcast_mut::(), Some(&mut A)); - assert_eq!(a.downcast_mut::(), None); - - let a: Box = Box::new(A); - match a.downcast::() { - Ok(..) => panic!("expected error"), - Err(e) => assert_eq!(*e.downcast::().unwrap(), A), - } - } -} diff --git a/artiq/firmware/libstd_artiq/io/buffered.rs b/artiq/firmware/libstd_artiq/io/buffered.rs deleted file mode 100644 index d0e7dbf72..000000000 --- a/artiq/firmware/libstd_artiq/io/buffered.rs +++ /dev/null @@ -1,1108 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Buffering wrappers for I/O traits - -use core::prelude::v1::*; -use io::prelude::*; - -use core::cmp; -use core::fmt; -use io::{self, DEFAULT_BUF_SIZE, Error, ErrorKind, SeekFrom}; -use io::memchr; -use alloc::boxed::Box; -use alloc::vec::Vec; - -/// The `BufReader` struct adds buffering to any reader. -/// -/// It can be excessively inefficient to work directly with a `Read` instance. -/// For example, every call to `read` on `TcpStream` results in a system call. -/// A `BufReader` performs large, infrequent reads on the underlying `Read` -/// and maintains an in-memory buffer of the results. -/// -/// # Examples -/// -/// ``` -/// use std::io::prelude::*; -/// use std::io::BufReader; -/// use std::fs::File; -/// -/// # fn foo() -> std::io::Result<()> { -/// let mut f = try!(File::open("log.txt")); -/// let mut reader = BufReader::new(f); -/// -/// let mut line = String::new(); -/// let len = try!(reader.read_line(&mut line)); -/// println!("First line is {} bytes long", len); -/// # Ok(()) -/// # } -/// ``` -pub struct BufReader { - inner: R, - buf: Box<[u8]>, - pos: usize, - cap: usize, -} - -impl BufReader { - /// Creates a new `BufReader` with a default buffer capacity. 
- /// - /// # Examples - /// - /// ``` - /// use std::io::BufReader; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut f = try!(File::open("log.txt")); - /// let mut reader = BufReader::new(f); - /// # Ok(()) - /// # } - /// ``` - pub fn new(inner: R) -> BufReader { - BufReader::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufReader` with the specified buffer capacity. - /// - /// # Examples - /// - /// Creating a buffer with ten bytes of capacity: - /// - /// ``` - /// use std::io::BufReader; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut f = try!(File::open("log.txt")); - /// let mut reader = BufReader::with_capacity(10, f); - /// # Ok(()) - /// # } - /// ``` - pub fn with_capacity(cap: usize, inner: R) -> BufReader { - BufReader { - inner: inner, - buf: vec![0; cap].into_boxed_slice(), - pos: 0, - cap: 0, - } - } - - /// Gets a reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - /// - /// # Examples - /// - /// ``` - /// use std::io::BufReader; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut f1 = try!(File::open("log.txt")); - /// let mut reader = BufReader::new(f1); - /// - /// let f2 = reader.get_ref(); - /// # Ok(()) - /// # } - /// ``` - pub fn get_ref(&self) -> &R { &self.inner } - - /// Gets a mutable reference to the underlying reader. - /// - /// It is inadvisable to directly read from the underlying reader. - /// - /// # Examples - /// - /// ``` - /// use std::io::BufReader; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut f1 = try!(File::open("log.txt")); - /// let mut reader = BufReader::new(f1); - /// - /// let f2 = reader.get_mut(); - /// # Ok(()) - /// # } - /// ``` - pub fn get_mut(&mut self) -> &mut R { &mut self.inner } - - /// Unwraps this `BufReader`, returning the underlying reader. - /// - /// Note that any leftover data in the internal buffer is lost. - /// - /// # Examples - /// - /// ``` - /// use std::io::BufReader; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut f1 = try!(File::open("log.txt")); - /// let mut reader = BufReader::new(f1); - /// - /// let f2 = reader.into_inner(); - /// # Ok(()) - /// # } - /// ``` - pub fn into_inner(self) -> R { self.inner } -} - -impl Read for BufReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - // If we don't have any buffered data and we're doing a massive read - // (larger than our internal buffer), bypass our internal buffer - // entirely. - if self.pos == self.cap && buf.len() >= self.buf.len() { - return self.inner.read(buf); - } - let nread = { - let mut rem = self.fill_buf()?; - rem.read(buf)? - }; - self.consume(nread); - Ok(nread) - } -} - -impl BufRead for BufReader { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - // If we've reached the end of our internal buffer then we need to fetch - // some more data from the underlying reader. 
- if self.pos == self.cap { - self.cap = self.inner.read(&mut self.buf)?; - self.pos = 0; - } - Ok(&self.buf[self.pos..self.cap]) - } - - fn consume(&mut self, amt: usize) { - self.pos = cmp::min(self.pos + amt, self.cap); - } -} - -impl fmt::Debug for BufReader where R: fmt::Debug { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("BufReader") - .field("reader", &self.inner) - .field("buffer", &format_args!("{}/{}", self.cap - self.pos, self.buf.len())) - .finish() - } -} - -impl Seek for BufReader { - /// Seek to an offset, in bytes, in the underlying reader. - /// - /// The position used for seeking with `SeekFrom::Current(_)` is the - /// position the underlying reader would be at if the `BufReader` had no - /// internal buffer. - /// - /// Seeking always discards the internal buffer, even if the seek position - /// would otherwise fall within it. This guarantees that calling - /// `.unwrap()` immediately after a seek yields the underlying reader at - /// the same position. - /// - /// See `std::io::Seek` for more details. - /// - /// Note: In the edge case where you're seeking with `SeekFrom::Current(n)` - /// where `n` minus the internal buffer length underflows an `i64`, two - /// seeks will be performed instead of one. If the second seek returns - /// `Err`, the underlying reader will be left at the same position it would - /// have if you seeked to `SeekFrom::Current(0)`. - fn seek(&mut self, pos: SeekFrom) -> io::Result { - let result: u64; - if let SeekFrom::Current(n) = pos { - let remainder = (self.cap - self.pos) as i64; - // it should be safe to assume that remainder fits within an i64 as the alternative - // means we managed to allocate 8 ebibytes and that's absurd. - // But it's not out of the realm of possibility for some weird underlying reader to - // support seeking by i64::min_value() so we need to handle underflow when subtracting - // remainder. - if let Some(offset) = n.checked_sub(remainder) { - result = self.inner.seek(SeekFrom::Current(offset))?; - } else { - // seek backwards by our remainder, and then by the offset - self.inner.seek(SeekFrom::Current(-remainder))?; - self.pos = self.cap; // empty the buffer - result = self.inner.seek(SeekFrom::Current(n))?; - } - } else { - // Seeking with Start/End doesn't care about our buffer length. - result = self.inner.seek(pos)?; - } - self.pos = self.cap; // empty the buffer - Ok(result) - } -} - -/// Wraps a writer and buffers its output. -/// -/// It can be excessively inefficient to work directly with something that -/// implements `Write`. For example, every call to `write` on `TcpStream` -/// results in a system call. A `BufWriter` keeps an in-memory buffer of data -/// and writes it to an underlying writer in large, infrequent batches. -/// -/// The buffer will be written out when the writer is dropped. -/// -/// # Examples -/// -/// Let's write the numbers one through ten to a `TcpStream`: -/// -/// ```no_run -/// use std::io::prelude::*; -/// use std::net::TcpStream; -/// -/// let mut stream = TcpStream::connect("127.0.0.1:34254").unwrap(); -/// -/// for i in 1..10 { -/// stream.write(&[i]).unwrap(); -/// } -/// ``` -/// -/// Because we're not buffering, we write each one in turn, incurring the -/// overhead of a system call per byte written. 
We can fix this with a -/// `BufWriter`: -/// -/// ```no_run -/// use std::io::prelude::*; -/// use std::io::BufWriter; -/// use std::net::TcpStream; -/// -/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); -/// -/// for i in 1..10 { -/// stream.write(&[i]).unwrap(); -/// } -/// ``` -/// -/// By wrapping the stream with a `BufWriter`, these ten writes are all grouped -/// together by the buffer, and will all be written out in one system call when -/// the `stream` is dropped. -pub struct BufWriter { - inner: Option, - buf: Vec, - // #30888: If the inner writer panics in a call to write, we don't want to - // write the buffered data a second time in BufWriter's destructor. This - // flag tells the Drop impl if it should skip the flush. - panicked: bool, -} - -/// An error returned by `into_inner` which combines an error that -/// happened while writing out the buffer, and the buffered writer object -/// which may be used to recover from the condition. -/// -/// # Examples -/// -/// ```no_run -/// use std::io::BufWriter; -/// use std::net::TcpStream; -/// -/// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); -/// -/// // do stuff with the stream -/// -/// // we want to get our `TcpStream` back, so let's try: -/// -/// let stream = match stream.into_inner() { -/// Ok(s) => s, -/// Err(e) => { -/// // Here, e is an IntoInnerError -/// panic!("An error occurred"); -/// } -/// }; -/// ``` -#[derive(Debug)] -pub struct IntoInnerError(W, Error); - -impl BufWriter { - /// Creates a new `BufWriter` with a default buffer capacity. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// ``` - pub fn new(inner: W) -> BufWriter { - BufWriter::with_capacity(DEFAULT_BUF_SIZE, inner) - } - - /// Creates a new `BufWriter` with the specified buffer capacity. - /// - /// # Examples - /// - /// Creating a buffer with a buffer of a hundred bytes. - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let stream = TcpStream::connect("127.0.0.1:34254").unwrap(); - /// let mut buffer = BufWriter::with_capacity(100, stream); - /// ``` - pub fn with_capacity(cap: usize, inner: W) -> BufWriter { - BufWriter { - inner: Some(inner), - buf: Vec::with_capacity(cap), - panicked: false, - } - } - - fn flush_buf(&mut self) -> io::Result<()> { - let mut written = 0; - let len = self.buf.len(); - let mut ret = Ok(()); - while written < len { - self.panicked = true; - let r = self.inner.as_mut().unwrap().write(&self.buf[written..]); - self.panicked = false; - - match r { - Ok(0) => { - ret = Err(Error::new(ErrorKind::WriteZero, - "failed to write the buffered data")); - break; - } - Ok(n) => written += n, - Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} - Err(e) => { ret = Err(e); break } - - } - } - if written > 0 { - self.buf.drain(..written); - } - ret - } - - /// Gets a reference to the underlying writer. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// - /// // we can use reference just like buffer - /// let reference = buffer.get_ref(); - /// ``` - pub fn get_ref(&self) -> &W { self.inner.as_ref().unwrap() } - - /// Gets a mutable reference to the underlying writer. 
- /// - /// It is inadvisable to directly write to the underlying writer. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// - /// // we can use reference just like buffer - /// let reference = buffer.get_mut(); - /// ``` - pub fn get_mut(&mut self) -> &mut W { self.inner.as_mut().unwrap() } - - /// Unwraps this `BufWriter`, returning the underlying writer. - /// - /// The buffer is written out before returning the writer. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut buffer = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// - /// // unwrap the TcpStream and flush the buffer - /// let stream = buffer.into_inner().unwrap(); - /// ``` - pub fn into_inner(mut self) -> Result>> { - match self.flush_buf() { - Err(e) => Err(IntoInnerError(self, e)), - Ok(()) => Ok(self.inner.take().unwrap()) - } - } -} - -impl Write for BufWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - if self.buf.len() + buf.len() > self.buf.capacity() { - self.flush_buf()?; - } - if buf.len() >= self.buf.capacity() { - self.panicked = true; - let r = self.inner.as_mut().unwrap().write(buf); - self.panicked = false; - r - } else { - let amt = cmp::min(buf.len(), self.buf.capacity()); - Write::write(&mut self.buf, &buf[..amt]) - } - } - fn flush(&mut self) -> io::Result<()> { - self.flush_buf().and_then(|()| self.get_mut().flush()) - } -} - -impl fmt::Debug for BufWriter where W: fmt::Debug { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("BufWriter") - .field("writer", &self.inner.as_ref().unwrap()) - .field("buffer", &format_args!("{}/{}", self.buf.len(), self.buf.capacity())) - .finish() - } -} - -impl Seek for BufWriter { - /// Seek to the offset, in bytes, in the underlying writer. - /// - /// Seeking always writes out the internal buffer before seeking. - fn seek(&mut self, pos: SeekFrom) -> io::Result { - self.flush_buf().and_then(|_| self.get_mut().seek(pos)) - } -} - -impl Drop for BufWriter { - fn drop(&mut self) { - if self.inner.is_some() && !self.panicked { - // dtors should not panic, so we ignore a failed flush - let _r = self.flush_buf(); - } - } -} - -impl IntoInnerError { - /// Returns the error which caused the call to `into_inner()` to fail. - /// - /// This error was returned when attempting to write the internal buffer. - /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// - /// // do stuff with the stream - /// - /// // we want to get our `TcpStream` back, so let's try: - /// - /// let stream = match stream.into_inner() { - /// Ok(s) => s, - /// Err(e) => { - /// // Here, e is an IntoInnerError, let's log the inner error. - /// // - /// // We'll just 'log' to stdout for this example. - /// println!("{}", e.error()); - /// - /// panic!("An unexpected error occurred."); - /// } - /// }; - /// ``` - pub fn error(&self) -> &Error { &self.1 } - - /// Returns the buffered writer instance which generated the error. - /// - /// The returned object can be used for error recovery, such as - /// re-inspecting the buffer. 
- /// - /// # Examples - /// - /// ```no_run - /// use std::io::BufWriter; - /// use std::net::TcpStream; - /// - /// let mut stream = BufWriter::new(TcpStream::connect("127.0.0.1:34254").unwrap()); - /// - /// // do stuff with the stream - /// - /// // we want to get our `TcpStream` back, so let's try: - /// - /// let stream = match stream.into_inner() { - /// Ok(s) => s, - /// Err(e) => { - /// // Here, e is an IntoInnerError, let's re-examine the buffer: - /// let buffer = e.into_inner(); - /// - /// // do stuff to try to recover - /// - /// // afterwards, let's just return the stream - /// buffer.into_inner().unwrap() - /// } - /// }; - /// ``` - pub fn into_inner(self) -> W { self.0 } -} - -impl From> for Error { - fn from(iie: IntoInnerError) -> Error { iie.1 } -} - -impl fmt::Display for IntoInnerError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.error().fmt(f) - } -} - -/// Wraps a writer and buffers output to it, flushing whenever a newline -/// (`0x0a`, `'\n'`) is detected. -/// -/// The [`BufWriter`][bufwriter] struct wraps a writer and buffers its output. -/// But it only does this batched write when it goes out of scope, or when the -/// internal buffer is full. Sometimes, you'd prefer to write each line as it's -/// completed, rather than the entire buffer at once. Enter `LineWriter`. It -/// does exactly that. -/// -/// [bufwriter]: struct.BufWriter.html -/// -/// If there's still a partial line in the buffer when the `LineWriter` is -/// dropped, it will flush those contents. -/// -/// # Examples -/// -/// We can use `LineWriter` to write one line at a time, significantly -/// reducing the number of actual writes to the file. -/// -/// ``` -/// use std::fs::File; -/// use std::io::prelude::*; -/// use std::io::LineWriter; -/// -/// # fn foo() -> std::io::Result<()> { -/// let road_not_taken = b"I shall be telling this with a sigh -/// Somewhere ages and ages hence: -/// Two roads diverged in a wood, and I - -/// I took the one less traveled by, -/// And that has made all the difference."; -/// -/// let file = try!(File::create("poem.txt")); -/// let mut file = LineWriter::new(file); -/// -/// for &byte in road_not_taken.iter() { -/// file.write(&[byte]).unwrap(); -/// } -/// -/// // let's check we did the right thing. -/// let mut file = try!(File::open("poem.txt")); -/// let mut contents = String::new(); -/// -/// try!(file.read_to_string(&mut contents)); -/// -/// assert_eq!(contents.as_bytes(), &road_not_taken[..]); -/// # Ok(()) -/// # } -/// ``` -pub struct LineWriter { - inner: BufWriter, -} - -impl LineWriter { - /// Creates a new `LineWriter`. - /// - /// # Examples - /// - /// ``` - /// use std::fs::File; - /// use std::io::LineWriter; - /// - /// # fn foo() -> std::io::Result<()> { - /// let file = try!(File::create("poem.txt")); - /// let file = LineWriter::new(file); - /// # Ok(()) - /// # } - /// ``` - pub fn new(inner: W) -> LineWriter { - // Lines typically aren't that long, don't use a giant buffer - LineWriter::with_capacity(1024, inner) - } - - /// Creates a new `LineWriter` with a specified capacity for the internal - /// buffer. 
- /// - /// # Examples - /// - /// ``` - /// use std::fs::File; - /// use std::io::LineWriter; - /// - /// # fn foo() -> std::io::Result<()> { - /// let file = try!(File::create("poem.txt")); - /// let file = LineWriter::with_capacity(100, file); - /// # Ok(()) - /// # } - /// ``` - pub fn with_capacity(cap: usize, inner: W) -> LineWriter { - LineWriter { inner: BufWriter::with_capacity(cap, inner) } - } - - /// Gets a reference to the underlying writer. - /// - /// # Examples - /// - /// ``` - /// use std::fs::File; - /// use std::io::LineWriter; - /// - /// # fn foo() -> std::io::Result<()> { - /// let file = try!(File::create("poem.txt")); - /// let file = LineWriter::new(file); - /// - /// let reference = file.get_ref(); - /// # Ok(()) - /// # } - /// ``` - pub fn get_ref(&self) -> &W { self.inner.get_ref() } - - /// Gets a mutable reference to the underlying writer. - /// - /// Caution must be taken when calling methods on the mutable reference - /// returned as extra writes could corrupt the output stream. - /// - /// # Examples - /// - /// ``` - /// use std::fs::File; - /// use std::io::LineWriter; - /// - /// # fn foo() -> std::io::Result<()> { - /// let file = try!(File::create("poem.txt")); - /// let mut file = LineWriter::new(file); - /// - /// // we can use reference just like file - /// let reference = file.get_mut(); - /// # Ok(()) - /// # } - /// ``` - pub fn get_mut(&mut self) -> &mut W { self.inner.get_mut() } - - /// Unwraps this `LineWriter`, returning the underlying writer. - /// - /// The internal buffer is written out before returning the writer. - /// - /// # Examples - /// - /// ``` - /// use std::fs::File; - /// use std::io::LineWriter; - /// - /// # fn foo() -> std::io::Result<()> { - /// let file = try!(File::create("poem.txt")); - /// - /// let writer: LineWriter = LineWriter::new(file); - /// - /// let file: File = try!(writer.into_inner()); - /// # Ok(()) - /// # } - /// ``` - pub fn into_inner(self) -> Result>> { - self.inner.into_inner().map_err(|IntoInnerError(buf, e)| { - IntoInnerError(LineWriter { inner: buf }, e) - }) - } -} - -impl Write for LineWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - match memchr::memrchr(b'\n', buf) { - Some(i) => { - let n = self.inner.write(&buf[..i + 1])?; - if n != i + 1 || self.inner.flush().is_err() { - // Do not return errors on partial writes. - return Ok(n); - } - self.inner.write(&buf[i + 1..]).map(|i| n + i) - } - None => self.inner.write(buf), - } - } - - fn flush(&mut self) -> io::Result<()> { self.inner.flush() } -} - -impl fmt::Debug for LineWriter where W: fmt::Debug { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("LineWriter") - .field("writer", &self.inner.inner) - .field("buffer", - &format_args!("{}/{}", self.inner.buf.len(), self.inner.buf.capacity())) - .finish() - } -} - -#[cfg(test)] -mod tests { - use prelude::v1::*; - use io::prelude::*; - use io::{self, BufReader, BufWriter, LineWriter, SeekFrom}; - use sync::atomic::{AtomicUsize, Ordering}; - use thread; - use test; - - /// A dummy reader intended at testing short-reads propagation. 
- pub struct ShortReader { - lengths: Vec, - } - - impl Read for ShortReader { - fn read(&mut self, _: &mut [u8]) -> io::Result { - if self.lengths.is_empty() { - Ok(0) - } else { - Ok(self.lengths.remove(0)) - } - } - } - - #[test] - fn test_buffered_reader() { - let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; - let mut reader = BufReader::with_capacity(2, inner); - - let mut buf = [0, 0, 0]; - let nread = reader.read(&mut buf); - assert_eq!(nread.unwrap(), 3); - let b: &[_] = &[5, 6, 7]; - assert_eq!(buf, b); - - let mut buf = [0, 0]; - let nread = reader.read(&mut buf); - assert_eq!(nread.unwrap(), 2); - let b: &[_] = &[0, 1]; - assert_eq!(buf, b); - - let mut buf = [0]; - let nread = reader.read(&mut buf); - assert_eq!(nread.unwrap(), 1); - let b: &[_] = &[2]; - assert_eq!(buf, b); - - let mut buf = [0, 0, 0]; - let nread = reader.read(&mut buf); - assert_eq!(nread.unwrap(), 1); - let b: &[_] = &[3, 0, 0]; - assert_eq!(buf, b); - - let nread = reader.read(&mut buf); - assert_eq!(nread.unwrap(), 1); - let b: &[_] = &[4, 0, 0]; - assert_eq!(buf, b); - - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_buffered_reader_seek() { - let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; - let mut reader = BufReader::with_capacity(2, io::Cursor::new(inner)); - - assert_eq!(reader.seek(SeekFrom::Start(3)).ok(), Some(3)); - assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..])); - assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(3)); - assert_eq!(reader.fill_buf().ok(), Some(&[0, 1][..])); - assert_eq!(reader.seek(SeekFrom::Current(1)).ok(), Some(4)); - assert_eq!(reader.fill_buf().ok(), Some(&[1, 2][..])); - reader.consume(1); - assert_eq!(reader.seek(SeekFrom::Current(-2)).ok(), Some(3)); - } - - #[test] - fn test_buffered_reader_seek_underflow() { - // gimmick reader that yields its position modulo 256 for each byte - struct PositionReader { - pos: u64 - } - impl Read for PositionReader { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let len = buf.len(); - for x in buf { - *x = self.pos as u8; - self.pos = self.pos.wrapping_add(1); - } - Ok(len) - } - } - impl Seek for PositionReader { - fn seek(&mut self, pos: SeekFrom) -> io::Result { - match pos { - SeekFrom::Start(n) => { - self.pos = n; - } - SeekFrom::Current(n) => { - self.pos = self.pos.wrapping_add(n as u64); - } - SeekFrom::End(n) => { - self.pos = u64::max_value().wrapping_add(n as u64); - } - } - Ok(self.pos) - } - } - - let mut reader = BufReader::with_capacity(5, PositionReader { pos: 0 }); - assert_eq!(reader.fill_buf().ok(), Some(&[0, 1, 2, 3, 4][..])); - assert_eq!(reader.seek(SeekFrom::End(-5)).ok(), Some(u64::max_value()-5)); - assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5)); - // the following seek will require two underlying seeks - let expected = 9223372036854775802; - assert_eq!(reader.seek(SeekFrom::Current(i64::min_value())).ok(), Some(expected)); - assert_eq!(reader.fill_buf().ok().map(|s| s.len()), Some(5)); - // seeking to 0 should empty the buffer. 
- assert_eq!(reader.seek(SeekFrom::Current(0)).ok(), Some(expected)); - assert_eq!(reader.get_ref().pos, expected); - } - - #[test] - fn test_buffered_writer() { - let inner = Vec::new(); - let mut writer = BufWriter::with_capacity(2, inner); - - writer.write(&[0, 1]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1]); - - writer.write(&[2]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1]); - - writer.write(&[3]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1]); - - writer.flush().unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3]); - - writer.write(&[4]).unwrap(); - writer.write(&[5]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3]); - - writer.write(&[6]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]); - - writer.write(&[7, 8]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]); - - writer.write(&[9, 10, 11]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); - - writer.flush().unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); - } - - #[test] - fn test_buffered_writer_inner_flushes() { - let mut w = BufWriter::with_capacity(3, Vec::new()); - w.write(&[0, 1]).unwrap(); - assert_eq!(*w.get_ref(), []); - let w = w.into_inner().unwrap(); - assert_eq!(w, [0, 1]); - } - - #[test] - fn test_buffered_writer_seek() { - let mut w = BufWriter::with_capacity(3, io::Cursor::new(Vec::new())); - w.write_all(&[0, 1, 2, 3, 4, 5]).unwrap(); - w.write_all(&[6, 7]).unwrap(); - assert_eq!(w.seek(SeekFrom::Current(0)).ok(), Some(8)); - assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]); - assert_eq!(w.seek(SeekFrom::Start(2)).ok(), Some(2)); - w.write_all(&[8, 9]).unwrap(); - assert_eq!(&w.into_inner().unwrap().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]); - } - - #[test] - fn test_read_until() { - let inner: &[u8] = &[0, 1, 2, 1, 0]; - let mut reader = BufReader::with_capacity(2, inner); - let mut v = Vec::new(); - reader.read_until(0, &mut v).unwrap(); - assert_eq!(v, [0]); - v.truncate(0); - reader.read_until(2, &mut v).unwrap(); - assert_eq!(v, [1, 2]); - v.truncate(0); - reader.read_until(1, &mut v).unwrap(); - assert_eq!(v, [1]); - v.truncate(0); - reader.read_until(8, &mut v).unwrap(); - assert_eq!(v, [0]); - v.truncate(0); - reader.read_until(9, &mut v).unwrap(); - assert_eq!(v, []); - } - - #[test] - fn test_line_buffer_fail_flush() { - // Issue #32085 - struct FailFlushWriter<'a>(&'a mut Vec); - - impl<'a> Write for FailFlushWriter<'a> { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.0.extend_from_slice(buf); - Ok(buf.len()) - } - fn flush(&mut self) -> io::Result<()> { - Err(io::Error::new(io::ErrorKind::Other, "flush failed")) - } - } - - let mut buf = Vec::new(); - { - let mut writer = LineWriter::new(FailFlushWriter(&mut buf)); - let to_write = b"abc\ndef"; - if let Ok(written) = writer.write(to_write) { - assert!(written < to_write.len(), "didn't flush on new line"); - // PASS - return; - } - } - assert!(buf.is_empty(), "write returned an error but wrote data"); - } - - #[test] - fn test_line_buffer() { - let mut writer = LineWriter::new(Vec::new()); - writer.write(&[0]).unwrap(); - assert_eq!(*writer.get_ref(), []); - writer.write(&[1]).unwrap(); - assert_eq!(*writer.get_ref(), []); - writer.flush().unwrap(); - assert_eq!(*writer.get_ref(), [0, 1]); - writer.write(&[0, b'\n', 1, b'\n', 2]).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']); - writer.flush().unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]); - 
writer.write(&[3, b'\n']).unwrap(); - assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']); - } - - #[test] - fn test_read_line() { - let in_buf: &[u8] = b"a\nb\nc"; - let mut reader = BufReader::with_capacity(2, in_buf); - let mut s = String::new(); - reader.read_line(&mut s).unwrap(); - assert_eq!(s, "a\n"); - s.truncate(0); - reader.read_line(&mut s).unwrap(); - assert_eq!(s, "b\n"); - s.truncate(0); - reader.read_line(&mut s).unwrap(); - assert_eq!(s, "c"); - s.truncate(0); - reader.read_line(&mut s).unwrap(); - assert_eq!(s, ""); - } - - #[test] - fn test_lines() { - let in_buf: &[u8] = b"a\nb\nc"; - let reader = BufReader::with_capacity(2, in_buf); - let mut it = reader.lines(); - assert_eq!(it.next().unwrap().unwrap(), "a".to_string()); - assert_eq!(it.next().unwrap().unwrap(), "b".to_string()); - assert_eq!(it.next().unwrap().unwrap(), "c".to_string()); - assert!(it.next().is_none()); - } - - #[test] - fn test_short_reads() { - let inner = ShortReader{lengths: vec![0, 1, 2, 0, 1, 0]}; - let mut reader = BufReader::new(inner); - let mut buf = [0, 0]; - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.read(&mut buf).unwrap(), 1); - assert_eq!(reader.read(&mut buf).unwrap(), 2); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.read(&mut buf).unwrap(), 1); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn read_char_buffered() { - let buf = [195, 159]; - let reader = BufReader::with_capacity(1, &buf[..]); - assert_eq!(reader.chars().next().unwrap().unwrap(), 'ß'); - } - - #[test] - fn test_chars() { - let buf = [195, 159, b'a']; - let reader = BufReader::with_capacity(1, &buf[..]); - let mut it = reader.chars(); - assert_eq!(it.next().unwrap().unwrap(), 'ß'); - assert_eq!(it.next().unwrap().unwrap(), 'a'); - assert!(it.next().is_none()); - } - - #[test] - #[should_panic] - fn dont_panic_in_drop_on_panicked_flush() { - struct FailFlushWriter; - - impl Write for FailFlushWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { Ok(buf.len()) } - fn flush(&mut self) -> io::Result<()> { - Err(io::Error::last_os_error()) - } - } - - let writer = FailFlushWriter; - let _writer = BufWriter::new(writer); - - // If writer panics *again* due to the flush error then the process will - // abort. - panic!(); - } - - #[test] - fn panic_in_write_doesnt_flush_in_drop() { - static WRITES: AtomicUsize = AtomicUsize::new(0); - - struct PanicWriter; - - impl Write for PanicWriter { - fn write(&mut self, _: &[u8]) -> io::Result { - WRITES.fetch_add(1, Ordering::SeqCst); - panic!(); - } - fn flush(&mut self) -> io::Result<()> { Ok(()) } - } - - thread::spawn(|| { - let mut writer = BufWriter::new(PanicWriter); - let _ = writer.write(b"hello world"); - let _ = writer.flush(); - }).join().unwrap_err(); - - assert_eq!(WRITES.load(Ordering::SeqCst), 1); - } - - #[bench] - fn bench_buffered_reader(b: &mut test::Bencher) { - b.iter(|| { - BufReader::new(io::empty()) - }); - } - - #[bench] - fn bench_buffered_writer(b: &mut test::Bencher) { - b.iter(|| { - BufWriter::new(io::sink()) - }); - } -} diff --git a/artiq/firmware/libstd_artiq/io/cursor.rs b/artiq/firmware/libstd_artiq/io/cursor.rs deleted file mode 100644 index 8b4783ae7..000000000 --- a/artiq/firmware/libstd_artiq/io/cursor.rs +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use core::prelude::v1::*; -use io::prelude::*; - -use core::cmp; -use io::{self, SeekFrom, Error, ErrorKind}; -use alloc::boxed::Box; -use alloc::vec::Vec; - -/// A `Cursor` wraps another type and provides it with a -/// [`Seek`](trait.Seek.html) implementation. -/// -/// Cursors are typically used with in-memory buffers to allow them to -/// implement `Read` and/or `Write`, allowing these buffers to be used -/// anywhere you might use a reader or writer that does actual I/O. -/// -/// The standard library implements some I/O traits on various types which -/// are commonly used as a buffer, like `Cursor>` and `Cursor<&[u8]>`. -/// -/// # Examples -/// -/// We may want to write bytes to a [`File`][file] in our production -/// code, but use an in-memory buffer in our tests. We can do this with -/// `Cursor`: -/// -/// [file]: ../fs/struct.File.html -/// -/// ```no_run -/// use std::io::prelude::*; -/// use std::io::{self, SeekFrom}; -/// use std::fs::File; -/// -/// // a library function we've written -/// fn write_ten_bytes_at_end(writer: &mut W) -> io::Result<()> { -/// try!(writer.seek(SeekFrom::End(-10))); -/// -/// for i in 0..10 { -/// try!(writer.write(&[i])); -/// } -/// -/// // all went well -/// Ok(()) -/// } -/// -/// # fn foo() -> io::Result<()> { -/// // Here's some code that uses this library function. -/// // -/// // We might want to use a BufReader here for efficiency, but let's -/// // keep this example focused. -/// let mut file = try!(File::create("foo.txt")); -/// -/// try!(write_ten_bytes_at_end(&mut file)); -/// # Ok(()) -/// # } -/// -/// // now let's write a test -/// #[test] -/// fn test_writes_bytes() { -/// // setting up a real File is much more slow than an in-memory buffer, -/// // let's use a cursor instead -/// use std::io::Cursor; -/// let mut buff = Cursor::new(vec![0; 15]); -/// -/// write_ten_bytes_at_end(&mut buff).unwrap(); -/// -/// assert_eq!(&buff.get_ref()[5..15], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); -/// } -/// ``` -#[derive(Clone, Debug)] -pub struct Cursor { - inner: T, - pos: u64, -} - -impl Cursor { - /// Creates a new cursor wrapping the provided underlying I/O object. - /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// ``` - pub fn new(inner: T) -> Cursor { - Cursor { pos: 0, inner: inner } - } - - /// Consumes this cursor, returning the underlying value. - /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let vec = buff.into_inner(); - /// ``` - pub fn into_inner(self) -> T { self.inner } - - /// Gets a reference to the underlying value in this cursor. - /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// - /// let buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let reference = buff.get_ref(); - /// ``` - pub fn get_ref(&self) -> &T { &self.inner } - - /// Gets a mutable reference to the underlying value in this cursor. - /// - /// Care should be taken to avoid modifying the internal I/O state of the - /// underlying value as it may corrupt this cursor's position. 
- /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// - /// let mut buff = Cursor::new(Vec::new()); - /// # fn force_inference(_: &Cursor>) {} - /// # force_inference(&buff); - /// - /// let reference = buff.get_mut(); - /// ``` - pub fn get_mut(&mut self) -> &mut T { &mut self.inner } - - /// Returns the current position of this cursor. - /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// use std::io::prelude::*; - /// use std::io::SeekFrom; - /// - /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]); - /// - /// assert_eq!(buff.position(), 0); - /// - /// buff.seek(SeekFrom::Current(2)).unwrap(); - /// assert_eq!(buff.position(), 2); - /// - /// buff.seek(SeekFrom::Current(-1)).unwrap(); - /// assert_eq!(buff.position(), 1); - /// ``` - pub fn position(&self) -> u64 { self.pos } - - /// Sets the position of this cursor. - /// - /// # Examples - /// - /// ``` - /// use std::io::Cursor; - /// - /// let mut buff = Cursor::new(vec![1, 2, 3, 4, 5]); - /// - /// assert_eq!(buff.position(), 0); - /// - /// buff.set_position(2); - /// assert_eq!(buff.position(), 2); - /// - /// buff.set_position(4); - /// assert_eq!(buff.position(), 4); - /// ``` - pub fn set_position(&mut self, pos: u64) { self.pos = pos; } -} - -impl io::Seek for Cursor where T: AsRef<[u8]> { - fn seek(&mut self, style: SeekFrom) -> io::Result { - let pos = match style { - SeekFrom::Start(n) => { self.pos = n; return Ok(n) } - SeekFrom::End(n) => self.inner.as_ref().len() as i64 + n, - SeekFrom::Current(n) => self.pos as i64 + n, - }; - - if pos < 0 { - Err(Error::new(ErrorKind::InvalidInput, - "invalid seek to a negative position")) - } else { - self.pos = pos as u64; - Ok(self.pos) - } - } -} - -impl Read for Cursor where T: AsRef<[u8]> { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let n = Read::read(&mut self.fill_buf()?, buf)?; - self.pos += n as u64; - Ok(n) - } -} - -impl BufRead for Cursor where T: AsRef<[u8]> { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - let amt = cmp::min(self.pos, self.inner.as_ref().len() as u64); - Ok(&self.inner.as_ref()[(amt as usize)..]) - } - fn consume(&mut self, amt: usize) { self.pos += amt as u64; } -} - -impl<'a> Write for Cursor<&'a mut [u8]> { - #[inline] - fn write(&mut self, data: &[u8]) -> io::Result { - let pos = cmp::min(self.pos, self.inner.len() as u64); - let amt = (&mut self.inner[(pos as usize)..]).write(data)?; - self.pos += amt as u64; - Ok(amt) - } - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - -impl Write for Cursor> { - fn write(&mut self, buf: &[u8]) -> io::Result { - // Make sure the internal buffer is as least as big as where we - // currently are - let pos = self.position(); - let amt = pos.saturating_sub(self.inner.len() as u64); - // use `resize` so that the zero filling is as efficient as possible - let len = self.inner.len(); - self.inner.resize(len + amt as usize, 0); - - // Figure out what bytes will be used to overwrite what's currently - // there (left), and what will be appended on the end (right) - { - let pos = pos as usize; - let space = self.inner.len() - pos; - let (left, right) = buf.split_at(cmp::min(space, buf.len())); - self.inner[pos..pos + left.len()].copy_from_slice(left); - self.inner.extend_from_slice(right); - } - - // Bump us forward - self.set_position(pos + buf.len() as u64); - Ok(buf.len()) - } - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - -impl Write for Cursor> { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - let pos = cmp::min(self.pos, 
self.inner.len() as u64); - let amt = (&mut self.inner[(pos as usize)..]).write(buf)?; - self.pos += amt as u64; - Ok(amt) - } - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - -#[cfg(test)] -mod tests { - use io::prelude::*; - use io::{Cursor, SeekFrom}; - use vec::Vec; - - #[test] - fn test_vec_writer() { - let mut writer = Vec::new(); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); - assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); - let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; - assert_eq!(writer, b); - } - - #[test] - fn test_mem_writer() { - let mut writer = Cursor::new(Vec::new()); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); - assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); - let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; - assert_eq!(&writer.get_ref()[..], b); - } - - #[test] - fn test_box_slice_writer() { - let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice()); - assert_eq!(writer.position(), 0); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.position(), 1); - assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); - assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); - assert_eq!(writer.position(), 8); - assert_eq!(writer.write(&[]).unwrap(), 0); - assert_eq!(writer.position(), 8); - - assert_eq!(writer.write(&[8, 9]).unwrap(), 1); - assert_eq!(writer.write(&[10]).unwrap(), 0); - let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8]; - assert_eq!(&**writer.get_ref(), b); - } - - #[test] - fn test_buf_writer() { - let mut buf = [0 as u8; 9]; - { - let mut writer = Cursor::new(&mut buf[..]); - assert_eq!(writer.position(), 0); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.position(), 1); - assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); - assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); - assert_eq!(writer.position(), 8); - assert_eq!(writer.write(&[]).unwrap(), 0); - assert_eq!(writer.position(), 8); - - assert_eq!(writer.write(&[8, 9]).unwrap(), 1); - assert_eq!(writer.write(&[10]).unwrap(), 0); - } - let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8]; - assert_eq!(buf, b); - } - - #[test] - fn test_buf_writer_seek() { - let mut buf = [0 as u8; 8]; - { - let mut writer = Cursor::new(&mut buf[..]); - assert_eq!(writer.position(), 0); - assert_eq!(writer.write(&[1]).unwrap(), 1); - assert_eq!(writer.position(), 1); - - assert_eq!(writer.seek(SeekFrom::Start(2)).unwrap(), 2); - assert_eq!(writer.position(), 2); - assert_eq!(writer.write(&[2]).unwrap(), 1); - assert_eq!(writer.position(), 3); - - assert_eq!(writer.seek(SeekFrom::Current(-2)).unwrap(), 1); - assert_eq!(writer.position(), 1); - assert_eq!(writer.write(&[3]).unwrap(), 1); - assert_eq!(writer.position(), 2); - - assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7); - assert_eq!(writer.position(), 7); - assert_eq!(writer.write(&[4]).unwrap(), 1); - assert_eq!(writer.position(), 8); - - } - let b: &[_] = &[1, 3, 2, 0, 0, 0, 0, 4]; - assert_eq!(buf, b); - } - - #[test] - fn test_buf_writer_error() { - let mut buf = [0 as u8; 2]; - let mut writer = Cursor::new(&mut buf[..]); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.write(&[0, 0]).unwrap(), 1); - assert_eq!(writer.write(&[0, 0]).unwrap(), 0); - } - - #[test] - fn test_mem_reader() { - let mut reader = Cursor::new(vec!(0, 1, 2, 3, 4, 5, 6, 7)); - let mut buf = []; - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.position(), 0); - let mut buf = [0]; - assert_eq!(reader.read(&mut buf).unwrap(), 
1); - assert_eq!(reader.position(), 1); - let b: &[_] = &[0]; - assert_eq!(buf, b); - let mut buf = [0; 4]; - assert_eq!(reader.read(&mut buf).unwrap(), 4); - assert_eq!(reader.position(), 5); - let b: &[_] = &[1, 2, 3, 4]; - assert_eq!(buf, b); - assert_eq!(reader.read(&mut buf).unwrap(), 3); - let b: &[_] = &[5, 6, 7]; - assert_eq!(&buf[..3], b); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_boxed_slice_reader() { - let mut reader = Cursor::new(vec!(0, 1, 2, 3, 4, 5, 6, 7).into_boxed_slice()); - let mut buf = []; - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.position(), 0); - let mut buf = [0]; - assert_eq!(reader.read(&mut buf).unwrap(), 1); - assert_eq!(reader.position(), 1); - let b: &[_] = &[0]; - assert_eq!(buf, b); - let mut buf = [0; 4]; - assert_eq!(reader.read(&mut buf).unwrap(), 4); - assert_eq!(reader.position(), 5); - let b: &[_] = &[1, 2, 3, 4]; - assert_eq!(buf, b); - assert_eq!(reader.read(&mut buf).unwrap(), 3); - let b: &[_] = &[5, 6, 7]; - assert_eq!(&buf[..3], b); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn read_to_end() { - let mut reader = Cursor::new(vec!(0, 1, 2, 3, 4, 5, 6, 7)); - let mut v = Vec::new(); - reader.read_to_end(&mut v).unwrap(); - assert_eq!(v, [0, 1, 2, 3, 4, 5, 6, 7]); - } - - #[test] - fn test_slice_reader() { - let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7]; - let mut reader = &mut &in_buf[..]; - let mut buf = []; - assert_eq!(reader.read(&mut buf).unwrap(), 0); - let mut buf = [0]; - assert_eq!(reader.read(&mut buf).unwrap(), 1); - assert_eq!(reader.len(), 7); - let b: &[_] = &[0]; - assert_eq!(&buf[..], b); - let mut buf = [0; 4]; - assert_eq!(reader.read(&mut buf).unwrap(), 4); - assert_eq!(reader.len(), 3); - let b: &[_] = &[1, 2, 3, 4]; - assert_eq!(&buf[..], b); - assert_eq!(reader.read(&mut buf).unwrap(), 3); - let b: &[_] = &[5, 6, 7]; - assert_eq!(&buf[..3], b); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_buf_reader() { - let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7]; - let mut reader = Cursor::new(&in_buf[..]); - let mut buf = []; - assert_eq!(reader.read(&mut buf).unwrap(), 0); - assert_eq!(reader.position(), 0); - let mut buf = [0]; - assert_eq!(reader.read(&mut buf).unwrap(), 1); - assert_eq!(reader.position(), 1); - let b: &[_] = &[0]; - assert_eq!(buf, b); - let mut buf = [0; 4]; - assert_eq!(reader.read(&mut buf).unwrap(), 4); - assert_eq!(reader.position(), 5); - let b: &[_] = &[1, 2, 3, 4]; - assert_eq!(buf, b); - assert_eq!(reader.read(&mut buf).unwrap(), 3); - let b: &[_] = &[5, 6, 7]; - assert_eq!(&buf[..3], b); - assert_eq!(reader.read(&mut buf).unwrap(), 0); - } - - #[test] - fn test_read_char() { - let b = &b"Vi\xE1\xBB\x87t"[..]; - let mut c = Cursor::new(b).chars(); - assert_eq!(c.next().unwrap().unwrap(), 'V'); - assert_eq!(c.next().unwrap().unwrap(), 'i'); - assert_eq!(c.next().unwrap().unwrap(), 'ệ'); - assert_eq!(c.next().unwrap().unwrap(), 't'); - assert!(c.next().is_none()); - } - - #[test] - fn test_read_bad_char() { - let b = &b"\x80"[..]; - let mut c = Cursor::new(b).chars(); - assert!(c.next().unwrap().is_err()); - } - - #[test] - fn seek_past_end() { - let buf = [0xff]; - let mut r = Cursor::new(&buf[..]); - assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); - assert_eq!(r.read(&mut [0]).unwrap(), 0); - - let mut r = Cursor::new(vec!(10)); - assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); - assert_eq!(r.read(&mut [0]).unwrap(), 0); - - let mut buf = [0]; - let mut r = Cursor::new(&mut buf[..]); - 
assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); - assert_eq!(r.write(&[3]).unwrap(), 0); - - let mut r = Cursor::new(vec![10].into_boxed_slice()); - assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); - assert_eq!(r.write(&[3]).unwrap(), 0); - } - - #[test] - fn seek_before_0() { - let buf = [0xff]; - let mut r = Cursor::new(&buf[..]); - assert!(r.seek(SeekFrom::End(-2)).is_err()); - - let mut r = Cursor::new(vec!(10)); - assert!(r.seek(SeekFrom::End(-2)).is_err()); - - let mut buf = [0]; - let mut r = Cursor::new(&mut buf[..]); - assert!(r.seek(SeekFrom::End(-2)).is_err()); - - let mut r = Cursor::new(vec!(10).into_boxed_slice()); - assert!(r.seek(SeekFrom::End(-2)).is_err()); - } - - #[test] - fn test_seekable_mem_writer() { - let mut writer = Cursor::new(Vec::::new()); - assert_eq!(writer.position(), 0); - assert_eq!(writer.write(&[0]).unwrap(), 1); - assert_eq!(writer.position(), 1); - assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3); - assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4); - assert_eq!(writer.position(), 8); - let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; - assert_eq!(&writer.get_ref()[..], b); - - assert_eq!(writer.seek(SeekFrom::Start(0)).unwrap(), 0); - assert_eq!(writer.position(), 0); - assert_eq!(writer.write(&[3, 4]).unwrap(), 2); - let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7]; - assert_eq!(&writer.get_ref()[..], b); - - assert_eq!(writer.seek(SeekFrom::Current(1)).unwrap(), 3); - assert_eq!(writer.write(&[0, 1]).unwrap(), 2); - let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7]; - assert_eq!(&writer.get_ref()[..], b); - - assert_eq!(writer.seek(SeekFrom::End(-1)).unwrap(), 7); - assert_eq!(writer.write(&[1, 2]).unwrap(), 2); - let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2]; - assert_eq!(&writer.get_ref()[..], b); - - assert_eq!(writer.seek(SeekFrom::End(1)).unwrap(), 10); - assert_eq!(writer.write(&[1]).unwrap(), 1); - let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1]; - assert_eq!(&writer.get_ref()[..], b); - } - - #[test] - fn vec_seek_past_end() { - let mut r = Cursor::new(Vec::new()); - assert_eq!(r.seek(SeekFrom::Start(10)).unwrap(), 10); - assert_eq!(r.write(&[3]).unwrap(), 1); - } - - #[test] - fn vec_seek_before_0() { - let mut r = Cursor::new(Vec::new()); - assert!(r.seek(SeekFrom::End(-2)).is_err()); - } -} diff --git a/artiq/firmware/libstd_artiq/io/error.rs b/artiq/firmware/libstd_artiq/io/error.rs deleted file mode 100644 index 942e4187c..000000000 --- a/artiq/firmware/libstd_artiq/io/error.rs +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[cfg(feature="io_error_alloc")] use alloc::boxed::Box; -#[cfg(not(feature="io_error_alloc"))] use ::FakeBox as Box; -use core::convert::Into; -use core::fmt; -use core::marker::{Send, Sync}; -use core::option::Option::{self, Some, None}; -use core::result; -use error; - -/// A specialized [`Result`](../result/enum.Result.html) type for I/O -/// operations. -/// -/// This type is broadly used across `std::io` for any operation which may -/// produce an error. -/// -/// This typedef is generally used to avoid writing out `io::Error` directly and -/// is otherwise a direct mapping to `Result`. 
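The `Cursor` implementation deleted above is what gives in-memory buffers `Read`/`Write`/`Seek`; as its `Write` impl and the `seek_past_end`/`vec_seek_past_end` tests show, writing after seeking past the end of a `Cursor<Vec<u8>>` zero-fills the gap. A small sketch of that behaviour with the host `std::io::Cursor`, which the removed copy tracks:

```
use std::io::{Cursor, Read, Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    // Cursor<Vec<u8>> grows on write; seeking past the end and writing
    // zero-fills the gap, as in the Write impl shown above.
    let mut cur = Cursor::new(Vec::<u8>::new());
    cur.write_all(b"hello")?;
    cur.seek(SeekFrom::Start(8))?;
    cur.write_all(b"!")?;
    assert_eq!(cur.get_ref().as_slice(), b"hello\0\0\0!");

    // Rewinding lets the same in-memory buffer be read back.
    cur.seek(SeekFrom::Start(0))?;
    let mut out = Vec::new();
    cur.read_to_end(&mut out)?;
    assert_eq!(out.len(), 9);
    Ok(())
}
```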
-/// -/// While usual Rust style is to import types directly, aliases of `Result` -/// often are not, to make it easier to distinguish between them. `Result` is -/// generally assumed to be `std::result::Result`, and so users of this alias -/// will generally use `io::Result` instead of shadowing the prelude's import -/// of `std::result::Result`. -/// -/// # Examples -/// -/// A convenience function that bubbles an `io::Result` to its caller: -/// -/// ``` -/// use std::io; -/// -/// fn get_string() -> io::Result { -/// let mut buffer = String::new(); -/// -/// try!(io::stdin().read_line(&mut buffer)); -/// -/// Ok(buffer) -/// } -/// ``` -pub type Result = result::Result; - -/// The error type for I/O operations of the `Read`, `Write`, `Seek`, and -/// associated traits. -/// -/// Errors mostly originate from the underlying OS, but custom instances of -/// `Error` can be created with crafted error messages and a particular value of -/// `ErrorKind`. -#[derive(Debug)] -pub struct Error { - repr: Repr, -} - -enum Repr { - Os(i32), - - #[cfg(feature="io_error_alloc")] - Custom(Box), - #[cfg(not(feature="io_error_alloc"))] - Custom(Custom), -} - -#[derive(Debug)] -struct Custom { - kind: ErrorKind, - #[cfg(feature="io_error_alloc")] - error: Box, - #[cfg(not(feature="io_error_alloc"))] - error: &'static str -} - -/// A list specifying general categories of I/O error. -/// -/// This list is intended to grow over time and it is not recommended to -/// exhaustively match against it. -#[derive(Copy, PartialEq, Eq, Clone, Debug)] -#[allow(deprecated)] -pub enum ErrorKind { - /// An entity was not found, often a file. - NotFound, - /// The operation lacked the necessary privileges to complete. - PermissionDenied, - /// The connection was refused by the remote server. - ConnectionRefused, - /// The connection was reset by the remote server. - ConnectionReset, - /// The connection was aborted (terminated) by the remote server. - ConnectionAborted, - /// The network operation failed because it was not connected yet. - NotConnected, - /// A socket address could not be bound because the address is already in - /// use elsewhere. - AddrInUse, - /// A nonexistent interface was requested or the requested address was not - /// local. - AddrNotAvailable, - /// The operation failed because a pipe was closed. - BrokenPipe, - /// An entity already exists, often a file. - AlreadyExists, - /// The operation needs to block to complete, but the blocking operation was - /// requested to not occur. - WouldBlock, - /// A parameter was incorrect. - InvalidInput, - /// Data not valid for the operation were encountered. - /// - /// Unlike `InvalidInput`, this typically means that the operation - /// parameters were valid, however the error was caused by malformed - /// input data. - /// - /// For example, a function that reads a file into a string will error with - /// `InvalidData` if the file's contents are not valid UTF-8. - InvalidData, - /// The I/O operation's timeout expired, causing it to be canceled. - TimedOut, - /// An error returned when an operation could not be completed because a - /// call to `write` returned `Ok(0)`. - /// - /// This typically means that an operation could only succeed if it wrote a - /// particular number of bytes but only a smaller number of bytes could be - /// written. - WriteZero, - /// This operation was interrupted. - /// - /// Interrupted operations can typically be retried. - Interrupted, - /// Any I/O error not part of this list. 
- Other, - - /// An error returned when an operation could not be completed because an - /// "end of file" was reached prematurely. - /// - /// This typically means that an operation could only succeed if it read a - /// particular number of bytes but only a smaller number of bytes could be - /// read. - UnexpectedEof, - - /// Any I/O error not part of this list. - #[doc(hidden)] - __Nonexhaustive, -} - -impl Error { - /// Creates a new I/O error from a known kind of error as well as an - /// arbitrary error payload. - /// - /// This function is used to generically create I/O errors which do not - /// originate from the OS itself. The `error` argument is an arbitrary - /// payload which will be contained in this `Error`. - /// - /// # Examples - /// - /// ``` - /// use std::io::{Error, ErrorKind}; - /// - /// // errors can be created from strings - /// let custom_error = Error::new(ErrorKind::Other, "oh no!"); - /// - /// // errors can also be created from other errors - /// let custom_error2 = Error::new(ErrorKind::Interrupted, custom_error); - /// ``` - #[cfg(feature="io_error_alloc")] - pub fn new(kind: ErrorKind, error: E) -> Error - where E: Into> - { - Self::_new(kind, error.into()) - } - - #[cfg(not(feature="io_error_alloc"))] - pub fn new(kind: ErrorKind, error: E) -> Error - where E: Into<&'static str> - { - Self::_new(kind, error.into()) - } - - #[cfg(feature="io_error_alloc")] - fn _new(kind: ErrorKind, error: Box) -> Error { - Error { - repr: Repr::Custom(Box::new(Custom { - kind: kind, - error: error, - })) - } - } - - #[cfg(not(feature="io_error_alloc"))] - fn _new(kind: ErrorKind, error: &'static str) -> Error { - Error { - repr: Repr::Custom(Box::new(Custom { - kind: kind, - error: error, - })) - } - } - - /// Creates a new instance of an `Error` from a particular OS error code. - pub fn from_raw_os_error(code: i32) -> Error { - Error { repr: Repr::Os(code) } - } - - /// Returns the OS error that this error represents (if any). - /// - /// If this `Error` was constructed via `last_os_error` or - /// `from_raw_os_error`, then this function will return `Some`, otherwise - /// it will return `None`. - pub fn raw_os_error(&self) -> Option { - match self.repr { - Repr::Os(i) => Some(i), - Repr::Custom(..) => None, - } - } - - /// Returns a reference to the inner error wrapped by this error (if any). - /// - /// If this `Error` was constructed via `new` then this function will - /// return `Some`, otherwise it will return `None`. - #[cfg(feature="io_error_alloc")] - pub fn get_ref(&self) -> Option<&(error::Error+Send+Sync+'static)> { - match self.repr { - Repr::Os(..) => None, - Repr::Custom(ref c) => Some(&*c.error), - } - } - - /// Returns a mutable reference to the inner error wrapped by this error - /// (if any). - /// - /// If this `Error` was constructed via `new` then this function will - /// return `Some`, otherwise it will return `None`. - #[cfg(feature="io_error_alloc")] - pub fn get_mut(&mut self) -> Option<&mut (error::Error+Send+Sync+'static)> { - match self.repr { - Repr::Os(..) => None, - Repr::Custom(ref mut c) => Some(&mut *c.error), - } - } - - /// Consumes the `Error`, returning its inner error (if any). - /// - /// If this `Error` was constructed via `new` then this function will - /// return `Some`, otherwise it will return `None`. - #[cfg(feature="io_error_alloc")] - pub fn into_inner(self) -> Option> { - match self.repr { - Repr::Os(..) => None, - Repr::Custom(c) => Some(c.error) - } - } - - /// Returns the corresponding `ErrorKind` for this error. 
- pub fn kind(&self) -> ErrorKind { - match self.repr { - Repr::Os(_code) => ErrorKind::Other, - Repr::Custom(ref c) => c.kind, - } - } -} - -impl fmt::Debug for Repr { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - Repr::Os(ref code) => - fmt.debug_struct("Os").field("code", code).finish(), - Repr::Custom(ref c) => fmt.debug_tuple("Custom").field(c).finish(), - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match self.repr { - Repr::Os(code) => { - write!(fmt, "os error {}", code) - } - Repr::Custom(ref c) => c.error.fmt(fmt), - } - } -} - -impl error::Error for Error { - fn description(&self) -> &str { - match self.repr { - Repr::Os(..) => match self.kind() { - ErrorKind::NotFound => "entity not found", - ErrorKind::PermissionDenied => "permission denied", - ErrorKind::ConnectionRefused => "connection refused", - ErrorKind::ConnectionReset => "connection reset", - ErrorKind::ConnectionAborted => "connection aborted", - ErrorKind::NotConnected => "not connected", - ErrorKind::AddrInUse => "address in use", - ErrorKind::AddrNotAvailable => "address not available", - ErrorKind::BrokenPipe => "broken pipe", - ErrorKind::AlreadyExists => "entity already exists", - ErrorKind::WouldBlock => "operation would block", - ErrorKind::InvalidInput => "invalid input parameter", - ErrorKind::InvalidData => "invalid data", - ErrorKind::TimedOut => "timed out", - ErrorKind::WriteZero => "write zero", - ErrorKind::Interrupted => "operation interrupted", - ErrorKind::Other => "other os error", - ErrorKind::UnexpectedEof => "unexpected end of file", - ErrorKind::__Nonexhaustive => unreachable!() - }, - Repr::Custom(ref c) => { - #[cfg(feature="io_error_alloc")] - { c.error.description() } - #[cfg(not(feature="io_error_alloc"))] - { c.error } - }, - } - } - - fn cause(&self) -> Option<&error::Error> { - match self.repr { - Repr::Os(..) 
=> None, - Repr::Custom(ref _c) => { - #[cfg(feature="io_error_alloc")] - { _c.error.cause() } - #[cfg(not(feature="io_error_alloc"))] - { None } - } - } - } -} - -fn _assert_error_is_sync_send() { - fn _is_sync_send() {} - _is_sync_send::(); -} - -#[cfg(test)] -mod test { - use prelude::v1::*; - use super::{Error, ErrorKind}; - use error; - use fmt; - use sys::os::error_string; - - #[test] - fn test_debug_error() { - let code = 6; - let msg = error_string(code); - let err = Error { repr: super::Repr::Os(code) }; - let expected = format!("Error {{ repr: Os {{ code: {:?}, message: {:?} }} }}", code, msg); - assert_eq!(format!("{:?}", err), expected); - } - - #[test] - fn test_downcasting() { - #[derive(Debug)] - struct TestError; - - impl fmt::Display for TestError { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - Ok(()) - } - } - - impl error::Error for TestError { - fn description(&self) -> &str { - "asdf" - } - } - - // we have to call all of these UFCS style right now since method - // resolution won't implicitly drop the Send+Sync bounds - let mut err = Error::new(ErrorKind::Other, TestError); - assert!(err.get_ref().unwrap().is::()); - assert_eq!("asdf", err.get_ref().unwrap().description()); - assert!(err.get_mut().unwrap().is::()); - let extracted = err.into_inner().unwrap(); - extracted.downcast::().unwrap(); - } -} diff --git a/artiq/firmware/libstd_artiq/io/impls.rs b/artiq/firmware/libstd_artiq/io/impls.rs deleted file mode 100644 index 128e693ca..000000000 --- a/artiq/firmware/libstd_artiq/io/impls.rs +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
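The removed error module models an `io::Error` as one of two representations: a raw OS code, or a custom `ErrorKind` plus payload. A brief host-side sketch of how callers construct and inspect such errors, using `std::io::Error` (the same API surface; the messages and code are illustrative):

```
use std::io::{Error, ErrorKind};

fn main() {
    // Custom errors pair an ErrorKind with an arbitrary payload (here a &str).
    let not_ready = Error::new(ErrorKind::WouldBlock, "device not ready");
    assert_eq!(not_ready.kind(), ErrorKind::WouldBlock);
    assert!(not_ready.raw_os_error().is_none()); // not an OS error

    // OS errors carry only a code; kind() maps it to a category.
    let os_err = Error::from_raw_os_error(13);
    assert_eq!(os_err.raw_os_error(), Some(13));
    println!("custom: {}; os: {}", not_ready, os_err);
}
```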
- - use alloc::boxed::Box; -use core::cmp; -use io::{self, SeekFrom, Read, Write, Seek, Error, ErrorKind}; - use io::BufRead; -use core::fmt; -use core::mem; - use alloc::string::String; - use alloc::vec::Vec; - -// ============================================================================= -// Forwarding implementations - -impl<'a, R: Read + ?Sized> Read for &'a mut R { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - (**self).read(buf) - } - - - #[inline] - fn read_to_end(&mut self, buf: &mut Vec) -> io::Result { - (**self).read_to_end(buf) - } - - - #[inline] - fn read_to_string(&mut self, buf: &mut String) -> io::Result { - (**self).read_to_string(buf) - } - - #[inline] - fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - (**self).read_exact(buf) - } -} -impl<'a, W: Write + ?Sized> Write for &'a mut W { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { (**self).write(buf) } - - #[inline] - fn flush(&mut self) -> io::Result<()> { (**self).flush() } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - (**self).write_all(buf) - } - - #[inline] - fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> { - (**self).write_fmt(fmt) - } -} -impl<'a, S: Seek + ?Sized> Seek for &'a mut S { - #[inline] - fn seek(&mut self, pos: SeekFrom) -> io::Result { (**self).seek(pos) } -} - -impl<'a, B: BufRead + ?Sized> BufRead for &'a mut B { - #[inline] - fn fill_buf(&mut self) -> io::Result<&[u8]> { (**self).fill_buf() } - - #[inline] - fn consume(&mut self, amt: usize) { (**self).consume(amt) } - - #[inline] - fn read_until(&mut self, byte: u8, buf: &mut Vec) -> io::Result { - (**self).read_until(byte, buf) - } - - #[inline] - fn read_line(&mut self, buf: &mut String) -> io::Result { - (**self).read_line(buf) - } -} - - -impl Read for Box { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - (**self).read(buf) - } - - - #[inline] - fn read_to_end(&mut self, buf: &mut Vec) -> io::Result { - (**self).read_to_end(buf) - } - - - #[inline] - fn read_to_string(&mut self, buf: &mut String) -> io::Result { - (**self).read_to_string(buf) - } - - #[inline] - fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - (**self).read_exact(buf) - } -} - -impl Write for Box { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { (**self).write(buf) } - - #[inline] - fn flush(&mut self) -> io::Result<()> { (**self).flush() } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - (**self).write_all(buf) - } - - #[inline] - fn write_fmt(&mut self, fmt: fmt::Arguments) -> io::Result<()> { - (**self).write_fmt(fmt) - } -} - -impl Seek for Box { - #[inline] - fn seek(&mut self, pos: SeekFrom) -> io::Result { (**self).seek(pos) } -} - -impl BufRead for Box { - #[inline] - fn fill_buf(&mut self) -> io::Result<&[u8]> { (**self).fill_buf() } - - #[inline] - fn consume(&mut self, amt: usize) { (**self).consume(amt) } - - #[inline] - fn read_until(&mut self, byte: u8, buf: &mut Vec) -> io::Result { - (**self).read_until(byte, buf) - } - - #[inline] - fn read_line(&mut self, buf: &mut String) -> io::Result { - (**self).read_line(buf) - } -} - -// ============================================================================= -// In-memory buffer implementations - -impl<'a> Read for &'a [u8] { - #[inline] - fn read(&mut self, buf: &mut [u8]) -> io::Result { - let amt = cmp::min(buf.len(), self.len()); - let (a, b) = self.split_at(amt); - buf[..amt].copy_from_slice(a); - *self = b; - Ok(amt) - } - - #[inline] - fn 
read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> { - if buf.len() > self.len() { - return Err(Error::new(ErrorKind::UnexpectedEof, - "failed to fill whole buffer")); - } - let (a, b) = self.split_at(buf.len()); - buf.copy_from_slice(a); - *self = b; - Ok(()) - } -} - - -impl<'a> BufRead for &'a [u8] { - #[inline] - fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(*self) } - - #[inline] - fn consume(&mut self, amt: usize) { *self = &self[amt..]; } -} - -impl<'a> Write for &'a mut [u8] { - #[inline] - fn write(&mut self, data: &[u8]) -> io::Result { - let amt = cmp::min(data.len(), self.len()); - let (a, b) = mem::replace(self, &mut []).split_at_mut(amt); - a.copy_from_slice(&data[..amt]); - *self = b; - Ok(amt) - } - - #[inline] - fn write_all(&mut self, data: &[u8]) -> io::Result<()> { - if self.write(data)? == data.len() { - Ok(()) - } else { - Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer")) - } - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - - -impl Write for Vec { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - -#[cfg(test)] -mod tests { - use io::prelude::*; - use vec::Vec; - use test; - - #[bench] - fn bench_read_slice(b: &mut test::Bencher) { - let buf = [5; 1024]; - let mut dst = [0; 128]; - - b.iter(|| { - let mut rd = &buf[..]; - for _ in 0..8 { - let _ = rd.read(&mut dst); - test::black_box(&dst); - } - }) - } - - #[bench] - fn bench_write_slice(b: &mut test::Bencher) { - let mut buf = [0; 1024]; - let src = [5; 128]; - - b.iter(|| { - let mut wr = &mut buf[..]; - for _ in 0..8 { - let _ = wr.write_all(&src); - test::black_box(&wr); - } - }) - } - - #[bench] - fn bench_read_vec(b: &mut test::Bencher) { - let buf = vec![5; 1024]; - let mut dst = [0; 128]; - - b.iter(|| { - let mut rd = &buf[..]; - for _ in 0..8 { - let _ = rd.read(&mut dst); - test::black_box(&dst); - } - }) - } - - #[bench] - fn bench_write_vec(b: &mut test::Bencher) { - let mut buf = Vec::with_capacity(1024); - let src = [5; 128]; - - b.iter(|| { - let mut wr = &mut buf[..]; - for _ in 0..8 { - let _ = wr.write_all(&src); - test::black_box(&wr); - } - }) - } -} diff --git a/artiq/firmware/libstd_artiq/io/memchr.rs b/artiq/firmware/libstd_artiq/io/memchr.rs deleted file mode 100644 index 110cfac93..000000000 --- a/artiq/firmware/libstd_artiq/io/memchr.rs +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -// -// Original implementation taken from rust-memchr -// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch - -pub use self::fallback::{memchr,memrchr}; - -#[allow(dead_code)] -mod fallback { - use core::cmp; - use core::mem; - - const LO_U64: u64 = 0x0101010101010101; - const HI_U64: u64 = 0x8080808080808080; - - // use truncation - const LO_USIZE: usize = LO_U64 as usize; - const HI_USIZE: usize = HI_U64 as usize; - - /// Return `true` if `x` contains any zero byte. - /// - /// From *Matters Computational*, J. 
Arndt - /// - /// "The idea is to subtract one from each of the bytes and then look for - /// bytes where the borrow propagated all the way to the most significant - /// bit." - #[inline] - fn contains_zero_byte(x: usize) -> bool { - x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0 - } - - #[cfg(target_pointer_width = "32")] - #[inline] - fn repeat_byte(b: u8) -> usize { - let mut rep = (b as usize) << 8 | b as usize; - rep = rep << 16 | rep; - rep - } - - #[cfg(target_pointer_width = "64")] - #[inline] - fn repeat_byte(b: u8) -> usize { - let mut rep = (b as usize) << 8 | b as usize; - rep = rep << 16 | rep; - rep = rep << 32 | rep; - rep - } - - /// Return the first index matching the byte `a` in `text`. - pub fn memchr(x: u8, text: &[u8]) -> Option { - // Scan for a single byte value by reading two `usize` words at a time. - // - // Split `text` in three parts - // - unaligned initial part, before the first word aligned address in text - // - body, scan by 2 words at a time - // - the last remaining part, < 2 word size - let len = text.len(); - let ptr = text.as_ptr(); - let usize_bytes = mem::size_of::(); - - // search up to an aligned boundary - let align = (ptr as usize) & (usize_bytes- 1); - let mut offset; - if align > 0 { - offset = cmp::min(usize_bytes - align, len); - if let Some(index) = text[..offset].iter().position(|elt| *elt == x) { - return Some(index); - } - } else { - offset = 0; - } - - // search the body of the text - let repeated_x = repeat_byte(x); - - if len >= 2 * usize_bytes { - while offset <= len - 2 * usize_bytes { - unsafe { - let u = *(ptr.offset(offset as isize) as *const usize); - let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize); - - // break if there is a matching byte - let zu = contains_zero_byte(u ^ repeated_x); - let zv = contains_zero_byte(v ^ repeated_x); - if zu || zv { - break; - } - } - offset += usize_bytes * 2; - } - } - - // find the byte after the point the body loop stopped - text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) - } - - /// Return the last index matching the byte `a` in `text`. - pub fn memrchr(x: u8, text: &[u8]) -> Option { - // Scan for a single byte value by reading two `usize` words at a time. 
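The forward scan above relies on a word-at-a-time zero-byte test: XOR each word of text with the needle repeated into every byte, then apply the subtract-and-mask trick quoted from *Matters Computational* to see whether any byte became zero. A standalone sketch of just that trick (64-bit words assumed for brevity):

```
const LO: u64 = 0x0101_0101_0101_0101;
const HI: u64 = 0x8080_8080_8080_8080;

// Nonzero iff `x` contains a zero byte: subtracting 1 from every byte
// borrows into bit 7 exactly for the bytes that were zero.
fn contains_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    let needle = b'q';
    let repeated = u64::from_ne_bytes([needle; 8]);
    // XORing a word of text with the repeated needle zeroes matching bytes,
    // so the zero-byte test doubles as a "does this word contain b'q'" test.
    let with_match = u64::from_ne_bytes(*b"abcqdefg");
    let without = u64::from_ne_bytes(*b"abcdefgh");
    assert!(contains_zero_byte(with_match ^ repeated));
    assert!(!contains_zero_byte(without ^ repeated));
}
```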
- // - // Split `text` in three parts - // - unaligned tail, after the last word aligned address in text - // - body, scan by 2 words at a time - // - the first remaining bytes, < 2 word size - let len = text.len(); - let ptr = text.as_ptr(); - let usize_bytes = mem::size_of::(); - - // search to an aligned boundary - let end_align = (ptr as usize + len) & (usize_bytes - 1); - let mut offset; - if end_align > 0 { - offset = len - cmp::min(usize_bytes - end_align, len); - if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { - return Some(offset + index); - } - } else { - offset = len; - } - - // search the body of the text - let repeated_x = repeat_byte(x); - - while offset >= 2 * usize_bytes { - unsafe { - let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize); - let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize); - - // break if there is a matching byte - let zu = contains_zero_byte(u ^ repeated_x); - let zv = contains_zero_byte(v ^ repeated_x); - if zu || zv { - break; - } - } - offset -= 2 * usize_bytes; - } - - // find the byte before the point the body loop stopped - text[..offset].iter().rposition(|elt| *elt == x) - } - - // test fallback implementations on all plattforms - #[test] - fn matches_one() { - assert_eq!(Some(0), memchr(b'a', b"a")); - } - - #[test] - fn matches_begin() { - assert_eq!(Some(0), memchr(b'a', b"aaaa")); - } - - #[test] - fn matches_end() { - assert_eq!(Some(4), memchr(b'z', b"aaaaz")); - } - - #[test] - fn matches_nul() { - assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00")); - } - - #[test] - fn matches_past_nul() { - assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z")); - } - - #[test] - fn no_match_empty() { - assert_eq!(None, memchr(b'a', b"")); - } - - #[test] - fn no_match() { - assert_eq!(None, memchr(b'a', b"xyz")); - } - - #[test] - fn matches_one_reversed() { - assert_eq!(Some(0), memrchr(b'a', b"a")); - } - - #[test] - fn matches_begin_reversed() { - assert_eq!(Some(3), memrchr(b'a', b"aaaa")); - } - - #[test] - fn matches_end_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"zaaaa")); - } - - #[test] - fn matches_nul_reversed() { - assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00")); - } - - #[test] - fn matches_past_nul_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa")); - } - - #[test] - fn no_match_empty_reversed() { - assert_eq!(None, memrchr(b'a', b"")); - } - - #[test] - fn no_match_reversed() { - assert_eq!(None, memrchr(b'a', b"xyz")); - } -} - -#[cfg(test)] -mod tests { - // test the implementations for the current plattform - use super::{memchr, memrchr}; - - #[test] - fn matches_one() { - assert_eq!(Some(0), memchr(b'a', b"a")); - } - - #[test] - fn matches_begin() { - assert_eq!(Some(0), memchr(b'a', b"aaaa")); - } - - #[test] - fn matches_end() { - assert_eq!(Some(4), memchr(b'z', b"aaaaz")); - } - - #[test] - fn matches_nul() { - assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00")); - } - - #[test] - fn matches_past_nul() { - assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z")); - } - - #[test] - fn no_match_empty() { - assert_eq!(None, memchr(b'a', b"")); - } - - #[test] - fn no_match() { - assert_eq!(None, memchr(b'a', b"xyz")); - } - - #[test] - fn matches_one_reversed() { - assert_eq!(Some(0), memrchr(b'a', b"a")); - } - - #[test] - fn matches_begin_reversed() { - assert_eq!(Some(3), memrchr(b'a', b"aaaa")); - } - - #[test] - fn matches_end_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"zaaaa")); - } - - #[test] - fn matches_nul_reversed() { - 
assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00")); - } - - #[test] - fn matches_past_nul_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa")); - } - - #[test] - fn no_match_empty_reversed() { - assert_eq!(None, memrchr(b'a', b"")); - } - - #[test] - fn no_match_reversed() { - assert_eq!(None, memrchr(b'a', b"xyz")); - } -} diff --git a/artiq/firmware/libstd_artiq/io/mod.rs b/artiq/firmware/libstd_artiq/io/mod.rs deleted file mode 100644 index 6722b7485..000000000 --- a/artiq/firmware/libstd_artiq/io/mod.rs +++ /dev/null @@ -1,1844 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Traits, helpers, and type definitions for core I/O functionality. -//! -//! The `std::io` module contains a number of common things you'll need -//! when doing input and output. The most core part of this module is -//! the [`Read`][read] and [`Write`][write] traits, which provide the -//! most general interface for reading and writing input and output. -//! -//! [read]: trait.Read.html -//! [write]: trait.Write.html -//! -//! # Read and Write -//! -//! Because they are traits, `Read` and `Write` are implemented by a number -//! of other types, and you can implement them for your types too. As such, -//! you'll see a few different types of I/O throughout the documentation in -//! this module: `File`s, `TcpStream`s, and sometimes even `Vec`s. For -//! example, `Read` adds a `read()` method, which we can use on `File`s: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! use std::fs::File; -//! -//! # fn foo() -> io::Result<()> { -//! let mut f = try!(File::open("foo.txt")); -//! let mut buffer = [0; 10]; -//! -//! // read up to 10 bytes -//! try!(f.read(&mut buffer)); -//! -//! println!("The bytes: {:?}", buffer); -//! # Ok(()) -//! # } -//! ``` -//! -//! `Read` and `Write` are so important, implementors of the two traits have a -//! nickname: readers and writers. So you'll sometimes see 'a reader' instead -//! of 'a type that implements the `Read` trait'. Much easier! -//! -//! ## Seek and BufRead -//! -//! Beyond that, there are two important traits that are provided: [`Seek`][seek] -//! and [`BufRead`][bufread]. Both of these build on top of a reader to control -//! how the reading happens. `Seek` lets you control where the next byte is -//! coming from: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! use std::io::SeekFrom; -//! use std::fs::File; -//! -//! # fn foo() -> io::Result<()> { -//! let mut f = try!(File::open("foo.txt")); -//! let mut buffer = [0; 10]; -//! -//! // skip to the last 10 bytes of the file -//! try!(f.seek(SeekFrom::End(-10))); -//! -//! // read up to 10 bytes -//! try!(f.read(&mut buffer)); -//! -//! println!("The bytes: {:?}", buffer); -//! # Ok(()) -//! # } -//! ``` -//! -//! [seek]: trait.Seek.html -//! [bufread]: trait.BufRead.html -//! -//! `BufRead` uses an internal buffer to provide a number of other ways to read, but -//! to show it off, we'll need to talk about buffers in general. Keep reading! -//! -//! ## BufReader and BufWriter -//! -//! Byte-based interfaces are unwieldy and can be inefficient, as we'd need to be -//! making near-constant calls to the operating system. To help with this, -//! 
`std::io` comes with two structs, `BufReader` and `BufWriter`, which wrap -//! readers and writers. The wrapper uses a buffer, reducing the number of -//! calls and providing nicer methods for accessing exactly what you want. -//! -//! For example, `BufReader` works with the `BufRead` trait to add extra -//! methods to any reader: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! use std::io::BufReader; -//! use std::fs::File; -//! -//! # fn foo() -> io::Result<()> { -//! let f = try!(File::open("foo.txt")); -//! let mut reader = BufReader::new(f); -//! let mut buffer = String::new(); -//! -//! // read a line into buffer -//! try!(reader.read_line(&mut buffer)); -//! -//! println!("{}", buffer); -//! # Ok(()) -//! # } -//! ``` -//! -//! `BufWriter` doesn't add any new ways of writing; it just buffers every call -//! to [`write()`][write()]: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! use std::io::BufWriter; -//! use std::fs::File; -//! -//! # fn foo() -> io::Result<()> { -//! let f = try!(File::create("foo.txt")); -//! { -//! let mut writer = BufWriter::new(f); -//! -//! // write a byte to the buffer -//! try!(writer.write(&[42])); -//! -//! } // the buffer is flushed once writer goes out of scope -//! -//! # Ok(()) -//! # } -//! ``` -//! -//! [write()]: trait.Write.html#tymethod.write -//! -//! ## Standard input and output -//! -//! A very common source of input is standard input: -//! -//! ``` -//! use std::io; -//! -//! # fn foo() -> io::Result<()> { -//! let mut input = String::new(); -//! -//! try!(io::stdin().read_line(&mut input)); -//! -//! println!("You typed: {}", input.trim()); -//! # Ok(()) -//! # } -//! ``` -//! -//! And a very common source of output is standard output: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! -//! # fn foo() -> io::Result<()> { -//! try!(io::stdout().write(&[42])); -//! # Ok(()) -//! # } -//! ``` -//! -//! Of course, using `io::stdout()` directly is less common than something like -//! `println!`. -//! -//! ## Iterator types -//! -//! A large number of the structures provided by `std::io` are for various -//! ways of iterating over I/O. For example, `Lines` is used to split over -//! lines: -//! -//! ``` -//! use std::io; -//! use std::io::prelude::*; -//! use std::io::BufReader; -//! use std::fs::File; -//! -//! # fn foo() -> io::Result<()> { -//! let f = try!(File::open("foo.txt")); -//! let reader = BufReader::new(f); -//! -//! for line in reader.lines() { -//! println!("{}", try!(line)); -//! } -//! -//! # Ok(()) -//! # } -//! ``` -//! -//! ## Functions -//! -//! There are a number of [functions][functions-list] that offer access to various -//! features. For example, we can use three of these functions to copy everything -//! from standard input to standard output: -//! -//! ``` -//! use std::io; -//! -//! # fn foo() -> io::Result<()> { -//! try!(io::copy(&mut io::stdin(), &mut io::stdout())); -//! # Ok(()) -//! # } -//! ``` -//! -//! [functions-list]: #functions-1 -//! -//! ## io::Result -//! -//! Last, but certainly not least, is [`io::Result`][result]. This type is used -//! as the return type of many `std::io` functions that can cause an error, and -//! can be returned from your own functions as well. Many of the examples in this -//! module use the [`try!`][try] macro: -//! -//! ``` -//! use std::io; -//! -//! fn read_input() -> io::Result<()> { -//! let mut input = String::new(); -//! -//! try!(io::stdin().read_line(&mut input)); -//! -//! 
println!("You typed: {}", input.trim()); -//! -//! Ok(()) -//! } -//! ``` -//! -//! The return type of `read_input()`, `io::Result<()>`, is a very common type -//! for functions which don't have a 'real' return value, but do want to return -//! errors if they happen. In this case, the only purpose of this function is -//! to read the line and print it, so we use `()`. -//! -//! [result]: type.Result.html -//! [try]: ../macro.try!.html -//! -//! ## Platform-specific behavior -//! -//! Many I/O functions throughout the standard library are documented to indicate -//! what various library or syscalls they are delegated to. This is done to help -//! applications both understand what's happening under the hood as well as investigate -//! any possibly unclear semantics. Note, however, that this is informative, not a binding -//! contract. The implementation of many of these functions are subject to change over -//! time and may call fewer or more syscalls/library functions. - -use core::cmp; -use core::fmt; -use core::iter::{Iterator}; -use core::marker::Sized; - use core::ops::{Drop, FnOnce}; -use core::option::Option::{self, Some, None}; -use core::result::Result::{Ok, Err}; -use core::result; - use alloc::string::String; - use alloc::vec::Vec; - use alloc::str; -mod memchr; - - pub use self::buffered::{BufReader, BufWriter, LineWriter}; - pub use self::buffered::IntoInnerError; - pub use self::cursor::Cursor; -pub use self::error::{Result, Error, ErrorKind}; -pub use self::util::{copy, sink, Sink, empty, Empty, repeat, Repeat}; - -pub mod prelude; - mod buffered; - mod cursor; -mod error; -mod impls; -mod util; - -const DEFAULT_BUF_SIZE: usize = 8 * 1024; - -// A few methods below (read_to_string, read_line) will append data into a -// `String` buffer, but we need to be pretty careful when doing this. The -// implementation will just call `.as_mut_vec()` and then delegate to a -// byte-oriented reading method, but we must ensure that when returning we never -// leave `buf` in a state such that it contains invalid UTF-8 in its bounds. -// -// To this end, we use an RAII guard (to protect against panics) which updates -// the length of the string when it is dropped. This guard initially truncates -// the string to the prior length and only after we've validated that the -// new contents are valid UTF-8 do we allow it to set a longer length. -// -// The unsafety in this function is twofold: -// -// 1. We're looking at the raw bytes of `buf`, so we take on the burden of UTF-8 -// checks. -// 2. We're passing a raw buffer to the function `f`, and it is expected that -// the function only *appends* bytes to the buffer. We'll get undefined -// behavior if existing bytes are overwritten to have non-UTF-8 data. - -fn append_to_string(buf: &mut String, f: F) -> Result - where F: FnOnce(&mut Vec) -> Result -{ - struct Guard<'a> { s: &'a mut Vec, len: usize } - impl<'a> Drop for Guard<'a> { - fn drop(&mut self) { - unsafe { self.s.set_len(self.len); } - } - } - - unsafe { - let mut g = Guard { len: buf.len(), s: buf.as_mut_vec() }; - let ret = f(g.s); - if str::from_utf8(&g.s[g.len..]).is_err() { - ret.and_then(|_| { - Err(Error::new(ErrorKind::InvalidData, - "stream did not contain valid UTF-8")) - }) - } else { - g.len = g.s.len(); - ret - } - } -} - -// This uses an adaptive system to extend the vector when it fills. 
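The `append_to_string` helper above reads raw bytes into the `String`'s backing buffer and only afterwards decides whether the appended region is valid UTF-8, rolling the length back if it is not. A safe, allocation-heavy restatement of that behaviour (the zero-copy RAII guard in the removed code is the optimized form; the helper name here is illustrative only):

```
use std::io::{Error, ErrorKind, Read, Result};
use std::str;

// Read raw bytes, validate only the newly appended region as UTF-8,
// and leave `buf` untouched on failure.
fn append_to_string<R: Read>(r: &mut R, buf: &mut String) -> Result<usize> {
    let mut bytes = Vec::new();
    let n = r.read_to_end(&mut bytes)?;
    match str::from_utf8(&bytes) {
        Ok(s) => {
            buf.push_str(s);
            Ok(n)
        }
        Err(_) => Err(Error::new(ErrorKind::InvalidData,
                                 "stream did not contain valid UTF-8")),
    }
}

fn main() -> Result<()> {
    let mut buf = String::from("prefix: ");
    let mut ok: &[u8] = "héllo".as_bytes();
    append_to_string(&mut ok, &mut buf)?;
    assert_eq!(buf, "prefix: héllo");

    let mut bad: &[u8] = &[0x80]; // lone continuation byte, invalid UTF-8
    assert!(append_to_string(&mut bad, &mut buf).is_err());
    assert_eq!(buf, "prefix: héllo"); // buffer left unchanged on failure
    Ok(())
}
```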
We want to -// avoid paying to allocate and zero a huge chunk of memory if the reader only -// has 4 bytes while still making large reads if the reader does have a ton -// of data to return. Simply tacking on an extra DEFAULT_BUF_SIZE space every -// time is 4,500 times (!) slower than this if the reader has a very small -// amount of data to return. - -fn read_to_end(r: &mut R, buf: &mut Vec) -> Result { - let start_len = buf.len(); - let mut len = start_len; - let mut new_write_size = 16; - let ret; - loop { - if len == buf.len() { - if new_write_size < DEFAULT_BUF_SIZE { - new_write_size *= 2; - } - buf.resize(len + new_write_size, 0); - } - - match r.read(&mut buf[len..]) { - Ok(0) => { - ret = Ok(len - start_len); - break; - } - Ok(n) => len += n, - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - Err(e) => { - ret = Err(e); - break; - } - } - } - - buf.truncate(len); - ret -} - -/// The `Read` trait allows for reading bytes from a source. -/// -/// Implementors of the `Read` trait are sometimes called 'readers'. -/// -/// Readers are defined by one required method, `read()`. Each call to `read` -/// will attempt to pull bytes from this source into a provided buffer. A -/// number of other methods are implemented in terms of `read()`, giving -/// implementors a number of ways to read bytes while only needing to implement -/// a single method. -/// -/// Readers are intended to be composable with one another. Many implementors -/// throughout `std::io` take and provide types which implement the `Read` -/// trait. -/// -/// Please note that each call to `read` may involve a system call, and -/// therefore, using something that implements [`BufRead`][bufread], such as -/// [`BufReader`][bufreader], will be more efficient. -/// -/// [bufread]: trait.BufRead.html -/// [bufreader]: struct.BufReader.html -/// -/// # Examples -/// -/// [`File`][file]s implement `Read`: -/// -/// [file]: ../fs/struct.File.html -/// -/// ``` -/// use std::io; -/// use std::io::prelude::*; -/// use std::fs::File; -/// -/// # fn foo() -> io::Result<()> { -/// let mut f = try!(File::open("foo.txt")); -/// let mut buffer = [0; 10]; -/// -/// // read up to 10 bytes -/// try!(f.read(&mut buffer)); -/// -/// let mut buffer = vec![0; 10]; -/// // read the whole file -/// try!(f.read_to_end(&mut buffer)); -/// -/// // read into a String, so that you don't need to do the conversion. -/// let mut buffer = String::new(); -/// try!(f.read_to_string(&mut buffer)); -/// -/// // and more! See the other methods for more details. -/// # Ok(()) -/// # } -/// ``` -pub trait Read { - /// Pull some bytes from this source into the specified buffer, returning - /// how many bytes were read. - /// - /// This function does not provide any guarantees about whether it blocks - /// waiting for data, but if an object needs to block for a read but cannot - /// it will typically signal this via an `Err` return value. - /// - /// If the return value of this method is `Ok(n)`, then it must be - /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates - /// that the buffer `buf` has been filled in with `n` bytes of data from this - /// source. If `n` is `0`, then it can indicate one of two scenarios: - /// - /// 1. This reader has reached its "end of file" and will likely no longer - /// be able to produce bytes. Note that this does not mean that the - /// reader will *always* no longer be able to produce bytes. - /// 2. The buffer specified was 0 bytes in length. 
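The removed `read_to_end` grows its scratch space adaptively, doubling the chunk size up to `DEFAULT_BUF_SIZE` so that tiny readers stay cheap while large readers still get big `read` calls. A simplified sketch of the same strategy (the helper name and the per-iteration truncate are illustrative, not the removed implementation):

```
use std::io::{ErrorKind, Read, Result};

// Grow the buffer by a doubling chunk size, capped at `cap`, until EOF.
fn read_to_end_adaptive<R: Read>(r: &mut R, buf: &mut Vec<u8>, cap: usize) -> Result<usize> {
    let start = buf.len();
    let mut chunk = 16;
    loop {
        let len = buf.len();
        buf.resize(len + chunk, 0);          // make room for the next read
        match r.read(&mut buf[len..]) {
            Ok(0) => {
                buf.truncate(len);           // drop the unused tail
                return Ok(len - start);
            }
            Ok(n) => {
                buf.truncate(len + n);       // keep only what was actually read
                if chunk < cap {
                    chunk *= 2;              // next read may be larger
                }
            }
            Err(ref e) if e.kind() == ErrorKind::Interrupted => buf.truncate(len),
            Err(e) => {
                buf.truncate(len);
                return Err(e);
            }
        }
    }
}

fn main() -> Result<()> {
    let mut src: &[u8] = b"some bytes from an in-memory reader";
    let mut out = Vec::new();
    let n = read_to_end_adaptive(&mut src, &mut out, 8 * 1024)?;
    assert_eq!(n, out.len());
    assert_eq!(out, b"some bytes from an in-memory reader");
    Ok(())
}
```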
- /// - /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the - /// contents of `buf` being true. It is recommended that implementations - /// only write data to `buf` instead of reading its contents. - /// - /// # Errors - /// - /// If this function encounters any form of I/O or other error, an error - /// variant will be returned. If an error is returned then it must be - /// guaranteed that no bytes were read. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = [0; 10]; - /// - /// // read 10 bytes - /// try!(f.read(&mut buffer[..])); - /// # Ok(()) - /// # } - /// ``` - fn read(&mut self, buf: &mut [u8]) -> Result; - - /// Read all bytes until EOF in this source, placing them into `buf`. - /// - /// All bytes read from this source will be appended to the specified buffer - /// `buf`. This function will continuously call `read` to append more data to - /// `buf` until `read` returns either `Ok(0)` or an error of - /// non-`ErrorKind::Interrupted` kind. - /// - /// If successful, this function will return the total number of bytes read. - /// - /// # Errors - /// - /// If this function encounters an error of the kind - /// `ErrorKind::Interrupted` then the error is ignored and the operation - /// will continue. - /// - /// If any other read error is encountered then this function immediately - /// returns. Any bytes which have already been read will be appended to - /// `buf`. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = Vec::new(); - /// - /// // read the whole file - /// try!(f.read_to_end(&mut buffer)); - /// # Ok(()) - /// # } - /// ``` - - fn read_to_end(&mut self, buf: &mut Vec) -> Result { - read_to_end(self, buf) - } - - /// Read all bytes until EOF in this source, placing them into `buf`. - /// - /// If successful, this function returns the number of bytes which were read - /// and appended to `buf`. - /// - /// # Errors - /// - /// If the data in this stream is *not* valid UTF-8 then an error is - /// returned and `buf` is unchanged. - /// - /// See [`read_to_end()`][readtoend] for other error semantics. - /// - /// [readtoend]: #method.read_to_end - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = String::new(); - /// - /// try!(f.read_to_string(&mut buffer)); - /// # Ok(()) - /// # } - /// ``` - - fn read_to_string(&mut self, buf: &mut String) -> Result { - // Note that we do *not* call `.read_to_end()` here. We are passing - // `&mut Vec` (the raw contents of `buf`) into the `read_to_end` - // method to fill it up. An arbitrary implementation could overwrite the - // entire contents of the vector, not just append to it (which is what - // we are expecting). 
- // - // To prevent extraneously checking the UTF-8-ness of the entire buffer - // we pass it to our hardcoded `read_to_end` implementation which we - // know is guaranteed to only read data into the end of the buffer. - append_to_string(buf, |b| read_to_end(self, b)) - } - - /// Read the exact number of bytes required to fill `buf`. - /// - /// This function reads as many bytes as necessary to completely fill the - /// specified buffer `buf`. - /// - /// No guarantees are provided about the contents of `buf` when this - /// function is called, implementations cannot rely on any property of the - /// contents of `buf` being true. It is recommended that implementations - /// only write data to `buf` instead of reading its contents. - /// - /// # Errors - /// - /// If this function encounters an error of the kind - /// `ErrorKind::Interrupted` then the error is ignored and the operation - /// will continue. - /// - /// If this function encounters an "end of file" before completely filling - /// the buffer, it returns an error of the kind `ErrorKind::UnexpectedEof`. - /// The contents of `buf` are unspecified in this case. - /// - /// If any other read error is encountered then this function immediately - /// returns. The contents of `buf` are unspecified in this case. - /// - /// If this function returns an error, it is unspecified how many bytes it - /// has read, but it will never read more than would be necessary to - /// completely fill the buffer. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = [0; 10]; - /// - /// // read exactly 10 bytes - /// try!(f.read_exact(&mut buffer)); - /// # Ok(()) - /// # } - /// ``` - fn read_exact(&mut self, mut buf: &mut [u8]) -> Result<()> { - while !buf.is_empty() { - match self.read(buf) { - Ok(0) => break, - Ok(n) => { let tmp = buf; buf = &mut tmp[n..]; } - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - Err(e) => return Err(e), - } - } - if !buf.is_empty() { - Err(Error::new(ErrorKind::UnexpectedEof, - "failed to fill whole buffer")) - } else { - Ok(()) - } - } - - /// Creates a "by reference" adaptor for this instance of `Read`. - /// - /// The returned adaptor also implements `Read` and will simply borrow this - /// current reader. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::Read; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = Vec::new(); - /// let mut other_buffer = Vec::new(); - /// - /// { - /// let reference = f.by_ref(); - /// - /// // read at most 5 bytes - /// try!(reference.take(5).read_to_end(&mut buffer)); - /// - /// } // drop our &mut reference so we can use f again - /// - /// // original file still usable, read the rest - /// try!(f.read_to_end(&mut other_buffer)); - /// # Ok(()) - /// # } - /// ``` - fn by_ref(&mut self) -> &mut Self where Self: Sized { self } - - /// Transforms this `Read` instance to an `Iterator` over its bytes. - /// - /// The returned type implements `Iterator` where the `Item` is `Result`. The yielded item is `Ok` if a byte was successfully read and - /// `Err` otherwise for I/O errors. 
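`read_exact` either fills the whole buffer or fails with `ErrorKind::UnexpectedEof`. A short demonstration against `std::io`, whose slice reader behaves the same way as the default implementation above:

```rust
use std::io::{ErrorKind, Read};

fn main() {
    // Success: exactly enough bytes are available.
    let mut src: &[u8] = b"abcd";
    let mut buf = [0u8; 4];
    src.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"abcd");

    // Failure: the source runs dry first and the error kind is UnexpectedEof.
    let mut short: &[u8] = b"ab";
    let err = short.read_exact(&mut buf).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::UnexpectedEof);
}
```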
EOF is mapped to returning `None` from - /// this iterator. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// - /// for byte in f.bytes() { - /// println!("{}", byte.unwrap()); - /// } - /// # Ok(()) - /// # } - /// ``` - fn bytes(self) -> Bytes where Self: Sized { - Bytes { inner: self } - } - - /// Transforms this `Read` instance to an `Iterator` over `char`s. - /// - /// This adaptor will attempt to interpret this reader as a UTF-8 encoded - /// sequence of characters. The returned iterator will return `None` once - /// EOF is reached for this reader. Otherwise each element yielded will be a - /// `Result` where `E` may contain information about what I/O error - /// occurred or where decoding failed. - /// - /// Currently this adaptor will discard intermediate data read, and should - /// be avoided if this is not desired. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// #![feature(io)] - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// - /// for c in f.chars() { - /// println!("{}", c.unwrap()); - /// } - /// # Ok(()) - /// # } - /// ``` - fn chars(self) -> Chars where Self: Sized { - Chars { inner: self } - } - - /// Creates an adaptor which will chain this stream with another. - /// - /// The returned `Read` instance will first read all bytes from this object - /// until EOF is encountered. Afterwards the output is equivalent to the - /// output of `next`. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f1 = try!(File::open("foo.txt")); - /// let mut f2 = try!(File::open("bar.txt")); - /// - /// let mut handle = f1.chain(f2); - /// let mut buffer = String::new(); - /// - /// // read the value into a String. We could use any Read method here, - /// // this is just one example. - /// try!(handle.read_to_string(&mut buffer)); - /// # Ok(()) - /// # } - /// ``` - fn chain(self, next: R) -> Chain where Self: Sized { - Chain { first: self, second: next, done_first: false } - } - - /// Creates an adaptor which will read at most `limit` bytes from it. - /// - /// This function returns a new instance of `Read` which will read at most - /// `limit` bytes, after which it will always return EOF (`Ok(0)`). Any - /// read errors will not count towards the number of bytes read and future - /// calls to `read` may succeed. - /// - /// # Examples - /// - /// [`File`][file]s implement `Read`: - /// - /// [file]: ../fs/struct.File.html - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> io::Result<()> { - /// let mut f = try!(File::open("foo.txt")); - /// let mut buffer = [0; 5]; - /// - /// // read at most five bytes - /// let mut handle = f.take(5); - /// - /// try!(handle.read(&mut buffer)); - /// # Ok(()) - /// # } - /// ``` - fn take(self, limit: u64) -> Take where Self: Sized { - Take { inner: self, limit: limit } - } -} - -/// A trait for objects which are byte-oriented sinks. 
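`chain` and `take` compose naturally: the chained reader is exhausted in order, and the `take` wrapper caps the total number of bytes. A small illustration using byte slices under `std::io`:

```rust
use std::io::Read;

fn main() {
    let first: &[u8] = b"hello ";
    let second: &[u8] = b"world";

    // `chain` reads the first source to EOF, then the second;
    // `take` caps the combined stream at 8 bytes.
    let mut combined = first.chain(second).take(8);

    let mut out = String::new();
    combined.read_to_string(&mut out).unwrap();
    assert_eq!(out, "hello wo");
}
```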
-/// -/// Implementors of the `Write` trait are sometimes called 'writers'. -/// -/// Writers are defined by two required methods, `write()` and `flush()`: -/// -/// * The `write()` method will attempt to write some data into the object, -/// returning how many bytes were successfully written. -/// -/// * The `flush()` method is useful for adaptors and explicit buffers -/// themselves for ensuring that all buffered data has been pushed out to the -/// 'true sink'. -/// -/// Writers are intended to be composable with one another. Many implementors -/// throughout `std::io` take and provide types which implement the `Write` -/// trait. -/// -/// # Examples -/// -/// ``` -/// use std::io::prelude::*; -/// use std::fs::File; -/// -/// # fn foo() -> std::io::Result<()> { -/// let mut buffer = try!(File::create("foo.txt")); -/// -/// try!(buffer.write(b"some bytes")); -/// # Ok(()) -/// # } -/// ``` -pub trait Write { - /// Write a buffer into this object, returning how many bytes were written. - /// - /// This function will attempt to write the entire contents of `buf`, but - /// the entire write may not succeed, or the write may also generate an - /// error. A call to `write` represents *at most one* attempt to write to - /// any wrapped object. - /// - /// Calls to `write` are not guaranteed to block waiting for data to be - /// written, and a write which would otherwise block can be indicated through - /// an `Err` variant. - /// - /// If the return value is `Ok(n)` then it must be guaranteed that - /// `0 <= n <= buf.len()`. A return value of `0` typically means that the - /// underlying object is no longer able to accept bytes and will likely not - /// be able to in the future as well, or that the buffer provided is empty. - /// - /// # Errors - /// - /// Each call to `write` may generate an I/O error indicating that the - /// operation could not be completed. If an error is returned then no bytes - /// in the buffer were written to this writer. - /// - /// It is **not** considered an error if the entire buffer could not be - /// written to this writer. - /// - /// # Examples - /// - /// ``` - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut buffer = try!(File::create("foo.txt")); - /// - /// try!(buffer.write(b"some bytes")); - /// # Ok(()) - /// # } - /// ``` - fn write(&mut self, buf: &[u8]) -> Result; - - /// Flush this output stream, ensuring that all intermediately buffered - /// contents reach their destination. - /// - /// # Errors - /// - /// It is considered an error if not all bytes could be written due to - /// I/O errors or EOF being reached. - /// - /// # Examples - /// - /// ``` - /// use std::io::prelude::*; - /// use std::io::BufWriter; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut buffer = BufWriter::new(try!(File::create("foo.txt"))); - /// - /// try!(buffer.write(b"some bytes")); - /// try!(buffer.flush()); - /// # Ok(()) - /// # } - /// ``` - fn flush(&mut self) -> Result<()>; - - /// Attempts to write an entire buffer into this write. - /// - /// This method will continuously call `write` while there is more data to - /// write. This method will not return until the entire buffer has been - /// successfully written or an error occurs. The first error generated from - /// this method will be returned. - /// - /// # Errors - /// - /// This function will return the first error that `write` returns. 
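A `Write` implementor only has to provide `write` and `flush`; `write_all` and `write_fmt` then come from the trait. A hypothetical counting sink, shown against `std::io`, makes that division of labor concrete:

```rust
use std::io::{self, Write};

/// Hypothetical writer that discards data but remembers how much it saw.
struct CountingSink {
    written: usize,
}

impl Write for CountingSink {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // Accept the whole buffer; a real device might accept less.
        self.written += buf.len();
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        // Nothing is buffered, so flushing is a no-op.
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let mut sink = CountingSink { written: 0 };
    // `write_all` and `write!` (via `write_fmt`) come for free.
    sink.write_all(b"some bytes")?;
    write!(sink, "{}-{}", 1, 2)?;
    assert_eq!(sink.written, 13);
    Ok(())
}
```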
- /// - /// # Examples - /// - /// ``` - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut buffer = try!(File::create("foo.txt")); - /// - /// try!(buffer.write_all(b"some bytes")); - /// # Ok(()) - /// # } - /// ``` - fn write_all(&mut self, mut buf: &[u8]) -> Result<()> { - while !buf.is_empty() { - match self.write(buf) { - Ok(0) => return Err(Error::new(ErrorKind::WriteZero, - "failed to write whole buffer")), - Ok(n) => buf = &buf[n..], - Err(ref e) if e.kind() == ErrorKind::Interrupted => {} - Err(e) => return Err(e), - } - } - Ok(()) - } - - /// Writes a formatted string into this writer, returning any error - /// encountered. - /// - /// This method is primarily used to interface with the - /// [`format_args!`][formatargs] macro, but it is rare that this should - /// explicitly be called. The [`write!`][write] macro should be favored to - /// invoke this method instead. - /// - /// [formatargs]: ../macro.format_args!.html - /// [write]: ../macro.write!.html - /// - /// This function internally uses the [`write_all`][writeall] method on - /// this trait and hence will continuously write data so long as no errors - /// are received. This also means that partial writes are not indicated in - /// this signature. - /// - /// [writeall]: #method.write_all - /// - /// # Errors - /// - /// This function will return any I/O error reported while formatting. - /// - /// # Examples - /// - /// ``` - /// use std::io::prelude::*; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut buffer = try!(File::create("foo.txt")); - /// - /// // this call - /// try!(write!(buffer, "{:.*}", 2, 1.234567)); - /// // turns into this: - /// try!(buffer.write_fmt(format_args!("{:.*}", 2, 1.234567))); - /// # Ok(()) - /// # } - /// ``` - fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<()> { - // Create a shim which translates a Write to a fmt::Write and saves - // off I/O errors. instead of discarding them - struct Adaptor<'a, T: ?Sized + 'a> { - inner: &'a mut T, - error: Result<()>, - } - - impl<'a, T: Write + ?Sized> fmt::Write for Adaptor<'a, T> { - fn write_str(&mut self, s: &str) -> fmt::Result { - match self.inner.write_all(s.as_bytes()) { - Ok(()) => Ok(()), - Err(e) => { - self.error = Err(e); - Err(fmt::Error) - } - } - } - } - - let mut output = Adaptor { inner: self, error: Ok(()) }; - match fmt::write(&mut output, fmt) { - Ok(()) => Ok(()), - Err(..) => { - // check if the error came from the underlying `Write` or not - if output.error.is_err() { - output.error - } else { - Err(Error::new(ErrorKind::Other, "formatter error")) - } - } - } - } - - /// Creates a "by reference" adaptor for this instance of `Write`. - /// - /// The returned adaptor also implements `Write` and will simply borrow this - /// current writer. - /// - /// # Examples - /// - /// ``` - /// use std::io::Write; - /// use std::fs::File; - /// - /// # fn foo() -> std::io::Result<()> { - /// let mut buffer = try!(File::create("foo.txt")); - /// - /// let reference = buffer.by_ref(); - /// - /// // we can use reference just like our original buffer - /// try!(reference.write_all(b"some bytes")); - /// # Ok(()) - /// # } - /// ``` - fn by_ref(&mut self) -> &mut Self where Self: Sized { self } -} - -/// The `Seek` trait provides a cursor which can be moved within a stream of -/// bytes. -/// -/// The stream typically has a fixed size, allowing seeking relative to either -/// end or the current offset. 
-/// -/// # Examples -/// -/// [`File`][file]s implement `Seek`: -/// -/// [file]: ../fs/struct.File.html -/// -/// ``` -/// use std::io; -/// use std::io::prelude::*; -/// use std::fs::File; -/// use std::io::SeekFrom; -/// -/// # fn foo() -> io::Result<()> { -/// let mut f = try!(File::open("foo.txt")); -/// -/// // move the cursor 42 bytes from the start of the file -/// try!(f.seek(SeekFrom::Start(42))); -/// # Ok(()) -/// # } -/// ``` -pub trait Seek { - /// Seek to an offset, in bytes, in a stream. - /// - /// A seek beyond the end of a stream is allowed, but implementation - /// defined. - /// - /// If the seek operation completed successfully, - /// this method returns the new position from the start of the stream. - /// That position can be used later with `SeekFrom::Start`. - /// - /// # Errors - /// - /// Seeking to a negative offset is considered an error. - fn seek(&mut self, pos: SeekFrom) -> Result; -} - -/// Enumeration of possible methods to seek within an I/O object. -#[derive(Copy, PartialEq, Eq, Clone, Debug)] -pub enum SeekFrom { - /// Set the offset to the provided number of bytes. - Start(u64), - - /// Set the offset to the size of this object plus the specified number of - /// bytes. - /// - /// It is possible to seek beyond the end of an object, but it's an error to - /// seek before byte 0. - End(i64), - - /// Set the offset to the current position plus the specified number of - /// bytes. - /// - /// It is possible to seek beyond the end of an object, but it's an error to - /// seek before byte 0. - Current(i64), -} - - -fn read_until(r: &mut R, delim: u8, buf: &mut Vec) - -> Result { - let mut read = 0; - loop { - let (done, used) = { - let available = match r.fill_buf() { - Ok(n) => n, - Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, - Err(e) => return Err(e) - }; - match memchr::memchr(delim, available) { - Some(i) => { - buf.extend_from_slice(&available[..i + 1]); - (true, i + 1) - } - None => { - buf.extend_from_slice(available); - (false, available.len()) - } - } - }; - r.consume(used); - read += used; - if done || used == 0 { - return Ok(read); - } - } -} - -/// A `BufRead` is a type of `Read`er which has an internal buffer, allowing it -/// to perform extra ways of reading. -/// -/// For example, reading line-by-line is inefficient without using a buffer, so -/// if you want to read by line, you'll need `BufRead`, which includes a -/// [`read_line()`][readline] method as well as a [`lines()`][lines] iterator. -/// -/// [readline]: #method.read_line -/// [lines]: #method.lines -/// -/// # Examples -/// -/// A locked standard input implements `BufRead`: -/// -/// ``` -/// use std::io; -/// use std::io::prelude::*; -/// -/// let stdin = io::stdin(); -/// for line in stdin.lock().lines() { -/// println!("{}", line.unwrap()); -/// } -/// ``` -/// -/// If you have something that implements `Read`, you can use the [`BufReader` -/// type][bufreader] to turn it into a `BufRead`. -/// -/// For example, [`File`][file] implements `Read`, but not `BufRead`. -/// `BufReader` to the rescue! 
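`SeekFrom` allows offsets relative to the start, end, or current position, and seeking before byte 0 is an error while seeking past the end is allowed. `std::io::Cursor` (the in-memory reader the tests below also use) illustrates all three cases:

```rust
use std::io::{Cursor, Read, Seek, SeekFrom};

fn main() -> std::io::Result<()> {
    let mut cur = Cursor::new(b"0123456789".to_vec());

    // Seek relative to the end: lands on byte index 7 and reports it.
    let pos = cur.seek(SeekFrom::End(-3))?;
    assert_eq!(pos, 7);

    let mut rest = String::new();
    cur.read_to_string(&mut rest)?;
    assert_eq!(rest, "789");

    // Seeking past the end is allowed; seeking before byte 0 is an error.
    cur.seek(SeekFrom::Start(100))?;
    assert!(cur.seek(SeekFrom::Current(-200)).is_err());
    Ok(())
}
```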
-/// -/// [bufreader]: struct.BufReader.html -/// [file]: ../fs/struct.File.html -/// -/// ``` -/// use std::io::{self, BufReader}; -/// use std::io::prelude::*; -/// use std::fs::File; -/// -/// # fn foo() -> io::Result<()> { -/// let f = try!(File::open("foo.txt")); -/// let f = BufReader::new(f); -/// -/// for line in f.lines() { -/// println!("{}", line.unwrap()); -/// } -/// -/// # Ok(()) -/// # } -/// ``` -/// - -pub trait BufRead: Read { - /// Fills the internal buffer of this object, returning the buffer contents. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`consume`][consume] method to function properly. When calling this - /// method, none of the contents will be "read" in the sense that later - /// calling `read` may return the same contents. As such, `consume` must be - /// called with the number of bytes that are consumed from this buffer to - /// ensure that the bytes are never returned twice. - /// - /// [consume]: #tymethod.consume - /// - /// An empty buffer returned indicates that the stream has reached EOF. - /// - /// # Errors - /// - /// This function will return an I/O error if the underlying reader was - /// read, but returned an error. - /// - /// # Examples - /// - /// A locked standard input implements `BufRead`: - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// - /// let stdin = io::stdin(); - /// let mut stdin = stdin.lock(); - /// - /// // we can't have two `&mut` references to `stdin`, so use a block - /// // to end the borrow early. - /// let length = { - /// let buffer = stdin.fill_buf().unwrap(); - /// - /// // work with buffer - /// println!("{:?}", buffer); - /// - /// buffer.len() - /// }; - /// - /// // ensure the bytes we worked with aren't returned again later - /// stdin.consume(length); - /// ``` - fn fill_buf(&mut self) -> Result<&[u8]>; - - /// Tells this buffer that `amt` bytes have been consumed from the buffer, - /// so they should no longer be returned in calls to `read`. - /// - /// This function is a lower-level call. It needs to be paired with the - /// [`fill_buf`][fillbuf] method to function properly. This function does - /// not perform any I/O, it simply informs this object that some amount of - /// its buffer, returned from `fill_buf`, has been consumed and should no - /// longer be returned. As such, this function may do odd things if - /// `fill_buf` isn't called before calling it. - /// - /// [fillbuf]: #tymethod.fill_buf - /// - /// The `amt` must be `<=` the number of bytes in the buffer returned by - /// `fill_buf`. - /// - /// # Examples - /// - /// Since `consume()` is meant to be used with [`fill_buf()`][fillbuf], - /// that method's example includes an example of `consume()`. - fn consume(&mut self, amt: usize); - - /// Read all bytes into `buf` until the delimiter `byte` is reached. - /// - /// This function will read bytes from the underlying stream until the - /// delimiter or EOF is found. Once found, all bytes up to, and including, - /// the delimiter (if found) will be appended to `buf`. - /// - /// If this reader is currently at EOF then this function will not modify - /// `buf` and will return `Ok(n)` where `n` is the number of bytes which - /// were read. - /// - /// # Errors - /// - /// This function will ignore all instances of `ErrorKind::Interrupted` and - /// will otherwise return any errors returned by `fill_buf`. 
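`fill_buf` and `consume` are meant to be used as a pair: peek at the buffered bytes, then tell the reader how many were actually used. A small helper written only in terms of those two calls, against `std::io`:

```rust
use std::io::BufRead;

// Count the bytes in any `BufRead` using only the two low-level calls
// described above: `fill_buf` to peek, `consume` to commit.
fn count_bytes<R: BufRead>(mut r: R) -> std::io::Result<u64> {
    let mut total = 0u64;
    loop {
        let n = r.fill_buf()?.len();   // an empty slice means EOF
        if n == 0 {
            return Ok(total);
        }
        r.consume(n);                  // never consume more than fill_buf returned
        total += n as u64;
    }
}

fn main() -> std::io::Result<()> {
    assert_eq!(count_bytes(&b"hello world"[..])?, 11);
    Ok(())
}
```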
- /// - /// If an I/O error is encountered then all bytes read so far will be - /// present in `buf` and its length will have been adjusted appropriately. - /// - /// # Examples - /// - /// A locked standard input implements `BufRead`. In this example, we'll - /// read from standard input until we see an `a` byte. - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// - /// fn foo() -> io::Result<()> { - /// let stdin = io::stdin(); - /// let mut stdin = stdin.lock(); - /// let mut buffer = Vec::new(); - /// - /// try!(stdin.read_until(b'a', &mut buffer)); - /// - /// println!("{:?}", buffer); - /// # Ok(()) - /// # } - /// ``` - fn read_until(&mut self, byte: u8, buf: &mut Vec) -> Result { - read_until(self, byte, buf) - } - - /// Read all bytes until a newline (the 0xA byte) is reached, and append - /// them to the provided buffer. - /// - /// This function will read bytes from the underlying stream until the - /// newline delimiter (the 0xA byte) or EOF is found. Once found, all bytes - /// up to, and including, the delimiter (if found) will be appended to - /// `buf`. - /// - /// If this reader is currently at EOF then this function will not modify - /// `buf` and will return `Ok(n)` where `n` is the number of bytes which - /// were read. - /// - /// # Errors - /// - /// This function has the same error semantics as `read_until` and will also - /// return an error if the read bytes are not valid UTF-8. If an I/O error - /// is encountered then `buf` may contain some bytes already read in the - /// event that all data read so far was valid UTF-8. - /// - /// # Examples - /// - /// A locked standard input implements `BufRead`. In this example, we'll - /// read all of the lines from standard input. If we were to do this in - /// an actual project, the [`lines()`][lines] method would be easier, of - /// course. - /// - /// [lines]: #method.lines - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// - /// let stdin = io::stdin(); - /// let mut stdin = stdin.lock(); - /// let mut buffer = String::new(); - /// - /// while stdin.read_line(&mut buffer).unwrap() > 0 { - /// // work with buffer - /// println!("{:?}", buffer); - /// - /// buffer.clear(); - /// } - /// ``` - fn read_line(&mut self, buf: &mut String) -> Result { - // Note that we are not calling the `.read_until` method here, but - // rather our hardcoded implementation. For more details as to why, see - // the comments in `read_to_end`. - append_to_string(buf, |b| read_until(self, b'\n', b)) - } - - /// Returns an iterator over the contents of this reader split on the byte - /// `byte`. - /// - /// The iterator returned from this function will return instances of - /// `io::Result>`. Each vector returned will *not* have the - /// delimiter byte at the end. - /// - /// This function will yield errors whenever `read_until` would have also - /// yielded an error. - /// - /// # Examples - /// - /// A locked standard input implements `BufRead`. In this example, we'll - /// read some input from standard input, splitting on commas. - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// - /// let stdin = io::stdin(); - /// - /// for content in stdin.lock().split(b',') { - /// println!("{:?}", content.unwrap()); - /// } - /// ``` - fn split(self, byte: u8) -> Split where Self: Sized { - Split { buf: self, delim: byte } - } - - /// Returns an iterator over the lines of this reader. - /// - /// The iterator returned from this function will yield instances of - /// `io::Result`. 
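`split` strips the delimiter from each yielded item and yields an empty item for consecutive delimiters, mirroring the `read_until` semantics above. For example, under `std::io`:

```rust
use std::io::BufRead;

fn main() {
    let input: &[u8] = b"lorem,ipsum,,dolor";
    let fields: Vec<Vec<u8>> = input
        .split(b',')                 // the delimiter is stripped from each item
        .map(|field| field.unwrap())
        .collect();
    assert_eq!(fields, vec![
        b"lorem".to_vec(),
        b"ipsum".to_vec(),
        b"".to_vec(),                // empty field between the two commas
        b"dolor".to_vec(),
    ]);
}
```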
Each string returned will *not* have a newline - /// byte (the 0xA byte) or CRLF (0xD, 0xA bytes) at the end. - /// - /// # Examples - /// - /// A locked standard input implements `BufRead`: - /// - /// ``` - /// use std::io; - /// use std::io::prelude::*; - /// - /// let stdin = io::stdin(); - /// - /// for line in stdin.lock().lines() { - /// println!("{}", line.unwrap()); - /// } - /// ``` - fn lines(self) -> Lines where Self: Sized { - Lines { buf: self } - } -} - -/// Adaptor to chain together two readers. -/// -/// This struct is generally created by calling [`chain()`][chain] on a reader. -/// Please see the documentation of `chain()` for more details. -/// -/// [chain]: trait.Read.html#method.chain -pub struct Chain { - first: T, - second: U, - done_first: bool, -} - -impl Read for Chain { - fn read(&mut self, buf: &mut [u8]) -> Result { - if !self.done_first { - match self.first.read(buf)? { - 0 => { self.done_first = true; } - n => return Ok(n), - } - } - self.second.read(buf) - } -} - - -impl BufRead for Chain { - fn fill_buf(&mut self) -> Result<&[u8]> { - if !self.done_first { - match self.first.fill_buf()? { - buf if buf.len() == 0 => { self.done_first = true; } - buf => return Ok(buf), - } - } - self.second.fill_buf() - } - - fn consume(&mut self, amt: usize) { - if !self.done_first { - self.first.consume(amt) - } else { - self.second.consume(amt) - } - } -} - -/// Reader adaptor which limits the bytes read from an underlying reader. -/// -/// This struct is generally created by calling [`take()`][take] on a reader. -/// Please see the documentation of `take()` for more details. -/// -/// [take]: trait.Read.html#method.take -pub struct Take { - inner: T, - limit: u64, -} - -impl Take { - /// Returns the number of bytes that can be read before this instance will - /// return EOF. - /// - /// # Note - /// - /// This instance may reach EOF after reading fewer bytes than indicated by - /// this method if the underlying `Read` instance reaches EOF. - pub fn limit(&self) -> u64 { self.limit } -} - -impl Read for Take { - fn read(&mut self, buf: &mut [u8]) -> Result { - // Don't call into inner reader at all at EOF because it may still block - if self.limit == 0 { - return Ok(0); - } - - let max = cmp::min(buf.len() as u64, self.limit) as usize; - let n = self.inner.read(&mut buf[..max])?; - self.limit -= n as u64; - Ok(n) - } -} - - -impl BufRead for Take { - fn fill_buf(&mut self) -> Result<&[u8]> { - // Don't call into inner reader at all at EOF because it may still block - if self.limit == 0 { - return Ok(&[]); - } - - let buf = self.inner.fill_buf()?; - let cap = cmp::min(buf.len() as u64, self.limit) as usize; - Ok(&buf[..cap]) - } - - fn consume(&mut self, amt: usize) { - // Don't let callers reset the limit by passing an overlarge value - let amt = cmp::min(amt as u64, self.limit) as usize; - self.limit -= amt as u64; - self.inner.consume(amt); - } -} - -/// An iterator over `u8` values of a reader. -/// -/// This struct is generally created by calling [`bytes()`][bytes] on a reader. -/// Please see the documentation of `bytes()` for more details. -/// -/// [bytes]: trait.Read.html#method.bytes -pub struct Bytes { - inner: R, -} - -impl Iterator for Bytes { - type Item = Result; - - fn next(&mut self) -> Option> { - let mut buf = [0]; - match self.inner.read(&mut buf) { - Ok(0) => None, - Ok(..) => Some(Ok(buf[0])), - Err(e) => Some(Err(e)), - } - } -} - -/// An iterator over the `char`s of a reader. 
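`Take` stops at its limit and, once the limit reaches zero, reports EOF without touching the inner reader again, as the `read` implementation above shows. A quick check of that behavior with `std::io`:

```rust
use std::io::Read;

fn main() -> std::io::Result<()> {
    let data: &[u8] = b"0123456789";
    let mut limited = data.take(4);
    assert_eq!(limited.limit(), 4);

    let mut buf = [0u8; 8];
    let n = limited.read(&mut buf)?;     // never reads past the limit
    assert_eq!((n, &buf[..n]), (4, &b"0123"[..]));

    // Once the limit is exhausted the adaptor reports EOF without
    // calling into the inner reader.
    assert_eq!(limited.limit(), 0);
    assert_eq!(limited.read(&mut buf)?, 0);
    Ok(())
}
```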
-/// -/// This struct is generally created by calling [`chars()`][chars] on a reader. -/// Please see the documentation of `chars()` for more details. -/// -/// [chars]: trait.Read.html#method.chars -pub struct Chars { - inner: R, -} - -/// An enumeration of possible errors that can be generated from the `Chars` -/// adapter. -#[derive(Debug)] -pub enum CharsError { - /// Variant representing that the underlying stream was read successfully - /// but it did not contain valid utf8 data. - NotUtf8, - - /// Variant representing that an I/O error occurred. - Other(Error), -} - -impl Iterator for Chars { - type Item = result::Result; - - fn next(&mut self) -> Option> { - let mut buf = [0]; - let first_byte = match self.inner.read(&mut buf) { - Ok(0) => return None, - Ok(..) => buf[0], - Err(e) => return Some(Err(CharsError::Other(e))), - }; - let width = ::core::str::utf8_char_width(first_byte); - if width == 1 { return Some(Ok(first_byte as char)) } - if width == 0 { return Some(Err(CharsError::NotUtf8)) } - let mut buf = [first_byte, 0, 0, 0]; - { - let mut start = 1; - while start < width { - match self.inner.read(&mut buf[start..width]) { - Ok(0) => return Some(Err(CharsError::NotUtf8)), - Ok(n) => start += n, - Err(e) => return Some(Err(CharsError::Other(e))), - } - } - } - Some(match str::from_utf8(&buf[..width]).ok() { - Some(s) => Ok(s.chars().next().unwrap()), - None => Err(CharsError::NotUtf8), - }) - } -} - -impl fmt::Display for CharsError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CharsError::NotUtf8 => { - "byte stream did not contain valid utf8".fmt(f) - } - CharsError::Other(ref e) => e.fmt(f), - } - } -} - -/// An iterator over the contents of an instance of `BufRead` split on a -/// particular byte. -/// -/// This struct is generally created by calling [`split()`][split] on a -/// `BufRead`. Please see the documentation of `split()` for more details. -/// -/// [split]: trait.BufRead.html#method.split - -pub struct Split { - buf: B, - delim: u8, -} - - -impl Iterator for Split { - type Item = Result>; - - fn next(&mut self) -> Option>> { - let mut buf = Vec::new(); - match self.buf.read_until(self.delim, &mut buf) { - Ok(0) => None, - Ok(_n) => { - if buf[buf.len() - 1] == self.delim { - buf.pop(); - } - Some(Ok(buf)) - } - Err(e) => Some(Err(e)) - } - } -} - -/// An iterator over the lines of an instance of `BufRead`. -/// -/// This struct is generally created by calling [`lines()`][lines] on a -/// `BufRead`. Please see the documentation of `lines()` for more details. 
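`Chars::next` sizes each code point from its leading byte via `utf8_char_width` before reading the continuation bytes. A simplified stand-in for that lookup (the real implementation is a 256-entry table) conveys the idea:

```rust
// Simplified stand-in for the `utf8_char_width` lookup used by `Chars::next`:
// the leading byte of a UTF-8 sequence determines the total sequence length.
fn utf8_width_sketch(first: u8) -> usize {
    match first {
        0x00..=0x7f => 1,          // ASCII
        0xc2..=0xdf => 2,
        0xe0..=0xef => 3,
        0xf0..=0xf4 => 4,
        _ => 0,                    // continuation or invalid leading byte
    }
}

fn main() {
    assert_eq!(utf8_width_sketch(b'a'), 1);
    assert_eq!("é".as_bytes()[0], 0xc3);
    assert_eq!(utf8_width_sketch(0xc3), 2);
    assert_eq!(utf8_width_sketch(0x80), 0);  // bare continuation byte
}
```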
-/// -/// [lines]: trait.BufRead.html#method.lines - -pub struct Lines { - buf: B, -} - - -impl Iterator for Lines { - type Item = Result; - - fn next(&mut self) -> Option> { - let mut buf = String::new(); - match self.buf.read_line(&mut buf) { - Ok(0) => None, - Ok(_n) => { - if buf.ends_with("\n") { - buf.pop(); - if buf.ends_with("\r") { - buf.pop(); - } - } - Some(Ok(buf)) - } - Err(e) => Some(Err(e)) - } - } -} - -#[cfg(test)] -mod tests { - use prelude::v1::*; - use io::prelude::*; - use io; - use super::Cursor; - use test; - use super::repeat; - - #[test] - fn read_until() { - let mut buf = Cursor::new(&b"12"[..]); - let mut v = Vec::new(); - assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 2); - assert_eq!(v, b"12"); - - let mut buf = Cursor::new(&b"1233"[..]); - let mut v = Vec::new(); - assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 3); - assert_eq!(v, b"123"); - v.truncate(0); - assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 1); - assert_eq!(v, b"3"); - v.truncate(0); - assert_eq!(buf.read_until(b'3', &mut v).unwrap(), 0); - assert_eq!(v, []); - } - - #[test] - fn split() { - let buf = Cursor::new(&b"12"[..]); - let mut s = buf.split(b'3'); - assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']); - assert!(s.next().is_none()); - - let buf = Cursor::new(&b"1233"[..]); - let mut s = buf.split(b'3'); - assert_eq!(s.next().unwrap().unwrap(), vec![b'1', b'2']); - assert_eq!(s.next().unwrap().unwrap(), vec![]); - assert!(s.next().is_none()); - } - - #[test] - fn read_line() { - let mut buf = Cursor::new(&b"12"[..]); - let mut v = String::new(); - assert_eq!(buf.read_line(&mut v).unwrap(), 2); - assert_eq!(v, "12"); - - let mut buf = Cursor::new(&b"12\n\n"[..]); - let mut v = String::new(); - assert_eq!(buf.read_line(&mut v).unwrap(), 3); - assert_eq!(v, "12\n"); - v.truncate(0); - assert_eq!(buf.read_line(&mut v).unwrap(), 1); - assert_eq!(v, "\n"); - v.truncate(0); - assert_eq!(buf.read_line(&mut v).unwrap(), 0); - assert_eq!(v, ""); - } - - #[test] - fn lines() { - let buf = Cursor::new(&b"12\r"[..]); - let mut s = buf.lines(); - assert_eq!(s.next().unwrap().unwrap(), "12\r".to_string()); - assert!(s.next().is_none()); - - let buf = Cursor::new(&b"12\r\n\n"[..]); - let mut s = buf.lines(); - assert_eq!(s.next().unwrap().unwrap(), "12".to_string()); - assert_eq!(s.next().unwrap().unwrap(), "".to_string()); - assert!(s.next().is_none()); - } - - #[test] - fn read_to_end() { - let mut c = Cursor::new(&b""[..]); - let mut v = Vec::new(); - assert_eq!(c.read_to_end(&mut v).unwrap(), 0); - assert_eq!(v, []); - - let mut c = Cursor::new(&b"1"[..]); - let mut v = Vec::new(); - assert_eq!(c.read_to_end(&mut v).unwrap(), 1); - assert_eq!(v, b"1"); - - let cap = 1024 * 1024; - let data = (0..cap).map(|i| (i / 3) as u8).collect::>(); - let mut v = Vec::new(); - let (a, b) = data.split_at(data.len() / 2); - assert_eq!(Cursor::new(a).read_to_end(&mut v).unwrap(), a.len()); - assert_eq!(Cursor::new(b).read_to_end(&mut v).unwrap(), b.len()); - assert_eq!(v, data); - } - - #[test] - fn read_to_string() { - let mut c = Cursor::new(&b""[..]); - let mut v = String::new(); - assert_eq!(c.read_to_string(&mut v).unwrap(), 0); - assert_eq!(v, ""); - - let mut c = Cursor::new(&b"1"[..]); - let mut v = String::new(); - assert_eq!(c.read_to_string(&mut v).unwrap(), 1); - assert_eq!(v, "1"); - - let mut c = Cursor::new(&b"\xff"[..]); - let mut v = String::new(); - assert!(c.read_to_string(&mut v).is_err()); - } - - #[test] - fn read_exact() { - let mut buf = [0; 4]; - - let mut c = 
Cursor::new(&b""[..]); - assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), - io::ErrorKind::UnexpectedEof); - - let mut c = Cursor::new(&b"123"[..]).chain(Cursor::new(&b"456789"[..])); - c.read_exact(&mut buf).unwrap(); - assert_eq!(&buf, b"1234"); - c.read_exact(&mut buf).unwrap(); - assert_eq!(&buf, b"5678"); - assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), - io::ErrorKind::UnexpectedEof); - } - - #[test] - fn read_exact_slice() { - let mut buf = [0; 4]; - - let mut c = &b""[..]; - assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), - io::ErrorKind::UnexpectedEof); - - let mut c = &b"123"[..]; - assert_eq!(c.read_exact(&mut buf).unwrap_err().kind(), - io::ErrorKind::UnexpectedEof); - // make sure the optimized (early returning) method is being used - assert_eq!(&buf, &[0; 4]); - - let mut c = &b"1234"[..]; - c.read_exact(&mut buf).unwrap(); - assert_eq!(&buf, b"1234"); - - let mut c = &b"56789"[..]; - c.read_exact(&mut buf).unwrap(); - assert_eq!(&buf, b"5678"); - assert_eq!(c, b"9"); - } - - #[test] - fn take_eof() { - struct R; - - impl Read for R { - fn read(&mut self, _: &mut [u8]) -> io::Result { - Err(io::Error::new(io::ErrorKind::Other, "")) - } - } - impl BufRead for R { - fn fill_buf(&mut self) -> io::Result<&[u8]> { - Err(io::Error::new(io::ErrorKind::Other, "")) - } - fn consume(&mut self, _amt: usize) { } - } - - let mut buf = [0; 1]; - assert_eq!(0, R.take(0).read(&mut buf).unwrap()); - assert_eq!(b"", R.take(0).fill_buf().unwrap()); - } - - fn cmp_bufread(mut br1: Br1, mut br2: Br2, exp: &[u8]) { - let mut cat = Vec::new(); - loop { - let consume = { - let buf1 = br1.fill_buf().unwrap(); - let buf2 = br2.fill_buf().unwrap(); - let minlen = if buf1.len() < buf2.len() { buf1.len() } else { buf2.len() }; - assert_eq!(buf1[..minlen], buf2[..minlen]); - cat.extend_from_slice(&buf1[..minlen]); - minlen - }; - if consume == 0 { - break; - } - br1.consume(consume); - br2.consume(consume); - } - assert_eq!(br1.fill_buf().unwrap().len(), 0); - assert_eq!(br2.fill_buf().unwrap().len(), 0); - assert_eq!(&cat[..], &exp[..]) - } - - #[test] - fn chain_bufread() { - let testdata = b"ABCDEFGHIJKL"; - let chain1 = (&testdata[..3]).chain(&testdata[3..6]) - .chain(&testdata[6..9]) - .chain(&testdata[9..]); - let chain2 = (&testdata[..4]).chain(&testdata[4..8]) - .chain(&testdata[8..]); - cmp_bufread(chain1, chain2, &testdata[..]); - } - - #[bench] - fn bench_read_to_end(b: &mut test::Bencher) { - b.iter(|| { - let mut lr = repeat(1).take(10000000); - let mut vec = Vec::with_capacity(1024); - super::read_to_end(&mut lr, &mut vec) - }); - } -} diff --git a/artiq/firmware/libstd_artiq/io/prelude.rs b/artiq/firmware/libstd_artiq/io/prelude.rs deleted file mode 100644 index 58df71e69..000000000 --- a/artiq/firmware/libstd_artiq/io/prelude.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The I/O Prelude -//! -//! The purpose of this module is to alleviate imports of many common I/O traits -//! by adding a glob import to the top of I/O heavy modules: -//! -//! ``` -//! # #![allow(unused_imports)] -//! use std::io::prelude::*; -//! 
``` - -pub use super::{Read, Write, Seek}; - pub use super::BufRead; diff --git a/artiq/firmware/libstd_artiq/io/util.rs b/artiq/firmware/libstd_artiq/io/util.rs deleted file mode 100644 index b3c2d45fd..000000000 --- a/artiq/firmware/libstd_artiq/io/util.rs +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(missing_copy_implementations)] - -use io::{self, Read, Write, ErrorKind}; - use io::BufRead; - -/// Copies the entire contents of a reader into a writer. -/// -/// This function will continuously read data from `reader` and then -/// write it into `writer` in a streaming fashion until `reader` -/// returns EOF. -/// -/// On success, the total number of bytes that were copied from -/// `reader` to `writer` is returned. -/// -/// # Errors -/// -/// This function will return an error immediately if any call to `read` or -/// `write` returns an error. All instances of `ErrorKind::Interrupted` are -/// handled by this function and the underlying operation is retried. -/// -/// # Examples -/// -/// ``` -/// use std::io; -/// -/// # fn foo() -> io::Result<()> { -/// let mut reader: &[u8] = b"hello"; -/// let mut writer: Vec = vec![]; -/// -/// try!(io::copy(&mut reader, &mut writer)); -/// -/// assert_eq!(reader, &writer[..]); -/// # Ok(()) -/// # } -/// ``` -pub fn copy(reader: &mut R, writer: &mut W) -> io::Result - where R: Read, W: Write -{ - let mut buf = [0; super::DEFAULT_BUF_SIZE]; - let mut written = 0; - loop { - let len = match reader.read(&mut buf) { - Ok(0) => return Ok(written), - Ok(len) => len, - Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, - Err(e) => return Err(e), - }; - writer.write_all(&buf[..len])?; - written += len as u64; - } -} - -/// A reader which is always at EOF. -/// -/// This struct is generally created by calling [`empty()`][empty]. Please see -/// the documentation of `empty()` for more details. -/// -/// [empty]: fn.empty.html -pub struct Empty { _priv: () } - -/// Constructs a new handle to an empty reader. -/// -/// All reads from the returned reader will return `Ok(0)`. -/// -/// # Examples -/// -/// A slightly sad example of not reading anything into a buffer: -/// -/// ``` -/// use std::io::{self, Read}; -/// -/// let mut buffer = String::new(); -/// io::empty().read_to_string(&mut buffer).unwrap(); -/// assert!(buffer.is_empty()); -/// ``` -pub fn empty() -> Empty { Empty { _priv: () } } - -impl Read for Empty { - fn read(&mut self, _buf: &mut [u8]) -> io::Result { Ok(0) } -} - -impl BufRead for Empty { - fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(&[]) } - fn consume(&mut self, _n: usize) {} -} - -/// A reader which yields one byte over and over and over and over and over and... -/// -/// This struct is generally created by calling [`repeat()`][repeat]. Please -/// see the documentation of `repeat()` for more details. -/// -/// [repeat]: fn.repeat.html -pub struct Repeat { byte: u8 } - -/// Creates an instance of a reader that infinitely repeats one byte. -/// -/// All reads from this reader will succeed by filling the specified buffer with -/// the given byte. 
-/// -/// # Examples -/// -/// ``` -/// use std::io::{self, Read}; -/// -/// let mut buffer = [0; 3]; -/// io::repeat(0b101).read_exact(&mut buffer).unwrap(); -/// assert_eq!(buffer, [0b101, 0b101, 0b101]); -/// ``` -pub fn repeat(byte: u8) -> Repeat { Repeat { byte: byte } } - -impl Read for Repeat { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - for slot in &mut *buf { - *slot = self.byte; - } - Ok(buf.len()) - } -} - -/// A writer which will move data into the void. -/// -/// This struct is generally created by calling [`sink()`][sink]. Please -/// see the documentation of `sink()` for more details. -/// -/// [sink]: fn.sink.html -pub struct Sink { _priv: () } - -/// Creates an instance of a writer which will successfully consume all data. -/// -/// All calls to `write` on the returned instance will return `Ok(buf.len())` -/// and the contents of the buffer will not be inspected. -/// -/// # Examples -/// -/// ```rust -/// use std::io::{self, Write}; -/// -/// let mut buffer = vec![1, 2, 3, 5, 8]; -/// let num_bytes = io::sink().write(&mut buffer).unwrap(); -/// assert_eq!(num_bytes, 5); -/// ``` -pub fn sink() -> Sink { Sink { _priv: () } } - -impl Write for Sink { - fn write(&mut self, buf: &[u8]) -> io::Result { Ok(buf.len()) } - fn flush(&mut self) -> io::Result<()> { Ok(()) } -} - -#[cfg(test)] -mod tests { - use prelude::v1::*; - - use io::prelude::*; - use io::{copy, sink, empty, repeat}; - - #[test] - fn copy_copies() { - let mut r = repeat(0).take(4); - let mut w = sink(); - assert_eq!(copy(&mut r, &mut w).unwrap(), 4); - - let mut r = repeat(0).take(1 << 17); - assert_eq!(copy(&mut r as &mut Read, &mut w as &mut Write).unwrap(), 1 << 17); - } - - #[test] - fn sink_sinks() { - let mut s = sink(); - assert_eq!(s.write(&[]).unwrap(), 0); - assert_eq!(s.write(&[0]).unwrap(), 1); - assert_eq!(s.write(&[0; 1024]).unwrap(), 1024); - assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024); - } - - #[test] - fn empty_reads() { - let mut e = empty(); - assert_eq!(e.read(&mut []).unwrap(), 0); - assert_eq!(e.read(&mut [0]).unwrap(), 0); - assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0); - assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0); - } - - #[test] - fn repeat_repeats() { - let mut r = repeat(4); - let mut b = [0; 1024]; - assert_eq!(r.read(&mut b).unwrap(), 1024); - assert!(b.iter().all(|b| *b == 4)); - } - - #[test] - fn take_some_bytes() { - assert_eq!(repeat(4).take(100).bytes().count(), 100); - assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4); - assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20); - } -} diff --git a/artiq/firmware/libstd_artiq/lib.rs b/artiq/firmware/libstd_artiq/lib.rs deleted file mode 100644 index f0c7b8c5a..000000000 --- a/artiq/firmware/libstd_artiq/lib.rs +++ /dev/null @@ -1,40 +0,0 @@ -#![feature(lang_items, asm, alloc, needs_panic_runtime, - unicode, raw, int_error_internals, try_from, macro_reexport, - allow_internal_unstable, stmt_expr_attributes, str_internals)] -#![no_std] -#![needs_panic_runtime] - -extern crate std_unicode; -#[macro_use] -#[macro_reexport(vec, format)] -extern crate alloc; - -pub use core::{any, cell, clone, cmp, convert, default, hash, iter, marker, mem, num, - ops, option, ptr, result, sync, - char, i16, i32, i64, i8, isize, u16, u32, u64, u8, usize, f32, f64}; -pub use alloc::{arc, rc, raw_vec}; -pub use alloc::{binary_heap, borrow, boxed, btree_map, btree_set, fmt, linked_list, slice, - str, string, vec, vec_deque}; - -pub mod prelude { - pub mod v1 { - pub use 
core::prelude::v1::*; - pub use alloc::boxed::Box; - pub use alloc::borrow::ToOwned; - pub use alloc::string::{String, ToString}; - pub use alloc::vec::Vec; - } -} - -pub mod error; -pub mod io; - -// Provide Box::new wrapper -#[cfg(any(not(feature="alloc"), not(feature="io_error_alloc")))] -struct FakeBox(core::marker::PhantomData); -#[cfg(any(not(feature="alloc"), not(feature="io_error_alloc")))] -impl FakeBox { - fn new(val: T) -> T { - val - } -} diff --git a/artiq/firmware/libunwind_backtrace/Cargo.toml b/artiq/firmware/libunwind_backtrace/Cargo.toml new file mode 100644 index 000000000..637e55bc6 --- /dev/null +++ b/artiq/firmware/libunwind_backtrace/Cargo.toml @@ -0,0 +1,8 @@ +[package] +authors = ["M-Labs"] +name = "unwind_backtrace" +version = "0.0.0" + +[lib] +name = "unwind_backtrace" +path = "lib.rs" diff --git a/artiq/firmware/libunwind_backtrace/lib.rs b/artiq/firmware/libunwind_backtrace/lib.rs new file mode 100644 index 000000000..22c55a628 --- /dev/null +++ b/artiq/firmware/libunwind_backtrace/lib.rs @@ -0,0 +1,47 @@ +#![feature(libc, panic_unwind)] +#![no_std] + +extern crate unwind; +extern crate libc; + +use unwind as uw; +use libc::{c_void, c_int}; + +const UW_REG_SP: c_int = -2; + +pub fn backtrace(f: F) -> Result<(), uw::_Unwind_Reason_Code> + where F: FnMut(usize) -> () +{ + struct TraceContext { + step_fn: F, + prev_sp: uw::_Unwind_Word + } + + extern fn trace(context: *mut uw::_Unwind_Context, arg: *mut c_void) + -> uw::_Unwind_Reason_Code + where F: FnMut(usize) -> () + { + unsafe { + let trace_context = &mut *(arg as *mut TraceContext); + + // Detect the root of a libfringe thread + let cur_sp = uw::_Unwind_GetGR(context, UW_REG_SP); + if cur_sp == trace_context.prev_sp { + return uw::_URC_END_OF_STACK + } else { + trace_context.prev_sp = cur_sp; + } + + (trace_context.step_fn)(uw::_Unwind_GetIP(context)); + uw::_URC_NO_REASON + } + } + + unsafe { + let mut trace_context = TraceContext { step_fn: f, prev_sp: 0 }; + match uw::_Unwind_Backtrace(trace::, &mut trace_context as *mut _ as *mut c_void) { + uw::_URC_NO_REASON => Ok(()), + err => Err(err) + } + } +} diff --git a/artiq/firmware/runtime/Cargo.toml b/artiq/firmware/runtime/Cargo.toml index 4a820e33d..bbb9877bb 100644 --- a/artiq/firmware/runtime/Cargo.toml +++ b/artiq/firmware/runtime/Cargo.toml @@ -7,36 +7,30 @@ build = "build.rs" [lib] name = "runtime" crate-type = ["staticlib"] -path = "lib.rs" +path = "main.rs" [build-dependencies] -build_artiq = { path = "../libbuild_artiq" } +build_misoc = { path = "../libbuild_misoc" } [dependencies] +failure = { version = "0.1", default-features = false } +failure_derive = { version = "0.1", default-features = false } byteorder = { version = "1.0", default-features = false } cslice = { version = "0.3" } -log = { version = "0.3", default-features = false } +log = { version = "0.4", default-features = false } +managed = { version = "= 0.7.0", default-features = false, features = ["alloc", "map"] } +eh = { path = "../libeh" } +unwind_backtrace = { path = "../libunwind_backtrace" } +io = { path = "../libio", features = ["byteorder"] } alloc_list = { path = "../liballoc_list" } -std_artiq = { path = "../libstd_artiq", features = ["alloc", "io_error_alloc"] } +board_misoc = { path = "../libboard_misoc", features = ["uart_console", "smoltcp"] } logger_artiq = { path = "../liblogger_artiq" } -board = { path = "../libboard", features = ["uart_console"] } -proto = { path = "../libproto", features = ["log"] } -amp = { path = "../libamp" } -drtioaux = { path = "../libdrtioaux" 
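The new `unwind_backtrace::backtrace` walks the stack with `_Unwind_Backtrace` and hands each frame's instruction pointer to a closure, stopping when the stack pointer repeats (the root of a libfringe thread). An illustrative caller, assuming the firmware's `log` macros are in scope; the function name and format string are made up for the example:

```rust
// Sketch only: firmware-side use of the crate added in this patch.
fn log_backtrace() {
    let mut frame = 0;
    let _ = unwind_backtrace::backtrace(|ip| {
        // `ip` is the instruction pointer of one stack frame.
        error!("frame #{}: ip = {:#010x}", frame, ip);
        frame += 1;
    });
}
```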
} - -[dependencies.compiler_builtins] -git = "https://github.com/rust-lang-nursery/compiler-builtins" -rev = "631b568" -features = ["mem"] +board_artiq = { path = "../libboard_artiq" } +proto_artiq = { path = "../libproto_artiq", features = ["log", "alloc"] } +smoltcp = { version = "0.6.0", default-features = false, features = ["rust-1_28", "alloc", "ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp"] } [dependencies.fringe] git = "https://github.com/m-labs/libfringe" -rev = "bd23494" +rev = "b8a6d8f" default-features = false features = ["alloc"] - -[dependencies.smoltcp] -git = "https://github.com/m-labs/smoltcp" -rev = "960b001" -default-features = false -features = ["alloc", "log", "socket-tcp"] diff --git a/artiq/firmware/runtime/Makefile b/artiq/firmware/runtime/Makefile index 94cd87cbe..5fa6dbdf8 100644 --- a/artiq/firmware/runtime/Makefile +++ b/artiq/firmware/runtime/Makefile @@ -1,32 +1,30 @@ include ../include/generated/variables.mak include $(MISOC_DIRECTORY)/software/common.mak -LDFLAGS += -L../libbase +CFLAGS += \ + -I$(LIBUNWIND_DIRECTORY) \ + -I$(LIBUNWIND_DIRECTORY)/../unwinder/include -RUSTFLAGS += -Cpanic=abort +LDFLAGS += \ + -L../libunwind -all: runtime.bin runtime.fbi +RUSTFLAGS += -Cpanic=unwind + +all:: runtime.bin runtime.fbi .PHONY: $(RUSTOUT)/libruntime.a $(RUSTOUT)/libruntime.a: $(cargo) --manifest-path $(RUNTIME_DIRECTORY)/Cargo.toml runtime.elf: $(RUSTOUT)/libruntime.a ksupport_data.o - $(LD) $(LDFLAGS) -T $(RUNTIME_DIRECTORY)/runtime.ld -o $@ $^ - @chmod -x $@ + $(link) -T $(RUNTIME_DIRECTORY)/runtime.ld \ + -lunwind-bare ksupport_data.o: ../ksupport/ksupport.elf $(LD) -r -b binary -o $@ $< %.bin: %.elf - $(OBJCOPY) -O binary $< $@ - @chmod -x $@ + $(objcopy) -O binary %.fbi: %.bin - @echo " MSCIMG " $@ && $(PYTHON) -m misoc.tools.mkmscimg -f -o $@ $< - -clean: - $(RM) *.o runtime.elf runtime.bin runtime.fbi - $(RM) -rf cargo - -.PHONY: all clean + $(mscimg) -f diff --git a/artiq/firmware/runtime/analyzer.rs b/artiq/firmware/runtime/analyzer.rs index 1e2909a37..6ea47862e 100644 --- a/artiq/firmware/runtime/analyzer.rs +++ b/artiq/firmware/runtime/analyzer.rs @@ -1,22 +1,17 @@ -use std::io::{self, Write}; -use board::{csr, cache}; -use sched::{Io, TcpListener, TcpStream}; +use io::{Write, Error as IoError}; +use board_misoc::{csr, cache}; +use sched::{Io, TcpListener, TcpStream, Error as SchedError}; use analyzer_proto::*; const BUFFER_SIZE: usize = 512 * 1024; -// hack until https://github.com/rust-lang/rust/issues/33626 is fixed -#[repr(simd)] -struct Align64(u64, u64, u64, u64, u64, u64, u64, u64); - +#[repr(align(64))] struct Buffer { data: [u8; BUFFER_SIZE], - __alignment: [Align64; 0] } static mut BUFFER: Buffer = Buffer { - data: [0; BUFFER_SIZE], - __alignment: [] + data: [0; BUFFER_SIZE] }; fn arm() { @@ -40,7 +35,7 @@ fn disarm() { } } -fn worker(stream: &mut TcpStream) -> io::Result<()> { +fn worker(stream: &mut TcpStream) -> Result<(), IoError> { let data = unsafe { &BUFFER.data[..] 
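In `analyzer.rs`, the zero-length `#[repr(simd)]` array hack and its runtime assert are replaced by `#[repr(align(64))]`, which states the alignment requirement directly and is enforced at compile time. A standalone sketch (1 KiB instead of the real 512 KiB buffer, purely to keep the example small):

```rust
use std::mem;

// `#[repr(align(64))]` makes the whole struct 64-byte aligned, which is what
// the old zero-sized `Align64` member plus runtime assert emulated.
#[repr(align(64))]
struct Buffer {
    data: [u8; 1024],
}

fn main() {
    assert_eq!(mem::align_of::<Buffer>(), 64);
    let buf = Buffer { data: [0; 1024] };
    // The struct, and therefore its buffer, starts on a 64-byte boundary.
    assert_eq!(&buf as *const Buffer as usize % 64, 0);
    let _ = &buf.data;
}
```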
}; let overflow_occurred = unsafe { csr::rtio_analyzer::message_encoder_overflow_read() != 0 }; let total_byte_count = unsafe { csr::rtio_analyzer::dma_byte_count_read() }; @@ -68,9 +63,6 @@ fn worker(stream: &mut TcpStream) -> io::Result<()> { } pub fn thread(io: Io) { - // verify that the hack above works - assert!(::core::mem::align_of::() == 64); - let listener = TcpListener::new(&io, 65535); listener.listen(1382).expect("analyzer: cannot listen"); diff --git a/artiq/firmware/runtime/build.rs b/artiq/firmware/runtime/build.rs index d9df648a6..3548ea5ff 100644 --- a/artiq/firmware/runtime/build.rs +++ b/artiq/firmware/runtime/build.rs @@ -1,6 +1,5 @@ -extern crate build_artiq; +extern crate build_misoc; fn main() { - build_artiq::git_describe(); - build_artiq::misoc_cfg(); + build_misoc::cfg(); } diff --git a/artiq/firmware/runtime/cache.rs b/artiq/firmware/runtime/cache.rs index 082dda287..2f1948924 100644 --- a/artiq/firmware/runtime/cache.rs +++ b/artiq/firmware/runtime/cache.rs @@ -1,6 +1,4 @@ -use std::vec::Vec; -use std::string::String; -use std::btree_map::BTreeMap; +use alloc::{Vec, String, BTreeMap}; #[derive(Debug)] struct Entry { diff --git a/artiq/firmware/runtime/config.rs b/artiq/firmware/runtime/config.rs deleted file mode 100644 index c4b45ee6c..000000000 --- a/artiq/firmware/runtime/config.rs +++ /dev/null @@ -1,220 +0,0 @@ -#[cfg(has_spiflash)] -mod imp { - use core::str; - use std::btree_map::BTreeMap; - use byteorder::{ByteOrder, BigEndian}; - use board::{mem, csr, cache, spiflash}; - - const ADDR: usize = mem::FLASH_BOOT_ADDRESS + 0x80000 /* max runtime size */; - const SIZE: usize = csr::CONFIG_SPIFLASH_SECTOR_SIZE as usize; - - mod lock { - use core::slice; - use core::sync::atomic::{AtomicUsize, Ordering}; - - static LOCKED: AtomicUsize = AtomicUsize::new(0); - - pub struct Lock; - - impl Lock { - pub fn take() -> Result { - if LOCKED.swap(1, Ordering::SeqCst) != 0 { - Err(()) // already locked - } else { - Ok(Lock) // locked now - } - } - - pub fn data(&self) -> &'static [u8] { - unsafe { slice::from_raw_parts(super::ADDR as *const u8, super::SIZE) } - } - } - - impl Drop for Lock { - fn drop(&mut self) { - LOCKED.store(0, Ordering::SeqCst) - } - } - } - - use self::lock::Lock; - - struct Iter<'a> { - data: &'a [u8], - offset: usize - } - - impl<'a> Iter<'a> { - fn new(data: &'a [u8]) -> Iter<'a> { - Iter { data: data, offset: 0 } - } - } - - impl<'a> Iterator for Iter<'a> { - type Item = Result<(&'a [u8], &'a [u8]), ()>; - - fn next(&mut self) -> Option { - let data = &self.data[self.offset..]; - - if data.len() < 4 { - error!("offset {}: truncated record", self.offset); - return Some(Err(())) - } - - let record_size = BigEndian::read_u32(data) as usize; - if record_size < 4 { - error!("offset {}: invalid record size", self.offset); - return Some(Err(())) - } - if record_size == !0 /* all ones; erased flash */ { - return None - } - - let record_body = &data[4..record_size]; - match record_body.iter().position(|&x| x == 0) { - None => { - error!("offset {}: missing separator", self.offset); - Some(Err(())) - } - Some(pos) => { - self.offset += record_size; - - let (key, zero_and_value) = record_body.split_at(pos); - Some(Ok((key, &zero_and_value[1..]))) - } - } - } - } - - pub fn read) -> R, R>(key: &str, f: F) -> R { - f(Lock::take().and_then(|lock| { - let mut iter = Iter::new(lock.data()); - let mut value = &[][..]; - while let Some(result) = iter.next() { - let (record_key, record_value) = result?; - if key.as_bytes() == record_key { - // last write wins - 
value = record_value - } - } - Ok(value) - })) - } - - pub fn read_str) -> R, R>(key: &str, f: F) -> R { - read(key, |result| { - f(result.and_then(|value| str::from_utf8(value).map_err(|_| ()))) - }) - } - - fn append_at(mut offset: usize, key: &[u8], value: &[u8]) -> Result { - let record_size = 4 + key.len() + 1 + value.len(); - if offset + record_size > SIZE { - return Err(()) - } - - let mut record_size_bytes = [0u8; 4]; - BigEndian::write_u32(&mut record_size_bytes[..], record_size as u32); - - spiflash::write(ADDR + offset, &record_size_bytes[..]); - offset += record_size_bytes.len(); - - spiflash::write(ADDR + offset, key); - offset += key.len(); - - spiflash::write(ADDR + offset, &[0]); - offset += 1; - - spiflash::write(ADDR + offset, value); - offset += value.len(); - - cache::flush_l2_cache(); - Ok(offset) - } - - fn compact() -> Result<(), ()> { - let lock = Lock::take()?; - - let mut items = BTreeMap::new(); - { - let mut iter = Iter::new(lock.data()); - while let Some(result) = iter.next() { - let (key, value) = result?; - items.insert(key, value); - } - } - - spiflash::erase_sector(ADDR); - cache::flush_l2_cache(); - - let mut offset = 0; - for (key, value) in items { - offset = append_at(offset, key, value)?; - } - Ok(()) - } - - fn append(key: &str, value: &[u8]) -> Result<(), ()> { - let lock = Lock::take()?; - - let free_offset = { - let mut iter = Iter::new(lock.data()); - while let Some(result) = iter.next() { - let _ = result?; - } - iter.offset - }; - - append_at(free_offset, key.as_bytes(), value)?; - Ok(()) - } - - pub fn write(key: &str, value: &[u8]) -> Result<(), ()> { - match append(key, value) { - Ok(()) => (), - Err(()) => { - compact()?; - append(key, value)?; - } - } - Ok(()) - } - - pub fn remove(key: &str) -> Result<(), ()> { - write(key, &[]) - } - - pub fn erase() -> Result<(), ()> { - let _lock = Lock::take()?; - - spiflash::erase_sector(ADDR); - cache::flush_l2_cache(); - - Ok(()) - } -} - -#[cfg(not(has_spiflash))] -mod imp { - pub fn read) -> R, R>(_key: &str, f: F) -> R { - f(Err(())) - } - - pub fn read_str) -> R, R>(_key: &str, f: F) -> R { - f(Err(())) - } - - pub fn write(_key: &str, _value: &[u8]) -> Result<(), ()> { - Err(()) - } - - pub fn remove(_key: &str) -> Result<(), ()> { - Err(()) - } - - pub fn erase() -> Result<(), ()> { - Err(()) - } -} - -pub use self::imp::*; diff --git a/artiq/firmware/runtime/kern_hwreq.rs b/artiq/firmware/runtime/kern_hwreq.rs index 245fdaeab..53e0daa8a 100644 --- a/artiq/firmware/runtime/kern_hwreq.rs +++ b/artiq/firmware/runtime/kern_hwreq.rs @@ -1,190 +1,143 @@ -use std::io; +use core::cell::RefCell; use kernel_proto as kern; -use sched::Io; -use session::{kern_acknowledge, kern_send}; -#[cfg(has_rtio_core)] +use sched::{Io, Mutex, Error as SchedError}; +use session::{kern_acknowledge, kern_send, Error}; use rtio_mgt; +use urc::Urc; +use board_misoc::i2c as local_i2c; +use board_artiq::drtio_routing; +use board_artiq::spi as local_spi; #[cfg(has_drtio)] -mod drtio_i2c { +mod remote_i2c { use drtioaux; + use rtio_mgt::drtio; + use sched::{Io, Mutex}; - fn basic_reply(nodeno: u8) -> Result<(), ()> { - match drtioaux::hw::recv_timeout(nodeno, None) { + pub fn start(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8) -> Result<(), &'static str> { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::I2cStartRequest { + destination: destination, + busno: busno + }); + match reply { Ok(drtioaux::Packet::I2cBasicReply { succeeded }) => { - if succeeded { Ok(()) } else { 
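The removed config code stores each key/value pair as a flash record: a big-endian `u32` giving the total record size (header included), the key bytes, a zero separator, then the value. A small round-trip sketch of that layout using the `byteorder` crate already present in this tree; the key and value shown are just examples:

```rust
use byteorder::{BigEndian, ByteOrder};

// [ u32 big-endian total size | key bytes | 0x00 | value bytes ]
fn encode_record(key: &str, value: &[u8]) -> Vec<u8> {
    let size = 4 + key.len() + 1 + value.len();
    let mut record = vec![0u8; 4];
    BigEndian::write_u32(&mut record[..4], size as u32);
    record.extend_from_slice(key.as_bytes());
    record.push(0);
    record.extend_from_slice(value);
    record
}

fn decode_record(data: &[u8]) -> Option<(&[u8], &[u8])> {
    let size = BigEndian::read_u32(data) as usize;
    let body = &data[4..size];
    let sep = body.iter().position(|&b| b == 0)?;
    Some((&body[..sep], &body[sep + 1..]))
}

fn main() {
    let rec = encode_record("ip", b"192.168.1.50");
    assert_eq!(decode_record(&rec), Some((&b"ip"[..], &b"192.168.1.50"[..])));
}
```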
Err(()) } + if succeeded { Ok(()) } else { Err("i2c basic reply error") } } - Ok(_) => { - error!("received unexpected aux packet"); - Err(()) + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); + Err("received unexpected aux packet") } Err(e) => { error!("aux packet error ({})", e); - Err(()) + Err(e) } } } - pub fn start(nodeno: u8, busno: u8) -> Result<(), ()> { - let request = drtioaux::Packet::I2cStartRequest { busno: busno }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) + pub fn restart(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8) -> Result<(), &'static str> { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::I2cRestartRequest { + destination: destination, + busno: busno + }); + match reply { + Ok(drtioaux::Packet::I2cBasicReply { succeeded }) => { + if succeeded { Ok(()) } else { Err("i2c basic reply error") } + } + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); + Err("received unexpected aux packet") + } + Err(e) => { + error!("aux packet error ({})", e); + Err(e) + } } - basic_reply(nodeno) } - pub fn restart(nodeno: u8, busno: u8) -> Result<(), ()> { - let request = drtioaux::Packet::I2cRestartRequest { busno: busno }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) + pub fn stop(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8) -> Result<(), &'static str> { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::I2cStopRequest { + destination: destination, + busno: busno + }); + match reply { + Ok(drtioaux::Packet::I2cBasicReply { succeeded }) => { + if succeeded { Ok(()) } else { Err("i2c basic reply error") } + } + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); + Err("received unexpected aux packet") + } + Err(e) => { + error!("aux packet error ({})", e); + Err(e) + } } - basic_reply(nodeno) } - pub fn stop(nodeno: u8, busno: u8) -> Result<(), ()> { - let request = drtioaux::Packet::I2cStopRequest { busno: busno }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) - } - basic_reply(nodeno) - } - - pub fn write(nodeno: u8, busno: u8, data: u8) -> Result { - let request = drtioaux::Packet::I2cWriteRequest { + pub fn write(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8, data: u8) -> Result { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::I2cWriteRequest { + destination: destination, busno: busno, data: data - }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) - } - match drtioaux::hw::recv_timeout(nodeno, None) { + }); + match reply { Ok(drtioaux::Packet::I2cWriteReply { succeeded, ack }) => { - if succeeded { Ok(ack) } else { Err(()) } + if succeeded { Ok(ack) } else { Err("i2c write reply error") } } Ok(_) => { error!("received unexpected aux packet"); - Err(()) + Err("received unexpected aux packet") } Err(e) => { error!("aux packet error ({})", e); - Err(()) + Err(e) } } } - pub fn read(nodeno: u8, busno: u8, ack: bool) -> Result { - let request = drtioaux::Packet::I2cReadRequest { + pub fn read(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8, ack: bool) -> Result { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::I2cReadRequest { + destination: destination, busno: busno, ack: ack - }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) - } - match drtioaux::hw::recv_timeout(nodeno, None) { + }); + match reply { 
Ok(drtioaux::Packet::I2cReadReply { succeeded, data }) => { - if succeeded { Ok(data) } else { Err(()) } + if succeeded { Ok(data) } else { Err("i2c read reply error") } } Ok(_) => { error!("received unexpected aux packet"); - Err(()) + Err("received unexpected aux packet") } Err(e) => { error!("aux packet error ({})", e); - Err(()) + Err(e) } } } } -#[cfg(not(has_drtio))] -mod drtio_i2c { - pub fn start(_nodeno: u8, _busno: u8) -> Result<(), ()> { - Err(()) - } - - pub fn restart(_nodeno: u8, _busno: u8) -> Result<(), ()> { - Err(()) - } - - pub fn stop(_nodeno: u8, _busno: u8) -> Result<(), ()> { - Err(()) - } - - pub fn write(_nodeno: u8, _busno: u8, _data: u8) -> Result { - Err(()) - } - - pub fn read(_nodeno: u8, _busno: u8, _ack: bool) -> Result { - Err(()) - } -} - -mod i2c { - use board; - use super::drtio_i2c; - - pub fn start(busno: u32) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::i2c::start(node_busno) - } else { - drtio_i2c::start(nodeno, node_busno) - } - } - - pub fn restart(busno: u32) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::i2c::restart(node_busno) - } else { - drtio_i2c::restart(nodeno, node_busno) - } - } - - pub fn stop(busno: u32) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::i2c::stop(node_busno) - } else { - drtio_i2c::stop(nodeno, node_busno) - } - } - - pub fn write(busno: u32, data: u8) -> Result { - let nodeno = (busno >> 16 )as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::i2c::write(node_busno, data) - } else { - drtio_i2c::write(nodeno, node_busno, data) - } - } - - pub fn read(busno: u32, ack: bool) -> Result { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::i2c::read(node_busno, ack) - } else { - drtio_i2c::read(nodeno, node_busno, ack) - } - } -} - #[cfg(has_drtio)] -mod drtio_spi { +mod remote_spi { use drtioaux; + use rtio_mgt::drtio; + use sched::{Io, Mutex}; - fn basic_reply(nodeno: u8) -> Result<(), ()> { - match drtioaux::hw::recv_timeout(nodeno, None) { + pub fn set_config(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8, flags: u8, length: u8, div: u8, cs: u8) -> Result<(), ()> { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SpiSetConfigRequest { + destination: destination, + busno: busno, + flags: flags, + length: length, + div: div, + cs: cs + }); + match reply { Ok(drtioaux::Packet::SpiBasicReply { succeeded }) => { if succeeded { Ok(()) } else { Err(()) } } - Ok(_) => { - error!("received unexpected aux packet"); + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); Err(()) } Err(e) => { @@ -194,54 +147,38 @@ mod drtio_spi { } } - pub fn set_config(nodeno: u8, busno: u8, flags: u8, write_div: u8, read_div: u8) -> Result<(), ()> { - let request = drtioaux::Packet::SpiSetConfigRequest { - busno: busno, - flags: flags, - write_div: write_div, - read_div: read_div - }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) - } - basic_reply(nodeno) - } - - pub fn set_xfer(nodeno: u8, busno: u8, chip_select: u16, write_length: u8, read_length: u8) -> Result<(), ()> { - let request = drtioaux::Packet::SpiSetXferRequest { - busno: busno, - chip_select: chip_select, - write_length: write_length, - read_length: read_length - }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return 
Err(()) - } - basic_reply(nodeno) - } - - pub fn write(nodeno: u8, busno: u8, data: u32) -> Result<(), ()> { - let request = drtioaux::Packet::SpiWriteRequest { + pub fn write(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8, data: u32) -> Result<(), ()> { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SpiWriteRequest { + destination: destination, busno: busno, data: data - }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) + }); + match reply { + Ok(drtioaux::Packet::SpiBasicReply { succeeded }) => { + if succeeded { Ok(()) } else { Err(()) } + } + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); + Err(()) + } + Err(e) => { + error!("aux packet error ({})", e); + Err(()) + } } - basic_reply(nodeno) } - pub fn read(nodeno: u8, busno: u8) -> Result { - let request = drtioaux::Packet::SpiReadRequest { busno: busno }; - if drtioaux::hw::send(nodeno, &request).is_err() { - return Err(()) - } - match drtioaux::hw::recv_timeout(nodeno, None) { + pub fn read(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, busno: u8) -> Result { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::SpiReadRequest { + destination: destination, + busno: busno + }); + match reply { Ok(drtioaux::Packet::SpiReadReply { succeeded, data }) => { if succeeded { Ok(data) } else { Err(()) } } - Ok(_) => { - error!("received unexpected aux packet"); + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); Err(()) } Err(e) => { @@ -252,145 +189,89 @@ mod drtio_spi { } } + +#[cfg(has_drtio)] +macro_rules! dispatch { + ($io:ident, $aux_mutex:ident, $mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ + let destination = ($busno >> 16) as u8; + let busno = $busno as u8; + let hop = $routing_table.0[destination as usize][0]; + if hop == 0 { + $mod_local::$func(busno, $($param, )*) + } else { + let linkno = hop - 1; + $mod_remote::$func($io, $aux_mutex, linkno, destination, busno, $($param, )*) + } + }} +} + #[cfg(not(has_drtio))] -mod drtio_spi { - pub fn set_config(_nodeno: u8, _busno: u8, _flags: u8, _write_div: u8, _read_div: u8) -> Result<(), ()> { - Err(()) - } - - pub fn set_xfer(_nodeno: u8, _busno: u8, _chip_select: u16, _write_length: u8, _read_length: u8) -> Result<(), ()> { - Err(()) - } - - pub fn write(_nodeno: u8, _busno: u8, _data: u32) -> Result<(), ()> { - Err(()) - } - - pub fn read(_nodeno: u8, _busno: u8) -> Result { - Err(()) - } +macro_rules! 
dispatch { + ($io:ident, $aux_mutex:ident,$mod_local:ident, $mod_remote:ident, $routing_table:ident, $busno:expr, $func:ident $(, $param:expr)*) => {{ + let busno = $busno as u8; + $mod_local::$func(busno, $($param, )*) + }} } -mod spi { - use board; - use super::drtio_spi; - - pub fn set_config(busno: u32, flags: u8, write_div: u8, read_div: u8) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::spi::set_config(node_busno, flags, write_div, read_div) - } else { - drtio_spi::set_config(nodeno, node_busno, flags, write_div, read_div) - } - } - - pub fn set_xfer(busno: u32, chip_select: u16, write_length: u8, read_length: u8) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::spi::set_xfer(node_busno, chip_select, write_length, read_length) - } else { - drtio_spi::set_xfer(nodeno, node_busno, chip_select, write_length, read_length) - } - } - - pub fn write(busno: u32, data: u32) -> Result<(), ()> { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::spi::write(node_busno, data) - } else { - drtio_spi::write(nodeno, node_busno, data) - } - } - - pub fn read(busno: u32) -> Result { - let nodeno = (busno >> 16) as u8; - let node_busno = busno as u8; - if nodeno == 0 { - board::spi::read(node_busno) - } else { - drtio_spi::read(nodeno, node_busno) - } - } -} - -pub fn process_kern_hwreq(io: &Io, request: &kern::Message) -> io::Result { +pub fn process_kern_hwreq(io: &Io, aux_mutex: &Mutex, + _routing_table: &drtio_routing::RoutingTable, + _up_destinations: &Urc>, + request: &kern::Message) -> Result> { match request { - #[cfg(has_rtio_core)] &kern::RtioInitRequest => { info!("resetting RTIO"); - rtio_mgt::init_core(); + rtio_mgt::reset(io, aux_mutex); kern_acknowledge() } - #[cfg(has_rtio_core)] - &kern::DrtioChannelStateRequest { channel } => { - let (fifo_space, last_timestamp) = rtio_mgt::drtio_dbg::get_channel_state(channel); - kern_send(io, &kern::DrtioChannelStateReply { fifo_space: fifo_space, - last_timestamp: last_timestamp }) - } - #[cfg(has_rtio_core)] - &kern::DrtioResetChannelStateRequest { channel } => { - rtio_mgt::drtio_dbg::reset_channel_state(channel); - kern_acknowledge() - } - #[cfg(has_rtio_core)] - &kern::DrtioGetFifoSpaceRequest { channel } => { - rtio_mgt::drtio_dbg::get_fifo_space(channel); - kern_acknowledge() - } - #[cfg(has_rtio_core)] - &kern::DrtioPacketCountRequest { linkno } => { - let (tx_cnt, rx_cnt) = rtio_mgt::drtio_dbg::get_packet_counts(linkno); - kern_send(io, &kern::DrtioPacketCountReply { tx_cnt: tx_cnt, rx_cnt: rx_cnt }) - } - #[cfg(has_rtio_core)] - &kern::DrtioFifoSpaceReqCountRequest { linkno } => { - let cnt = rtio_mgt::drtio_dbg::get_fifo_space_req_count(linkno); - kern_send(io, &kern::DrtioFifoSpaceReqCountReply { cnt: cnt }) + &kern::RtioDestinationStatusRequest { destination: _destination } => { + #[cfg(has_drtio)] + let up = { + let up_destinations = _up_destinations.borrow(); + up_destinations[_destination as usize] + }; + #[cfg(not(has_drtio))] + let up = true; + kern_send(io, &kern::RtioDestinationStatusReply { up: up }) } &kern::I2cStartRequest { busno } => { - let succeeded = i2c::start(busno).is_ok(); + let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, start).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cRestartRequest { busno } => { - let succeeded = i2c::restart(busno).is_ok(); + let succeeded = 
dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, restart).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cStopRequest { busno } => { - let succeeded = i2c::stop(busno).is_ok(); + let succeeded = dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, stop).is_ok(); kern_send(io, &kern::I2cBasicReply { succeeded: succeeded }) } &kern::I2cWriteRequest { busno, data } => { - match i2c::write(busno, data) { + match dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, write, data) { Ok(ack) => kern_send(io, &kern::I2cWriteReply { succeeded: true, ack: ack }), Err(_) => kern_send(io, &kern::I2cWriteReply { succeeded: false, ack: false }) } } &kern::I2cReadRequest { busno, ack } => { - match i2c::read(busno, ack) { + match dispatch!(io, aux_mutex, local_i2c, remote_i2c, _routing_table, busno, read, ack) { Ok(data) => kern_send(io, &kern::I2cReadReply { succeeded: true, data: data }), Err(_) => kern_send(io, &kern::I2cReadReply { succeeded: false, data: 0xff }) } } - &kern::SpiSetConfigRequest { busno, flags, write_div, read_div } => { - let succeeded = spi::set_config(busno, flags, write_div, read_div).is_ok(); + &kern::SpiSetConfigRequest { busno, flags, length, div, cs } => { + let succeeded = dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, + set_config, flags, length, div, cs).is_ok(); kern_send(io, &kern::SpiBasicReply { succeeded: succeeded }) }, - &kern::SpiSetXferRequest { busno, chip_select, write_length, read_length } => { - let succeeded = spi::set_xfer(busno, chip_select, write_length, read_length).is_ok(); - kern_send(io, &kern::SpiBasicReply { succeeded: succeeded }) - } &kern::SpiWriteRequest { busno, data } => { - let succeeded = spi::write(busno, data).is_ok(); + let succeeded = dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, + write, data).is_ok(); kern_send(io, &kern::SpiBasicReply { succeeded: succeeded }) } &kern::SpiReadRequest { busno } => { - match spi::read(busno) { + match dispatch!(io, aux_mutex, local_spi, remote_spi, _routing_table, busno, read) { Ok(data) => kern_send(io, &kern::SpiReadReply { succeeded: true, data: data }), Err(_) => kern_send(io, &kern::SpiReadReply { succeeded: false, data: 0 }) } diff --git a/artiq/firmware/runtime/kernel.rs b/artiq/firmware/runtime/kernel.rs index 9e02ce82c..1a67af556 100644 --- a/artiq/firmware/runtime/kernel.rs +++ b/artiq/firmware/runtime/kernel.rs @@ -1,5 +1,5 @@ use core::ptr; -use board::csr; +use board_misoc::csr; use mailbox; use rpc_queue; diff --git a/artiq/firmware/runtime/lib.rs b/artiq/firmware/runtime/lib.rs deleted file mode 100644 index 61d0dba4f..000000000 --- a/artiq/firmware/runtime/lib.rs +++ /dev/null @@ -1,258 +0,0 @@ -#![no_std] -#![feature(compiler_builtins_lib, alloc, repr_simd, lang_items, const_fn, global_allocator)] - -extern crate compiler_builtins; -extern crate alloc; -extern crate cslice; -#[macro_use] -extern crate log; -extern crate byteorder; -extern crate fringe; -extern crate smoltcp; - -extern crate alloc_list; -#[macro_use] -extern crate std_artiq as std; -extern crate logger_artiq; -#[macro_use] -extern crate board; -extern crate proto; -extern crate amp; -#[cfg(has_drtio)] -extern crate drtioaux; - -use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr}; -use proto::{mgmt_proto, analyzer_proto, moninj_proto, rpc_proto, session_proto, kernel_proto}; -use amp::{mailbox, rpc_queue}; - -macro_rules! 
borrow_mut { - ($x:expr) => ({ - match $x.try_borrow_mut() { - Ok(x) => x, - Err(_) => panic!("cannot borrow mutably at {}:{}", file!(), line!()) - } - }) -} - -mod config; -#[cfg(has_ethmac)] -mod ethmac; -#[cfg(has_rtio_core)] -mod rtio_mgt; - -mod urc; -mod sched; -mod cache; -mod rtio_dma; - -mod mgmt; -mod kernel; -mod kern_hwreq; -mod session; -#[cfg(any(has_rtio_moninj, has_drtio))] -mod moninj; -#[cfg(has_rtio_analyzer)] -mod analyzer; - -fn startup() { - board::clock::init(); - info!("ARTIQ runtime starting..."); - info!("software version {}", include_str!(concat!(env!("OUT_DIR"), "/git-describe"))); - info!("gateware version {}", board::ident(&mut [0; 64])); - - #[cfg(has_serwb_phy_amc)] - board::serwb::wait_init(); - - let t = board::clock::get_ms(); - info!("press 'e' to erase startup and idle kernels..."); - while board::clock::get_ms() < t + 1000 { - if unsafe { board::csr::uart::rxtx_read() == b'e' } { - config::remove("startup_kernel").unwrap(); - config::remove("idle_kernel").unwrap(); - info!("startup and idle kernels erased"); - break - } - } - info!("continuing boot"); - - #[cfg(has_i2c)] - board::i2c::init(); - #[cfg(si5324_free_running)] - setup_si5324_free_running(); - #[cfg(has_hmc830_7043)] - board::hmc830_7043::init().expect("cannot initialize HMC830/7043"); - #[cfg(has_ad9154)] - board::ad9154::init().expect("cannot initialize AD9154"); - - #[cfg(has_ethmac)] - startup_ethernet(); - #[cfg(not(has_ethmac))] - { - info!("done"); - loop {} - } -} - -#[cfg(si5324_free_running)] -fn setup_si5324_free_running() -{ - // 150MHz output (hardcoded) - const SI5324_SETTINGS: board::si5324::FrequencySettings - = board::si5324::FrequencySettings { - n1_hs : 9, - nc1_ls : 4, - n2_hs : 10, - n2_ls : 33732, - n31 : 9370, - n32 : 7139, - bwsel : 3 - }; - board::si5324::setup(&SI5324_SETTINGS).expect("cannot initialize Si5324"); -} - -#[cfg(has_ethmac)] -fn startup_ethernet() { - let hardware_addr; - match config::read_str("mac", |r| r?.parse()) { - Err(()) => { - hardware_addr = EthernetAddress([0x02, 0x00, 0x00, 0x00, 0x00, 0x01]); - warn!("using default MAC address {}; consider changing it", hardware_addr); - } - Ok(addr) => { - hardware_addr = addr; - info!("using MAC address {}", hardware_addr); - } - } - - let protocol_addr; - match config::read_str("ip", |r| r?.parse()) { - Err(()) => { - protocol_addr = IpAddress::v4(192, 168, 1, 50); - info!("using default IP address {}", protocol_addr); - } - Ok(addr) => { - protocol_addr = addr; - info!("using IP address {}", protocol_addr); - } - } - - // fn _net_trace_writer(timestamp: u64, printer: smoltcp::wire::PrettyPrinter) - // where U: smoltcp::wire::pretty_print::PrettyPrint { - // let seconds = timestamp / 1000; - // let micros = timestamp % 1000 * 1000; - // print!("\x1b[37m[{:6}.{:06}s]\n{}\x1b[0m", seconds, micros, printer) - // } - - let net_device = unsafe { ethmac::EthernetDevice::new() }; - // let net_device = smoltcp::phy::EthernetTracer::new(net_device, _net_trace_writer); - let mut neighbor_cache_storage = [None; 8]; - let neighbor_cache = - smoltcp::iface::NeighborCache::new(&mut neighbor_cache_storage[..]); - let mut interface = - smoltcp::iface::EthernetInterfaceBuilder::new(net_device) - .neighbor_cache(neighbor_cache) - .ethernet_addr(hardware_addr) - .ip_addrs([IpCidr::new(protocol_addr, 0)]) - .finalize(); - - let mut scheduler = sched::Scheduler::new(); - let io = scheduler.io(); - #[cfg(has_rtio_core)] - rtio_mgt::startup(&io); - io.spawn(4096, mgmt::thread); - io.spawn(16384, session::thread); - 
#[cfg(any(has_rtio_moninj, has_drtio))] - io.spawn(4096, moninj::thread); - #[cfg(has_rtio_analyzer)] - io.spawn(4096, analyzer::thread); - - match config::read_str("log_level", |r| r?.parse()) { - Err(()) => (), - Ok(log_level_filter) => { - info!("log level set to {} by `log_level` config key", - log_level_filter); - logger_artiq::BufferLogger::with(|logger| - logger.set_max_log_level(log_level_filter)); - } - } - - match config::read_str("uart_log_level", |r| r?.parse()) { - Err(()) => { - info!("UART log level set to INFO by default"); - }, - Ok(uart_log_level_filter) => { - info!("UART log level set to {} by `uart_log_level` config key", - uart_log_level_filter); - logger_artiq::BufferLogger::with(|logger| - logger.set_uart_log_level(uart_log_level_filter)); - } - } - - let mut net_stats = ethmac::EthernetStatistics::new(); - loop { - scheduler.run(); - - match interface.poll(&mut *borrow_mut!(scheduler.sockets()), - board::clock::get_ms()) { - Ok(_poll_at) => (), - Err(smoltcp::Error::Unrecognized) => (), - Err(err) => warn!("network error: {}", err) - } - - if let Some(_net_stats_diff) = net_stats.update() { - warn!("ethernet mac:{}", ethmac::EthernetStatistics::new()); - } - } -} - -#[global_allocator] -static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY; -static mut LOG_BUFFER: [u8; 1<<17] = [0; 1<<17]; - -#[no_mangle] -pub extern fn main() -> i32 { - unsafe { - extern { - static mut _fheap: u8; - static mut _eheap: u8; - } - ALLOC.add_range(&mut _fheap, &mut _eheap); - - logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(startup); - - 0 - } -} - -#[no_mangle] -pub extern fn exception_handler(vect: u32, _regs: *const u32, pc: u32, ea: u32) { - panic!("exception {:?} at PC 0x{:x}, EA 0x{:x}", vect, pc, ea) -} - -#[no_mangle] -pub extern fn abort() { - panic!("aborted") -} - -#[no_mangle] -#[lang = "panic_fmt"] -pub extern fn panic_fmt(args: core::fmt::Arguments, file: &'static str, line: u32) -> ! { - println!("panic at {}:{}: {}", file, line, args); - - if config::read_str("panic_reboot", |r| r == Ok("1")) { - println!("rebooting..."); - unsafe { board::boot::reboot() } - } else { - println!("halting."); - println!("use `artiq_coreconfig write -s panic_reboot 1` to reboot instead"); - loop {} - } -} - -// Allow linking with crates that are built as -Cpanic=unwind even if we use -Cpanic=abort. -// This is never called. -#[allow(non_snake_case)] -#[no_mangle] -pub extern fn _Unwind_Resume() -> ! 
{ - loop {} -} diff --git a/artiq/firmware/runtime/main.rs b/artiq/firmware/runtime/main.rs new file mode 100644 index 000000000..91646cc5e --- /dev/null +++ b/artiq/firmware/runtime/main.rs @@ -0,0 +1,346 @@ +#![feature(lang_items, alloc, try_from, nonzero, asm, + panic_implementation, panic_info_message, + const_slice_len)] +#![no_std] + +extern crate eh; +#[macro_use] +extern crate alloc; +extern crate failure; +#[macro_use] +extern crate failure_derive; +extern crate cslice; +#[macro_use] +extern crate log; +extern crate byteorder; +extern crate fringe; +extern crate managed; +extern crate smoltcp; + +extern crate alloc_list; +extern crate unwind_backtrace; +extern crate io; +#[macro_use] +extern crate board_misoc; +extern crate board_artiq; +extern crate logger_artiq; +extern crate proto_artiq; + +use core::cell::RefCell; +use core::convert::TryFrom; +use smoltcp::wire::IpCidr; + +use board_misoc::{csr, irq, ident, clock, boot, config, net_settings}; +#[cfg(has_ethmac)] +use board_misoc::ethmac; +#[cfg(has_drtio)] +use board_artiq::drtioaux; +use board_artiq::drtio_routing; +use board_artiq::{mailbox, rpc_queue}; +use proto_artiq::{mgmt_proto, moninj_proto, rpc_proto, session_proto, kernel_proto}; +#[cfg(has_rtio_analyzer)] +use proto_artiq::analyzer_proto; + +mod rtio_clocking; +mod rtio_mgt; + +mod urc; +mod sched; +mod cache; +mod rtio_dma; + +mod mgmt; +mod profiler; +mod kernel; +mod kern_hwreq; +mod session; +#[cfg(any(has_rtio_moninj, has_drtio))] +mod moninj; +#[cfg(has_rtio_analyzer)] +mod analyzer; + +#[cfg(has_grabber)] +fn grabber_thread(io: sched::Io) { + loop { + board_artiq::grabber::tick(); + io.sleep(200).unwrap(); + } +} + +fn setup_log_levels() { + match config::read_str("log_level", |r| r.map(|s| s.parse())) { + Ok(Ok(log_level_filter)) => { + info!("log level set to {} by `log_level` config key", + log_level_filter); + log::set_max_level(log_level_filter); + } + _ => info!("log level set to INFO by default") + } + match config::read_str("uart_log_level", |r| r.map(|s| s.parse())) { + Ok(Ok(uart_log_level_filter)) => { + info!("UART log level set to {} by `uart_log_level` config key", + uart_log_level_filter); + logger_artiq::BufferLogger::with(|logger| + logger.set_uart_log_level(uart_log_level_filter)); + } + _ => info!("UART log level set to INFO by default") + } +} + +fn startup() { + irq::set_mask(0); + irq::set_ie(true); + clock::init(); + info!("ARTIQ runtime starting..."); + info!("software ident {}", csr::CONFIG_IDENTIFIER_STR); + info!("gateware ident {}", ident::read(&mut [0; 64])); + + setup_log_levels(); + #[cfg(has_i2c)] + board_misoc::i2c::init().expect("I2C initialization failed"); + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + let (mut io_expander0, mut io_expander1); + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + { + io_expander0 = board_misoc::io_expander::IoExpander::new(0); + io_expander1 = board_misoc::io_expander::IoExpander::new(1); + io_expander0.init().expect("I2C I/O expander #0 initialization failed"); + io_expander1.init().expect("I2C I/O expander #1 initialization failed"); + + // Actively drive TX_DISABLE to false on SFP0..3 + io_expander0.set_oe(0, 1 << 1).unwrap(); + io_expander0.set_oe(1, 1 << 1).unwrap(); + io_expander1.set_oe(0, 1 << 1).unwrap(); + io_expander1.set_oe(1, 1 << 1).unwrap(); + io_expander0.set(0, 1, false); + io_expander0.set(1, 1, false); + io_expander1.set(0, 1, false); + io_expander1.set(1, 1, false); + io_expander0.service().unwrap(); + io_expander1.service().unwrap(); + } + 
rtio_clocking::init(); + + let mut net_device = unsafe { ethmac::EthernetDevice::new() }; + net_device.reset_phy_if_any(); + + let net_device = { + use smoltcp::time::Instant; + use smoltcp::wire::PrettyPrinter; + use smoltcp::wire::EthernetFrame; + + fn net_trace_writer(timestamp: Instant, printer: PrettyPrinter>) { + print!("\x1b[37m[{:6}.{:03}s]\n{}\x1b[0m\n", + timestamp.secs(), timestamp.millis(), printer) + } + + fn net_trace_silent(_timestamp: Instant, _printer: PrettyPrinter>) {} + + let net_trace_fn: fn(Instant, PrettyPrinter>); + match config::read_str("net_trace", |r| r.map(|s| s == "1")) { + Ok(true) => net_trace_fn = net_trace_writer, + _ => net_trace_fn = net_trace_silent + } + smoltcp::phy::EthernetTracer::new(net_device, net_trace_fn) + }; + + let neighbor_cache = + smoltcp::iface::NeighborCache::new(alloc::btree_map::BTreeMap::new()); + let net_addresses = net_settings::get_adresses(); + info!("network addresses: {}", net_addresses); + let mut interface = match net_addresses.ipv6_addr { + Some(addr) => { + let ip_addrs = [ + IpCidr::new(net_addresses.ipv4_addr, 0), + IpCidr::new(net_addresses.ipv6_ll_addr, 0), + IpCidr::new(addr, 0) + ]; + smoltcp::iface::EthernetInterfaceBuilder::new(net_device) + .ethernet_addr(net_addresses.hardware_addr) + .ip_addrs(ip_addrs) + .neighbor_cache(neighbor_cache) + .finalize() + } + None => { + let ip_addrs = [ + IpCidr::new(net_addresses.ipv4_addr, 0), + IpCidr::new(net_addresses.ipv6_ll_addr, 0) + ]; + smoltcp::iface::EthernetInterfaceBuilder::new(net_device) + .ethernet_addr(net_addresses.hardware_addr) + .ip_addrs(ip_addrs) + .neighbor_cache(neighbor_cache) + .finalize() + } + }; + + #[cfg(has_drtio)] + let drtio_routing_table = urc::Urc::new(RefCell::new( + drtio_routing::config_routing_table(csr::DRTIO.len()))); + #[cfg(not(has_drtio))] + let drtio_routing_table = urc::Urc::new(RefCell::new( + drtio_routing::RoutingTable::default_empty())); + let up_destinations = urc::Urc::new(RefCell::new( + [false; drtio_routing::DEST_COUNT])); + #[cfg(has_drtio_routing)] + drtio_routing::interconnect_disable_all(); + let aux_mutex = sched::Mutex::new(); + + let mut scheduler = sched::Scheduler::new(); + let io = scheduler.io(); + + rtio_mgt::startup(&io, &aux_mutex, &drtio_routing_table, &up_destinations); + + io.spawn(4096, mgmt::thread); + { + let aux_mutex = aux_mutex.clone(); + let drtio_routing_table = drtio_routing_table.clone(); + let up_destinations = up_destinations.clone(); + io.spawn(16384, move |io| { session::thread(io, &aux_mutex, &drtio_routing_table, &up_destinations) }); + } + #[cfg(any(has_rtio_moninj, has_drtio))] + { + let aux_mutex = aux_mutex.clone(); + let drtio_routing_table = drtio_routing_table.clone(); + io.spawn(4096, move |io| { moninj::thread(io, &aux_mutex, &drtio_routing_table) }); + } + #[cfg(has_rtio_analyzer)] + io.spawn(4096, analyzer::thread); + + #[cfg(has_grabber)] + io.spawn(4096, grabber_thread); + + let mut net_stats = ethmac::EthernetStatistics::new(); + loop { + scheduler.run(); + + { + let sockets = &mut *scheduler.sockets().borrow_mut(); + loop { + let timestamp = smoltcp::time::Instant::from_millis(clock::get_ms() as i64); + match interface.poll(sockets, timestamp) { + Ok(true) => (), + Ok(false) => break, + Err(smoltcp::Error::Unrecognized) => (), + Err(err) => debug!("network error: {}", err) + } + } + } + + if let Some(_net_stats_diff) = net_stats.update() { + debug!("ethernet mac:{}", ethmac::EthernetStatistics::new()); + } + + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + { + 
io_expander0.service().expect("I2C I/O expander #0 service failed"); + io_expander1.service().expect("I2C I/O expander #1 service failed"); + } + } +} + +#[global_allocator] +static mut ALLOC: alloc_list::ListAlloc = alloc_list::EMPTY; +static mut LOG_BUFFER: [u8; 1<<17] = [0; 1<<17]; + +#[no_mangle] +pub extern fn main() -> i32 { + unsafe { + extern { + static mut _fheap: u8; + static mut _eheap: u8; + } + ALLOC.add_range(&mut _fheap, &mut _eheap); + + logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(startup); + + 0 + } +} + +#[no_mangle] +pub extern fn exception(vect: u32, _regs: *const u32, pc: u32, ea: u32) { + let vect = irq::Exception::try_from(vect).expect("unknown exception"); + match vect { + irq::Exception::Interrupt => + while irq::pending_mask() != 0 { + match () { + #[cfg(has_timer1)] + () if irq::is_pending(csr::TIMER1_INTERRUPT) => + profiler::sample(pc as usize), + _ => panic!("spurious irq {}", irq::pending_mask().trailing_zeros()) + } + }, + _ => { + fn hexdump(addr: u32) { + let addr = (addr - addr % 4) as *const u32; + let mut ptr = addr; + println!("@ {:08p}", ptr); + for _ in 0..4 { + print!("+{:04x}: ", ptr as usize - addr as usize); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x}\n", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + } + } + + hexdump(pc); + hexdump(ea); + panic!("exception {:?} at PC 0x{:x}, EA 0x{:x}", vect, pc, ea) + } + } +} + +#[no_mangle] +pub extern fn abort() { + println!("aborted"); + loop {} +} + +#[no_mangle] // https://github.com/rust-lang/rust/issues/{38281,51647} +#[lang = "oom"] // https://github.com/rust-lang/rust/issues/51540 +pub fn oom(layout: core::alloc::Layout) -> ! { + panic!("heap view: {}\ncannot allocate layout: {:?}", unsafe { &ALLOC }, layout) +} + +#[no_mangle] // https://github.com/rust-lang/rust/issues/{38281,51647} +#[panic_implementation] +pub fn panic_impl(info: &core::panic::PanicInfo) -> ! { + irq::set_ie(false); + + #[cfg(has_error_led)] + unsafe { + csr::error_led::out_write(1); + } + + if let Some(location) = info.location() { + print!("panic at {}:{}:{}", location.file(), location.line(), location.column()); + } else { + print!("panic at unknown location"); + } + if let Some(message) = info.message() { + println!(": {}", message); + } else { + println!(""); + } + + println!("backtrace for software version {}:", csr::CONFIG_IDENTIFIER_STR); + let _ = unwind_backtrace::backtrace(|ip| { + // Backtrace gives us the return address, i.e. the address after the delay slot, + // but we're interested in the call instruction. 
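+        // For example (hypothetical addresses, 4-byte instructions, one delay slot):
+        // a call at 0x1000 has its delay slot at 0x1004 and returns to 0x1008, so
+        // 0x1008 - 2 * 4 = 0x1000 points back at the call itself.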
+ println!("{:#08x}", ip - 2 * 4); + }); + + if config::read_str("panic_reset", |r| r == Ok("1")) { + println!("restarting..."); + unsafe { + kernel::stop(); + boot::reset(); + } + } else { + println!("halting."); + println!("use `artiq_coremgmt config write -s panic_reset 1` to restart instead"); + loop {} + } +} diff --git a/artiq/firmware/runtime/mgmt.rs b/artiq/firmware/runtime/mgmt.rs index 0cdb7eef6..c03682e8a 100644 --- a/artiq/firmware/runtime/mgmt.rs +++ b/artiq/firmware/runtime/mgmt.rs @@ -1,100 +1,169 @@ -use std::io::{self, Read, Write}; -use log::LogLevelFilter; +use log::{self, LevelFilter}; + +use io::{Write, ProtoWrite, Error as IoError}; +use board_misoc::{config, boot}; use logger_artiq::BufferLogger; -use sched::Io; -use sched::{TcpListener, TcpStream}; -use board; -use proto::WriteExt; use mgmt_proto::*; +use sched::{Io, TcpListener, TcpStream, Error as SchedError}; +use profiler; -fn check_magic(stream: &mut TcpStream) -> io::Result<()> { - const MAGIC: &'static [u8] = b"ARTIQ management\n"; - - let mut magic: [u8; 17] = [0; 17]; - stream.read_exact(&mut magic)?; - if magic != MAGIC { - Err(io::Error::new(io::ErrorKind::InvalidData, "unrecognized magic")) - } else { - Ok(()) +impl From for Error { + fn from(value: SchedError) -> Error { + Error::Io(IoError::Other(value)) } } -fn worker(io: &Io, stream: &mut TcpStream) -> io::Result<()> { - check_magic(stream)?; +fn worker(io: &Io, stream: &mut TcpStream) -> Result<(), Error> { + read_magic(stream)?; info!("new connection from {}", stream.remote_endpoint()); loop { match Request::read_from(stream)? { Request::GetLog => { BufferLogger::with(|logger| { - logger.extract(|log| { - Reply::LogContent(log).write_to(stream) - }) + let mut buffer = io.until_ok(|| logger.buffer())?; + Reply::LogContent(buffer.extract()).write_to(stream) })?; - }, - + } Request::ClearLog => { - BufferLogger::with(|logger| - logger.clear()); + BufferLogger::with(|logger| -> Result<(), Error> { + let mut buffer = io.until_ok(|| logger.buffer())?; + Ok(buffer.clear()) + })?; + Reply::Success.write_to(stream)?; - }, - + } Request::PullLog => { - loop { - io.until(|| BufferLogger::with(|logger| !logger.is_empty()))?; + BufferLogger::with(|logger| -> Result<(), Error> { + loop { + // Do this *before* acquiring the buffer, since that sets the log level + // to OFF. + let log_level = log::max_level(); - BufferLogger::with(|logger| { - let log_level = logger.max_log_level(); - logger.extract(|log| { - stream.write_string(log)?; + let mut buffer = io.until_ok(|| logger.buffer())?; + if buffer.is_empty() { continue } - if log_level == LogLevelFilter::Trace { - // Hold exclusive access over the logger until we get positive - // acknowledgement; otherwise we get an infinite loop of network - // trace messages being transmitted and causing more network - // trace messages to be emitted. - // - // Any messages unrelated to this management socket that arrive - // while it is flushed are lost, but such is life. - stream.flush() - } else { - Ok(()) - } - })?; + stream.write_string(buffer.extract())?; - Ok(logger.clear()) as io::Result<()> - })?; - } - }, + if log_level == LevelFilter::Trace { + // Hold exclusive access over the logger until we get positive + // acknowledgement; otherwise we get an infinite loop of network + // trace messages being transmitted and causing more network + // trace messages to be emitted. + // + // Any messages unrelated to this management socket that arrive + // while it is flushed are lost, but such is life. 
+ stream.flush()?; + } + // Clear the log *after* flushing the network buffers, or we're just + // going to resend all the trace messages on the next iteration. + buffer.clear(); + } + })?; + } Request::SetLogFilter(level) => { info!("changing log level to {}", level); - BufferLogger::with(|logger| - logger.set_max_log_level(level)); + log::set_max_level(level); Reply::Success.write_to(stream)?; - }, - + } Request::SetUartLogFilter(level) => { info!("changing UART log level to {}", level); BufferLogger::with(|logger| logger.set_uart_log_level(level)); Reply::Success.write_to(stream)?; - }, + } + + Request::ConfigRead { ref key } => { + config::read(key, |result| { + match result { + Ok(value) => Reply::ConfigData(&value).write_to(stream), + Err(_) => Reply::Error.write_to(stream) + } + })?; + } + Request::ConfigWrite { ref key, ref value } => { + match config::write(key, value) { + Ok(_) => Reply::Success.write_to(stream), + Err(_) => Reply::Error.write_to(stream) + }?; + } + Request::ConfigRemove { ref key } => { + match config::remove(key) { + Ok(()) => Reply::Success.write_to(stream), + Err(_) => Reply::Error.write_to(stream) + }?; + + } + Request::ConfigErase => { + match config::erase() { + Ok(()) => Reply::Success.write_to(stream), + Err(_) => Reply::Error.write_to(stream) + }?; + } + + Request::StartProfiler { interval_us, hits_size, edges_size } => { + match profiler::start(interval_us as u64, + hits_size as usize, edges_size as usize) { + Ok(()) => Reply::Success.write_to(stream)?, + Err(()) => Reply::Unavailable.write_to(stream)? + } + } + Request::StopProfiler => { + profiler::stop(); + Reply::Success.write_to(stream)?; + } + Request::GetProfile => { + profiler::pause(|profile| { + let profile = match profile { + None => return Reply::Unavailable.write_to(stream), + Some(profile) => profile + }; + + Reply::Profile.write_to(stream)?; + { + let hits = profile.hits(); + stream.write_u32(hits.len() as u32)?; + for (&addr, &count) in hits.iter() { + stream.write_u32(addr.as_raw() as u32)?; + stream.write_u32(count)?; + } + } + { + let edges = profile.edges(); + stream.write_u32(edges.len() as u32)?; + for (&(caller, callee), &count) in edges.iter() { + stream.write_u32(caller.as_raw() as u32)?; + stream.write_u32(callee.as_raw() as u32)?; + stream.write_u32(count)?; + } + } + + Ok(()) + })?; + } Request::Hotswap(firmware) => { - warn!("hotswapping firmware"); Reply::RebootImminent.write_to(stream)?; stream.close()?; stream.flush()?; - unsafe { board::boot::hotswap(&firmware) } - }, + profiler::stop(); + warn!("hotswapping firmware"); + unsafe { boot::hotswap(&firmware) } + } Request::Reboot => { Reply::RebootImminent.write_to(stream)?; stream.close()?; - warn!("rebooting"); - unsafe { board::boot::reboot() } + stream.flush()?; + + profiler::stop(); + warn!("restarting"); + unsafe { boot::reset() } } + + Request::DebugAllocator => + unsafe { println!("{}", ::ALLOC) }, }; } } @@ -110,8 +179,7 @@ pub fn thread(io: Io) { let mut stream = TcpStream::from_handle(&io, stream); match worker(&io, &mut stream) { Ok(()) => (), - Err(ref err) if err.kind() == io::ErrorKind::UnexpectedEof => (), - Err(ref err) if err.kind() == io::ErrorKind::WriteZero => (), + Err(Error::Io(IoError::UnexpectedEnd)) => (), Err(err) => error!("aborted: {}", err) } }); diff --git a/artiq/firmware/runtime/moninj.rs b/artiq/firmware/runtime/moninj.rs index 76de9cfbb..8534376d5 100644 --- a/artiq/firmware/runtime/moninj.rs +++ b/artiq/firmware/runtime/moninj.rs @@ -1,170 +1,127 @@ -use std::io::{self, Read}; -use 
std::btree_map::BTreeMap; - -use sched::Io; -use sched::{TcpListener, TcpStream}; -use board::{clock, csr}; -#[cfg(has_drtio)] -use drtioaux; +use alloc::btree_map::BTreeMap; +use core::cell::RefCell; +use io::Error as IoError; use moninj_proto::*; +use sched::{Io, Mutex, TcpListener, TcpStream, Error as SchedError}; +use urc::Urc; +use board_misoc::clock; +use board_artiq::drtio_routing; +#[cfg(has_rtio_moninj)] +mod local_moninj { + use board_misoc::csr; -fn check_magic(stream: &mut TcpStream) -> io::Result<()> { - const MAGIC: &'static [u8] = b"ARTIQ moninj\n"; + pub fn read_probe(channel: u16, probe: u8) -> u32 { + unsafe { + csr::rtio_moninj::mon_chan_sel_write(channel as _); + csr::rtio_moninj::mon_probe_sel_write(probe); + csr::rtio_moninj::mon_value_update_write(1); + csr::rtio_moninj::mon_value_read() as u32 + } + } - let mut magic: [u8; 13] = [0; 13]; - stream.read_exact(&mut magic)?; - if magic != MAGIC { - Err(io::Error::new(io::ErrorKind::InvalidData, "unrecognized magic")) - } else { - Ok(()) + pub fn inject(channel: u16, overrd: u8, value: u8) { + unsafe { + csr::rtio_moninj::inj_chan_sel_write(channel as _); + csr::rtio_moninj::inj_override_sel_write(overrd); + csr::rtio_moninj::inj_value_write(value); + } + } + + pub fn read_injection_status(channel: u16, overrd: u8) -> u8 { + unsafe { + csr::rtio_moninj::inj_chan_sel_write(channel as _); + csr::rtio_moninj::inj_override_sel_write(overrd); + csr::rtio_moninj::inj_value_read() + } } } -#[cfg(has_rtio_moninj)] -fn read_probe_local(channel: u16, probe: u8) -> u32 { - unsafe { - csr::rtio_moninj::mon_chan_sel_write(channel as _); - csr::rtio_moninj::mon_probe_sel_write(probe); - csr::rtio_moninj::mon_value_update_write(1); - csr::rtio_moninj::mon_value_read() as u32 +#[cfg(not(has_rtio_moninj))] +mod local_moninj { + pub fn read_probe(_channel: u16, _probe: u8) -> u32 { 0 } + + pub fn inject(_channel: u16, _overrd: u8, _value: u8) { } + + pub fn read_injection_status(_channel: u16, _overrd: u8) -> u8 { 0 } +} + +#[cfg(has_drtio)] +mod remote_moninj { + use drtioaux; + use rtio_mgt::drtio; + use sched::{Io, Mutex}; + + pub fn read_probe(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, channel: u16, probe: u8) -> u32 { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::MonitorRequest { + destination: destination, + channel: channel, + probe: probe + }); + match reply { + Ok(drtioaux::Packet::MonitorReply { value }) => return value, + Ok(packet) => error!("received unexpected aux packet: {:?}", packet), + Err(e) => error!("aux packet error ({})", e) + } + 0 + } + + pub fn inject(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, channel: u16, overrd: u8, value: u8) { + let _lock = aux_mutex.lock(io).unwrap(); + drtioaux::send(linkno, &drtioaux::Packet::InjectionRequest { + destination: destination, + channel: channel, + overrd: overrd, + value: value + }).unwrap(); + } + + pub fn read_injection_status(io: &Io, aux_mutex: &Mutex, linkno: u8, destination: u8, channel: u16, overrd: u8) -> u8 { + let reply = drtio::aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::InjectionStatusRequest { + destination: destination, + channel: channel, + overrd: overrd + }); + match reply { + Ok(drtioaux::Packet::InjectionStatusReply { value }) => return value, + Ok(packet) => error!("received unexpected aux packet: {:?}", packet), + Err(e) => error!("aux packet error ({})", e) + } + 0 } } #[cfg(has_drtio)] -fn read_probe_drtio(nodeno: u8, channel: u16, probe: u8) -> u32 { - let request = 
drtioaux::Packet::MonitorRequest { channel: channel, probe: probe }; - match drtioaux::hw::send(nodeno, &request) { - Ok(_) => (), - Err(e) => { - error!("aux packet error ({})", e); - return 0; +macro_rules! dispatch { + ($io:ident, $aux_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ + let destination = ($channel >> 16) as u8; + let channel = $channel as u16; + let hop = $routing_table.0[destination as usize][0]; + if hop == 0 { + local_moninj::$func(channel, $($param, )*) + } else { + let linkno = hop - 1; + remote_moninj::$func($io, $aux_mutex, linkno, destination, channel, $($param, )*) } - } - match drtioaux::hw::recv_timeout(nodeno, None) { - Ok(drtioaux::Packet::MonitorReply { value }) => return value, - Ok(_) => error!("received unexpected aux packet"), - Err(e) => error!("aux packet error ({})", e) - } - 0 + }} } -fn read_probe(channel: u32, probe: u8) -> u32 { - let nodeno = (channel >> 16) as u8; - let node_channel = channel as u16; - #[cfg(has_rtio_moninj)] - { - if nodeno == 0 { - return read_probe_local(node_channel, probe) - } - } - #[cfg(has_drtio)] - { - if nodeno != 0 { - return read_probe_drtio(nodeno, node_channel, probe) - } - } - error!("read_probe: unrecognized channel number {}", channel); - 0 +#[cfg(not(has_drtio))] +macro_rules! dispatch { + ($io:ident, $aux_mutex:ident, $routing_table:ident, $channel:expr, $func:ident $(, $param:expr)*) => {{ + let channel = $channel as u16; + local_moninj::$func(channel, $($param, )*) + }} } -#[cfg(has_rtio_moninj)] -fn inject_local(channel: u16, overrd: u8, value: u8) { - unsafe { - csr::rtio_moninj::inj_chan_sel_write(channel as _); - csr::rtio_moninj::inj_override_sel_write(overrd); - csr::rtio_moninj::inj_value_write(value); - } -} - -#[cfg(has_drtio)] -fn inject_drtio(nodeno: u8, channel: u16, overrd: u8, value: u8) { - let request = drtioaux::Packet::InjectionRequest { - channel: channel, - overrd: overrd, - value: value - }; - match drtioaux::hw::send(nodeno, &request) { - Ok(_) => (), - Err(e) => error!("aux packet error ({})", e) - } -} - -fn inject(channel: u32, overrd: u8, value: u8) { - let nodeno = (channel >> 16) as u8; - let node_channel = channel as u16; - #[cfg(has_rtio_moninj)] - { - if nodeno == 0 { - inject_local(node_channel, overrd, value); - return - } - } - #[cfg(has_drtio)] - { - if nodeno != 0 { - inject_drtio(nodeno, node_channel, overrd, value); - return - } - } - error!("inject: unrecognized channel number {}", channel); -} - -#[cfg(has_rtio_moninj)] -fn read_injection_status_local(channel: u16, overrd: u8) -> u8 { - unsafe { - csr::rtio_moninj::inj_chan_sel_write(channel as _); - csr::rtio_moninj::inj_override_sel_write(overrd); - csr::rtio_moninj::inj_value_read() - } -} - -#[cfg(has_drtio)] -fn read_injection_status_drtio(nodeno: u8, channel: u16, overrd: u8) -> u8 { - let request = drtioaux::Packet::InjectionStatusRequest { - channel: channel, - overrd: overrd - }; - match drtioaux::hw::send(nodeno, &request) { - Ok(_) => (), - Err(e) => { - error!("aux packet error ({})", e); - return 0; - } - } - match drtioaux::hw::recv_timeout(nodeno, None) { - Ok(drtioaux::Packet::InjectionStatusReply { value }) => return value, - Ok(_) => error!("received unexpected aux packet"), - Err(e) => error!("aux packet error ({})", e) - } - 0 -} - -fn read_injection_status(channel: u32, probe: u8) -> u8 { - let nodeno = (channel >> 16) as u8; - let node_channel = channel as u16; - #[cfg(has_rtio_moninj)] - { - if nodeno == 0 { - return read_injection_status_local(node_channel, 
probe) - } - } - #[cfg(has_drtio)] - { - if nodeno != 0 { - return read_injection_status_drtio(nodeno, node_channel, probe) - } - } - error!("read_injection_status: unrecognized channel number {}", channel); - 0 -} - -fn connection_worker(io: &Io, mut stream: &mut TcpStream) -> io::Result<()> { - let mut watch_list = BTreeMap::new(); +fn connection_worker(io: &Io, _aux_mutex: &Mutex, _routing_table: &drtio_routing::RoutingTable, + mut stream: &mut TcpStream) -> Result<(), Error> { + let mut probe_watch_list = BTreeMap::new(); + let mut inject_watch_list = BTreeMap::new(); let mut next_check = 0; - check_magic(&mut stream)?; + read_magic(&mut stream)?; info!("new connection from {}", stream.remote_endpoint()); loop { @@ -173,16 +130,23 @@ fn connection_worker(io: &Io, mut stream: &mut TcpStream) -> io::Result<()> { trace!("moninj<-host {:?}", request); match request { - HostMessage::Monitor { enable, channel, probe } => { + HostMessage::MonitorProbe { enable, channel, probe } => { if enable { - let _ = watch_list.entry((channel, probe)).or_insert(None); + let _ = probe_watch_list.entry((channel, probe)).or_insert(None); } else { - let _ = watch_list.remove(&(channel, probe)); + let _ = probe_watch_list.remove(&(channel, probe)); } }, - HostMessage::Inject { channel, overrd, value } => inject(channel, overrd, value), + HostMessage::MonitorInjection { enable, channel, overrd } => { + if enable { + let _ = inject_watch_list.entry((channel, overrd)).or_insert(None); + } else { + let _ = inject_watch_list.remove(&(channel, overrd)); + } + }, + HostMessage::Inject { channel, overrd, value } => dispatch!(io, _aux_mutex, _routing_table, channel, inject, overrd, value), HostMessage::GetInjectionStatus { channel, overrd } => { - let value = read_injection_status(channel, overrd); + let value = dispatch!(io, _aux_mutex, _routing_table, channel, read_injection_status, overrd); let reply = DeviceMessage::InjectionStatus { channel: channel, overrd: overrd, @@ -198,9 +162,9 @@ fn connection_worker(io: &Io, mut stream: &mut TcpStream) -> io::Result<()> { } if clock::get_ms() > next_check { - for (&(channel, probe), previous) in watch_list.iter_mut() { - let current = read_probe(channel, probe); - if previous.is_none() || (previous.unwrap() != current) { + for (&(channel, probe), previous) in probe_watch_list.iter_mut() { + let current = dispatch!(io, _aux_mutex, _routing_table, channel, read_probe, probe); + if previous.is_none() || previous.unwrap() != current { let message = DeviceMessage::MonitorStatus { channel: channel, probe: probe, @@ -213,22 +177,40 @@ fn connection_worker(io: &Io, mut stream: &mut TcpStream) -> io::Result<()> { *previous = Some(current); } } + for (&(channel, overrd), previous) in inject_watch_list.iter_mut() { + let current = dispatch!(io, _aux_mutex, _routing_table, channel, read_injection_status, overrd); + if previous.is_none() || previous.unwrap() != current { + let message = DeviceMessage::InjectionStatus { + channel: channel, + overrd: overrd, + value: current + }; + + trace!("moninj->host {:?}", message); + message.write_to(stream)?; + + *previous = Some(current); + } + } next_check = clock::get_ms() + 200; } - io.relinquish().unwrap(); + io.relinquish().map_err(|err| Error::Io(IoError::Other(err)))?; } } -pub fn thread(io: Io) { +pub fn thread(io: Io, aux_mutex: &Mutex, routing_table: &Urc>) { let listener = TcpListener::new(&io, 2047); listener.listen(1383).expect("moninj: cannot listen"); loop { + let aux_mutex = aux_mutex.clone(); + let routing_table = 
routing_table.clone(); let stream = listener.accept().expect("moninj: cannot accept").into_handle(); io.spawn(16384, move |io| { + let routing_table = routing_table.borrow(); let mut stream = TcpStream::from_handle(&io, stream); - match connection_worker(&io, &mut stream) { + match connection_worker(&io, &aux_mutex, &routing_table, &mut stream) { Ok(()) => {}, Err(err) => error!("moninj aborted: {}", err) } diff --git a/artiq/firmware/runtime/profiler.rs b/artiq/firmware/runtime/profiler.rs new file mode 100644 index 000000000..6df9c3f8c --- /dev/null +++ b/artiq/firmware/runtime/profiler.rs @@ -0,0 +1,278 @@ +#![cfg_attr(not(has_timer1), allow(dead_code))] + +use core::mem; +use core::fmt; +use core::num::NonZeroUsize; +use alloc::Vec; +use managed::ManagedMap; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct Address(NonZeroUsize); + +impl Address { + pub fn new(raw: usize) -> Address { + Address(NonZeroUsize::new(raw).expect("null address")) + } + + pub fn as_raw(&self) -> usize { + self.0.get() + } +} + +pub struct Profile { + hits: Vec>, + edges: Vec>, +} + +impl fmt::Debug for Profile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Profile {{ hits: vec![...; {}], edges: vec![...; {}] }}", + self.hits.len(), self.edges.len()) + } +} + +impl Profile { + pub fn new(hits_size: usize, edges_size: usize) -> Profile { + let mut hits = vec![None; hits_size]; + hits.shrink_to_fit(); + let mut edges = vec![None; edges_size]; + edges.shrink_to_fit(); + Profile { + hits: hits.into(), + edges: edges.into(), + } + } + + pub fn overhead(&self) -> usize { + let hit_size = mem::size_of::>(); + let edge_size = mem::size_of::>(); + self.hits.capacity() * hit_size + + self.edges.capacity() * edge_size + } + + pub fn has_edges(&self) -> bool { + self.edges.is_empty() + } + + pub fn hits<'a>(&'a mut self) -> ManagedMap<'a, Address, u32> { + ManagedMap::Borrowed(&mut self.hits[..]) + } + + pub fn edges<'a>(&'a mut self) -> ManagedMap<'a, (Address, Address), u32> { + ManagedMap::Borrowed(&mut self.edges[..]) + } + + pub fn record_hit(&mut self, addr: Address) -> Result<(), ()> { + let mut hits = self.hits(); + if let Some(count) = hits.get_mut(&addr) { + return Ok(*count = count.saturating_add(1)) + } + if let Err(_) = hits.insert(addr, 1) { + return Err(()) + } + return Ok(()) + } + + #[allow(dead_code)] + pub fn record_edge(&mut self, caller: Address, callee: Address) -> Result<(), ()> { + let mut edges = self.edges(); + if let Some(count) = edges.get_mut(&(caller, callee)) { + return Ok(*count = count.saturating_add(1)) + } + if let Err(_) = edges.insert((caller, callee), 1) { + return Err(()) + } + Ok(()) + } +} + +#[cfg(has_timer1)] +mod imp { + use unwind_backtrace::backtrace; + use board_misoc::{csr, irq}; + use super::{Address, Profile}; + + static mut PROFILE: Option = None; + + mod lock { + use core::ops::{Deref, DerefMut}; + use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; + + static LOCKED: AtomicUsize = ATOMIC_USIZE_INIT; + + pub struct Lock; + + impl Lock { + pub fn take() -> Result { + if LOCKED.swap(1, Ordering::SeqCst) != 0 { + Err(()) + } else { + Ok(Lock) + } + } + } + + impl Deref for Lock { + type Target = Option; + + fn deref(&self) -> &Option { + unsafe { &super::PROFILE } + } + } + + impl DerefMut for Lock { + fn deref_mut(&mut self) -> &mut Option { + unsafe { &mut super::PROFILE } + } + } + + impl Drop for Lock { + fn drop(&mut self) { + LOCKED.store(0, Ordering::SeqCst) + } + } + } + + use self::lock::Lock; + + 
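+    // `start` arms timer1 so that every `interval_us` microseconds the interrupt
+    // handler in main.rs ends up in `sample()` with the interrupted PC. Illustrative
+    // parameters only (100 us interval, 8192 hit slots, 1024 edge slots) -- this is
+    // the call a management StartProfiler request boils down to:
+    //
+    //     profiler::start(100, 8192, 1024)
+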
pub fn start(interval_us: u64, hits_size: usize, edges_size: usize) -> Result<(), ()> { + stop(); + + let profile = Profile::new(hits_size, edges_size); + info!("starting at {}us interval using {} heap bytes", + interval_us, profile.overhead()); + + *Lock::take().expect("cannot lock") = Some(profile); + + unsafe { + let reload = csr::CONFIG_CLOCK_FREQUENCY as u64 * interval_us / 1_000_000; + csr::timer1::load_write(reload); + csr::timer1::reload_write(reload); + csr::timer1::ev_pending_write(1); + csr::timer1::ev_enable_write(1); + irq::enable(csr::TIMER1_INTERRUPT); + csr::timer1::en_write(1); + } + + Ok(()) + } + + pub fn stop() { + unsafe { + if csr::timer1::en_read() == 0 || csr::timer1::ev_enable_read() == 0 { + return + } + + irq::disable(csr::TIMER1_INTERRUPT); + csr::timer1::en_write(0); + + *Lock::take().expect("cannot lock") = None; + + info!("stopped"); + } + } + + pub fn pause) -> R, R>(f: F) -> R { + unsafe { + if csr::timer1::en_read() == 0 { + return f(None) + } + + irq::disable(csr::TIMER1_INTERRUPT); + csr::timer1::en_write(0); + + let result = { + let mut profile = Lock::take().expect("cannot lock"); + f(profile.as_mut()) + }; + + irq::enable(csr::TIMER1_INTERRUPT); + csr::timer1::en_write(1); + + result + } + } + + // Skip frames: ::profiler::sample, ::exception, exception vector. + const SKIP_FRAMES: i32 = 3; + + #[inline(always)] // make the top of backtrace predictable + fn record(profile: &mut Profile, exn_pc: usize) -> Result<(), ()> { + let mut result = Ok(()); + let mut frame = -SKIP_FRAMES; + + // If we have storage for edges, use the DWARF unwinder. + // Otherwise, don't bother and use a much faster path that just looks at EPCR. + // Also, acquiring a meaningful backtrace requires libunwind + // with the https://reviews.llvm.org/D46971 patch applied. + if profile.has_edges() { + let mut prev_pc = 0; + let _ = backtrace(|pc| { + // Backtrace gives us the return address, i.e. the address after the delay slot, + // but we're interested in the call instruction, *except* when going through + // the frame directly below the exception frame, which has the address that's + // being executed. + let pc = if pc != exn_pc { pc - 2 * 4 } else { pc }; + + if frame == 0 { + result = result.and_then(|()| + profile.record_hit(Address::new(pc))); + prev_pc = pc; + } else if frame > 0 { + result = result.and_then(|()| + profile.record_edge(Address::new(pc), + Address::new(prev_pc))); + } + + prev_pc = pc; + frame += 1; + }); + } + + // If we couldn't get anything useful out of a backtrace, at least + // record a hit at the exception PC. 
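+        // `frame` starts at -SKIP_FRAMES and is incremented once per walked frame,
+        // so it only turns positive after the first frame past the skipped
+        // profiler/exception frames has recorded a hit; `frame <= 0` therefore means
+        // the unwinder recorded nothing (or was never run at all).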
+ if frame <= 0 { + result = profile.record_hit(Address::new(exn_pc)); + } + + result + } + + #[inline(never)] // see above + pub fn sample(pc: usize) { + let result = { + let mut profile = Lock::take().expect("cannot lock"); + record(profile.as_mut().expect("profiler not running"), pc) + }; + + if result.is_err() { + warn!("out of space"); + stop(); + } else { + unsafe { + csr::timer1::ev_pending_write(1); + } + } + } +} + +#[cfg(not(has_timer1))] +mod imp { + #![allow(dead_code)] + + pub fn start(_interval_us: u64, _hits_size: usize, _edges_size: usize) -> Result<(), ()> { + error!("timer not available"); + + Err(()) + } + + pub fn stop() {} + + pub fn pause) -> R, R>(f: F) -> R { + f(None) + } + + pub fn sample(_pc: usize) {} +} + +pub use self::imp::*; diff --git a/artiq/firmware/runtime/rtio_clocking.rs b/artiq/firmware/runtime/rtio_clocking.rs new file mode 100644 index 000000000..3b230bacc --- /dev/null +++ b/artiq/firmware/runtime/rtio_clocking.rs @@ -0,0 +1,195 @@ +use board_misoc::config; +#[cfg(si5324_as_synthesizer)] +use board_artiq::si5324; +#[cfg(has_drtio)] +use board_misoc::{csr, clock}; + +#[derive(Debug)] +pub enum RtioClock { + Internal = 0, + External = 1 +} + +fn get_rtio_clock_cfg() -> RtioClock { + config::read("rtio_clock", |result| { + match result { + Ok(b"i") => { + info!("using internal RTIO clock"); + RtioClock::Internal + }, + Ok(b"e") => { + info!("using external RTIO clock"); + RtioClock::External + }, + _ => { + info!("using internal RTIO clock (by default)"); + RtioClock::Internal + }, + } + }) +} + +#[cfg(has_rtio_crg)] +pub mod crg { + #[cfg(has_rtio_clock_switch)] + use super::RtioClock; + use board_misoc::{clock, csr}; + + pub fn check() -> bool { + unsafe { csr::rtio_crg::pll_locked_read() != 0 } + } + + #[cfg(has_rtio_clock_switch)] + pub fn init(clk: RtioClock) -> bool { + unsafe { + csr::rtio_crg::pll_reset_write(1); + csr::rtio_crg::clock_sel_write(clk as u8); + csr::rtio_crg::pll_reset_write(0); + } + clock::spin_us(150); + return check() + } + + #[cfg(not(has_rtio_clock_switch))] + pub fn init() -> bool { + unsafe { + csr::rtio_crg::pll_reset_write(0); + } + clock::spin_us(150); + return check() + } +} + +#[cfg(not(has_rtio_crg))] +pub mod crg { + pub fn check() -> bool { true } +} + +#[cfg(si5324_as_synthesizer)] +fn setup_si5324_as_synthesizer() { + // 125 MHz output from 10 MHz CLKINx reference, 504 Hz BW + #[cfg(all(rtio_frequency = "125.0", si5324_ext_ref, ext_ref_frequency = "10.0"))] + const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 10, + nc1_ls : 4, + n2_hs : 10, + n2_ls : 300, + n31 : 6, + n32 : 6, + bwsel : 4, + crystal_ref: false + }; + // 125MHz output, from 100MHz CLKINx reference, 586 Hz loop bandwidth + #[cfg(all(rtio_frequency = "125.0", si5324_ext_ref, ext_ref_frequency = "100.0"))] + const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 10, + nc1_ls : 4, + n2_hs : 10, + n2_ls : 260, + n31 : 52, + n32 : 52, + bwsel : 4, + crystal_ref: false + }; + // 125MHz output, from 125MHz CLKINx reference, 606 Hz loop bandwidth + #[cfg(all(rtio_frequency = "125.0", si5324_ext_ref, ext_ref_frequency = "125.0"))] + const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 5, + nc1_ls : 8, + n2_hs : 7, + n2_ls : 360, + n31 : 63, + n32 : 63, + bwsel : 4, + crystal_ref: false + }; + // 125MHz output, from crystal, 7 Hz + #[cfg(all(rtio_frequency = "125.0", not(si5324_ext_ref)))] + const SI5324_SETTINGS: si5324::FrequencySettings + = 
si5324::FrequencySettings { + n1_hs : 10, + nc1_ls : 4, + n2_hs : 10, + n2_ls : 19972, + n31 : 4565, + n32 : 4565, + bwsel : 4, + crystal_ref: true + }; + // 150MHz output, from crystal + #[cfg(all(rtio_frequency = "150.0", not(si5324_ext_ref)))] + const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 9, + nc1_ls : 4, + n2_hs : 10, + n2_ls : 33732, + n31 : 7139, + n32 : 7139, + bwsel : 3, + crystal_ref: true + }; + // 100MHz output, from crystal. Also used as reference for Sayma HMC830. + #[cfg(all(rtio_frequency = "100.0", not(si5324_ext_ref)))] + const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 9, + nc1_ls : 6, + n2_hs : 10, + n2_ls : 33732, + n31 : 7139, + n32 : 7139, + bwsel : 3, + crystal_ref: true + }; + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0", not(si5324_ext_ref)))] + let si5324_ref_input = si5324::Input::Ckin2; + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0", si5324_ext_ref))] + let si5324_ref_input = si5324::Input::Ckin1; + #[cfg(all(soc_platform = "kasli", not(hw_rev = "v2.0")))] + let si5324_ref_input = si5324::Input::Ckin2; + #[cfg(soc_platform = "metlino")] + let si5324_ref_input = si5324::Input::Ckin2; + si5324::setup(&SI5324_SETTINGS, si5324_ref_input).expect("cannot initialize Si5324"); +} + +pub fn init() { + #[cfg(si5324_as_synthesizer)] + { + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + let si5324_ext_input = si5324::Input::Ckin1; + #[cfg(all(soc_platform = "kasli", not(hw_rev = "v2.0")))] + let si5324_ext_input = si5324::Input::Ckin2; + #[cfg(soc_platform = "metlino")] + let si5324_ext_input = si5324::Input::Ckin2; + match get_rtio_clock_cfg() { + RtioClock::Internal => setup_si5324_as_synthesizer(), + RtioClock::External => si5324::bypass(si5324_ext_input).expect("cannot bypass Si5324") + } + } + + #[cfg(has_drtio)] + { + unsafe { + csr::drtio_transceiver::stable_clkin_write(1); + } + clock::spin_us(1500); // wait for CPLL/QPLL lock + unsafe { + csr::drtio_transceiver::txenable_write(0xffffffffu32 as _); + } + } + + #[cfg(has_rtio_crg)] + { + #[cfg(has_rtio_clock_switch)] + let result = crg::init(get_rtio_clock_cfg()); + #[cfg(not(has_rtio_clock_switch))] + let result = crg::init(); + if !result { + error!("RTIO clock failed"); + } + } +} diff --git a/artiq/firmware/runtime/rtio_dma.rs b/artiq/firmware/runtime/rtio_dma.rs index 4a024e929..39e61d621 100644 --- a/artiq/firmware/runtime/rtio_dma.rs +++ b/artiq/firmware/runtime/rtio_dma.rs @@ -1,8 +1,5 @@ -use std::mem; -use std::vec::Vec; -use std::string::String; -use std::btree_map::BTreeMap; -use std::io::Write; +use core::mem; +use alloc::{Vec, String, BTreeMap}; const ALIGNMENT: usize = 64; @@ -38,7 +35,7 @@ impl Manager { } pub fn record_append(&mut self, data: &[u8]) { - self.recording_trace.write_all(data).unwrap(); + self.recording_trace.extend_from_slice(data) } pub fn record_stop(&mut self, duration: u64) { diff --git a/artiq/firmware/runtime/rtio_mgt.rs b/artiq/firmware/runtime/rtio_mgt.rs index 9fd924936..825900b78 100644 --- a/artiq/firmware/runtime/rtio_mgt.rs +++ b/artiq/firmware/runtime/rtio_mgt.rs @@ -1,116 +1,150 @@ -use config; -use board::csr; +use core::cell::RefCell; +use urc::Urc; +use board_misoc::csr; +#[cfg(has_drtio)] +use board_misoc::clock; +use board_artiq::drtio_routing; use sched::Io; - -#[cfg(has_rtio_crg)] -pub mod crg { - use board::{clock, csr}; - - pub fn init() { - unsafe { csr::rtio_crg::pll_reset_write(0) } - } - - pub fn check() -> bool { - unsafe { 
csr::rtio_crg::pll_locked_read() != 0 } - } - - pub fn switch_clock(clk: u8) -> bool { - unsafe { - let cur_clk = csr::rtio_crg::clock_sel_read(); - if clk != cur_clk { - csr::rtio_crg::pll_reset_write(1); - csr::rtio_crg::clock_sel_write(clk); - csr::rtio_crg::pll_reset_write(0); - } - } - - clock::spin_us(150); - return check() - } -} - -#[cfg(not(has_rtio_crg))] -pub mod crg { - pub fn init() {} - pub fn check() -> bool { true } - pub fn switch_clock(_clk: u8) -> bool { true } -} +use sched::Mutex; #[cfg(has_drtio)] pub mod drtio { use super::*; use drtioaux; - pub fn startup(io: &Io) { - io.spawn(4096, link_thread); + pub fn startup(io: &Io, aux_mutex: &Mutex, + routing_table: &Urc>, + up_destinations: &Urc>) { + let aux_mutex = aux_mutex.clone(); + let routing_table = routing_table.clone(); + let up_destinations = up_destinations.clone(); + io.spawn(4096, move |io| { + let routing_table = routing_table.borrow(); + link_thread(io, &aux_mutex, &routing_table, &up_destinations); + }); } fn link_rx_up(linkno: u8) -> bool { let linkno = linkno as usize; unsafe { - (csr::DRTIO[linkno].link_status_read)() == 1 + (csr::DRTIO[linkno].rx_up_read)() == 1 } } - fn reset_phy(linkno: u8) { - let linkno = linkno as usize; - unsafe { - (csr::DRTIO[linkno].reset_phy_write)(1); - while (csr::DRTIO[linkno].o_wait_read)() == 1 {} - } - } - - fn sync_tsc(linkno: u8) { - let linkno = linkno as usize; - unsafe { - (csr::DRTIO[linkno].set_time_write)(1); - while (csr::DRTIO[linkno].set_time_read)() == 1 {} - } - } - - fn init_link(linkno: u8) { - let linkidx = linkno as usize; - unsafe { - (csr::DRTIO[linkidx].reset_write)(1); - while (csr::DRTIO[linkidx].o_wait_read)() == 1 {} - } - // TODO: determine actual number of remote FIFOs - for channel in 0..16 { - unsafe { - (csr::DRTIO[linkidx].chan_sel_override_write)(channel); - (csr::DRTIO[linkidx].chan_sel_override_en_write)(1); - - (csr::DRTIO[linkidx].o_reset_channel_status_write)(1); - (csr::DRTIO[linkidx].o_get_fifo_space_write)(1); - while (csr::DRTIO[linkidx].o_wait_read)() == 1 {} - info!("[LINK#{}] FIFO space on channel {} is {}", - linkno, channel, (csr::DRTIO[linkidx].o_dbg_fifo_space_read)()); - - (csr::DRTIO[linkidx].chan_sel_override_en_write)(0); + fn recv_aux_timeout(io: &Io, linkno: u8, timeout: u32) -> Result { + let max_time = clock::get_ms() + timeout as u64; + loop { + if !link_rx_up(linkno) { + return Err("link went down"); } + if clock::get_ms() > max_time { + return Err("timeout"); + } + match drtioaux::recv(linkno) { + Ok(Some(packet)) => return Ok(packet), + Ok(None) => (), + Err(_) => return Err("aux packet error") + } + io.relinquish().unwrap(); } } - pub fn init() { - for linkno in 0..csr::DRTIO.len() { - init_link(linkno as u8); - } + pub fn aux_transact(io: &Io, aux_mutex: &Mutex, + linkno: u8, request: &drtioaux::Packet) -> Result { + let _lock = aux_mutex.lock(io).unwrap(); + drtioaux::send(linkno, request).unwrap(); + recv_aux_timeout(io, linkno, 200) } - fn ping_remote(linkno: u8, io: &Io) -> u32 { + fn ping_remote(io: &Io, aux_mutex: &Mutex, linkno: u8) -> u32 { let mut count = 0; loop { if !link_rx_up(linkno) { return 0 } count += 1; - drtioaux::hw::send_link(linkno, &drtioaux::Packet::EchoRequest).unwrap(); - io.sleep(100).unwrap(); - let pr = drtioaux::hw::recv_link(linkno); - match pr { - Ok(Some(drtioaux::Packet::EchoReply)) => return count, + if count > 100 { + return 0; + } + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::EchoRequest); + match reply { + Ok(drtioaux::Packet::EchoReply) => { + // 
make sure receive buffer is drained + let max_time = clock::get_ms() + 200; + loop { + if clock::get_ms() > max_time { + return count; + } + let _ = drtioaux::recv(linkno); + io.relinquish().unwrap(); + } + } _ => {} } + io.relinquish().unwrap(); + } + } + + fn sync_tsc(io: &Io, aux_mutex: &Mutex, linkno: u8) -> Result<(), &'static str> { + let _lock = aux_mutex.lock(io).unwrap(); + + unsafe { + (csr::DRTIO[linkno as usize].set_time_write)(1); + while (csr::DRTIO[linkno as usize].set_time_read)() == 1 {} + } + // TSCAck is the only aux packet that is sent spontaneously + // by the satellite, in response to a TSC set on the RT link. + let reply = recv_aux_timeout(io, linkno, 10000)?; + if reply == drtioaux::Packet::TSCAck { + return Ok(()); + } else { + return Err("unexpected reply"); + } + } + + fn load_routing_table(io: &Io, aux_mutex: &Mutex, linkno: u8, routing_table: &drtio_routing::RoutingTable) + -> Result<(), &'static str> { + for i in 0..drtio_routing::DEST_COUNT { + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetPath { + destination: i as u8, + hops: routing_table.0[i] + })?; + if reply != drtioaux::Packet::RoutingAck { + return Err("unexpected reply"); + } + } + Ok(()) + } + + fn set_rank(io: &Io, aux_mutex: &Mutex, linkno: u8, rank: u8) -> Result<(), &'static str> { + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::RoutingSetRank { + rank: rank + })?; + if reply != drtioaux::Packet::RoutingAck { + return Err("unexpected reply"); + } + Ok(()) + } + + fn init_buffer_space(destination: u8, linkno: u8) { + let linkno = linkno as usize; + unsafe { + (csr::DRTIO[linkno].destination_write)(destination); + (csr::DRTIO[linkno].force_destination_write)(1); + (csr::DRTIO[linkno].o_get_buffer_space_write)(1); + while (csr::DRTIO[linkno].o_wait_read)() == 1 {} + info!("[DEST#{}] buffer space is {}", + destination, (csr::DRTIO[linkno].o_dbg_buffer_space_read)()); + (csr::DRTIO[linkno].force_destination_write)(0); + } + } + + fn process_unsolicited_aux(io: &Io, aux_mutex: &Mutex, linkno: u8) { + let _lock = aux_mutex.lock(io).unwrap(); + match drtioaux::recv(linkno) { + Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet), + Ok(None) => (), + Err(_) => warn!("[LINK#{}] aux packet error", linkno) } } @@ -122,7 +156,7 @@ pub mod drtio { (csr::DRTIO[linkidx].protocol_error_write)(errors); } if errors != 0 { - error!("[LINK#{}] found error(s)", linkno); + error!("[LINK#{}] error(s) found (0x{:02x}):", linkno, errors); if errors & 1 != 0 { error!("[LINK#{}] received packet of an unknown type", linkno); } @@ -130,66 +164,166 @@ pub mod drtio { error!("[LINK#{}] received truncated packet", linkno); } if errors & 4 != 0 { - error!("[LINK#{}] timeout attempting to get remote FIFO space", linkno); + error!("[LINK#{}] timeout attempting to get remote buffer space", linkno); } } } - fn process_aux_errors(linkno: u8) { - drtioaux::hw::send_link(linkno, &drtioaux::Packet::RtioErrorRequest).unwrap(); - match drtioaux::hw::recv_timeout_link(linkno, None) { - Ok(drtioaux::Packet::RtioNoErrorReply) => (), - Ok(drtioaux::Packet::RtioErrorCollisionReply) => - error!("[LINK#{}] RTIO collision", linkno), - Ok(drtioaux::Packet::RtioErrorBusyReply) => - error!("[LINK#{}] RTIO busy", linkno), - Ok(_) => error!("[LINK#{}] received unexpected aux packet", linkno), - Err(e) => error!("[LINK#{}] aux packet error ({})", linkno, e) + fn destination_set_up(routing_table: &drtio_routing::RoutingTable, + up_destinations: &Urc>, + destination: u8, 
up: bool) { + let mut up_destinations = up_destinations.borrow_mut(); + up_destinations[destination as usize] = up; + if up { + drtio_routing::interconnect_enable(routing_table, 0, destination); + info!("[DEST#{}] destination is up", destination); + } else { + drtio_routing::interconnect_disable(destination); + info!("[DEST#{}] destination is down", destination); } } - pub fn link_thread(io: Io) { - let mut link_up = vec![false; csr::DRTIO.len()]; + fn destination_up(up_destinations: &Urc>, destination: u8) -> bool { + let up_destinations = up_destinations.borrow(); + up_destinations[destination as usize] + } - loop { - for linkno in 0..csr::DRTIO.len() { - let linkno = linkno as u8; - if !link_up[linkno as usize] { - if link_rx_up(linkno) { - info!("[LINK#{}] link RX became up, pinging", linkno); - let ping_count = ping_remote(linkno, &io); - if ping_count > 0 { - info!("[LINK#{}] remote replied after {} packets", linkno, ping_count); - init_link(linkno); // clear all FIFOs first - reset_phy(linkno); - sync_tsc(linkno); - info!("[LINK#{}] link initialization completed", linkno); - link_up[linkno as usize] = true; - } else { - info!("[LINK#{}] ping failed", linkno); + fn destination_survey(io: &Io, aux_mutex: &Mutex, routing_table: &drtio_routing::RoutingTable, + up_links: &[bool], + up_destinations: &Urc>) { + for destination in 0..drtio_routing::DEST_COUNT { + let hop = routing_table.0[destination][0]; + let destination = destination as u8; + + if hop == 0 { + /* local RTIO */ + if !destination_up(up_destinations, destination) { + destination_set_up(routing_table, up_destinations, destination, true); + } + } else if hop as usize <= csr::DRTIO.len() { + let linkno = hop - 1; + if destination_up(up_destinations, destination) { + if up_links[linkno as usize] { + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DestinationStatusRequest { + destination: destination + }); + match reply { + Ok(drtioaux::Packet::DestinationDownReply) => + destination_set_up(routing_table, up_destinations, destination, false), + Ok(drtioaux::Packet::DestinationOkReply) => (), + Ok(drtioaux::Packet::DestinationSequenceErrorReply { channel }) => + error!("[DEST#{}] RTIO sequence error involving channel 0x{:04x}", destination, channel), + Ok(drtioaux::Packet::DestinationCollisionReply { channel }) => + error!("[DEST#{}] RTIO collision involving channel 0x{:04x}", destination, channel), + Ok(drtioaux::Packet::DestinationBusyReply { channel }) => + error!("[DEST#{}] RTIO busy error involving channel 0x{:04x}", destination, channel), + Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), + Err(e) => error!("[DEST#{}] communication failed ({})", destination, e) } } else { - if link_rx_up(linkno) { - process_local_errors(linkno); - process_aux_errors(linkno); - } else { - info!("[LINK#{}] link is down", linkno); - link_up[linkno as usize] = false; + destination_set_up(routing_table, up_destinations, destination, false); + } + } else { + if up_links[linkno as usize] { + let reply = aux_transact(io, aux_mutex, linkno, &drtioaux::Packet::DestinationStatusRequest { + destination: destination + }); + match reply { + Ok(drtioaux::Packet::DestinationDownReply) => (), + Ok(drtioaux::Packet::DestinationOkReply) => { + destination_set_up(routing_table, up_destinations, destination, true); + init_buffer_space(destination as u8, linkno); + }, + Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet), + Err(e) => error!("[DEST#{}] 
communication failed ({})", destination, e) } } } } + } + } + + pub fn link_thread(io: Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, + up_destinations: &Urc>) { + let mut up_links = [false; csr::DRTIO.len()]; + loop { + for linkno in 0..csr::DRTIO.len() { + let linkno = linkno as u8; + if up_links[linkno as usize] { + /* link was previously up */ + if link_rx_up(linkno) { + process_unsolicited_aux(&io, aux_mutex, linkno); + process_local_errors(linkno); + } else { + info!("[LINK#{}] link is down", linkno); + up_links[linkno as usize] = false; + } + } else { + /* link was previously down */ + if link_rx_up(linkno) { + info!("[LINK#{}] link RX became up, pinging", linkno); + let ping_count = ping_remote(&io, aux_mutex, linkno); + if ping_count > 0 { + info!("[LINK#{}] remote replied after {} packets", linkno, ping_count); + up_links[linkno as usize] = true; + if let Err(e) = sync_tsc(&io, aux_mutex, linkno) { + error!("[LINK#{}] failed to sync TSC ({})", linkno, e); + } + if let Err(e) = load_routing_table(&io, aux_mutex, linkno, routing_table) { + error!("[LINK#{}] failed to load routing table ({})", linkno, e); + } + if let Err(e) = set_rank(&io, aux_mutex, linkno, 1) { + error!("[LINK#{}] failed to set rank ({})", linkno, e); + } + info!("[LINK#{}] link initialization completed", linkno); + } else { + error!("[LINK#{}] ping failed", linkno); + } + } + } + } + destination_survey(&io, aux_mutex, routing_table, &up_links, up_destinations); io.sleep(200).unwrap(); } } + + pub fn reset(io: &Io, aux_mutex: &Mutex) { + for linkno in 0..csr::DRTIO.len() { + unsafe { + (csr::DRTIO[linkno].reset_write)(1); + } + } + io.sleep(1).unwrap(); + for linkno in 0..csr::DRTIO.len() { + unsafe { + (csr::DRTIO[linkno].reset_write)(0); + } + } + + for linkno in 0..csr::DRTIO.len() { + let linkno = linkno as u8; + if link_rx_up(linkno) { + let reply = aux_transact(io, aux_mutex, linkno, + &drtioaux::Packet::ResetRequest); + match reply { + Ok(drtioaux::Packet::ResetAck) => (), + Ok(_) => error!("[LINK#{}] reset failed, received unexpected aux packet", linkno), + Err(e) => error!("[LINK#{}] reset failed, aux packet error ({})", linkno, e) + } + } + } + } } #[cfg(not(has_drtio))] -mod drtio { +pub mod drtio { use super::*; - pub fn startup(_io: &Io) {} - pub fn init() {} + pub fn startup(_io: &Io, _aux_mutex: &Mutex, + _routing_table: &Urc>, + _up_destinations: &Urc>) {} + pub fn reset(_io: &Io, _aux_mutex: &Mutex) {} } fn async_error_thread(io: Io) { @@ -198,130 +332,35 @@ fn async_error_thread(io: Io) { io.until(|| csr::rtio_core::async_error_read() != 0).unwrap(); let errors = csr::rtio_core::async_error_read(); if errors & 1 != 0 { - error!("RTIO collision"); + error!("RTIO collision involving channel {}", + csr::rtio_core::collision_channel_read()); } if errors & 2 != 0 { - error!("RTIO busy"); + error!("RTIO busy error involving channel {}", + csr::rtio_core::busy_channel_read()); + } + if errors & 4 != 0 { + error!("RTIO sequence error involving channel {}", + csr::rtio_core::sequence_error_channel_read()); } csr::rtio_core::async_error_write(errors); } } } -pub fn startup(io: &Io) { - crg::init(); - - #[derive(Debug)] - enum RtioClock { - Internal = 0, - External = 1 - }; - - let clk = config::read("startup_clock", |result| { - match result { - Ok(b"i") => { - info!("using internal startup RTIO clock"); - RtioClock::Internal - }, - Ok(b"e") => { - info!("using external startup RTIO clock"); - RtioClock::External - }, - Err(()) => { - info!("using internal startup RTIO clock (by 
default)"); - RtioClock::Internal - }, - Ok(_) => { - error!("unrecognized startup_clock configuration entry, using internal RTIO clock"); - RtioClock::Internal - } - } - }); - - if !crg::switch_clock(clk as u8) { - error!("startup RTIO clock failed"); - warn!("this may cause the system initialization to fail"); - warn!("fix clocking and reset the device"); +pub fn startup(io: &Io, aux_mutex: &Mutex, + routing_table: &Urc>, + up_destinations: &Urc>) { + drtio::startup(io, aux_mutex, routing_table, up_destinations); + unsafe { + csr::rtio_core::reset_phy_write(1); } - - drtio::startup(io); - init_core(); io.spawn(4096, async_error_thread); } -pub fn init_core() { +pub fn reset(io: &Io, aux_mutex: &Mutex) { unsafe { csr::rtio_core::reset_write(1); } - drtio::init() -} - -#[cfg(has_drtio)] -pub mod drtio_dbg { - use board::csr; - - // TODO: routing - pub fn get_channel_state(channel: u32) -> (u16, u64) { - let linkno = ((channel >> 16) - 1) as usize; - let node_channel = channel as u16; - unsafe { - (csr::DRTIO[linkno].chan_sel_override_write)(node_channel as u16); - (csr::DRTIO[linkno].chan_sel_override_en_write)(1); - let fifo_space = (csr::DRTIO[linkno].o_dbg_fifo_space_read)(); - let last_timestamp = (csr::DRTIO[linkno].o_dbg_last_timestamp_read)(); - (csr::DRTIO[linkno].chan_sel_override_en_write)(0); - (fifo_space, last_timestamp) - } - } - - pub fn reset_channel_state(channel: u32) { - let linkno = ((channel >> 16) - 1) as usize; - let node_channel = channel as u16; - unsafe { - (csr::DRTIO[linkno].chan_sel_override_write)(node_channel); - (csr::DRTIO[linkno].chan_sel_override_en_write)(1); - (csr::DRTIO[linkno].o_reset_channel_status_write)(1); - (csr::DRTIO[linkno].chan_sel_override_en_write)(0); - } - } - - pub fn get_fifo_space(channel: u32) { - let linkno = ((channel >> 16) - 1) as usize; - let node_channel = channel as u16; - unsafe { - (csr::DRTIO[linkno].chan_sel_override_write)(node_channel); - (csr::DRTIO[linkno].chan_sel_override_en_write)(1); - (csr::DRTIO[linkno].o_get_fifo_space_write)(1); - (csr::DRTIO[linkno].chan_sel_override_en_write)(0); - } - } - - pub fn get_packet_counts(linkno: u8) -> (u32, u32) { - let linkno = linkno as usize; - unsafe { - (csr::DRTIO[linkno].update_packet_cnt_write)(1); - ((csr::DRTIO[linkno].packet_cnt_tx_read)(), - (csr::DRTIO[linkno].packet_cnt_rx_read)()) - } - } - - pub fn get_fifo_space_req_count(linkno: u8) -> u32 { - let linkno = linkno as usize; - unsafe { - (csr::DRTIO[linkno].o_dbg_fifo_space_req_cnt_read)() - } - } -} - -#[cfg(not(has_drtio))] -pub mod drtio_dbg { - pub fn get_channel_state(_channel: u32) -> (u16, u64) { (0, 0) } - - pub fn reset_channel_state(_channel: u32) {} - - pub fn get_fifo_space(_channel: u32) {} - - pub fn get_packet_counts(_linkno: u8) -> (u32, u32) { (0, 0) } - - pub fn get_fifo_space_req_count(_linkno: u8) -> u32 { 0 } + drtio::reset(io, aux_mutex) } diff --git a/artiq/firmware/runtime/runtime.ld b/artiq/firmware/runtime/runtime.ld index 026ec9186..a0a884dd2 100644 --- a/artiq/firmware/runtime/runtime.ld +++ b/artiq/firmware/runtime/runtime.ld @@ -1,8 +1,6 @@ INCLUDE generated/output_format.ld -STARTUP(crt0-or1k.o) -ENTRY(_start) - INCLUDE generated/regions.ld +ENTRY(_reset_handler) /* Assume ORIGIN(main_ram) = 0x40000000. Unfortunately, * ld does not allow this expression here. 
@@ -13,13 +11,31 @@ MEMORY { SECTIONS { + .vectors : + { + *(.vectors) + } > runtime + .text : { - _ftext = .; - *(.text .stub .text.* .gnu.linkonce.t.*) - _etext = .; + *(.text .text.*) } > runtime + .eh_frame : + { + __eh_frame_start = .; + KEEP(*(.eh_frame)) + __eh_frame_end = .; + } > runtime + + .eh_frame_hdr : + { + KEEP(*(.eh_frame_hdr)) + } > runtime + + __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0; + __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0; + /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ .got : { @@ -34,41 +50,23 @@ SECTIONS .rodata : { - . = ALIGN(4); - _frodata = .; - *(.rodata .rodata.* .gnu.linkonce.r.*) - *(.rodata1) - _erodata = .; + *(.rodata .rodata.*) } > runtime .data : { - . = ALIGN(4); - _fdata = .; - *(.data .data.* .gnu.linkonce.d.*) - *(.data1) - *(.sdata .sdata.* .gnu.linkonce.s.*) - _edata = .; + *(.data .data.*) } > runtime - .bss : + .bss ALIGN(4) : { - . = ALIGN(4); _fbss = .; - *(.dynsbss) - *(.sbss .sbss.* .gnu.linkonce.sb.*) - *(.scommon) - *(.dynbss) - *(.bss .bss.* .gnu.linkonce.b.*) - *(COMMON) - . = ALIGN(4); + *(.bss .bss.*) _ebss = .; } > runtime .stack : { - . = ALIGN(0x1000); - _estack = .; . += 0x4000; _fstack = . - 4; } > runtime @@ -79,10 +77,4 @@ SECTIONS . = ORIGIN(runtime) + LENGTH(runtime); _eheap = .; } > runtime - - /DISCARD/ : - { - *(.eh_frame) - *(.gcc_except_table) - } } diff --git a/artiq/firmware/runtime/sched.rs b/artiq/firmware/runtime/sched.rs index 1a6ce55b4..7a2a85723 100644 --- a/artiq/firmware/runtime/sched.rs +++ b/artiq/firmware/runtime/sched.rs @@ -1,23 +1,41 @@ #![allow(dead_code)] -use std::mem; -use std::cell::{Cell, RefCell}; -use std::vec::Vec; -use std::io::{Read, Write, Result, Error, ErrorKind}; +use core::mem; +use core::result; +use core::cell::{Cell, RefCell}; +use alloc::Vec; use fringe::OwnedStack; use fringe::generator::{Generator, Yielder, State as GeneratorState}; - +use smoltcp::time::Duration; +use smoltcp::Error as NetworkError; use smoltcp::wire::IpEndpoint; use smoltcp::socket::{SocketHandle, SocketRef}; -use board; +use io::{Read, Write}; +use board_misoc::clock; use urc::Urc; +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "interrupted")] + Interrupted, + #[fail(display = "timed out")] + TimedOut, + #[fail(display = "network error: {}", _0)] + Network(NetworkError) +} + +impl From for Error { + fn from(value: NetworkError) -> Error { + Error::Network(value) + } +} + type SocketSet = ::smoltcp::socket::SocketSet<'static, 'static, 'static>; #[derive(Debug)] struct WaitRequest { - event: Option<*const (Fn() -> bool + 'static)>, + event: Option<*mut FnMut() -> bool>, timeout: Option } @@ -124,35 +142,31 @@ impl Scheduler { pub fn run(&mut self) { self.sockets.borrow_mut().prune(); - self.threads.append(&mut *borrow_mut!(self.spawned)); + self.threads.append(&mut *self.spawned.borrow_mut()); if self.threads.len() == 0 { return } - let now = board::clock::get_ms(); + let now = clock::get_ms(); let start_idx = self.run_idx; loop { self.run_idx = (self.run_idx + 1) % self.threads.len(); let result = { - let mut thread = borrow_mut!(self.threads[self.run_idx].0); - match thread.waiting_for { - _ if thread.interrupted => { - thread.interrupted = false; - thread.generator.resume(WaitResult::Interrupted) - } - WaitRequest { event: None, timeout: None } => - thread.generator.resume(WaitResult::Completed), - WaitRequest { timeout: Some(instant), .. 
} if now >= instant => - thread.generator.resume(WaitResult::TimedOut), - WaitRequest { event: Some(event), .. } if unsafe { (*event)() } => - thread.generator.resume(WaitResult::Completed), - _ => { - if self.run_idx == start_idx { - // We've checked every thread and none of them are runnable. - break - } else { - continue - } - } + let &mut Thread { ref mut generator, ref mut interrupted, ref waiting_for } = + &mut *self.threads[self.run_idx].0.borrow_mut(); + if *interrupted { + *interrupted = false; + generator.resume(WaitResult::Interrupted) + } else if waiting_for.event.is_none() && waiting_for.timeout.is_none() { + generator.resume(WaitResult::Completed) + } else if waiting_for.timeout.map(|instant| now >= instant).unwrap_or(false) { + generator.resume(WaitResult::TimedOut) + } else if waiting_for.event.map(|event| unsafe { (*event)() }).unwrap_or(false) { + generator.resume(WaitResult::Completed) + } else if self.run_idx == start_idx { + // We've checked every thread and none of them are runnable. + break + } else { + continue } }; @@ -164,7 +178,7 @@ impl Scheduler { }, Some(wait_request) => { // The thread has suspended itself. - let mut thread = borrow_mut!(self.threads[self.run_idx].0); + let mut thread = self.threads[self.run_idx].0.borrow_mut(); thread.waiting_for = wait_request } } @@ -189,7 +203,7 @@ impl<'a> Io<'a> { pub fn spawn(&self, stack_size: usize, f: F) -> ThreadHandle where F: 'static + FnOnce(Io) + Send { let handle = unsafe { Thread::new(self, stack_size, f) }; - borrow_mut!(self.spawned).push(handle.clone()); + self.spawned.borrow_mut().push(handle.clone()); handle } @@ -197,62 +211,94 @@ impl<'a> Io<'a> { self.yielder.expect("cannot suspend the scheduler thread") } - pub fn sleep(&self, duration_ms: u64) -> Result<()> { + pub fn sleep(&self, duration_ms: u64) -> Result<(), Error> { let request = WaitRequest { - timeout: Some(board::clock::get_ms() + duration_ms), + timeout: Some(clock::get_ms() + duration_ms), event: None }; match self.yielder().suspend(request) { WaitResult::TimedOut => Ok(()), - WaitResult::Interrupted => Err(Error::new(ErrorKind::Interrupted, "")), + WaitResult::Interrupted => Err(Error::Interrupted), _ => unreachable!() } } - fn suspend(&self, request: WaitRequest) -> Result<()> { + fn suspend(&self, request: WaitRequest) -> Result<(), Error> { match self.yielder().suspend(request) { WaitResult::Completed => Ok(()), - WaitResult::TimedOut => Err(Error::new(ErrorKind::TimedOut, "")), - WaitResult::Interrupted => Err(Error::new(ErrorKind::Interrupted, "")) + WaitResult::TimedOut => Err(Error::TimedOut), + WaitResult::Interrupted => Err(Error::Interrupted) } } - pub fn relinquish(&self) -> Result<()> { + pub fn relinquish(&self) -> Result<(), Error> { self.suspend(WaitRequest { timeout: None, event: None }) } - pub fn until bool + 'static>(&self, f: F) -> Result<()> { + pub fn until bool>(&self, mut f: F) -> Result<(), Error> { + let f = unsafe { mem::transmute::<&mut FnMut() -> bool, *mut FnMut() -> bool>(&mut f) }; self.suspend(WaitRequest { timeout: None, - event: Some(&f as *const _) + event: Some(f) }) } - pub fn join(&self, handle: ThreadHandle) -> Result<()> { + pub fn until_ok(&self, mut f: F) -> Result + where F: FnMut() -> result::Result + { + let mut value = None; + self.until(|| { + if let Ok(result) = f() { + value = Some(result) + } + value.is_some() + })?; + Ok(value.unwrap()) + } + + pub fn join(&self, handle: ThreadHandle) -> Result<(), Error> { self.until(move || handle.terminated()) } } +#[derive(Clone)] +pub struct 
Mutex(Urc>); + +impl Mutex { + pub fn new() -> Mutex { + Mutex(Urc::new(Cell::new(false))) + } + + pub fn lock<'a>(&'a self, io: &Io) -> Result, Error> { + io.until(|| !self.0.get())?; + self.0.set(true); + Ok(MutexGuard(&*self.0)) + } +} + +pub struct MutexGuard<'a>(&'a Cell); + +impl<'a> Drop for MutexGuard<'a> { + fn drop(&mut self) { + self.0.set(false) + } +} + macro_rules! until { ($socket:expr, $ty:ty, |$var:ident| $cond:expr) => ({ let (sockets, handle) = ($socket.io.sockets.clone(), $socket.handle); $socket.io.until(move || { - let mut sockets = borrow_mut!(sockets); + let mut sockets = sockets.borrow_mut(); let $var = sockets.get::<$ty>(handle); $cond }) }) } - -use ::smoltcp::Error as ErrorLower; -// https://github.com/rust-lang/rust/issues/44057 -// type ErrorLower = ::smoltcp::Error; - type TcpSocketBuffer = ::smoltcp::socket::TcpSocketBuffer<'static>; type TcpSocketLower = ::smoltcp::socket::TcpSocket<'static>; @@ -269,7 +315,8 @@ impl<'a> TcpListener<'a> { fn new_lower(io: &'a Io<'a>, buffer_size: usize) -> SocketHandle { let rx_buffer = vec![0; buffer_size]; let tx_buffer = vec![0; buffer_size]; - borrow_mut!(io.sockets) + io.sockets + .borrow_mut() .add(TcpSocketLower::new( TcpSocketBuffer::new(rx_buffer), TcpSocketBuffer::new(tx_buffer))) @@ -286,7 +333,7 @@ impl<'a> TcpListener<'a> { fn with_lower(&self, f: F) -> R where F: FnOnce(SocketRef) -> R { - let mut sockets = borrow_mut!(self.io.sockets); + let mut sockets = self.io.sockets.borrow_mut(); let result = f(sockets.get(self.handle.get())); result } @@ -303,31 +350,23 @@ impl<'a> TcpListener<'a> { self.with_lower(|s| s.local_endpoint()) } - pub fn listen>(&self, endpoint: T) -> Result<()> { + pub fn listen>(&self, endpoint: T) -> Result<(), Error> { let endpoint = endpoint.into(); self.with_lower(|mut s| s.listen(endpoint)) .map(|()| { self.endpoint.set(endpoint); () }) - .map_err(|err| { - match err { - ErrorLower::Illegal => - Error::new(ErrorKind::Other, "already listening"), - ErrorLower::Unaddressable => - Error::new(ErrorKind::InvalidInput, "port cannot be zero"), - _ => unreachable!() - } - }) + .map_err(|err| err.into()) } - pub fn accept(&self) -> Result> { + pub fn accept(&self) -> Result, Error> { // We're waiting until at least one half of the connection becomes open. // This handles the case where a remote socket immediately sends a FIN-- // that still counts as accepting even though nothing may be sent. 
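+        // (concretely: the closure below polls the lower smoltcp socket until may_send() or may_recv() reports true)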
let (sockets, handle) = (self.io.sockets.clone(), self.handle.get()); self.io.until(move || { - let mut sockets = borrow_mut!(sockets); + let mut sockets = sockets.borrow_mut(); let socket = sockets.get::(handle); socket.may_send() || socket.may_recv() })?; @@ -352,7 +391,7 @@ impl<'a> TcpListener<'a> { impl<'a> Drop for TcpListener<'a> { fn drop(&mut self) { self.with_lower(|mut s| s.close()); - borrow_mut!(self.io.sockets).release(self.handle.get()) + self.io.sockets.borrow_mut().release(self.handle.get()) } } @@ -377,7 +416,7 @@ impl<'a> TcpStream<'a> { fn with_lower(&self, f: F) -> R where F: FnOnce(SocketRef) -> R { - let mut sockets = borrow_mut!(self.io.sockets); + let mut sockets = self.io.sockets.borrow_mut(); let result = f(sockets.get(self.handle)); result } @@ -411,22 +450,22 @@ impl<'a> TcpStream<'a> { } pub fn timeout(&self) -> Option { - self.with_lower(|s| s.timeout()) + self.with_lower(|s| s.timeout().as_ref().map(Duration::millis)) } pub fn set_timeout(&self, value: Option) { - self.with_lower(|mut s| s.set_timeout(value)) + self.with_lower(|mut s| s.set_timeout(value.map(Duration::from_millis))) } pub fn keep_alive(&self) -> Option { - self.with_lower(|s| s.keep_alive()) + self.with_lower(|s| s.keep_alive().as_ref().map(Duration::millis)) } pub fn set_keep_alive(&self, value: Option) { - self.with_lower(|mut s| s.set_keep_alive(value)) + self.with_lower(|mut s| s.set_keep_alive(value.map(Duration::from_millis))) } - pub fn close(&self) -> Result<()> { + pub fn close(&self) -> Result<(), Error> { self.with_lower(|mut s| s.close()); until!(self, TcpSocketLower, |s| !s.is_open())?; // right now the socket may be in TIME-WAIT state. if we don't give it a chance to send @@ -437,7 +476,9 @@ impl<'a> TcpStream<'a> { } impl<'a> Read for TcpStream<'a> { - fn read(&mut self, buf: &mut [u8]) -> Result { + type ReadError = Error; + + fn read(&mut self, buf: &mut [u8]) -> Result { // Only borrow the underlying socket for the span of the next statement. let result = self.with_lower(|mut s| s.recv_slice(buf)); match result { @@ -446,14 +487,14 @@ impl<'a> Read for TcpStream<'a> { until!(self, TcpSocketLower, |s| s.can_recv() || !s.may_recv())?; match self.with_lower(|mut s| s.recv_slice(buf)) { Ok(length) => Ok(length), - Err(ErrorLower::Illegal) => Ok(0), + Err(NetworkError::Illegal) => Ok(0), _ => unreachable!() } } // Fast path: we had data in buffer. Ok(length) => Ok(length), // Error path: the receive half of the socket is not open. - Err(ErrorLower::Illegal) => Ok(0), + Err(NetworkError::Illegal) => Ok(0), // No other error may be returned. Err(_) => unreachable!() } @@ -461,7 +502,10 @@ impl<'a> Read for TcpStream<'a> { } impl<'a> Write for TcpStream<'a> { - fn write(&mut self, buf: &[u8]) -> Result { + type WriteError = Error; + type FlushError = Error; + + fn write(&mut self, buf: &[u8]) -> Result { // Only borrow the underlying socket for the span of the next statement. let result = self.with_lower(|mut s| s.send_slice(buf)); match result { @@ -470,25 +514,25 @@ impl<'a> Write for TcpStream<'a> { until!(self, TcpSocketLower, |s| s.can_send() || !s.may_send())?; match self.with_lower(|mut s| s.send_slice(buf)) { Ok(length) => Ok(length), - Err(ErrorLower::Illegal) => Ok(0), + Err(NetworkError::Illegal) => Ok(0), _ => unreachable!() } } // Fast path: we had space in buffer. Ok(length) => Ok(length), // Error path: the transmit half of the socket is not open. - Err(ErrorLower::Illegal) => Ok(0), + Err(NetworkError::Illegal) => Ok(0), // No other error may be returned. 
Err(_) => unreachable!() } } - fn flush(&mut self) -> Result<()> { + fn flush(&mut self) -> Result<(), Self::FlushError> { until!(self, TcpSocketLower, |s| s.send_queue() == 0 || !s.may_send())?; if self.with_lower(|s| s.send_queue()) == 0 { Ok(()) } else { - Err(Error::new(ErrorKind::ConnectionAborted, "connection aborted")) + Err(Error::Network(NetworkError::Illegal)) } } } @@ -496,6 +540,6 @@ impl<'a> Write for TcpStream<'a> { impl<'a> Drop for TcpStream<'a> { fn drop(&mut self) { self.with_lower(|mut s| s.close()); - borrow_mut!(self.io.sockets).release(self.handle) + self.io.sockets.borrow_mut().release(self.handle) } } diff --git a/artiq/firmware/runtime/session.rs b/artiq/firmware/runtime/session.rs index 263933765..a05e18c2f 100644 --- a/artiq/firmware/runtime/session.rs +++ b/artiq/firmware/runtime/session.rs @@ -1,42 +1,63 @@ -use std::prelude::v1::*; -use std::{mem, str}; -use std::cell::{Cell, RefCell}; -use std::io::{self, Read, Write}; -use std::error::Error; +use core::{mem, str, cell::{Cell, RefCell}, fmt::Write as FmtWrite}; +use alloc::{Vec, String}; use byteorder::{ByteOrder, NetworkEndian}; +use io::{Read, Write, Error as IoError}; +use board_misoc::{ident, cache, config}; +use {mailbox, rpc_queue, kernel}; use urc::Urc; -use sched::{ThreadHandle, Io}; -use sched::{TcpListener, TcpStream}; -use board; -use {config, mailbox, rpc_queue, kernel}; -#[cfg(has_rtio_core)] -use rtio_mgt; +use sched::{ThreadHandle, Io, Mutex, TcpListener, TcpStream, Error as SchedError}; +use rtio_clocking; use rtio_dma::Manager as DmaManager; use cache::Cache; use kern_hwreq; +use board_artiq::drtio_routing; use rpc_proto as rpc; use session_proto as host; use kernel_proto as kern; -macro_rules! unexpected { - ($($arg:tt)*) => { - { - error!($($arg)*); - return Err(io::Error::new(io::ErrorKind::InvalidData, "protocol error")) - } - }; +#[derive(Fail, Debug)] +pub enum Error { + #[fail(display = "cannot load kernel: {}", _0)] + Load(String), + #[fail(display = "kernel not found")] + KernelNotFound, + #[fail(display = "invalid kernel CPU pointer: {:#08x}", _0)] + InvalidPointer(usize), + #[fail(display = "RTIO clock failure")] + ClockFailure, + #[fail(display = "protocol error: {}", _0)] + Protocol(#[cause] host::Error), + #[fail(display = "{}", _0)] + Unexpected(String), } -fn io_error(msg: &str) -> io::Error { - io::Error::new(io::ErrorKind::Other, msg) +impl From> for Error { + fn from(value: host::Error) -> Error { + Error::Protocol(value) + } +} + +impl From for Error { + fn from(value: SchedError) -> Error { + Error::Protocol(host::Error::Io(IoError::Other(value))) + } +} + +impl From> for Error { + fn from(value: IoError) -> Error { + Error::Protocol(host::Error::Io(value)) + } +} + +macro_rules! 
unexpected { + ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*)))); } // Persistent state #[derive(Debug)] struct Congress { - now: u64, cache: Cache, dma_manager: DmaManager, finished_cleanly: Cell @@ -45,7 +66,6 @@ struct Congress { impl Congress { fn new() -> Congress { Congress { - now: 0, cache: Cache::new(), dma_manager: DmaManager::new(), finished_cleanly: Cell::new(true) @@ -66,7 +86,6 @@ enum KernelState { struct Session<'a> { congress: &'a mut Congress, kernel_state: KernelState, - watchdog_set: board::clock::WatchdogSet, log_buffer: String } @@ -75,7 +94,6 @@ impl<'a> Session<'a> { Session { congress: congress, kernel_state: KernelState::Absent, - watchdog_set: board::clock::WatchdogSet::new(), log_buffer: String::new() } } @@ -103,20 +121,10 @@ impl<'a> Drop for Session<'a> { } } -fn check_magic(stream: &mut TcpStream) -> io::Result<()> { - const MAGIC: &'static [u8] = b"ARTIQ coredev\n"; - - let mut magic: [u8; 14] = [0; 14]; - stream.read_exact(&mut magic)?; - if magic != MAGIC { - Err(io::Error::new(io::ErrorKind::InvalidData, "unrecognized magic")) - } else { - Ok(()) - } -} - -fn host_read(stream: &mut TcpStream) -> io::Result { - let request = host::Request::read_from(stream)?; +fn host_read(reader: &mut R) -> Result> + where R: Read + ?Sized +{ + let request = host::Request::read_from(reader)?; match &request { &host::Request::LoadKernel(_) => debug!("comm<-host LoadLibrary(...)"), _ => debug!("comm<-host {:?}", request) @@ -124,12 +132,14 @@ fn host_read(stream: &mut TcpStream) -> io::Result { Ok(request) } -fn host_write(stream: &mut Write, reply: host::Reply) -> io::Result<()> { +fn host_write(writer: &mut W, reply: host::Reply) -> Result<(), IoError> + where W: Write + ?Sized +{ debug!("comm->host {:?}", reply); - reply.write_to(stream) + reply.write_to(writer) } -pub fn kern_send(io: &Io, request: &kern::Message) -> io::Result<()> { +pub fn kern_send(io: &Io, request: &kern::Message) -> Result<(), Error> { match request { &kern::LoadRequest(_) => debug!("comm->kern LoadRequest(...)"), &kern::DmaRetrieveReply { trace, duration } => { @@ -142,15 +152,14 @@ pub fn kern_send(io: &Io, request: &kern::Message) -> io::Result<()> { _ => debug!("comm->kern {:?}", request) } unsafe { mailbox::send(request as *const _ as usize) } - io.until(mailbox::acknowledged) + Ok(io.until(mailbox::acknowledged)?) 
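+    // io.until() suspends this scheduler thread until the kernel CPU acknowledges the mailbox
+    // word; `?` converts a scheduler error into this module's Error via the From impl above.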
} -fn kern_recv_notrace(io: &Io, f: F) -> io::Result - where F: FnOnce(&kern::Message) -> io::Result { +fn kern_recv_notrace(io: &Io, f: F) -> Result> + where F: FnOnce(&kern::Message) -> Result> { io.until(|| mailbox::receive() != 0)?; if !kernel::validate(mailbox::receive()) { - let message = format!("invalid kernel CPU pointer 0x{:x}", mailbox::receive()); - return Err(io::Error::new(io::ErrorKind::InvalidData, message)) + return Err(Error::InvalidPointer(mailbox::receive())) } f(unsafe { &*(mailbox::receive() as *const kern::Message) }) @@ -172,20 +181,21 @@ fn kern_recv_dotrace(reply: &kern::Message) { } #[inline(always)] -fn kern_recv(io: &Io, f: F) -> io::Result - where F: FnOnce(&kern::Message) -> io::Result { +fn kern_recv(io: &Io, f: F) -> Result> + where F: FnOnce(&kern::Message) -> Result> { kern_recv_notrace(io, |reply| { kern_recv_dotrace(reply); f(reply) }) } -pub fn kern_acknowledge() -> io::Result<()> { +pub fn kern_acknowledge() -> Result<(), Error> { mailbox::acknowledge(); Ok(()) } -unsafe fn kern_load(io: &Io, session: &mut Session, library: &[u8]) -> io::Result<()> { +unsafe fn kern_load(io: &Io, session: &mut Session, library: &[u8]) + -> Result<(), Error> { if session.running() { unexpected!("attempted to load a new kernel while a kernel was running") } @@ -195,22 +205,21 @@ unsafe fn kern_load(io: &Io, session: &mut Session, library: &[u8]) -> io::Resul kern_send(io, &kern::LoadRequest(&library))?; kern_recv(io, |reply| { match reply { - &kern::LoadReply(Ok(())) => { + kern::LoadReply(Ok(())) => { session.kernel_state = KernelState::Loaded; Ok(()) } - &kern::LoadReply(Err(ref error)) => { + kern::LoadReply(Err(error)) => { kernel::stop(); - Err(io::Error::new(io::ErrorKind::Other, - format!("cannot load kernel: {}", error))) + Err(Error::Load(format!("{}", error))) } other => - unexpected!("unexpected reply from kernel CPU: {:?}", other) + unexpected!("unexpected kernel CPU reply to load request: {:?}", other) } }) } -fn kern_run(session: &mut Session) -> io::Result<()> { +fn kern_run(session: &mut Session) -> Result<(), Error> { if session.kernel_state != KernelState::Loaded { unexpected!("attempted to run a kernel while not in Loaded state") } @@ -222,81 +231,30 @@ fn kern_run(session: &mut Session) -> io::Result<()> { fn process_host_message(io: &Io, stream: &mut TcpStream, - session: &mut Session) -> io::Result<()> { + session: &mut Session) -> Result<(), Error> { match host_read(stream)? 
{ host::Request::SystemInfo => { host_write(stream, host::Reply::SystemInfo { - ident: board::ident(&mut [0; 64]), + ident: ident::read(&mut [0; 64]), finished_cleanly: session.congress.finished_cleanly.get() })?; - session.congress.finished_cleanly.set(true); - Ok(()) - } - - // artiq_coreconfig - host::Request::FlashRead { ref key } => { - config::read(key, |result| { - match result { - Ok(value) => host_write(stream, host::Reply::FlashRead(&value)), - Err(()) => host_write(stream, host::Reply::FlashError) - } - }) - } - - host::Request::FlashWrite { ref key, ref value } => { - match config::write(key, value) { - Ok(_) => host_write(stream, host::Reply::FlashOk), - Err(_) => host_write(stream, host::Reply::FlashError) - } - } - - host::Request::FlashRemove { ref key } => { - match config::remove(key) { - Ok(()) => host_write(stream, host::Reply::FlashOk), - Err(_) => host_write(stream, host::Reply::FlashError), - } - - } - - host::Request::FlashErase => { - match config::erase() { - Ok(()) => host_write(stream, host::Reply::FlashOk), - Err(_) => host_write(stream, host::Reply::FlashError), - } - } - - // artiq_run/artiq_master - host::Request::SwitchClock(clk) => { - if session.running() { - unexpected!("attempted to switch RTIO clock while a kernel was running") - } - - #[cfg(has_rtio_core)] - { - if rtio_mgt::crg::switch_clock(clk) { - host_write(stream, host::Reply::ClockSwitchCompleted) - } else { - host_write(stream, host::Reply::ClockSwitchFailed) - } - } - - #[cfg(not(has_rtio_core))] - host_write(stream, host::Reply::ClockSwitchFailed) + session.congress.finished_cleanly.set(true) } host::Request::LoadKernel(kernel) => match unsafe { kern_load(io, session, &kernel) } { - Ok(()) => host_write(stream, host::Reply::LoadCompleted), + Ok(()) => host_write(stream, host::Reply::LoadCompleted)?, Err(error) => { - host_write(stream, host::Reply::LoadFailed(error.description()))?; - kern_acknowledge() + let mut description = String::new(); + write!(&mut description, "{}", error).unwrap(); + host_write(stream, host::Reply::LoadFailed(&description))?; + kern_acknowledge()?; } }, - host::Request::RunKernel => match kern_run(session) { - Ok(()) => Ok(()), - Err(_) => host_write(stream, host::Reply::KernelStartupFailed) + Ok(()) => (), + Err(_) => host_write(stream, host::Reply::KernelStartupFailed)? }, host::Request::RpcReply { tag } => { @@ -307,22 +265,28 @@ fn process_host_message(io: &Io, let slot = kern_recv(io, |reply| { match reply { &kern::RpcRecvRequest(slot) => Ok(slot), - other => unexpected!("unexpected reply from kernel CPU: {:?}", other) + other => unexpected!( + "expected root value slot from kernel CPU, not {:?}", other) } })?; - rpc::recv_return(stream, &tag, slot, &|size| { + rpc::recv_return(stream, &tag, slot, &|size| -> Result<_, Error> { + if size == 0 { + // Don't try to allocate zero-length values, as RpcRecvReply(0) is + // used to terminate the kernel-side receive loop. + return Ok(0 as *mut ()) + } kern_send(io, &kern::RpcRecvReply(Ok(size)))?; - kern_recv(io, |reply| { + Ok(kern_recv(io, |reply| { match reply { &kern::RpcRecvRequest(slot) => Ok(slot), - other => unexpected!("unexpected reply from kernel CPU: {:?}", other) + other => unexpected!( + "expected nested value slot from kernel CPU, not {:?}", other) } - }) + })?) 
})?; kern_send(io, &kern::RpcRecvReply(Ok(0)))?; - session.kernel_state = KernelState::Running; - Ok(()) + session.kernel_state = KernelState::Running } host::Request::RpcException { @@ -335,8 +299,8 @@ fn process_host_message(io: &Io, kern_recv(io, |reply| { match reply { &kern::RpcRecvRequest(_) => Ok(()), - other => - unexpected!("unexpected reply from kernel CPU: {:?}", other) + other => unexpected!( + "expected (ignored) root value slot from kernel CPU, not {:?}", other) } })?; @@ -351,14 +315,18 @@ fn process_host_message(io: &Io, }; kern_send(io, &kern::RpcRecvReply(Err(exn)))?; - session.kernel_state = KernelState::Running; - Ok(()) + session.kernel_state = KernelState::Running } } + + Ok(()) } -fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, - session: &mut Session) -> io::Result { +fn process_kern_message(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, + up_destinations: &Urc>, + mut stream: Option<&mut TcpStream>, + session: &mut Session) -> Result> { kern_recv_notrace(io, |request| { match (request, session.kernel_state) { (&kern::LoadReply(_), KernelState::Loaded) | @@ -375,15 +343,16 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, kern_recv_dotrace(request); - if kern_hwreq::process_kern_hwreq(io, request)? { + if kern_hwreq::process_kern_hwreq(io, aux_mutex, routing_table, up_destinations, request)? { return Ok(false) } match request { &kern::Log(args) => { - use std::fmt::Write; - session.log_buffer.write_fmt(args) - .map_err(|_| io_error("cannot append to session log buffer"))?; + use core::fmt::Write; + session.log_buffer + .write_fmt(args) + .unwrap_or_else(|_| warn!("cannot append to session log buffer")); session.flush_log_buffer(); kern_acknowledge() } @@ -394,14 +363,6 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, kern_acknowledge() } - &kern::NowInitRequest => - kern_send(io, &kern::NowInitReply(session.congress.now)), - - &kern::NowSave(now) => { - session.congress.now = now; - kern_acknowledge() - } - &kern::DmaRecordStart(name) => { session.congress.dma_manager.record_start(name); kern_acknowledge() @@ -412,7 +373,7 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, } &kern::DmaRecordStop { duration } => { session.congress.dma_manager.record_stop(duration); - board::cache::flush_l2_cache(); + cache::flush_l2_cache(); kern_acknowledge() } &kern::DmaEraseRequest { name } => { @@ -428,17 +389,6 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, }) } - &kern::WatchdogSetRequest { ms } => { - let id = session.watchdog_set.set_ms(ms) - .map_err(|()| io_error("out of watchdogs"))?; - kern_send(io, &kern::WatchdogSetReply { id: id }) - } - - &kern::WatchdogClear { id } => { - session.watchdog_set.clear(id); - kern_acknowledge() - } - &kern::RpcSend { async, service, tag, data } => { match stream { None => unexpected!("unexpected RPC in flash kernel"), @@ -451,7 +401,13 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, kern_acknowledge() } } - } + }, + &kern::RpcFlush => { + // See ksupport/lib.rs for the reason this request exists. + // We do not need to do anything here because of how the main loop is + // structured. 
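+                // (host_kernel_worker below drains rpc_queue on every loop iteration before the
+                // next kernel message is handled, so a simple acknowledgement suffices here)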
+ kern_acknowledge() + }, &kern::CacheGetRequest { key } => { let value = session.congress.cache.get(key); @@ -475,10 +431,9 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, match stream { None => return Ok(true), Some(ref mut stream) => - host_write(stream, host::Reply::KernelFinished) + host_write(stream, host::Reply::KernelFinished).map_err(|e| e.into()) } } - &kern::RunException { exception: kern::Exception { name, message, param, file, line, column, function }, backtrace @@ -504,7 +459,7 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, column: column, function: function, backtrace: backtrace - }) + }).map_err(|e| e.into()) } } } @@ -515,49 +470,45 @@ fn process_kern_message(io: &Io, mut stream: Option<&mut TcpStream>, } fn process_kern_queued_rpc(stream: &mut TcpStream, - _session: &mut Session) -> io::Result<()> { + _session: &mut Session) -> Result<(), Error> { rpc_queue::dequeue(|slice| { debug!("comm<-kern (async RPC)"); let length = NetworkEndian::read_u32(slice) as usize; host_write(stream, host::Reply::RpcRequest { async: true })?; debug!("{:?}", &slice[4..][..length]); - stream.write(&slice[4..][..length])?; + stream.write_all(&slice[4..][..length])?; Ok(()) }) } -fn host_kernel_worker(io: &Io, +fn host_kernel_worker(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, + up_destinations: &Urc>, stream: &mut TcpStream, - congress: &mut Congress) -> io::Result<()> { + congress: &mut Congress) -> Result<(), Error> { let mut session = Session::new(congress); loop { - while !rpc_queue::empty() { - process_kern_queued_rpc(stream, &mut session)? - } - if stream.can_recv() { process_host_message(io, stream, &mut session)? } else if !stream.may_recv() { return Ok(()) } + while !rpc_queue::empty() { + process_kern_queued_rpc(stream, &mut session)? + } + if mailbox::receive() != 0 { - process_kern_message(io, Some(stream), &mut session)?; + process_kern_message(io, aux_mutex, + routing_table, up_destinations, + Some(stream), &mut session)?; } if session.kernel_state == KernelState::Running { - if session.watchdog_set.expired() { - host_write(stream, host::Reply::WatchdogExpired)?; - return Err(io_error("watchdog expired")) - } - - #[cfg(has_rtio_core)] - { - if !rtio_mgt::crg::check() { - host_write(stream, host::Reply::ClockFailure)?; - return Err(io_error("RTIO clock failure")) - } + if !rtio_clocking::crg::check() { + host_write(stream, host::Reply::ClockFailure)?; + return Err(Error::ClockFailure) } } @@ -565,9 +516,11 @@ fn host_kernel_worker(io: &Io, } } -fn flash_kernel_worker(io: &Io, +fn flash_kernel_worker(io: &Io, aux_mutex: &Mutex, + routing_table: &drtio_routing::RoutingTable, + up_destinations: &Urc>, congress: &mut Congress, - config_key: &str) -> io::Result<()> { + config_key: &str) -> Result<(), Error> { let mut session = Session::new(congress); config::read(config_key, |result| { @@ -577,31 +530,24 @@ fn flash_kernel_worker(io: &Io, // so make a copy. kern_load(io, &mut session, Vec::from(kernel).as_ref()) }, - _ => Err(io::Error::new(io::ErrorKind::NotFound, "kernel not found")), + _ => Err(Error::KernelNotFound) } })?; kern_run(&mut session)?; loop { if !rpc_queue::empty() { - return Err(io_error("unexpected background RPC in flash kernel")) + unexpected!("unexpected background RPC in flash kernel") } if mailbox::receive() != 0 { - if process_kern_message(io, None, &mut session)? { + if process_kern_message(io, aux_mutex, routing_table, up_destinations, None, &mut session)? 
{ return Ok(()) } } - if session.watchdog_set.expired() { - return Err(io_error("watchdog expired")) - } - - #[cfg(has_rtio_core)] - { - if !rtio_mgt::crg::check() { - return Err(io_error("RTIO clock failure")) - } + if !rtio_clocking::crg::check() { + return Err(Error::ClockFailure) } io.relinquish()? @@ -623,7 +569,9 @@ fn respawn(io: &Io, handle: &mut Option, f: F) *handle = Some(io.spawn(16384, f)) } -pub fn thread(io: Io) { +pub fn thread(io: Io, aux_mutex: &Mutex, + routing_table: &Urc>, + up_destinations: &Urc>) { let listener = TcpListener::new(&io, 65535); listener.listen(1381).expect("session: cannot listen"); info!("accepting network sessions"); @@ -632,19 +580,22 @@ pub fn thread(io: Io) { let mut kernel_thread = None; { + let aux_mutex = aux_mutex.clone(); + let routing_table = routing_table.clone(); + let up_destinations = up_destinations.clone(); let congress = congress.clone(); respawn(&io, &mut kernel_thread, move |io| { - let mut congress = borrow_mut!(congress); + let routing_table = routing_table.borrow(); + let mut congress = congress.borrow_mut(); info!("running startup kernel"); - match flash_kernel_worker(&io, &mut congress, "startup_kernel") { - Ok(()) => info!("startup kernel finished"), + match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, &mut congress, "startup_kernel") { + Ok(()) => + info!("startup kernel finished"), + Err(Error::KernelNotFound) => + info!("no startup kernel found"), Err(err) => { - if err.kind() == io::ErrorKind::NotFound { - info!("no startup kernel found") - } else { - congress.finished_cleanly.set(false); - error!("startup kernel aborted: {}", err); - } + congress.finished_cleanly.set(false); + error!("startup kernel aborted: {}", err); } } }) @@ -653,10 +604,10 @@ pub fn thread(io: Io) { loop { if listener.can_accept() { let mut stream = listener.accept().expect("session: cannot accept"); - stream.set_timeout(Some(1000)); + stream.set_timeout(Some(2250)); stream.set_keep_alive(Some(500)); - match check_magic(&mut stream) { + match host::read_magic(&mut stream) { Ok(()) => (), Err(_) => { warn!("wrong magic from {}", stream.remote_endpoint()); @@ -666,22 +617,25 @@ pub fn thread(io: Io) { } info!("new connection from {}", stream.remote_endpoint()); + let aux_mutex = aux_mutex.clone(); + let routing_table = routing_table.clone(); + let up_destinations = up_destinations.clone(); let congress = congress.clone(); let stream = stream.into_handle(); respawn(&io, &mut kernel_thread, move |io| { - let mut congress = borrow_mut!(congress); + let routing_table = routing_table.borrow(); + let mut congress = congress.borrow_mut(); let mut stream = TcpStream::from_handle(&io, stream); - match host_kernel_worker(&io, &mut stream, &mut *congress) { + match host_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, &mut stream, &mut *congress) { Ok(()) => (), + Err(Error::Protocol(host::Error::Io(IoError::UnexpectedEnd))) => + info!("connection closed"), + Err(Error::Protocol(host::Error::Io( + IoError::Other(SchedError::Interrupted)))) => + info!("kernel interrupted"), Err(err) => { - if err.kind() == io::ErrorKind::UnexpectedEof { - info!("connection closed"); - } else if err.kind() == io::ErrorKind::Interrupted { - info!("kernel interrupted"); - } else { - congress.finished_cleanly.set(false); - error!("session aborted: {}", err); - } + congress.finished_cleanly.set(false); + error!("session aborted: {}", err); } } }); @@ -690,22 +644,25 @@ pub fn thread(io: Io) { if kernel_thread.as_ref().map_or(true, |h| 
h.terminated()) { info!("no connection, starting idle kernel"); + let aux_mutex = aux_mutex.clone(); + let routing_table = routing_table.clone(); + let up_destinations = up_destinations.clone(); let congress = congress.clone(); respawn(&io, &mut kernel_thread, move |io| { - let mut congress = borrow_mut!(congress); - match flash_kernel_worker(&io, &mut *congress, "idle_kernel") { + let routing_table = routing_table.borrow(); + let mut congress = congress.borrow_mut(); + match flash_kernel_worker(&io, &aux_mutex, &routing_table, &up_destinations, &mut *congress, "idle_kernel") { Ok(()) => info!("idle kernel finished, standing by"), - Err(err) => { - if err.kind() == io::ErrorKind::Interrupted { - info!("idle kernel interrupted"); - } else if err.kind() == io::ErrorKind::NotFound { - info!("no idle kernel found"); - while io.relinquish().is_ok() {} - } else { - error!("idle kernel aborted: {}", err); - } + Err(Error::Protocol(host::Error::Io( + IoError::Other(SchedError::Interrupted)))) => + info!("idle kernel interrupted"), + Err(Error::KernelNotFound) => { + info!("no idle kernel found"); + while io.relinquish().is_ok() {} } + Err(err) => + error!("idle kernel aborted: {}", err) } }) } diff --git a/artiq/firmware/runtime/urc.rs b/artiq/firmware/runtime/urc.rs index cec7751ea..1117269b6 100644 --- a/artiq/firmware/runtime/urc.rs +++ b/artiq/firmware/runtime/urc.rs @@ -1,6 +1,6 @@ -use std::rc::Rc; -use std::ops::Deref; -use std::fmt; +use core::ops::Deref; +use core::fmt; +use alloc::rc::Rc; pub struct Urc(Rc); diff --git a/artiq/firmware/satman/Cargo.toml b/artiq/firmware/satman/Cargo.toml index b8c7106f5..fdccaf27a 100644 --- a/artiq/firmware/satman/Cargo.toml +++ b/artiq/firmware/satman/Cargo.toml @@ -7,20 +7,12 @@ build = "build.rs" [lib] name = "satman" crate-type = ["staticlib"] -path = "lib.rs" +path = "main.rs" [build-dependencies] -build_artiq = { path = "../libbuild_artiq" } +build_misoc = { path = "../libbuild_misoc" } [dependencies] -alloc_list = { path = "../liballoc_list" } -std_artiq = { path = "../libstd_artiq", features = ["alloc"] } -logger_artiq = { path = "../liblogger_artiq" } -board = { path = "../libboard", features = ["uart_console"] } -drtioaux = { path = "../libdrtioaux" } -log = { version = "0.3", default-features = false } - -[dependencies.compiler_builtins] -git = "https://github.com/rust-lang-nursery/compiler-builtins" -rev = "631b568" -features = ["mem"] +log = { version = "0.4", default-features = false } +board_misoc = { path = "../libboard_misoc", features = ["uart_console", "log"] } +board_artiq = { path = "../libboard_artiq" } diff --git a/artiq/firmware/satman/Makefile b/artiq/firmware/satman/Makefile index 703490663..b96938d1b 100644 --- a/artiq/firmware/satman/Makefile +++ b/artiq/firmware/satman/Makefile @@ -5,25 +5,17 @@ LDFLAGS += -L../libbase RUSTFLAGS += -Cpanic=abort -all: satman.bin satman.fbi +all:: satman.bin satman.fbi .PHONY: $(RUSTOUT)/libsatman.a $(RUSTOUT)/libsatman.a: $(cargo) --manifest-path $(SATMAN_DIRECTORY)/Cargo.toml satman.elf: $(RUSTOUT)/libsatman.a - $(LD) $(LDFLAGS) -T $(SATMAN_DIRECTORY)/satman.ld -o $@ $^ - @chmod -x $@ + $(link) -T $(SATMAN_DIRECTORY)/satman.ld %.bin: %.elf - $(OBJCOPY) -O binary $< $@ - @chmod -x $@ + $(objcopy) -O binary %.fbi: %.bin - @echo " MSCIMG " $@ && $(PYTHON) -m misoc.tools.mkmscimg -f -o $@ $< - -clean: - $(RM) satman.elf satman.bin satman.fbi - $(RM) -rf cargo - -.PHONY: all clean + $(mscimg) -f diff --git a/artiq/firmware/satman/build.rs b/artiq/firmware/satman/build.rs index 
d9df648a6..3548ea5ff 100644 --- a/artiq/firmware/satman/build.rs +++ b/artiq/firmware/satman/build.rs @@ -1,6 +1,5 @@ -extern crate build_artiq; +extern crate build_misoc; fn main() { - build_artiq::git_describe(); - build_artiq::misoc_cfg(); + build_misoc::cfg(); } diff --git a/artiq/firmware/satman/jdac_common.rs b/artiq/firmware/satman/jdac_common.rs new file mode 100644 index 000000000..3c185516f --- /dev/null +++ b/artiq/firmware/satman/jdac_common.rs @@ -0,0 +1,74 @@ +pub const INIT: u8 = 0x00; +pub const PRINT_STATUS: u8 = 0x01; +pub const PRBS: u8 = 0x02; +pub const STPL: u8 = 0x03; + +pub const SYSREF_DELAY_DAC: u8 = 0x10; +pub const SYSREF_SLIP: u8 = 0x11; +pub const SYNC: u8 = 0x12; + +pub const DDMTD_SYSREF_RAW: u8 = 0x20; +pub const DDMTD_SYSREF: u8 = 0x21; + + +fn average_2phases(a: i32, b: i32, modulo: i32) -> i32 { + let diff = ((a - b + modulo/2 + modulo) % modulo) - modulo/2; + return (modulo + b + diff/2) % modulo; +} + +pub fn average_phases(phases: &[i32], modulo: i32) -> i32 { + if phases.len() == 1 { + panic!("input array length must be a power of 2"); + } else if phases.len() == 2 { + average_2phases(phases[0], phases[1], modulo) + } else { + let cut = phases.len()/2; + average_2phases( + average_phases(&phases[..cut], modulo), + average_phases(&phases[cut..], modulo), + modulo) + } +} + +pub const RAW_DDMTD_N_SHIFT: i32 = 6; +pub const RAW_DDMTD_N: i32 = 1 << RAW_DDMTD_N_SHIFT; +pub const DDMTD_DITHER_BITS: i32 = 1; +pub const DDMTD_N_SHIFT: i32 = RAW_DDMTD_N_SHIFT + DDMTD_DITHER_BITS; +pub const DDMTD_N: i32 = 1 << DDMTD_N_SHIFT; + +#[cfg(has_ad9154)] +use board_misoc::{clock, csr}; + +#[cfg(has_ad9154)] +pub fn init_ddmtd() -> Result<(), &'static str> { + unsafe { + csr::sysref_ddmtd::reset_write(1); + clock::spin_us(1); + csr::sysref_ddmtd::reset_write(0); + clock::spin_us(100); + if csr::sysref_ddmtd::locked_read() != 0 { + Ok(()) + } else { + Err("DDMTD helper PLL failed to lock") + } + } +} + +#[cfg(has_ad9154)] +pub fn measure_ddmdt_phase_raw() -> i32 { + unsafe { csr::sysref_ddmtd::dt_read() as i32 } +} + +#[cfg(has_ad9154)] +pub fn measure_ddmdt_phase() -> i32 { + const AVG_PRECISION_SHIFT: i32 = 6; + const AVG_PRECISION: i32 = 1 << AVG_PRECISION_SHIFT; + const AVG_MOD: i32 = 1 << (RAW_DDMTD_N_SHIFT + AVG_PRECISION_SHIFT + DDMTD_DITHER_BITS); + + let mut measurements = [0; AVG_PRECISION as usize]; + for i in 0..AVG_PRECISION { + measurements[i as usize] = measure_ddmdt_phase_raw() << (AVG_PRECISION_SHIFT + DDMTD_DITHER_BITS); + clock::spin_us(10); + } + average_phases(&measurements, AVG_MOD) >> AVG_PRECISION_SHIFT +} diff --git a/artiq/firmware/satman/jdcg.rs b/artiq/firmware/satman/jdcg.rs new file mode 100644 index 000000000..c8a34827b --- /dev/null +++ b/artiq/firmware/satman/jdcg.rs @@ -0,0 +1,589 @@ +pub mod jesd { + use board_misoc::{csr, clock}; + + pub fn reset(reset: bool) { + unsafe { + csr::jesd_crg::jreset_write(if reset {1} else {0}); + } + } + + pub fn enable(dacno: u8, en: bool) { + unsafe { + (csr::JDCG[dacno as usize].jesd_control_enable_write)(if en {1} else {0}) + } + } + + pub fn phy_done(dacno: u8) -> bool { + unsafe { + (csr::JDCG[dacno as usize].jesd_control_phy_done_read)() != 0 + } + } + + pub fn ready(dacno: u8) -> bool { + unsafe { + (csr::JDCG[dacno as usize].jesd_control_ready_read)() != 0 + } + } + + pub fn prbs(dacno: u8, en: bool) { + unsafe { + (csr::JDCG[dacno as usize].jesd_control_prbs_config_write)(if en {0b01} else {0b00}) + } + clock::spin_us(5000); + } + + pub fn stpl(dacno: u8, en: bool) { + unsafe { + 
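// Illustrative sketch (not from the patch): average_2phases/average_phases in
// jdac_common.rs above compute a circular mean, so two phases on either side
// of the wraparound point average to the wraparound point itself instead of
// the middle of the range.  average_2phases is restated verbatim here so the
// behaviour can be checked standalone.
fn average_2phases(a: i32, b: i32, modulo: i32) -> i32 {
    let diff = ((a - b + modulo/2 + modulo) % modulo) - modulo/2;
    (modulo + b + diff/2) % modulo
}

fn main() {
    let modulo = 64;
    // 63 and 1 straddle the wrap point of a 64-count circle: the circular mean
    // is 0, whereas a naive arithmetic mean would report 32.
    assert_eq!(average_2phases(63, 1, modulo), 0);
    assert_eq!((63 + 1) / 2, 32);
    // Away from the wrap point it behaves like an ordinary mean.
    assert_eq!(average_2phases(10, 20, modulo), 15);
    // average_phases above reduces a power-of-two-length slice pairwise with
    // this primitive, which is why a length-1 input is rejected with a panic.
}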
(csr::JDCG[dacno as usize].jesd_control_stpl_enable_write)(if en {1} else {0}) + } + clock::spin_us(5000); + } + + pub fn jsync(dacno: u8) -> bool { + unsafe { + (csr::JDCG[dacno as usize].jesd_control_jsync_read)() != 0 + } + } +} + +pub mod jdac { + use board_misoc::{csr, clock}; + use board_artiq::drtioaux; + + use super::jesd; + use super::super::jdac_common; + + pub fn basic_request(dacno: u8, reqno: u8, param: u8) -> Result { + if let Err(e) = drtioaux::send(1, &drtioaux::Packet::JdacBasicRequest { + destination: 0, + dacno: dacno, + reqno: reqno, + param: param + }) { + error!("aux packet error ({})", e); + return Err("aux packet error while sending for JESD DAC basic request"); + } + match drtioaux::recv_timeout(1, Some(1000)) { + Ok(drtioaux::Packet::JdacBasicReply { succeeded, retval }) => { + if succeeded { + Ok(retval) + } else { + error!("JESD DAC basic request failed (dacno={}, reqno={})", dacno, reqno); + Err("remote error status to JESD DAC basic request") + } + }, + Ok(packet) => { + error!("received unexpected aux packet: {:?}", packet); + Err("unexpected aux packet in reply to JESD DAC basic request") + }, + Err(e) => { + error!("aux packet error ({})", e); + Err("aux packet error while waiting for JESD DAC basic reply") + } + } + } + + pub fn init() -> Result<(), &'static str> { + for dacno in 0..csr::JDCG.len() { + let dacno = dacno as u8; + info!("DAC-{} initializing...", dacno); + + jesd::enable(dacno, true); + clock::spin_us(10_000); + if !jesd::phy_done(dacno) { + error!("JESD core PHY not done"); + return Err("JESD core PHY not done"); + } + + basic_request(dacno, jdac_common::INIT, 0)?; + + // JESD ready depends on JSYNC being valid, so DAC init needs to happen first + if !jesd::ready(dacno) { + error!("JESD core reported not ready, sending DAC status print request"); + basic_request(dacno, jdac_common::PRINT_STATUS, 0)?; + return Err("JESD core reported not ready"); + } + + jesd::prbs(dacno, true); + basic_request(dacno, jdac_common::PRBS, 0)?; + jesd::prbs(dacno, false); + + basic_request(dacno, jdac_common::INIT, 0)?; + clock::spin_us(5000); + + if !jesd::jsync(dacno) { + error!("JESD core reported bad SYNC"); + return Err("JESD core reported bad SYNC"); + } + + info!(" ...done initializing"); + } + Ok(()) + } + + pub fn stpl() -> Result<(), &'static str> { + for dacno in 0..csr::JDCG.len() { + let dacno = dacno as u8; + + info!("Running STPL test on DAC-{}...", dacno); + + jesd::stpl(dacno, true); + basic_request(dacno, jdac_common::STPL, 0)?; + jesd::stpl(dacno, false); + + info!(" ...done STPL test"); + } + Ok(()) + } +} + +pub mod jesd204sync { + use board_misoc::{csr, clock, config}; + + use super::jdac; + use super::super::jdac_common; + + const HMC7043_ANALOG_DELAY_RANGE: u8 = 24; + + const FPGA_CLK_DIV: u16 = 16; // Keep in sync with hmc830_7043.rs + const SYSREF_DIV: u16 = 256; // Keep in sync with hmc830_7043.rs + + fn hmc7043_sysref_delay_dac(dacno: u8, phase_offset: u8) -> Result<(), &'static str> { + match jdac::basic_request(dacno, jdac_common::SYSREF_DELAY_DAC, phase_offset) { + Ok(_) => Ok(()), + Err(e) => Err(e) + } + } + + + fn hmc7043_sysref_slip() -> Result<(), &'static str> { + match jdac::basic_request(0, jdac_common::SYSREF_SLIP, 0) { + Ok(_) => Ok(()), + Err(e) => Err(e) + } + } + + fn ad9154_sync(dacno: u8) -> Result { + match jdac::basic_request(dacno, jdac_common::SYNC, 0) { + Ok(0) => Ok(false), + Ok(_) => Ok(true), + Err(e) => Err(e) + } + } + + fn measure_ddmdt_phase_raw() -> Result { + Ok(jdac::basic_request(0, 
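// Illustrative sketch (not from the patch): jdac::basic_request above sends a
// JdacBasicRequest on drtioaux link 1 and waits up to 1000 ms for a
// JdacBasicReply, collapsing the (succeeded, retval) pair into a
// Result<u8, &'static str>.  The Reply enum below is a stub standing in for
// the drtioaux packet/receive machinery; only the outcome classification
// mirrors the code above.
enum Reply {
    JdacBasicReply { succeeded: bool, retval: u8 },
    Unexpected,
    TimedOut,
}

fn classify_reply(reply: Reply) -> Result<u8, &'static str> {
    match reply {
        Reply::JdacBasicReply { succeeded: true, retval } => Ok(retval),
        Reply::JdacBasicReply { succeeded: false, .. } =>
            Err("remote error status to JESD DAC basic request"),
        Reply::Unexpected =>
            Err("unexpected aux packet in reply to JESD DAC basic request"),
        Reply::TimedOut =>
            Err("aux packet error while waiting for JESD DAC basic reply"),
    }
}

fn main() {
    assert_eq!(classify_reply(Reply::JdacBasicReply { succeeded: true, retval: 1 }), Ok(1));
    assert!(classify_reply(Reply::JdacBasicReply { succeeded: false, retval: 0 }).is_err());
    assert!(classify_reply(Reply::Unexpected).is_err());
    assert!(classify_reply(Reply::TimedOut).is_err());
}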
jdac_common::DDMTD_SYSREF_RAW, 0)? as i32) + } + + fn measure_ddmdt_phase() -> Result { + Ok(jdac::basic_request(0, jdac_common::DDMTD_SYSREF, 0)? as i32) + } + + fn test_ddmtd_stability(raw: bool, tolerance: i32) -> Result<(), &'static str> { + info!("testing DDMTD stability (raw={}, tolerance={})...", raw, tolerance); + + let modulo = if raw { jdac_common::RAW_DDMTD_N } else { jdac_common::DDMTD_N }; + let measurement = if raw { measure_ddmdt_phase_raw } else { measure_ddmdt_phase }; + let ntests = if raw { 150 } else { 15 }; + + let mut max_pkpk = 0; + for _ in 0..32 { + // If we are near the edges, wraparound can throw off the simple min/max computation. + // In this case, add an offset to get near the center. + let quadrant = measure_ddmdt_phase()?; + let center_offset = + if quadrant < jdac_common::DDMTD_N/4 || quadrant > 3*jdac_common::DDMTD_N/4 { + modulo/2 + } else { + 0 + }; + + let mut min = modulo; + let mut max = 0; + for _ in 0..ntests { + let m = (measurement()? + center_offset) % modulo; + if m < min { + min = m; + } + if m > max { + max = m; + } + } + let pkpk = max - min; + if pkpk > max_pkpk { + max_pkpk = pkpk; + } + if pkpk > tolerance { + error!(" ...excessive peak-peak jitter: {} (min={} max={} center_offset={})", pkpk, + min, max, center_offset); + return Err("excessive DDMTD peak-peak jitter"); + } + hmc7043_sysref_slip(); + } + + info!(" ...passed, peak-peak jitter: {}", max_pkpk); + Ok(()) + } + + fn test_slip_ddmtd() -> Result<(), &'static str> { + // expected_step = (RTIO clock frequency)*(DDMTD N)/(HMC7043 CLKIN frequency) + let expected_step = 8; + let tolerance = 1; + + info!("testing HMC7043 SYSREF slip against DDMTD..."); + let mut old_phase = measure_ddmdt_phase()?; + for _ in 0..1024 { + hmc7043_sysref_slip(); + let phase = measure_ddmdt_phase()?; + let step = (jdac_common::DDMTD_N + old_phase - phase) % jdac_common::DDMTD_N; + if (step - expected_step).abs() > tolerance { + error!(" ...got unexpected step: {} ({} -> {})", step, old_phase, phase); + return Err("HMC7043 SYSREF slip produced unexpected DDMTD step"); + } + old_phase = phase; + } + info!(" ...passed"); + Ok(()) + } + + fn sysref_sh_error() -> bool { + unsafe { + csr::sysref_sampler::sh_error_reset_write(1); + clock::spin_us(1); + csr::sysref_sampler::sh_error_reset_write(0); + clock::spin_us(10); + csr::sysref_sampler::sh_error_read() != 0 + } + } + + const SYSREF_SH_PRECISION_SHIFT: i32 = 5; + const SYSREF_SH_PRECISION: i32 = 1 << SYSREF_SH_PRECISION_SHIFT; + const SYSREF_SH_MOD: i32 = 1 << (jdac_common::DDMTD_N_SHIFT + SYSREF_SH_PRECISION_SHIFT); + + #[derive(Default)] + struct SysrefShLimits { + rising_phases: [i32; SYSREF_SH_PRECISION as usize], + falling_phases: [i32; SYSREF_SH_PRECISION as usize], + } + + fn measure_sysref_sh_limits() -> Result { + let mut ret = SysrefShLimits::default(); + let mut nslips = 0; + let mut rising_n = 0; + let mut falling_n = 0; + + let mut previous = sysref_sh_error(); + while rising_n < SYSREF_SH_PRECISION || falling_n < SYSREF_SH_PRECISION { + hmc7043_sysref_slip(); + nslips += 1; + if nslips > 1024 { + return Err("too many slips and not enough SYSREF S/H error transitions"); + } + + let current = sysref_sh_error(); + let phase = measure_ddmdt_phase()?; + if current && !previous && rising_n < SYSREF_SH_PRECISION { + ret.rising_phases[rising_n as usize] = phase << SYSREF_SH_PRECISION_SHIFT; + rising_n += 1; + } + if !current && previous && falling_n < SYSREF_SH_PRECISION { + ret.falling_phases[falling_n as usize] = phase << SYSREF_SH_PRECISION_SHIFT; + 
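// Illustrative sketch (not from the patch): a worked instance of the
// expected_step formula quoted in test_slip_ddmtd above.  The 150 MHz RTIO
// clock and 2.4 GHz HMC7043 CLKIN figures are assumptions for illustration
// (they are not stated in this file); DDMTD_N = 1 << (6 + 1) = 128 follows
// from RAW_DDMTD_N_SHIFT and DDMTD_DITHER_BITS in jdac_common.rs.
fn main() {
    let rtio_hz: u64 = 150_000_000;             // assumed RTIO clock frequency
    let ddmtd_n: u64 = 1 << (6 + 1);            // DDMTD_N = 128
    let hmc7043_clkin_hz: u64 = 2_400_000_000;  // assumed HMC7043 CLKIN frequency
    // expected_step = (RTIO clock frequency)*(DDMTD N)/(HMC7043 CLKIN frequency)
    let expected_step = rtio_hz * ddmtd_n / hmc7043_clkin_hz;
    assert_eq!(expected_step, 8);  // matches `let expected_step = 8` above
}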
falling_n += 1; + } + previous = current; + } + Ok(ret) + } + + fn max_phase_deviation(average: i32, phases: &[i32]) -> i32 { + let mut ret = 0; + for phase in phases.iter() { + let deviation = (phase - average + jdac_common::DDMTD_N) % jdac_common::DDMTD_N; + if deviation > ret { + ret = deviation; + } + } + return ret; + } + + fn reach_sysref_ddmtd_target(target: i32, tolerance: i32) -> Result { + for _ in 0..1024 { + let delta = (measure_ddmdt_phase()? - target + jdac_common::DDMTD_N) % jdac_common::DDMTD_N; + if delta <= tolerance { + return Ok(delta) + } + hmc7043_sysref_slip(); + } + Err("failed to reach SYSREF DDMTD phase target") + } + + fn calibrate_sysref_target(rising_average: i32, falling_average: i32) -> Result { + info!("calibrating SYSREF DDMTD target phase..."); + let coarse_target = + if rising_average < falling_average { + (rising_average + falling_average)/2 + } else { + ((falling_average - (jdac_common::DDMTD_N - rising_average))/2 + jdac_common::DDMTD_N) % jdac_common::DDMTD_N + }; + info!(" SYSREF calibration coarse target: {}", coarse_target); + reach_sysref_ddmtd_target(coarse_target, 8)?; + let target = measure_ddmdt_phase()?; + info!(" ...done, target={}", target); + Ok(target) + } + + fn sysref_get_tsc_phase_raw() -> Result { + if sysref_sh_error() { + return Err("SYSREF failed S/H timing"); + } + let ret = unsafe { csr::sysref_sampler::sysref_phase_read() }; + Ok(ret) + } + + // Note: the code below assumes RTIO/SYSREF frequency ratio is a power of 2 + + fn sysref_get_tsc_phase() -> Result { + let mask = (SYSREF_DIV/FPGA_CLK_DIV - 1) as u8; + Ok((sysref_get_tsc_phase_raw()? & mask) as i32) + } + + pub fn test_sysref_frequency() -> Result<(), &'static str> { + info!("testing SYSREF frequency against raw TSC phase bit toggles..."); + + let mut all_toggles = 0; + let initial_phase = sysref_get_tsc_phase_raw()?; + for _ in 0..20000 { + clock::spin_us(1); + all_toggles |= sysref_get_tsc_phase_raw()? ^ initial_phase; + } + + let ratio = (SYSREF_DIV/FPGA_CLK_DIV) as u8; + let expected_toggles = 0xff ^ (ratio - 1); + if all_toggles == expected_toggles { + info!(" ...done (0x{:02x})", all_toggles); + Ok(()) + } else { + error!(" ...unexpected toggles: got 0x{:02x}, expected 0x{:02x}", + all_toggles, expected_toggles); + Err("unexpected toggles") + } + } + + fn sysref_slip_rtio_cycle() { + for _ in 0..FPGA_CLK_DIV { + hmc7043_sysref_slip(); + } + } + + pub fn test_slip_tsc() -> Result<(), &'static str> { + info!("testing HMC7043 SYSREF slip against TSC phase..."); + let initial_phase = sysref_get_tsc_phase()?; + let modulo = (SYSREF_DIV/FPGA_CLK_DIV) as i32; + for i in 0..128 { + sysref_slip_rtio_cycle(); + let expected_phase = (initial_phase + i + 1) % modulo; + let phase = sysref_get_tsc_phase()?; + if phase != expected_phase { + error!(" ...unexpected TSC phase: got {}, expected {} ", phase, expected_phase); + return Err("HMC7043 SYSREF slip produced unexpected TSC phase"); + } + } + info!(" ...done"); + Ok(()) + } + + pub fn sysref_rtio_align() -> Result<(), &'static str> { + info!("aligning SYSREF with RTIO TSC..."); + let mut nslips = 0; + loop { + sysref_slip_rtio_cycle(); + if sysref_get_tsc_phase()? 
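// Illustrative sketch (not from the patch): the constants used by
// sysref_get_tsc_phase and test_sysref_frequency above, worked through with
// the values defined in this file (SYSREF_DIV = 256, FPGA_CLK_DIV = 16).
fn main() {
    let sysref_div: u16 = 256;
    let fpga_clk_div: u16 = 16;
    // One SYSREF period spans ratio = 16 FPGA/RTIO clock cycles...
    let ratio = (sysref_div / fpga_clk_div) as u8;
    // ...so sysref_get_tsc_phase keeps only the low 4 bits of the raw phase.
    let mask = (sysref_div / fpga_clk_div - 1) as u8;
    // test_sysref_frequency passes only if the masked (low) bits never toggle
    // while every bit above the mask is seen toggling.
    let expected_toggles = 0xffu8 ^ (ratio - 1);
    assert_eq!(ratio, 16);
    assert_eq!(mask, 0x0f);
    assert_eq!(expected_toggles, 0xf0);
}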
== 0 { + info!(" ...done"); + return Ok(()) + } + + nslips += 1; + if nslips > SYSREF_DIV/FPGA_CLK_DIV { + return Err("failed to find SYSREF transition aligned with RTIO TSC"); + } + } + } + + pub fn sysref_auto_rtio_align() -> Result<(), &'static str> { + test_ddmtd_stability(true, 4)?; + test_ddmtd_stability(false, 1)?; + test_slip_ddmtd()?; + + info!("determining SYSREF S/H limits..."); + let sysref_sh_limits = measure_sysref_sh_limits()?; + let rising_average = jdac_common::average_phases(&sysref_sh_limits.rising_phases, SYSREF_SH_MOD); + let falling_average = jdac_common::average_phases(&sysref_sh_limits.falling_phases, SYSREF_SH_MOD); + let rising_max_deviation = max_phase_deviation(rising_average, &sysref_sh_limits.rising_phases); + let falling_max_deviation = max_phase_deviation(falling_average, &sysref_sh_limits.falling_phases); + + let rising_average = rising_average >> SYSREF_SH_PRECISION_SHIFT; + let falling_average = falling_average >> SYSREF_SH_PRECISION_SHIFT; + let rising_max_deviation = rising_max_deviation >> SYSREF_SH_PRECISION_SHIFT; + let falling_max_deviation = falling_max_deviation >> SYSREF_SH_PRECISION_SHIFT; + + info!(" SYSREF S/H average limits (DDMTD phases): {} {}", rising_average, falling_average); + info!(" SYSREF S/H maximum limit deviation: {} {}", rising_max_deviation, falling_max_deviation); + if rising_max_deviation > 8 || falling_max_deviation > 8 { + return Err("excessive SYSREF S/H limit deviation"); + } + info!(" ...done"); + + let entry = config::read_str("sysref_ddmtd_phase_fpga", |r| r.map(|s| s.parse())); + let target_phase = match entry { + Ok(Ok(phase)) => { + info!("using FPGA SYSREF DDMTD phase target from config: {}", phase); + phase + } + _ => { + let phase = calibrate_sysref_target(rising_average, falling_average)?; + if let Err(e) = config::write_int("sysref_ddmtd_phase_fpga", phase as u32) { + error!("failed to update FPGA SYSREF DDMTD phase target in config: {}", e); + } + phase + } + }; + + info!("aligning SYSREF with RTIO clock..."); + let delta = reach_sysref_ddmtd_target(target_phase, 3)?; + if sysref_sh_error() { + return Err("SYSREF does not meet S/H timing at DDMTD phase target"); + } + info!(" ...done, delta={}", delta); + + test_sysref_frequency()?; + test_slip_tsc()?; + sysref_rtio_align()?; + + Ok(()) + } + + fn sysref_cal_dac(dacno: u8) -> Result { + info!("calibrating SYSREF delay at DAC-{}...", dacno); + + // Allocate for more than expected as jitter may create spurious entries. + let mut limits_buf = [0; 8]; + let mut n_limits = 0; + + limits_buf[n_limits] = -1; + n_limits += 1; + + // avoid spurious rotation at delay=0 + hmc7043_sysref_delay_dac(dacno, 0); + ad9154_sync(dacno)?; + + for scan_delay in 0..HMC7043_ANALOG_DELAY_RANGE { + hmc7043_sysref_delay_dac(dacno, scan_delay); + if ad9154_sync(dacno)? 
{ + limits_buf[n_limits] = scan_delay as i16; + n_limits += 1; + if n_limits >= limits_buf.len() - 1 { + break; + } + } + } + + limits_buf[n_limits] = HMC7043_ANALOG_DELAY_RANGE as i16; + n_limits += 1; + + info!(" using limits: {:?}", &limits_buf[..n_limits]); + + let mut delay = 0; + let mut best_margin = 0; + + for i in 0..(n_limits-1) { + let margin = limits_buf[i+1] - limits_buf[i]; + if margin > best_margin { + best_margin = margin; + delay = ((limits_buf[i+1] + limits_buf[i])/2) as u8; + } + } + + info!(" ...done, delay={}", delay); + Ok(delay) + } + + fn sysref_dac_align(dacno: u8, delay: u8) -> Result<(), &'static str> { + let tolerance = 5; + + info!("verifying SYSREF margins at DAC-{}...", dacno); + + // avoid spurious rotation at delay=0 + hmc7043_sysref_delay_dac(dacno, 0); + ad9154_sync(dacno)?; + + let mut rotation_seen = false; + for scan_delay in 0..HMC7043_ANALOG_DELAY_RANGE { + hmc7043_sysref_delay_dac(dacno, scan_delay); + if ad9154_sync(dacno)? { + rotation_seen = true; + let distance = (scan_delay as i16 - delay as i16).abs(); + if distance < tolerance { + error!(" rotation at delay={} is {} delay steps from target (FAIL)", scan_delay, distance); + return Err("insufficient SYSREF margin at DAC"); + } else { + info!(" rotation at delay={} is {} delay steps from target (PASS)", scan_delay, distance); + } + } + } + + if !rotation_seen { + return Err("no rotation seen when scanning DAC SYSREF delay"); + } + + info!(" ...done"); + + // We tested that the value is correct - now use it + info!("synchronizing DAC-{}", dacno); + hmc7043_sysref_delay_dac(dacno, delay); + ad9154_sync(dacno)?; + + Ok(()) + } + + pub fn sysref_auto_dac_align() -> Result<(), &'static str> { + // We assume that DAC SYSREF traces are length-matched so only one delay + // value is needed, and we use DAC-0 as calibration reference. + + let entry = config::read_str("sysref_7043_delay_dac", |r| r.map(|s| s.parse())); + let delay = match entry { + Ok(Ok(delay)) => { + info!("using DAC SYSREF delay from config: {}", delay); + delay + }, + _ => { + let delay = sysref_cal_dac(0)?; + if let Err(e) = config::write_int("sysref_7043_delay_dac", delay as u32) { + error!("failed to update DAC SYSREF delay in config: {}", e); + } + delay + } + }; + + for dacno in 0..csr::JDCG.len() { + sysref_dac_align(dacno as u8, delay)?; + } + Ok(()) + } + + pub fn sysref_auto_align() { + if let Err(e) = sysref_auto_rtio_align() { + error!("failed to align SYSREF at FPGA: {}", e); + } + if let Err(e) = sysref_auto_dac_align() { + error!("failed to align SYSREF at DAC: {}", e); + } + } + + pub fn resync_dacs() -> Result<(), &'static str> { + for dacno in 0..csr::JDCG.len() { + info!("resynchronizing DAC-{}", dacno); + ad9154_sync(dacno as u8)?; + } + Ok(()) + } +} diff --git a/artiq/firmware/satman/lib.rs b/artiq/firmware/satman/lib.rs deleted file mode 100644 index 6cf05a137..000000000 --- a/artiq/firmware/satman/lib.rs +++ /dev/null @@ -1,260 +0,0 @@ -#![feature(compiler_builtins_lib, lang_items)] -#![no_std] - -extern crate compiler_builtins; -extern crate alloc_artiq; -extern crate std_artiq as std; -#[macro_use] -extern crate log; -extern crate logger_artiq; -#[macro_use] -extern crate board; -extern crate drtioaux; - -fn process_aux_packet(p: &drtioaux::Packet) { - // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels, - // and u16 otherwise; hence the `as _` conversion. 
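// Illustrative sketch (not from the patch): sysref_cal_dac above brackets the
// delay scan with -1 and HMC7043_ANALOG_DELAY_RANGE, records every delay at
// which ad9154_sync reported a rotation, and then picks the midpoint of the
// widest gap between consecutive entries.  The same selection logic is run
// here on made-up rotation delays (5 and 18 are hypothetical).
fn pick_delay(limits: &[i16]) -> u8 {
    let mut delay = 0;
    let mut best_margin = 0;
    for i in 0..(limits.len() - 1) {
        let margin = limits[i + 1] - limits[i];
        if margin > best_margin {
            best_margin = margin;
            delay = ((limits[i + 1] + limits[i]) / 2) as u8;
        }
    }
    delay
}

fn main() {
    // Brackets -1 and 24 come from the code above; the widest rotation-free
    // interval is 5..18, so the calibrated delay is its midpoint, 11.
    assert_eq!(pick_delay(&[-1, 5, 18, 24]), 11);
}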
- match *p { - drtioaux::Packet::EchoRequest => drtioaux::hw::send_link(0, &drtioaux::Packet::EchoReply).unwrap(), - - drtioaux::Packet::RtioErrorRequest => { - let errors; - unsafe { - errors = (board::csr::DRTIO[0].rtio_error_read)(); - } - if errors & 1 != 0 { - unsafe { - (board::csr::DRTIO[0].rtio_error_write)(1); - } - drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorCollisionReply).unwrap(); - } else if errors & 2 != 0 { - unsafe { - (board::csr::DRTIO[0].rtio_error_write)(2); - } - drtioaux::hw::send_link(0, &drtioaux::Packet::RtioErrorBusyReply).unwrap(); - } else { - drtioaux::hw::send_link(0, &drtioaux::Packet::RtioNoErrorReply).unwrap(); - } - } - - drtioaux::Packet::MonitorRequest { channel, probe } => { - let value; - #[cfg(has_rtio_moninj)] - unsafe { - board::csr::rtio_moninj::mon_chan_sel_write(channel as _); - board::csr::rtio_moninj::mon_probe_sel_write(probe); - board::csr::rtio_moninj::mon_value_update_write(1); - value = board::csr::rtio_moninj::mon_value_read(); - } - #[cfg(not(has_rtio_moninj))] - { - value = 0; - } - let reply = drtioaux::Packet::MonitorReply { value: value as u32 }; - drtioaux::hw::send_link(0, &reply).unwrap(); - }, - drtioaux::Packet::InjectionRequest { channel, overrd, value } => { - #[cfg(has_rtio_moninj)] - unsafe { - board::csr::rtio_moninj::inj_chan_sel_write(channel as _); - board::csr::rtio_moninj::inj_override_sel_write(overrd); - board::csr::rtio_moninj::inj_value_write(value); - } - }, - drtioaux::Packet::InjectionStatusRequest { channel, overrd } => { - let value; - #[cfg(has_rtio_moninj)] - unsafe { - board::csr::rtio_moninj::inj_chan_sel_write(channel as _); - board::csr::rtio_moninj::inj_override_sel_write(overrd); - value = board::csr::rtio_moninj::inj_value_read(); - } - #[cfg(not(has_rtio_moninj))] - { - value = 0; - } - let reply = drtioaux::Packet::InjectionStatusReply { value: value }; - drtioaux::hw::send_link(0, &reply).unwrap(); - }, - - drtioaux::Packet::I2cStartRequest { busno } => { - let succeeded = board::i2c::start(busno).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }).unwrap(); - } - drtioaux::Packet::I2cRestartRequest { busno } => { - let succeeded = board::i2c::restart(busno).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }).unwrap(); - } - drtioaux::Packet::I2cStopRequest { busno } => { - let succeeded = board::i2c::stop(busno).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }).unwrap(); - } - drtioaux::Packet::I2cWriteRequest { busno, data } => { - match board::i2c::write(busno, data) { - Ok(ack) => drtioaux::hw::send_link(0, &drtioaux::Packet::I2cWriteReply { succeeded: true, ack: ack }).unwrap(), - Err(_) => drtioaux::hw::send_link(0, &drtioaux::Packet::I2cWriteReply { succeeded: false, ack: false }).unwrap() - }; - } - drtioaux::Packet::I2cReadRequest { busno, ack } => { - match board::i2c::read(busno, ack) { - Ok(data) => drtioaux::hw::send_link(0, &drtioaux::Packet::I2cReadReply { succeeded: true, data: data }).unwrap(), - Err(_) => drtioaux::hw::send_link(0, &drtioaux::Packet::I2cReadReply { succeeded: false, data: 0xff }).unwrap() - }; - } - - drtioaux::Packet::SpiSetConfigRequest { busno, flags, write_div, read_div } => { - let succeeded = board::spi::set_config(busno, flags, write_div, read_div).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }).unwrap(); - }, - drtioaux::Packet::SpiSetXferRequest { busno, 
chip_select, write_length, read_length } => { - let succeeded = board::spi::set_xfer(busno, chip_select, write_length, read_length).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }).unwrap(); - } - drtioaux::Packet::SpiWriteRequest { busno, data } => { - let succeeded = board::spi::write(busno, data).is_ok(); - drtioaux::hw::send_link(0, &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }).unwrap(); - } - drtioaux::Packet::SpiReadRequest { busno } => { - match board::spi::read(busno) { - Ok(data) => drtioaux::hw::send_link(0, &drtioaux::Packet::SpiReadReply { succeeded: true, data: data }).unwrap(), - Err(_) => drtioaux::hw::send_link(0, &drtioaux::Packet::SpiReadReply { succeeded: false, data: 0 }).unwrap() - }; - } - - _ => warn!("received unexpected aux packet {:?}", p) - } -} - -fn process_aux_packets() { - let pr = drtioaux::hw::recv_link(0); - match pr { - Ok(None) => (), - Ok(Some(p)) => process_aux_packet(&p), - Err(e) => warn!("aux packet error ({})", e) - } -} - - -fn process_errors() { - let errors; - unsafe { - errors = (board::csr::DRTIO[0].protocol_error_read)(); - (board::csr::DRTIO[0].protocol_error_write)(errors); - } - if errors & 1 != 0 { - error!("received packet of an unknown type"); - } - if errors & 2 != 0 { - error!("received truncated packet"); - } - if errors & 4 != 0 { - error!("write underflow"); - } - if errors & 8 != 0 { - error!("write overflow"); - } - if errors & 16 != 0 { - error!("write sequence error"); - } -} - - -#[cfg(rtio_frequency = "62.5")] -const SI5324_SETTINGS: board::si5324::FrequencySettings - = board::si5324::FrequencySettings { - n1_hs : 10, - nc1_ls : 8, - n2_hs : 10, - n2_ls : 20112, - n31 : 2514, - n32 : 4597, - bwsel : 4 -}; - -#[cfg(rtio_frequency = "150.0")] -const SI5324_SETTINGS: board::si5324::FrequencySettings - = board::si5324::FrequencySettings { - n1_hs : 9, - nc1_ls : 4, - n2_hs : 10, - n2_ls : 33732, - n31 : 9370, - n32 : 7139, - bwsel : 3 -}; - -fn drtio_link_is_up() -> bool { - unsafe { - (board::csr::DRTIO[0].link_status_read)() == 1 - } -} - -fn startup() { - board::clock::init(); - info!("ARTIQ satellite manager starting..."); - info!("software version {}", include_str!(concat!(env!("OUT_DIR"), "/git-describe"))); - info!("gateware version {}", board::ident(&mut [0; 64])); - - #[cfg(has_serwb_phy_amc)] - board::serwb::wait_init(); - - #[cfg(has_hmc830_7043)] - board::hmc830_7043::init().expect("cannot initialize HMC830/7043"); - board::i2c::init(); - board::si5324::setup(&SI5324_SETTINGS).expect("cannot initialize Si5324"); - - loop { - while !drtio_link_is_up() { - process_errors(); - } - info!("link is up, switching to recovered clock"); - board::si5324::select_ext_input(true).expect("failed to switch clocks"); - while drtio_link_is_up() { - process_errors(); - process_aux_packets(); - } - info!("link is down, switching to local crystal clock"); - board::si5324::select_ext_input(false).expect("failed to switch clocks"); - } -} - -#[no_mangle] -pub extern fn main() -> i32 { - unsafe { - extern { - static mut _fheap: u8; - static mut _eheap: u8; - } - alloc_artiq::seed(&mut _fheap as *mut u8, - &_eheap as *const u8 as usize - &_fheap as *const u8 as usize); - - static mut LOG_BUFFER: [u8; 65536] = [0; 65536]; - logger_artiq::BufferLogger::new(&mut LOG_BUFFER[..]).register(startup); - 0 - } -} - -#[no_mangle] -pub extern fn exception_handler(vect: u32, _regs: *const u32, pc: u32, ea: u32) { - panic!("exception {:?} at PC 0x{:x}, EA 0x{:x}", vect, pc, ea) -} - 
-#[no_mangle] -pub extern fn abort() { - panic!("aborted") -} - -#[no_mangle] -#[lang = "panic_fmt"] -pub extern fn panic_fmt(args: core::fmt::Arguments, file: &'static str, line: u32) -> ! { - println!("panic at {}:{}: {}", file, line, args); - loop {} -} - -// Allow linking with crates that are built as -Cpanic=unwind even if we use -Cpanic=abort. -// This is never called. -#[allow(non_snake_case)] -#[no_mangle] -pub extern "C" fn _Unwind_Resume() -> ! { - loop {} -} diff --git a/artiq/firmware/satman/main.rs b/artiq/firmware/satman/main.rs new file mode 100644 index 000000000..0b6d1cccb --- /dev/null +++ b/artiq/firmware/satman/main.rs @@ -0,0 +1,691 @@ +#![feature(never_type, panic_implementation, panic_info_message, const_slice_len, try_from)] +#![no_std] + +#[macro_use] +extern crate log; +#[macro_use] +extern crate board_misoc; +extern crate board_artiq; + +use core::convert::TryFrom; +use board_misoc::{csr, irq, ident, clock, uart_logger, i2c}; +#[cfg(has_si5324)] +use board_artiq::si5324; +#[cfg(has_wrpll)] +use board_artiq::wrpll; +use board_artiq::{spi, drtioaux}; +use board_artiq::drtio_routing; +#[cfg(has_hmc830_7043)] +use board_artiq::hmc830_7043; + +mod repeater; +#[cfg(has_jdcg)] +mod jdcg; +#[cfg(any(has_ad9154, has_jdcg))] +pub mod jdac_common; + +fn drtiosat_reset(reset: bool) { + unsafe { + csr::drtiosat::reset_write(if reset { 1 } else { 0 }); + } +} + +fn drtiosat_reset_phy(reset: bool) { + unsafe { + csr::drtiosat::reset_phy_write(if reset { 1 } else { 0 }); + } +} + +fn drtiosat_link_rx_up() -> bool { + unsafe { + csr::drtiosat::rx_up_read() == 1 + } +} + +fn drtiosat_tsc_loaded() -> bool { + unsafe { + let tsc_loaded = csr::drtiosat::tsc_loaded_read() == 1; + if tsc_loaded { + csr::drtiosat::tsc_loaded_write(1); + } + tsc_loaded + } +} + + +#[cfg(has_drtio_routing)] +macro_rules! forward { + ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr) => {{ + let hop = $routing_table.0[$destination as usize][$rank as usize]; + if hop != 0 { + let repno = (hop - 1) as usize; + if repno < $repeaters.len() { + return $repeaters[repno].aux_forward($packet); + } else { + return Err(drtioaux::Error::RoutingError); + } + } + }} +} + +#[cfg(not(has_drtio_routing))] +macro_rules! forward { + ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr) => {} +} + +fn process_aux_packet(_repeaters: &mut [repeater::Repeater], + _routing_table: &mut drtio_routing::RoutingTable, _rank: &mut u8, + packet: drtioaux::Packet) -> Result<(), drtioaux::Error> { + // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels, + // and u16 otherwise; hence the `as _` conversion. 
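// Illustrative sketch (not from the patch): the forward! macro above resolves
// the next hop for an aux packet from routing_table.0[destination][rank].
// Hop 0 means the packet is handled locally, hop n >= 1 means it is forwarded
// through downstream repeater n - 1, and a hop beyond the repeater array is a
// routing error.  The table dimensions below are stand-ins for the firmware's
// DEST_COUNT and MAX_HOPS constants.
const DEST_COUNT: usize = 256;
const MAX_HOPS: usize = 32;

#[derive(Debug, PartialEq)]
enum Route {
    Local,
    Repeater(usize),
    RoutingError,
}

fn resolve(routing_table: &[[u8; MAX_HOPS]; DEST_COUNT],
           destination: u8, rank: u8, n_repeaters: usize) -> Route {
    let hop = routing_table[destination as usize][rank as usize];
    if hop == 0 {
        Route::Local
    } else {
        let repno = (hop - 1) as usize;
        if repno < n_repeaters { Route::Repeater(repno) } else { Route::RoutingError }
    }
}

fn main() {
    let mut table = [[0u8; MAX_HOPS]; DEST_COUNT];
    table[5][1] = 2;  // seen from a rank-1 satellite, destination 5 goes via repeater #1
    assert_eq!(resolve(&table, 5, 1, 3), Route::Repeater(1));
    assert_eq!(resolve(&table, 0, 1, 3), Route::Local);
    assert_eq!(resolve(&table, 5, 1, 1), Route::RoutingError);
}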
+ match packet { + drtioaux::Packet::EchoRequest => + drtioaux::send(0, &drtioaux::Packet::EchoReply), + drtioaux::Packet::ResetRequest => { + info!("resetting RTIO"); + drtiosat_reset(true); + clock::spin_us(100); + drtiosat_reset(false); + for rep in _repeaters.iter() { + if let Err(e) = rep.rtio_reset() { + error!("failed to issue RTIO reset ({})", e); + } + } + drtioaux::send(0, &drtioaux::Packet::ResetAck) + }, + + drtioaux::Packet::DestinationStatusRequest { destination: _destination } => { + #[cfg(has_drtio_routing)] + let hop = _routing_table.0[_destination as usize][*_rank as usize]; + #[cfg(not(has_drtio_routing))] + let hop = 0; + + if hop == 0 { + let errors; + unsafe { + errors = csr::drtiosat::rtio_error_read(); + } + if errors & 1 != 0 { + let channel; + unsafe { + channel = csr::drtiosat::sequence_error_channel_read(); + csr::drtiosat::rtio_error_write(1); + } + drtioaux::send(0, + &drtioaux::Packet::DestinationSequenceErrorReply { channel })?; + } else if errors & 2 != 0 { + let channel; + unsafe { + channel = csr::drtiosat::collision_channel_read(); + csr::drtiosat::rtio_error_write(2); + } + drtioaux::send(0, + &drtioaux::Packet::DestinationCollisionReply { channel })?; + } else if errors & 4 != 0 { + let channel; + unsafe { + channel = csr::drtiosat::busy_channel_read(); + csr::drtiosat::rtio_error_write(4); + } + drtioaux::send(0, + &drtioaux::Packet::DestinationBusyReply { channel })?; + } + else { + drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?; + } + } + + #[cfg(has_drtio_routing)] + { + if hop != 0 { + let hop = hop as usize; + if hop <= csr::DRTIOREP.len() { + let repno = hop - 1; + match _repeaters[repno].aux_forward(&drtioaux::Packet::DestinationStatusRequest { + destination: _destination + }) { + Ok(()) => (), + Err(drtioaux::Error::LinkDown) => drtioaux::send(0, &drtioaux::Packet::DestinationDownReply)?, + Err(e) => { + drtioaux::send(0, &drtioaux::Packet::DestinationDownReply)?; + error!("aux error when handling destination status request: {}", e); + }, + } + } else { + drtioaux::send(0, &drtioaux::Packet::DestinationDownReply)?; + } + } + } + + Ok(()) + } + + #[cfg(has_drtio_routing)] + drtioaux::Packet::RoutingSetPath { destination, hops } => { + _routing_table.0[destination as usize] = hops; + for rep in _repeaters.iter() { + if let Err(e) = rep.set_path(destination, &hops) { + error!("failed to set path ({})", e); + } + } + drtioaux::send(0, &drtioaux::Packet::RoutingAck) + } + #[cfg(has_drtio_routing)] + drtioaux::Packet::RoutingSetRank { rank } => { + *_rank = rank; + drtio_routing::interconnect_enable_all(_routing_table, rank); + + let rep_rank = rank + 1; + for rep in _repeaters.iter() { + if let Err(e) = rep.set_rank(rep_rank) { + error!("failed to set rank ({})", e); + } + } + + info!("rank: {}", rank); + info!("routing table: {}", _routing_table); + + drtioaux::send(0, &drtioaux::Packet::RoutingAck) + } + + #[cfg(not(has_drtio_routing))] + drtioaux::Packet::RoutingSetPath { destination: _, hops: _ } => { + drtioaux::send(0, &drtioaux::Packet::RoutingAck) + } + #[cfg(not(has_drtio_routing))] + drtioaux::Packet::RoutingSetRank { rank: _ } => { + drtioaux::send(0, &drtioaux::Packet::RoutingAck) + } + + drtioaux::Packet::MonitorRequest { destination: _destination, channel, probe } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let value; + #[cfg(has_rtio_moninj)] + unsafe { + csr::rtio_moninj::mon_chan_sel_write(channel as _); + csr::rtio_moninj::mon_probe_sel_write(probe); + 
csr::rtio_moninj::mon_value_update_write(1); + value = csr::rtio_moninj::mon_value_read(); + } + #[cfg(not(has_rtio_moninj))] + { + value = 0; + } + let reply = drtioaux::Packet::MonitorReply { value: value as u32 }; + drtioaux::send(0, &reply) + }, + drtioaux::Packet::InjectionRequest { destination: _destination, channel, overrd, value } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + #[cfg(has_rtio_moninj)] + unsafe { + csr::rtio_moninj::inj_chan_sel_write(channel as _); + csr::rtio_moninj::inj_override_sel_write(overrd); + csr::rtio_moninj::inj_value_write(value); + } + Ok(()) + }, + drtioaux::Packet::InjectionStatusRequest { destination: _destination, channel, overrd } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let value; + #[cfg(has_rtio_moninj)] + unsafe { + csr::rtio_moninj::inj_chan_sel_write(channel as _); + csr::rtio_moninj::inj_override_sel_write(overrd); + value = csr::rtio_moninj::inj_value_read(); + } + #[cfg(not(has_rtio_moninj))] + { + value = 0; + } + drtioaux::send(0, &drtioaux::Packet::InjectionStatusReply { value: value }) + }, + + drtioaux::Packet::I2cStartRequest { destination: _destination, busno } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = i2c::start(busno).is_ok(); + drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) + } + drtioaux::Packet::I2cRestartRequest { destination: _destination, busno } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = i2c::restart(busno).is_ok(); + drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) + } + drtioaux::Packet::I2cStopRequest { destination: _destination, busno } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = i2c::stop(busno).is_ok(); + drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded }) + } + drtioaux::Packet::I2cWriteRequest { destination: _destination, busno, data } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + match i2c::write(busno, data) { + Ok(ack) => drtioaux::send(0, + &drtioaux::Packet::I2cWriteReply { succeeded: true, ack: ack }), + Err(_) => drtioaux::send(0, + &drtioaux::Packet::I2cWriteReply { succeeded: false, ack: false }) + } + } + drtioaux::Packet::I2cReadRequest { destination: _destination, busno, ack } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + match i2c::read(busno, ack) { + Ok(data) => drtioaux::send(0, + &drtioaux::Packet::I2cReadReply { succeeded: true, data: data }), + Err(_) => drtioaux::send(0, + &drtioaux::Packet::I2cReadReply { succeeded: false, data: 0xff }) + } + } + + drtioaux::Packet::SpiSetConfigRequest { destination: _destination, busno, flags, length, div, cs } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok(); + drtioaux::send(0, + &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }) + }, + drtioaux::Packet::SpiWriteRequest { destination: _destination, busno, data } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + let succeeded = spi::write(busno, data).is_ok(); + drtioaux::send(0, + &drtioaux::Packet::SpiBasicReply { succeeded: succeeded }) + } + drtioaux::Packet::SpiReadRequest { destination: _destination, busno } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + match spi::read(busno) { 
+ Ok(data) => drtioaux::send(0, + &drtioaux::Packet::SpiReadReply { succeeded: true, data: data }), + Err(_) => drtioaux::send(0, + &drtioaux::Packet::SpiReadReply { succeeded: false, data: 0 }) + } + } + + drtioaux::Packet::JdacBasicRequest { destination: _destination, dacno: _dacno, + reqno: _reqno, param: _param } => { + forward!(_routing_table, _destination, *_rank, _repeaters, &packet); + #[cfg(has_ad9154)] + let (succeeded, retval) = { + #[cfg(rtio_frequency = "125.0")] + const LINERATE: u64 = 5_000_000_000; + #[cfg(rtio_frequency = "150.0")] + const LINERATE: u64 = 6_000_000_000; + match _reqno { + jdac_common::INIT => (board_artiq::ad9154::setup(_dacno, LINERATE).is_ok(), 0), + jdac_common::PRINT_STATUS => { board_artiq::ad9154::status(_dacno); (true, 0) }, + jdac_common::PRBS => (board_artiq::ad9154::prbs(_dacno).is_ok(), 0), + jdac_common::STPL => (board_artiq::ad9154::stpl(_dacno, 4, 2).is_ok(), 0), + jdac_common::SYSREF_DELAY_DAC => { board_artiq::hmc830_7043::hmc7043::sysref_delay_dac(_dacno, _param); (true, 0) }, + jdac_common::SYSREF_SLIP => { board_artiq::hmc830_7043::hmc7043::sysref_slip(); (true, 0) }, + jdac_common::SYNC => { + match board_artiq::ad9154::sync(_dacno) { + Ok(false) => (true, 0), + Ok(true) => (true, 1), + Err(e) => { + error!("DAC sync failed: {}", e); + (false, 0) + } + } + }, + jdac_common::DDMTD_SYSREF_RAW => (true, jdac_common::measure_ddmdt_phase_raw() as u8), + jdac_common::DDMTD_SYSREF => (true, jdac_common::measure_ddmdt_phase() as u8), + _ => (false, 0) + } + }; + #[cfg(not(has_ad9154))] + let (succeeded, retval) = (false, 0); + drtioaux::send(0, + &drtioaux::Packet::JdacBasicReply { succeeded: succeeded, retval: retval }) + } + + _ => { + warn!("received unexpected aux packet"); + Ok(()) + } + } +} + +fn process_aux_packets(repeaters: &mut [repeater::Repeater], + routing_table: &mut drtio_routing::RoutingTable, rank: &mut u8) { + let result = + drtioaux::recv(0).and_then(|packet| { + if let Some(packet) = packet { + process_aux_packet(repeaters, routing_table, rank, packet) + } else { + Ok(()) + } + }); + match result { + Ok(()) => (), + Err(e) => warn!("aux packet error ({})", e) + } +} + +fn drtiosat_process_errors() { + let errors; + unsafe { + errors = csr::drtiosat::protocol_error_read(); + } + if errors & 1 != 0 { + error!("received packet of an unknown type"); + } + if errors & 2 != 0 { + error!("received truncated packet"); + } + if errors & 4 != 0 { + let destination; + unsafe { + destination = csr::drtiosat::buffer_space_timeout_dest_read(); + } + error!("timeout attempting to get buffer space from CRI, destination=0x{:02x}", destination) + } + if errors & 8 != 0 { + let channel; + let timestamp_event; + let timestamp_counter; + unsafe { + channel = csr::drtiosat::underflow_channel_read(); + timestamp_event = csr::drtiosat::underflow_timestamp_event_read() as i64; + timestamp_counter = csr::drtiosat::underflow_timestamp_counter_read() as i64; + } + error!("write underflow, channel={}, timestamp={}, counter={}, slack={}", + channel, timestamp_event, timestamp_counter, timestamp_event-timestamp_counter); + } + if errors & 16 != 0 { + error!("write overflow"); + } + unsafe { + csr::drtiosat::protocol_error_write(errors); + } +} + + +#[cfg(has_rtio_crg)] +fn init_rtio_crg() { + unsafe { + csr::rtio_crg::pll_reset_write(0); + } + clock::spin_us(150); + let locked = unsafe { csr::rtio_crg::pll_locked_read() != 0 }; + if !locked { + error!("RTIO clock failed"); + } +} + +#[cfg(not(has_rtio_crg))] +fn init_rtio_crg() { } + +fn 
hardware_tick(ts: &mut u64) { + let now = clock::get_ms(); + if now > *ts { + #[cfg(has_grabber)] + board_artiq::grabber::tick(); + *ts = now + 200; + } +} + +#[cfg(all(has_si5324, rtio_frequency = "150.0"))] +const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 6, + nc1_ls : 6, + n2_hs : 10, + n2_ls : 270, + n31 : 75, + n32 : 75, + bwsel : 4, + crystal_ref: true +}; + +#[cfg(all(has_si5324, rtio_frequency = "125.0"))] +const SI5324_SETTINGS: si5324::FrequencySettings + = si5324::FrequencySettings { + n1_hs : 5, + nc1_ls : 8, + n2_hs : 7, + n2_ls : 360, + n31 : 63, + n32 : 63, + bwsel : 4, + crystal_ref: true +}; + +#[no_mangle] +pub extern fn main() -> i32 { + clock::init(); + uart_logger::ConsoleLogger::register(); + + info!("ARTIQ satellite manager starting..."); + info!("software ident {}", csr::CONFIG_IDENTIFIER_STR); + info!("gateware ident {}", ident::read(&mut [0; 64])); + + #[cfg(has_i2c)] + i2c::init().expect("I2C initialization failed"); + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + let (mut io_expander0, mut io_expander1); + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + { + io_expander0 = board_misoc::io_expander::IoExpander::new(0); + io_expander1 = board_misoc::io_expander::IoExpander::new(1); + io_expander0.init().expect("I2C I/O expander #0 initialization failed"); + io_expander1.init().expect("I2C I/O expander #1 initialization failed"); + #[cfg(has_wrpll)] + { + io_expander0.set_oe(1, 1 << 7).unwrap(); + io_expander0.set(1, 7, true); + io_expander0.service().unwrap(); + io_expander1.set_oe(0, 1 << 7).unwrap(); + io_expander1.set_oe(1, 1 << 7).unwrap(); + io_expander1.set(0, 7, true); + io_expander1.set(1, 7, true); + io_expander1.service().unwrap(); + } + + // Actively drive TX_DISABLE to false on SFP0..3 + io_expander0.set_oe(0, 1 << 1).unwrap(); + io_expander0.set_oe(1, 1 << 1).unwrap(); + io_expander1.set_oe(0, 1 << 1).unwrap(); + io_expander1.set_oe(1, 1 << 1).unwrap(); + io_expander0.set(0, 1, false); + io_expander0.set(1, 1, false); + io_expander1.set(0, 1, false); + io_expander1.set(1, 1, false); + io_expander0.service().unwrap(); + io_expander1.service().unwrap(); + } + + #[cfg(has_si5324)] + si5324::setup(&SI5324_SETTINGS, si5324::Input::Ckin1).expect("cannot initialize Si5324"); + #[cfg(has_wrpll)] + wrpll::init(); + + unsafe { + csr::drtio_transceiver::stable_clkin_write(1); + } + clock::spin_us(1500); // wait for CPLL/QPLL lock + #[cfg(not(has_jdcg))] + unsafe { + csr::drtio_transceiver::txenable_write(0xffffffffu32 as _); + } + #[cfg(has_wrpll)] + wrpll::diagnostics(); + init_rtio_crg(); + + #[cfg(has_hmc830_7043)] + /* must be the first SPI init because of HMC830 SPI mode selection */ + hmc830_7043::init().expect("cannot initialize HMC830/7043"); + #[cfg(has_ad9154)] + { + jdac_common::init_ddmtd().expect("failed to initialize SYSREF DDMTD core"); + for dacno in 0..csr::CONFIG_AD9154_COUNT { + board_artiq::ad9154::reset_and_detect(dacno as u8).expect("AD9154 DAC not detected"); + } + } + + #[cfg(has_drtio_routing)] + let mut repeaters = [repeater::Repeater::default(); csr::DRTIOREP.len()]; + #[cfg(not(has_drtio_routing))] + let mut repeaters = [repeater::Repeater::default(); 0]; + for i in 0..repeaters.len() { + repeaters[i] = repeater::Repeater::new(i as u8); + } + let mut routing_table = drtio_routing::RoutingTable::default_empty(); + let mut rank = 1; + + let mut hardware_tick_ts = 0; + + loop { + #[cfg(has_jdcg)] + unsafe { + // Hide from uplink until RTM is ready + 
csr::drtio_transceiver::txenable_write(0xfffffffeu32 as _); + } + while !drtiosat_link_rx_up() { + drtiosat_process_errors(); + for mut rep in repeaters.iter_mut() { + rep.service(&routing_table, rank); + } + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + { + io_expander0.service().expect("I2C I/O expander #0 service failed"); + io_expander1.service().expect("I2C I/O expander #1 service failed"); + } + hardware_tick(&mut hardware_tick_ts); + } + + info!("uplink is up, switching to recovered clock"); + #[cfg(has_si5324)] + { + si5324::siphaser::select_recovered_clock(true).expect("failed to switch clocks"); + si5324::siphaser::calibrate_skew().expect("failed to calibrate skew"); + } + #[cfg(has_wrpll)] + wrpll::select_recovered_clock(true); + + drtioaux::reset(0); + drtiosat_reset(false); + drtiosat_reset_phy(false); + + #[cfg(has_jdcg)] + let mut was_up = false; + while drtiosat_link_rx_up() { + drtiosat_process_errors(); + process_aux_packets(&mut repeaters, &mut routing_table, &mut rank); + for mut rep in repeaters.iter_mut() { + rep.service(&routing_table, rank); + } + #[cfg(all(soc_platform = "kasli", hw_rev = "v2.0"))] + { + io_expander0.service().expect("I2C I/O expander #0 service failed"); + io_expander1.service().expect("I2C I/O expander #1 service failed"); + } + hardware_tick(&mut hardware_tick_ts); + if drtiosat_tsc_loaded() { + info!("TSC loaded from uplink"); + #[cfg(has_jdcg)] + { + // We assume that the RTM on repeater0 is up. + // Uplink should not send a TSC load command unless the link is + // up, and we are hiding when the RTM is down. + if let Err(e) = jdcg::jesd204sync::sysref_rtio_align() { + error!("failed to align SYSREF with TSC ({})", e); + } + if let Err(e) = jdcg::jesd204sync::resync_dacs() { + error!("DAC resync failed after SYSREF/TSC realignment ({})", e); + } + } + for rep in repeaters.iter() { + if let Err(e) = rep.sync_tsc() { + error!("failed to sync TSC ({})", e); + } + } + if let Err(e) = drtioaux::send(0, &drtioaux::Packet::TSCAck) { + error!("aux packet error: {}", e); + } + } + #[cfg(has_jdcg)] + { + let is_up = repeaters[0].is_up(); + if is_up && !was_up { + /* + * One side of the JESD204 elastic buffer is clocked by the jitter filter + * (Si5324 or WRPLL), the other by the RTM. + * The elastic buffer can operate only when those two clocks are derived from + * the same oscillator. + * This is the case when either of those conditions is true: + * (1) The DRTIO master and the RTM are clocked directly from a common external + * source, *and* the jitter filter has locked to the recovered clock. + * This clocking scheme may provide less noise and phase drift at the DACs. + * (2) The RTM clock is connected to the jitter filter output. + * To handle those cases, we simply keep the JESD204 core in reset unless the + * jitter filter is locked to the recovered clock. 
+ */ + jdcg::jesd::reset(false); + let _ = jdcg::jdac::init(); + jdcg::jesd204sync::sysref_auto_align(); + jdcg::jdac::stpl(); + unsafe { + csr::drtio_transceiver::txenable_write(0xffffffffu32 as _); // unhide + } + } + was_up = is_up; + } + } + + #[cfg(has_jdcg)] + jdcg::jesd::reset(true); + + drtiosat_reset_phy(true); + drtiosat_reset(true); + drtiosat_tsc_loaded(); + info!("uplink is down, switching to local oscillator clock"); + #[cfg(has_si5324)] + si5324::siphaser::select_recovered_clock(false).expect("failed to switch clocks"); + #[cfg(has_wrpll)] + wrpll::select_recovered_clock(false); + } +} + +#[no_mangle] +pub extern fn exception(vect: u32, _regs: *const u32, pc: u32, ea: u32) { + let vect = irq::Exception::try_from(vect).expect("unknown exception"); + + fn hexdump(addr: u32) { + let addr = (addr - addr % 4) as *const u32; + let mut ptr = addr; + println!("@ {:08p}", ptr); + for _ in 0..4 { + print!("+{:04x}: ", ptr as usize - addr as usize); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x} ", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + print!("{:08x}\n", unsafe { *ptr }); ptr = ptr.wrapping_offset(1); + } + } + + hexdump(pc); + hexdump(ea); + panic!("exception {:?} at PC 0x{:x}, EA 0x{:x}", vect, pc, ea) +} + +#[no_mangle] +pub extern fn abort() { + println!("aborted"); + loop {} +} + +#[no_mangle] // https://github.com/rust-lang/rust/issues/{38281,51647} +#[panic_implementation] +pub fn panic_fmt(info: &core::panic::PanicInfo) -> ! { + #[cfg(has_error_led)] + unsafe { + csr::error_led::out_write(1); + } + + if let Some(location) = info.location() { + print!("panic at {}:{}:{}", location.file(), location.line(), location.column()); + } else { + print!("panic at unknown location"); + } + if let Some(message) = info.message() { + println!(": {}", message); + } else { + println!(""); + } + loop {} +} diff --git a/artiq/firmware/satman/repeater.rs b/artiq/firmware/satman/repeater.rs new file mode 100644 index 000000000..9969d5099 --- /dev/null +++ b/artiq/firmware/satman/repeater.rs @@ -0,0 +1,283 @@ +use board_artiq::{drtioaux, drtio_routing}; +#[cfg(has_drtio_routing)] +use board_misoc::{csr, clock}; + +#[cfg(has_drtio_routing)] +fn rep_link_rx_up(repno: u8) -> bool { + let repno = repno as usize; + unsafe { + (csr::DRTIOREP[repno].rx_up_read)() == 1 + } +} + +#[cfg(has_drtio_routing)] +#[derive(Clone, Copy, PartialEq)] +enum RepeaterState { + Down, + SendPing { ping_count: u16 }, + WaitPingReply { ping_count: u16, timeout: u64 }, + Up, + Failed +} + +#[cfg(has_drtio_routing)] +impl Default for RepeaterState { + fn default() -> RepeaterState { RepeaterState::Down } +} + +#[cfg(has_drtio_routing)] +#[derive(Clone, Copy, Default)] +pub struct Repeater { + repno: u8, + auxno: u8, + state: RepeaterState +} + +#[cfg(has_drtio_routing)] +impl Repeater { + pub fn new(repno: u8) -> Repeater { + Repeater { + repno: repno, + auxno: repno + 1, + state: RepeaterState::Down + } + } + + #[allow(dead_code)] + pub fn is_up(&self) -> bool { + self.state == RepeaterState::Up + } + + pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8) { + self.process_local_errors(); + + match self.state { + RepeaterState::Down => { + if rep_link_rx_up(self.repno) { + info!("[REP#{}] link RX became up, pinging", self.repno); + self.state = RepeaterState::SendPing { ping_count: 0 }; + } + } + RepeaterState::SendPing { ping_count } => { + if rep_link_rx_up(self.repno) { + 
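// Illustrative sketch (not from the patch): the main loop above keeps the
// JESD core in reset and the uplink transceiver lane disabled until repeater 0
// (the RTM) reports up, and runs the DAC bring-up exactly once per rising
// edge of that condition by comparing is_up with the previous iteration's
// was_up.  Minimal model of the edge detection:
fn rising_edges(samples: &[bool]) -> usize {
    let mut was_up = false;
    let mut edges = 0;
    for &is_up in samples {
        if is_up && !was_up {
            edges += 1;  // the one-shot bring-up (jdac::init, STPL, unhide) goes here
        }
        was_up = is_up;
    }
    edges
}

fn main() {
    // An RTM link that comes up, drops, and comes up again triggers the
    // bring-up sequence twice.
    assert_eq!(rising_edges(&[false, true, true, false, true]), 2);
}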
drtioaux::send(self.auxno, &drtioaux::Packet::EchoRequest).unwrap(); + self.state = RepeaterState::WaitPingReply { + ping_count: ping_count + 1, + timeout: clock::get_ms() + 100 + } + } else { + error!("[REP#{}] link RX went down during ping", self.repno); + self.state = RepeaterState::Down; + } + } + RepeaterState::WaitPingReply { ping_count, timeout } => { + if rep_link_rx_up(self.repno) { + if let Ok(Some(drtioaux::Packet::EchoReply)) = drtioaux::recv(self.auxno) { + info!("[REP#{}] remote replied after {} packets", self.repno, ping_count); + self.state = RepeaterState::Up; + if let Err(e) = self.sync_tsc() { + error!("[REP#{}] failed to sync TSC ({})", self.repno, e); + self.state = RepeaterState::Failed; + return; + } + if let Err(e) = self.load_routing_table(routing_table) { + error!("[REP#{}] failed to load routing table ({})", self.repno, e); + self.state = RepeaterState::Failed; + return; + } + if let Err(e) = self.set_rank(rank + 1) { + error!("[REP#{}] failed to set rank ({})", self.repno, e); + self.state = RepeaterState::Failed; + return; + } + } else { + if clock::get_ms() > timeout { + if ping_count > 200 { + error!("[REP#{}] ping failed", self.repno); + self.state = RepeaterState::Failed; + } else { + self.state = RepeaterState::SendPing { ping_count: ping_count }; + } + } + } + } else { + error!("[REP#{}] link RX went down during ping", self.repno); + self.state = RepeaterState::Down; + } + } + RepeaterState::Up => { + self.process_unsolicited_aux(); + if !rep_link_rx_up(self.repno) { + info!("[REP#{}] link is down", self.repno); + self.state = RepeaterState::Down; + } + } + RepeaterState::Failed => { + if !rep_link_rx_up(self.repno) { + info!("[REP#{}] link is down", self.repno); + self.state = RepeaterState::Down; + } + } + } + } + + fn process_unsolicited_aux(&self) { + match drtioaux::recv(self.auxno) { + Ok(Some(packet)) => warn!("[REP#{}] unsolicited aux packet: {:?}", self.repno, packet), + Ok(None) => (), + Err(_) => warn!("[REP#{}] aux packet error", self.repno) + } + } + + fn process_local_errors(&self) { + let repno = self.repno as usize; + let errors; + unsafe { + errors = (csr::DRTIOREP[repno].protocol_error_read)(); + } + if errors & 1 != 0 { + error!("[REP#{}] received packet of an unknown type", repno); + } + if errors & 2 != 0 { + error!("[REP#{}] received truncated packet", repno); + } + if errors & 4 != 0 { + let cmd; + let chan_sel; + unsafe { + cmd = (csr::DRTIOREP[repno].command_missed_cmd_read)(); + chan_sel = (csr::DRTIOREP[repno].command_missed_chan_sel_read)(); + } + error!("[REP#{}] CRI command missed, cmd={}, chan_sel=0x{:06x}", repno, cmd, chan_sel) + } + if errors & 8 != 0 { + let destination; + unsafe { + destination = (csr::DRTIOREP[repno].buffer_space_timeout_dest_read)(); + } + error!("[REP#{}] timeout attempting to get remote buffer space, destination=0x{:02x}", repno, destination); + } + unsafe { + (csr::DRTIOREP[repno].protocol_error_write)(errors); + } + } + + fn recv_aux_timeout(&self, timeout: u32) -> Result> { + let max_time = clock::get_ms() + timeout as u64; + loop { + if !rep_link_rx_up(self.repno) { + return Err(drtioaux::Error::LinkDown); + } + if clock::get_ms() > max_time { + return Err(drtioaux::Error::TimedOut); + } + match drtioaux::recv(self.auxno) { + Ok(Some(packet)) => return Ok(packet), + Ok(None) => (), + Err(e) => return Err(e) + } + } + } + + pub fn aux_forward(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> { + if self.state != RepeaterState::Up { + return Err(drtioaux::Error::LinkDown); + } + 
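// Illustrative sketch (not from the patch): the SendPing/WaitPingReply states
// above resend an EchoRequest whenever the 100 ms reply window closes, and
// only move to Failed once more than 200 pings have gone unanswered.  The
// reply oracle below is a stub; only the retry bookkeeping mirrors the code.
fn ping_until_reply<F: FnMut(u16) -> bool>(mut got_reply: F) -> Result<u16, &'static str> {
    let mut ping_count: u16 = 0;
    loop {
        ping_count += 1;             // SendPing
        if got_reply(ping_count) {   // WaitPingReply -> Up
            return Ok(ping_count);
        }
        if ping_count > 200 {        // WaitPingReply -> Failed
            return Err("ping failed");
        }
        // otherwise go back to SendPing and try again
    }
}

fn main() {
    assert_eq!(ping_until_reply(|n| n == 3), Ok(3));
    assert!(ping_until_reply(|_| false).is_err());
}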
drtioaux::send(self.auxno, request).unwrap(); + let reply = self.recv_aux_timeout(200)?; + drtioaux::send(0, &reply).unwrap(); + Ok(()) + } + + pub fn sync_tsc(&self) -> Result<(), drtioaux::Error> { + if self.state != RepeaterState::Up { + return Ok(()); + } + + let repno = self.repno as usize; + unsafe { + (csr::DRTIOREP[repno].set_time_write)(1); + while (csr::DRTIOREP[repno].set_time_read)() == 1 {} + } + + // TSCAck is the only aux packet that is sent spontaneously + // by the satellite, in response to a TSC set on the RT link. + let reply = self.recv_aux_timeout(10000)?; + if reply == drtioaux::Packet::TSCAck { + return Ok(()); + } else { + return Err(drtioaux::Error::UnexpectedReply); + } + } + + pub fn set_path(&self, destination: u8, hops: &[u8; drtio_routing::MAX_HOPS]) -> Result<(), drtioaux::Error> { + if self.state != RepeaterState::Up { + return Ok(()); + } + + drtioaux::send(self.auxno, &drtioaux::Packet::RoutingSetPath { + destination: destination, + hops: *hops + }).unwrap(); + let reply = self.recv_aux_timeout(200)?; + if reply != drtioaux::Packet::RoutingAck { + return Err(drtioaux::Error::UnexpectedReply); + } + Ok(()) + } + + pub fn load_routing_table(&self, routing_table: &drtio_routing::RoutingTable) -> Result<(), drtioaux::Error> { + for i in 0..drtio_routing::DEST_COUNT { + self.set_path(i as u8, &routing_table.0[i])?; + } + Ok(()) + } + + pub fn set_rank(&self, rank: u8) -> Result<(), drtioaux::Error> { + if self.state != RepeaterState::Up { + return Ok(()); + } + drtioaux::send(self.auxno, &drtioaux::Packet::RoutingSetRank { + rank: rank + }).unwrap(); + let reply = self.recv_aux_timeout(200)?; + if reply != drtioaux::Packet::RoutingAck { + return Err(drtioaux::Error::UnexpectedReply); + } + Ok(()) + } + + pub fn rtio_reset(&self) -> Result<(), drtioaux::Error> { + let repno = self.repno as usize; + unsafe { (csr::DRTIOREP[repno].reset_write)(1); } + clock::spin_us(100); + unsafe { (csr::DRTIOREP[repno].reset_write)(0); } + + if self.state != RepeaterState::Up { + return Ok(()); + } + + drtioaux::send(self.auxno, &drtioaux::Packet::ResetRequest).unwrap(); + let reply = self.recv_aux_timeout(200)?; + if reply != drtioaux::Packet::ResetAck { + return Err(drtioaux::Error::UnexpectedReply); + } + Ok(()) + } +} + +#[cfg(not(has_drtio_routing))] +#[derive(Clone, Copy, Default)] +pub struct Repeater { +} + +#[cfg(not(has_drtio_routing))] +impl Repeater { + pub fn new(_repno: u8) -> Repeater { Repeater::default() } + + pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8) { } + + pub fn sync_tsc(&self) -> Result<(), drtioaux::Error> { Ok(()) } + + pub fn rtio_reset(&self) -> Result<(), drtioaux::Error> { Ok(()) } +} diff --git a/artiq/firmware/satman/satman.ld b/artiq/firmware/satman/satman.ld index 89a1d6e7e..69cc737d2 100644 --- a/artiq/firmware/satman/satman.ld +++ b/artiq/firmware/satman/satman.ld @@ -1,22 +1,23 @@ INCLUDE generated/output_format.ld -STARTUP(crt0-or1k.o) -ENTRY(_start) - INCLUDE generated/regions.ld +ENTRY(_reset_handler) SECTIONS { + .vectors : + { + *(.vectors) + } > main_ram + .text : { - _ftext = .; - *(.text .stub .text.* .gnu.linkonce.t.*) - _etext = .; + *(.text .text.*) } > main_ram /* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */ .got : { - _GLOBAL_OFFSET_TABLE_ = .; + PROVIDE(_GLOBAL_OFFSET_TABLE_ = .); *(.got) } > main_ram @@ -27,55 +28,28 @@ SECTIONS .rodata : { - . 
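// Illustrative sketch (not from the patch): aux_forward, sync_tsc, set_path
// and rtio_reset above all rely on the recv_aux_timeout pattern -- poll the
// aux channel until either a packet arrives or a millisecond deadline passes
// (with an extra early exit if the link drops).  Here std::time::Instant and
// a closure stand in for clock::get_ms and drtioaux::recv, purely for
// illustration; the firmware itself is no_std.
use std::time::{Duration, Instant};

fn recv_timeout<T>(mut try_recv: impl FnMut() -> Option<T>,
                   timeout_ms: u64) -> Result<T, &'static str> {
    let deadline = Instant::now() + Duration::from_millis(timeout_ms);
    loop {
        if let Some(packet) = try_recv() {
            return Ok(packet);
        }
        if Instant::now() > deadline {
            return Err("timed out waiting for aux reply");
        }
    }
}

fn main() {
    let mut polls = 0;
    let reply = recv_timeout(|| {
        polls += 1;
        if polls == 5 { Some("RoutingAck") } else { None }
    }, 200);
    assert_eq!(reply, Ok("RoutingAck"));
}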
= ALIGN(4); _frodata = .; - *(.rodata .rodata.* .gnu.linkonce.r.*) - *(.rodata1) + *(.rodata .rodata.*) _erodata = .; } > main_ram .data : { - . = ALIGN(4); - _fdata = .; - *(.data .data.* .gnu.linkonce.d.*) - *(.data1) - *(.sdata .sdata.* .gnu.linkonce.s.*) - _edata = .; + *(.data .data.*) } > main_ram - .bss : + .bss ALIGN(4) : { - . = ALIGN(4); _fbss = .; - *(.dynsbss) - *(.sbss .sbss.* .gnu.linkonce.sb.*) - *(.scommon) - *(.dynbss) - *(.bss .bss.* .gnu.linkonce.b.*) - *(COMMON) + *(.bss .bss.*) . = ALIGN(4); _ebss = .; } > main_ram .stack : { - . = ALIGN(0x1000); _estack = .; - . += 0x4000; + . += 0x10000; _fstack = . - 4; } > main_ram - - .heap : - { - _fheap = .; - . = ORIGIN(main_ram) + LENGTH(main_ram); - _eheap = .; - } > main_ram - - /DISCARD/ : - { - *(.eh_frame) - *(.gcc_except_table) - } } diff --git a/artiq/frontend/aqctl_corelog.py b/artiq/frontend/aqctl_corelog.py index 7cee0b974..b0b396efd 100755 --- a/artiq/frontend/aqctl_corelog.py +++ b/artiq/frontend/aqctl_corelog.py @@ -6,17 +6,21 @@ import struct import logging import re -from artiq.tools import * -from artiq.protocols.pc_rpc import Server -from artiq.protocols.logging import log_with_name +from sipyco.pc_rpc import Server +from sipyco import common_args +from sipyco.logging_tools import log_with_name + from artiq.coredevice.comm_mgmt import Request, Reply def get_argparser(): parser = argparse.ArgumentParser( description="ARTIQ controller for core device logs") - simple_network_args(parser, 1068) - parser.add_argument("core_addr", + common_args.verbosity_args(parser) + common_args.simple_network_args(parser, 1068) + parser.add_argument("--simulation", action="store_true", + help="Simulation - does not connect to device") + parser.add_argument("core_addr", metavar="CORE_ADDR", help="hostname or IP address of the core device") return parser @@ -26,6 +30,12 @@ class PingTarget: return True +async def get_logs_sim(host): + while True: + await asyncio.sleep(2) + log_with_name("firmware.simulation", logging.INFO, "hello " + host) + + async def get_logs(host): reader, writer = await asyncio.open_connection(host, 1380) writer.write(b"ARTIQ management\n") @@ -56,15 +66,16 @@ async def get_logs(host): def main(): args = get_argparser().parse_args() + common_args.init_logger_from_args(args) loop = asyncio.get_event_loop() try: - get_logs_task = asyncio.ensure_future(get_logs(args.core_addr)) + get_logs_task = asyncio.ensure_future( + get_logs_sim(args.core_addr) if args.simulation else get_logs(args.core_addr)) try: server = Server({"corelog": PingTarget()}, None, True) - loop.run_until_complete(server.start(bind_address_from_args(args), args.port)) + loop.run_until_complete(server.start(common_args.bind_address_from_args(args), args.port)) try: - multiline_log_config(logging.TRACE) loop.run_until_complete(server.wait_terminate()) finally: loop.run_until_complete(server.stop()) diff --git a/artiq/frontend/aqctl_korad_ka3005p.py b/artiq/frontend/aqctl_korad_ka3005p.py deleted file mode 100755 index fb9ba6712..000000000 --- a/artiq/frontend/aqctl_korad_ka3005p.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 - -# Written by Joe Britton, 2016 - -import argparse -import logging -import sys -import asyncio -import os - -from artiq.devices.korad_ka3005p.driver import KoradKA3005P -from artiq.protocols.pc_rpc import simple_server_loop -from artiq.tools import * - - -logger = logging.getLogger(__name__) - - -def get_argparser(): - parser = argparse.ArgumentParser( - description="ARTIQ controller for the Korad KA3005P 
programmable DC power supply") - simple_network_args(parser, 3256) - parser.add_argument( - "-d", "--device", default=None, - help="serial port.") - parser.add_argument( - "--simulation", action="store_true", - help="Put the driver in simulation mode, even if --device is used.") - verbosity_args(parser) - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - if os.name == "nt": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - - if not args.simulation and args.device is None: - print("You need to specify either --simulation or -d/--device " - "argument. Use --help for more information.") - sys.exit(1) - - dev = KoradKA3005P(args.device if not args.simulation else None) - asyncio.get_event_loop().run_until_complete(dev.setup()) - try: - simple_server_loop( - {"korad_ka3005p": dev}, bind_address_from_args(args), args.port) - finally: - dev.close() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/aqctl_lda.py b/artiq/frontend/aqctl_lda.py deleted file mode 100755 index cf3a36aae..000000000 --- a/artiq/frontend/aqctl_lda.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 - -import argparse - -from artiq.devices.lda.driver import Lda, Ldasim -from artiq.protocols.pc_rpc import simple_server_loop -from artiq.tools import * - - -def get_argparser(): - parser = argparse.ArgumentParser( - description="ARTIQ controller for the Lab Brick Digital Attenuator") - parser.add_argument("-P", "--product", default="LDA-102", - help="product type (default: %(default)s)", - choices=["LDA-102", "LDA-602"]) - simple_network_args(parser, 3253) - parser.add_argument("-d", "--device", default=None, - help="USB serial number of the device. " - "The serial number is written on a sticker under " - "the device, you should write for example " - "-d \"SN:03461\". You must prepend enough 0s for " - "it to be 5 digits. 
If omitted, the first " - "available device will be used.") - parser.add_argument("--simulation", action="store_true", - help="Put the driver in simulation mode.") - verbosity_args(parser) - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - if args.simulation: - lda = Ldasim() - else: - lda = Lda(args.device, args.product) - try: - simple_server_loop({"lda": lda}, - bind_address_from_args(args), args.port) - finally: - lda.close() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/aqctl_novatech409b.py b/artiq/frontend/aqctl_novatech409b.py deleted file mode 100755 index 3cc9ce7a4..000000000 --- a/artiq/frontend/aqctl_novatech409b.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python3 - -# Written by Joe Britton, 2015 - -import argparse -import logging -import sys -import os -import asyncio - -from artiq.devices.novatech409b.driver import Novatech409B -from artiq.protocols.pc_rpc import simple_server_loop -from artiq.tools import * - - -logger = logging.getLogger(__name__) - - -def get_argparser(): - parser = argparse.ArgumentParser( - description="ARTIQ controller for the Novatech 409B 4-channel DDS box") - simple_network_args(parser, 3254) - parser.add_argument( - "-d", "--device", default=None, - help="serial port.") - parser.add_argument( - "--simulation", action="store_true", - help="Put the driver in simulation mode, even if --device is used.") - verbosity_args(parser) - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - if os.name == "nt": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - - if not args.simulation and args.device is None: - print("You need to specify either --simulation or -d/--device " - "argument. Use --help for more information.") - sys.exit(1) - - dev = Novatech409B(args.device if not args.simulation else None) - asyncio.get_event_loop().run_until_complete(dev.setup()) - try: - simple_server_loop( - {"novatech409b": dev}, bind_address_from_args(args), args.port) - finally: - dev.close() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/aqctl_thorlabs_tcube.py b/artiq/frontend/aqctl_thorlabs_tcube.py deleted file mode 100755 index bb9e5b716..000000000 --- a/artiq/frontend/aqctl_thorlabs_tcube.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import sys -import os -import asyncio - -from artiq.devices.thorlabs_tcube.driver import Tdc, Tpz, TdcSim, TpzSim -from artiq.protocols.pc_rpc import simple_server_loop -from artiq.tools import * - - -def get_argparser(): - parser = argparse.ArgumentParser() - parser.add_argument("-P", "--product", required=True, - help="type of the Thorlabs T-Cube device to control: " - "tdc001/tpz001") - parser.add_argument("-d", "--device", default=None, - help="serial device. See documentation for how to " - "specify a USB Serial Number.") - parser.add_argument("--simulation", action="store_true", - help="Put the driver in simulation mode, even if " - "--device is used.") - simple_network_args(parser, 3255) - verbosity_args(parser) - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - if os.name == "nt": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - - if not args.simulation and args.device is None: - print("You need to specify either --simulation or -d/--device " - "argument. 
Use --help for more information.") - sys.exit(1) - - product = args.product.lower() - if args.simulation: - if product == "tdc001": - dev = TdcSim() - elif product == "tpz001": - dev = TpzSim() - else: - print("Invalid product string (-P/--product), " - "choose from tdc001 or tpz001") - sys.exit(1) - else: - if product == "tdc001": - dev = Tdc(args.device) - elif product == "tpz001": - dev = Tpz(args.device) - else: - print("Invalid product string (-P/--product), " - "choose from tdc001 or tpz001") - sys.exit(1) - - try: - simple_server_loop({product: dev}, - bind_address_from_args(args), args.port) - finally: - dev.close() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_browser.py b/artiq/frontend/artiq_browser.py index 692558616..fc6d25fe2 100755 --- a/artiq/frontend/artiq_browser.py +++ b/artiq/frontend/artiq_browser.py @@ -5,13 +5,17 @@ import asyncio import atexit import os import logging +import sys from PyQt5 import QtCore, QtGui, QtWidgets -from quamash import QEventLoop +from qasync import QEventLoop +from sipyco.asyncio_tools import atexit_register_coroutine +from sipyco import common_args + +from artiq import __version__ as artiq_version from artiq import __artiq_dir__ as artiq_dir -from artiq.tools import (verbosity_args, atexit_register_coroutine, - get_user_config_dir) +from artiq.tools import get_user_config_dir from artiq.gui import state, applets, models, log from artiq.browser import datasets, files, experiments @@ -20,10 +24,11 @@ logger = logging.getLogger(__name__) def get_argparser(): - default_db_file = os.path.join(get_user_config_dir(), "artiq_browser.pyon") - parser = argparse.ArgumentParser(description="ARTIQ Browser") - parser.add_argument("--db-file", default=default_db_file, + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") + parser.add_argument("--db-file", default=None, help="database file for local browser settings " "(default: %(default)s)") parser.add_argument("--browse-root", default="", @@ -38,7 +43,7 @@ def get_argparser(): help="TCP port to use to connect to the master") parser.add_argument("select", metavar="SELECT", nargs="?", help="directory to browse or file to load") - verbosity_args(parser) + common_args.verbosity_args(parser) return parser @@ -132,6 +137,8 @@ class Browser(QtWidgets.QMainWindow): def main(): # initialize application args = get_argparser().parse_args() + if args.db_file is None: + args.db_file = os.path.join(get_user_config_dir(), "artiq_browser.pyon") widget_log_handler = log.init_log(args, "browser") app = QtWidgets.QApplication(["ARTIQ Browser"]) diff --git a/artiq/frontend/artiq_client.py b/artiq/frontend/artiq_client.py index 7f5c63d7c..7194a4e45 100755 --- a/artiq/frontend/artiq_client.py +++ b/artiq/frontend/artiq_client.py @@ -1,4 +1,10 @@ #!/usr/bin/env python3 +""" +Client to send commands to :mod:`artiq_master` and display results locally. + +The client can perform actions such as accessing/setting datasets, +scanning devices, scheduling experiments, and looking for experiments/devices. 
+""" import argparse import logging @@ -11,11 +17,13 @@ from dateutil.parser import parse as parse_date from prettytable import PrettyTable -from artiq.protocols.pc_rpc import Client -from artiq.protocols.sync_struct import Subscriber -from artiq.protocols.broadcast import Receiver -from artiq.protocols import pyon -from artiq.tools import short_format +from sipyco.pc_rpc import Client +from sipyco.sync_struct import Subscriber +from sipyco.broadcast import Receiver +from sipyco import common_args, pyon + +from artiq.tools import short_format, parse_arguments +from artiq import __version__ as artiq_version def clear_screen(): @@ -33,6 +41,9 @@ def get_argparser(): parser.add_argument( "--port", default=None, type=int, help="TCP port to use to connect to the master") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") subparsers = parser.add_subparsers(dest="action") subparsers.required = True @@ -46,7 +57,8 @@ def get_argparser(): "scheduling, default: %(default)s)") parser_add.add_argument("-t", "--timed", default=None, type=str, help="set a due date for the experiment") - parser_add.add_argument("-f", "--flush", default=False, action="store_true", + parser_add.add_argument("-f", "--flush", default=False, + action="store_true", help="flush the pipeline before preparing " "the experiment") parser_add.add_argument("-R", "--repository", default=False, @@ -57,10 +69,6 @@ def get_argparser(): "(defaults to head, ignored without -R)") parser_add.add_argument("-c", "--class-name", default=None, help="name of the class to run") - parser_add.add_argument("-v", "--verbose", default=0, action="count", - help="increase logging level of the experiment") - parser_add.add_argument("-q", "--quiet", default=0, action="count", - help="decrease logging level of the experiment") parser_add.add_argument("file", metavar="FILE", help="file containing the experiment to run") parser_add.add_argument("arguments", metavar="ARGUMENTS", nargs="*", @@ -80,10 +88,12 @@ def get_argparser(): help="name of the dataset") parser_set_dataset.add_argument("value", metavar="VALUE", help="value in PYON format") - parser_set_dataset.add_argument("-p", "--persist", action="store_true", - help="make the dataset persistent") - parser_set_dataset.add_argument("-n", "--no-persist", action="store_true", - help="make the dataset non-persistent") + + persist_group = parser_set_dataset.add_mutually_exclusive_group() + persist_group.add_argument("-p", "--persist", action="store_true", + help="make the dataset persistent") + persist_group.add_argument("-n", "--no-persist", action="store_true", + help="make the dataset non-persistent") parser_del_dataset = subparsers.add_parser( "del-dataset", help="delete a dataset") @@ -93,7 +103,8 @@ def get_argparser(): "show", help="show schedule, log, devices or datasets") parser_show.add_argument( "what", metavar="WHAT", - help="select object to show: schedule/log/devices/datasets") + choices=["schedule", "log", "ccb", "devices", "datasets"], + help="select object to show: %(choices)s") subparsers.add_parser( "scan-devices", help="trigger a device database (re)scan") @@ -111,23 +122,15 @@ def get_argparser(): "ls", help="list a directory on the master") parser_ls.add_argument("directory", default="", nargs="?") + common_args.verbosity_args(parser) return parser -def _parse_arguments(arguments): - d = {} - for argument in arguments: - name, value = argument.split("=") - d[name] = pyon.decode(value) - return d - - 
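# The local _parse_arguments helper removed above is superseded by the shared
# artiq.tools.parse_arguments imported near the top of this file. Based on the
# deleted code, the expected behaviour is roughly:
#
#     def parse_arguments(arguments):
#         d = {}
#         for argument in arguments:
#             name, value = argument.split("=")
#             d[name] = pyon.decode(value)
#         return d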
def _action_submit(remote, args): try: - arguments = _parse_arguments(args.arguments) - except: - print("Failed to parse run arguments") - sys.exit(1) + arguments = parse_arguments(args.arguments) + except Exception as err: + raise ValueError("Failed to parse run arguments") from err expid = { "log_level": logging.WARNING + args.quiet*10 - args.verbose*10, @@ -154,10 +157,6 @@ def _action_delete(remote, args): def _action_set_dataset(remote, args): - if args.persist and args.no_persist: - print("Options --persist and --no-persist cannot be specified " - "at the same time") - sys.exit(1) persist = None if args.persist: persist = True @@ -175,7 +174,7 @@ def _action_scan_devices(remote, args): def _action_scan_repository(remote, args): - if args.async: + if getattr(args, "async"): remote.scan_repository_async(args.revision) else: remote.scan_repository(args.revision) @@ -190,13 +189,13 @@ def _action_ls(remote, args): def _show_schedule(schedule): clear_screen() if schedule: - l = sorted(schedule.items(), - key=lambda x: (-x[1]["priority"], - x[1]["due_date"] or 0, - x[0])) + sorted_schedule = sorted(schedule.items(), + key=lambda x: (-x[1]["priority"], + x[1]["due_date"] or 0, + x[0])) table = PrettyTable(["RID", "Pipeline", " Status ", "Prio", "Due date", "Revision", "File", "Class name"]) - for rid, v in l: + for rid, v in sorted_schedule: row = [rid, v["pipeline"], v["status"], v["priority"]] if v["due_date"] is None: row.append("") @@ -252,6 +251,7 @@ def _run_subscriber(host, port, subscriber): def _show_dict(args, notifier_name, display_fun): d = dict() + def init_d(x): d.clear() d.update(x) @@ -299,8 +299,7 @@ def main(): elif args.what == "datasets": _show_dict(args, "datasets", _show_datasets) else: - print("Unknown object to show, use -h to list valid names.") - sys.exit(1) + raise ValueError else: port = 3251 if args.port is None else args.port target_name = { @@ -318,5 +317,6 @@ def main(): finally: remote.close_rpc() + if __name__ == "__main__": main() diff --git a/artiq/frontend/artiq_compile.py b/artiq/frontend/artiq_compile.py index 83cf3c079..1609971e0 100755 --- a/artiq/frontend/artiq_compile.py +++ b/artiq/frontend/artiq_compile.py @@ -2,6 +2,9 @@ import os, sys, logging, argparse +from sipyco import common_args + +from artiq import __version__ as artiq_version from artiq.master.databases import DeviceDB, DatasetDB from artiq.master.worker_db import DeviceManager, DatasetManager from artiq.language.environment import ProcessArgumentManager @@ -14,15 +17,18 @@ logger = logging.getLogger(__name__) def get_argparser(): parser = argparse.ArgumentParser(description="ARTIQ static compiler") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") - verbosity_args(parser) + common_args.verbosity_args(parser) parser.add_argument("--device-db", default="device_db.py", help="device database file (default: '%(default)s')") parser.add_argument("--dataset-db", default="dataset_db.pyon", help="dataset file (default: '%(default)s')") - parser.add_argument("-e", "--experiment", default=None, - help="experiment to compile") + parser.add_argument("-c", "--class-name", default=None, + help="name of the class to compile") parser.add_argument("-o", "--output", default=None, help="output file") @@ -36,17 +42,17 @@ def get_argparser(): def main(): args = get_argparser().parse_args() - init_logger(args) + common_args.init_logger_from_args(args) device_mgr = DeviceManager(DeviceDB(args.device_db)) dataset_mgr = 
DatasetManager(DatasetDB(args.dataset_db)) try: module = file_import(args.file, prefix="artiq_run_") - exp = get_experiment(module, args.experiment) + exp = get_experiment(module, args.class_name) arguments = parse_arguments(args.arguments) argument_mgr = ProcessArgumentManager(arguments) - exp_inst = exp((device_mgr, dataset_mgr, argument_mgr)) + exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {})) if not hasattr(exp.run, "artiq_embedded"): raise ValueError("Experiment entry point must be a kernel") diff --git a/artiq/frontend/artiq_coreanalyzer.py b/artiq/frontend/artiq_coreanalyzer.py index 0a5fad21b..96a058b4d 100755 --- a/artiq/frontend/artiq_coreanalyzer.py +++ b/artiq/frontend/artiq_coreanalyzer.py @@ -3,7 +3,8 @@ import argparse import sys -from artiq.tools import verbosity_args, init_logger +from sipyco import common_args + from artiq.master.databases import DeviceDB from artiq.master.worker_db import DeviceManager from artiq.coredevice.comm_analyzer import (get_analyzer_dump, @@ -14,24 +15,30 @@ def get_argparser(): parser = argparse.ArgumentParser(description="ARTIQ core device " "RTIO analysis tool") - verbosity_args(parser) + common_args.verbosity_args(parser) parser.add_argument("--device-db", default="device_db.py", - help="device database file (default: '%(default)s')") + help="device database file (default: '%(default)s')") parser.add_argument("-r", "--read-dump", type=str, default=None, help="read raw dump file instead of accessing device") - parser.add_argument("-p", "--print-decoded", default=False, action="store_true", - help="print raw decoded messages") + parser.add_argument("-p", "--print-decoded", default=False, + action="store_true", help="print raw decoded messages") parser.add_argument("-w", "--write-vcd", type=str, default=None, help="format and write contents to VCD file") parser.add_argument("-d", "--write-dump", type=str, default=None, help="write raw dump file") + + parser.add_argument("-u", "--vcd-uniform-interval", action="store_true", + help="emit uniform time intervals between timed VCD " + "events and show RTIO event interval (in SI " + "seconds) and timestamp (in machine units) as " + "separate VCD channels") return parser def main(): args = get_argparser().parse_args() - init_logger(args) + common_args.init_logger_from_args(args) if (not args.print_decoded and args.write_vcd is None and args.write_dump is None): @@ -54,7 +61,8 @@ def main(): if args.write_vcd: with open(args.write_vcd, "w") as f: decoded_dump_to_vcd(f, device_mgr.get_device_db(), - decoded_dump) + decoded_dump, + uniform_interval=args.vcd_uniform_interval) if args.write_dump: with open(args.write_dump, "wb") as f: f.write(dump) diff --git a/artiq/frontend/artiq_coreboot.py b/artiq/frontend/artiq_coreboot.py deleted file mode 100755 index c0136292e..000000000 --- a/artiq/frontend/artiq_coreboot.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import sys -import struct - -from artiq.tools import verbosity_args, init_logger -from artiq.master.databases import DeviceDB -from artiq.coredevice.comm_mgmt import CommMgmt - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ core device boot tool") - - verbosity_args(parser) - parser.add_argument("--device-db", default="device_db.py", - help="device database file (default: '%(default)s')") - - subparsers = parser.add_subparsers(dest="action") - - p_reboot = subparsers.add_parser("reboot", - help="reboot the currently running firmware") - - p_hotswap = 
subparsers.add_parser("hotswap", - help="load the specified firmware in RAM") - - p_hotswap.add_argument("image", metavar="IMAGE", type=argparse.FileType("rb"), - help="runtime image to be executed") - - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - core_addr = DeviceDB(args.device_db).get("core")["arguments"]["host"] - mgmt = CommMgmt(core_addr) - try: - if args.action == "reboot": - mgmt.reboot() - elif args.action == "hotswap": - mgmt.hotswap(args.image.read()) - else: - print("An action needs to be specified.", file=sys.stderr) - sys.exit(1) - finally: - mgmt.close() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_coreconfig.py b/artiq/frontend/artiq_coreconfig.py deleted file mode 100755 index 6f1ff2e42..000000000 --- a/artiq/frontend/artiq_coreconfig.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import struct - -from artiq.tools import verbosity_args, init_logger -from artiq.master.databases import DeviceDB -from artiq.master.worker_db import DeviceManager - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ core device " - "configuration tool") - - verbosity_args(parser) - parser.add_argument("--device-db", default="device_db.py", - help="device database file (default: '%(default)s')") - - subparsers = parser.add_subparsers(dest="action") - subparsers.required = True - - p_read = subparsers.add_parser("read", - help="read key from core device config") - p_read.add_argument("key", metavar="KEY", type=str, - help="key to be read from core device config") - - p_write = subparsers.add_parser("write", - help="write key-value records to core " - "device config") - p_write.add_argument("-s", "--string", nargs=2, action="append", - default=[], metavar=("KEY", "STRING"), type=str, - help="key-value records to be written to core device " - "config") - p_write.add_argument("-f", "--file", nargs=2, action="append", - type=str, default=[], - metavar=("KEY", "FILENAME"), - help="key and file whose content to be written to " - "core device config") - - p_delete = subparsers.add_parser("delete", - help="delete key from core device config") - p_delete.add_argument("key", metavar="KEY", nargs=argparse.REMAINDER, - default=[], type=str, - help="key to be deleted from core device config") - - subparsers.add_parser("erase", help="fully erase core device config") - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - device_mgr = DeviceManager(DeviceDB(args.device_db)) - try: - comm = device_mgr.get("core").comm - comm.check_system_info() - - if args.action == "read": - value = comm.flash_storage_read(args.key) - if not value: - print("Key {} does not exist".format(args.key)) - else: - print(value) - elif args.action == "write": - for key, value in args.string: - comm.flash_storage_write(key, value.encode("utf-8")) - for key, filename in args.file: - with open(filename, "rb") as fi: - comm.flash_storage_write(key, fi.read()) - elif args.action == "delete": - for key in args.key: - comm.flash_storage_remove(key) - elif args.action == "erase": - comm.flash_storage_erase() - finally: - device_mgr.close_devices() - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_corelog.py b/artiq/frontend/artiq_corelog.py deleted file mode 100755 index 0f7cd8759..000000000 --- a/artiq/frontend/artiq_corelog.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 - -import argparse - -from artiq.tools import verbosity_args, 
init_logger -from artiq.master.databases import DeviceDB -from artiq.coredevice.comm_mgmt import CommMgmt - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ core device " - "log tool") - verbosity_args(parser) - parser.add_argument("--device-db", default="device_db.py", - help="device database file (default: '%(default)s')") - - subparsers = parser.add_subparsers(dest="action") - - p_clear = subparsers.add_parser("clear", - help="clear log buffer") - - p_set_level = subparsers.add_parser("set_level", - help="set minimum level for messages to be logged") - p_set_level.add_argument("level", metavar="LEVEL", type=str, - help="log level (one of: OFF ERROR WARN INFO DEBUG TRACE)") - - p_set_uart_level = subparsers.add_parser("set_uart_level", - help="set minimum level for messages to be logged " - "to UART") - p_set_uart_level.add_argument("level", metavar="LEVEL", type=str, - help="log level (one of: OFF ERROR WARN INFO DEBUG TRACE)") - - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - core_addr = DeviceDB(args.device_db).get("core")["arguments"]["host"] - mgmt = CommMgmt(core_addr) - try: - if args.action == "set_level": - mgmt.set_log_level(args.level) - elif args.action == "set_uart_level": - mgmt.set_uart_log_level(args.level) - elif args.action == "clear": - mgmt.clear_log() - else: - print(mgmt.get_log(), end="") - finally: - mgmt.close() - - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_coremgmt.py b/artiq/frontend/artiq_coremgmt.py new file mode 100755 index 000000000..542e12925 --- /dev/null +++ b/artiq/frontend/artiq_coremgmt.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 + +import argparse +import struct + +from sipyco import common_args + +from artiq import __version__ as artiq_version +from artiq.master.databases import DeviceDB +from artiq.coredevice.comm_kernel import CommKernel +from artiq.coredevice.comm_mgmt import CommMgmt +from artiq.coredevice.profiler import CallgrindWriter + + +def get_argparser(): + parser = argparse.ArgumentParser(description="ARTIQ core device " + "management tool") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") + + common_args.verbosity_args(parser) + parser.add_argument("--device-db", default="device_db.py", + help="device database file (default: '%(default)s')") + parser.add_argument("-D", "--device", default=None, + help="use specified core device address instead of " + "reading device database") + + tools = parser.add_subparsers(dest="tool") + tools.required = True + + # logging + t_log = tools.add_parser("log", + help="read logs and change log levels") + + subparsers = t_log.add_subparsers(dest="action") + + p_clear = subparsers.add_parser("clear", + help="clear log buffer") + + p_set_level = subparsers.add_parser("set_level", + help="set minimum level for messages to be logged") + p_set_level.add_argument("level", metavar="LEVEL", type=str, + help="log level (one of: OFF ERROR WARN INFO DEBUG TRACE)") + + p_set_uart_level = subparsers.add_parser("set_uart_level", + help="set minimum level for messages to be logged " + "to UART") + p_set_uart_level.add_argument("level", metavar="LEVEL", type=str, + help="log level (one of: OFF ERROR WARN INFO DEBUG TRACE)") + + # configuration + t_config = tools.add_parser("config", + help="read and change core device configuration") + + subparsers = t_config.add_subparsers(dest="action") + subparsers.required = True + + 
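    # The "config" subcommands defined below mirror the standalone artiq_coreconfig
    # tool deleted earlier in this diff: read a key, write string or file values,
    # remove individual keys, or erase the entire core device config.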
p_read = subparsers.add_parser("read", + help="read key from core device config") + p_read.add_argument("key", metavar="KEY", type=str, + help="key to be read from core device config") + + p_write = subparsers.add_parser("write", + help="write key-value records to core " + "device config") + p_write.add_argument("-s", "--string", nargs=2, action="append", + default=[], metavar=("KEY", "STRING"), type=str, + help="key-value records to be written to core device " + "config") + p_write.add_argument("-f", "--file", nargs=2, action="append", + type=str, default=[], + metavar=("KEY", "FILENAME"), + help="key and file whose content to be written to " + "core device config") + + p_remove = subparsers.add_parser("remove", + help="remove key from core device config") + p_remove.add_argument("key", metavar="KEY", nargs=argparse.REMAINDER, + default=[], type=str, + help="key to be removed from core device config") + + subparsers.add_parser("erase", help="fully erase core device config") + + # booting + t_boot = tools.add_parser("reboot", + help="reboot the currently running firmware") + + t_hotswap = tools.add_parser("hotswap", + help="load the specified firmware in RAM") + + t_hotswap.add_argument("image", metavar="IMAGE", type=argparse.FileType("rb"), + help="runtime image to be executed") + + # profiling + t_profile = tools.add_parser("profile", + help="account for communications CPU time") + + subparsers = t_profile.add_subparsers(dest="action") + subparsers.required = True + + p_start = subparsers.add_parser("start", + help="start profiling") + p_start.add_argument("--interval", metavar="MICROS", type=int, default=2000, + help="sampling interval, in microseconds") + p_start.add_argument("--hits-size", metavar="ENTRIES", type=int, default=8192, + help="hit buffer size") + p_start.add_argument("--edges-size", metavar="ENTRIES", type=int, default=8192, + help="edge buffer size") + + p_stop = subparsers.add_parser("stop", + help="stop profiling") + + p_save = subparsers.add_parser("save", + help="save profile") + p_save.add_argument("output", metavar="OUTPUT", type=argparse.FileType("w"), + help="file to save profile to, in Callgrind format") + p_save.add_argument("firmware", metavar="FIRMWARE", type=str, + help="path to firmware ELF file") + p_save.add_argument("--no-compression", + dest="compression", default=True, action="store_false", + help="disable profile compression") + p_save.add_argument("--no-demangle", + dest="demangle", default=True, action="store_false", + help="disable symbol demangling") + + # misc debug + t_debug = tools.add_parser("debug", + help="specialized debug functions") + + subparsers = t_debug.add_subparsers(dest="action") + subparsers.required = True + + p_allocator = subparsers.add_parser("allocator", + help="show heap layout") + + return parser + + +def main(): + args = get_argparser().parse_args() + common_args.init_logger_from_args(args) + + if args.device is None: + ddb = DeviceDB(args.device_db) + core_addr = ddb.get("core", resolve_alias=True)["arguments"]["host"] + else: + core_addr = args.device + mgmt = CommMgmt(core_addr) + + if args.tool == "log": + if args.action == "set_level": + mgmt.set_log_level(args.level) + if args.action == "set_uart_level": + mgmt.set_uart_log_level(args.level) + if args.action == "clear": + mgmt.clear_log() + if args.action == None: + print(mgmt.get_log(), end="") + + if args.tool == "config": + if args.action == "read": + value = mgmt.config_read(args.key) + if not value: + print("Key {} does not exist".format(args.key)) + else: + 
print(value) + if args.action == "write": + for key, value in args.string: + mgmt.config_write(key, value.encode("utf-8")) + for key, filename in args.file: + with open(filename, "rb") as fi: + mgmt.config_write(key, fi.read()) + if args.action == "remove": + for key in args.key: + mgmt.config_remove(key) + if args.action == "erase": + mgmt.config_erase() + + if args.tool == "reboot": + mgmt.reboot() + + if args.tool == "hotswap": + mgmt.hotswap(args.image.read()) + + if args.tool == "profile": + if args.action == "start": + mgmt.start_profiler(args.interval, args.hits_size, args.edges_size) + elif args.action == "stop": + mgmt.stop_profiler() + elif args.action == "save": + hits, edges = mgmt.get_profile() + writer = CallgrindWriter(args.output, args.firmware, "or1k-linux", + args.compression, args.demangle) + writer.header() + for addr, count in hits.items(): + writer.hit(addr, count) + for (caller, callee), count in edges.items(): + writer.edge(caller, callee, count) + + if args.tool == "debug": + if args.action == "allocator": + mgmt.debug_allocator() + + +if __name__ == "__main__": + main() diff --git a/artiq/frontend/artiq_ctlmgr.py b/artiq/frontend/artiq_ctlmgr.py deleted file mode 100755 index 6aeaee0f6..000000000 --- a/artiq/frontend/artiq_ctlmgr.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 - -import asyncio -import atexit -import argparse -import os -import logging -import platform - -from artiq.protocols.pc_rpc import Server -from artiq.protocols.logging import LogForwarder, SourceFilter -from artiq.tools import (simple_network_args, atexit_register_coroutine, - bind_address_from_args) -from artiq.devices.ctlmgr import ControllerManager - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ controller manager") - - group = parser.add_argument_group("verbosity") - group.add_argument("-v", "--verbose", default=0, action="count", - help="increase logging level of the manager process") - group.add_argument("-q", "--quiet", default=0, action="count", - help="decrease logging level of the manager process") - - parser.add_argument( - "-s", "--server", default="::1", - help="hostname or IP of the master to connect to") - parser.add_argument( - "--port-notify", default=3250, type=int, - help="TCP port to connect to for notifications") - parser.add_argument( - "--port-logging", default=1066, type=int, - help="TCP port to connect to for logging") - parser.add_argument( - "--retry-master", default=5.0, type=float, - help="retry timer for reconnecting to master") - simple_network_args(parser, [("control", "control", 3249)]) - return parser - - -def main(): - args = get_argparser().parse_args() - - root_logger = logging.getLogger() - root_logger.setLevel(logging.NOTSET) - source_adder = SourceFilter(logging.WARNING + - args.quiet*10 - args.verbose*10, - "ctlmgr({})".format(platform.node())) - console_handler = logging.StreamHandler() - console_handler.setFormatter(logging.Formatter( - "%(levelname)s:%(source)s:%(name)s:%(message)s")) - console_handler.addFilter(source_adder) - root_logger.addHandler(console_handler) - - if os.name == "nt": - loop = asyncio.ProactorEventLoop() - asyncio.set_event_loop(loop) - else: - loop = asyncio.get_event_loop() - atexit.register(loop.close) - - logfwd = LogForwarder(args.server, args.port_logging, - args.retry_master) - logfwd.addFilter(source_adder) - root_logger.addHandler(logfwd) - logfwd.start() - atexit_register_coroutine(logfwd.stop) - - ctlmgr = ControllerManager(args.server, args.port_notify, - 
args.retry_master) - ctlmgr.start() - atexit_register_coroutine(ctlmgr.stop) - - class CtlMgrRPC: - retry_now = ctlmgr.retry_now - - rpc_target = CtlMgrRPC() - rpc_server = Server({"ctlmgr": rpc_target}, builtin_terminate=True) - loop.run_until_complete(rpc_server.start(bind_address_from_args(args), - args.port_control)) - atexit_register_coroutine(rpc_server.stop) - - loop.run_until_complete(rpc_server.wait_terminate()) - - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_dashboard.py b/artiq/frontend/artiq_dashboard.py index 3ee539713..bd9127f9d 100755 --- a/artiq/frontend/artiq_dashboard.py +++ b/artiq/frontend/artiq_dashboard.py @@ -5,15 +5,18 @@ import asyncio import atexit import os import logging +import sys from PyQt5 import QtCore, QtGui, QtWidgets -from quamash import QEventLoop +from qasync import QEventLoop + +from sipyco.pc_rpc import AsyncioClient, Client +from sipyco.broadcast import Receiver +from sipyco import common_args +from sipyco.asyncio_tools import atexit_register_coroutine from artiq import __artiq_dir__ as artiq_dir, __version__ as artiq_version -from artiq.tools import (atexit_register_coroutine, verbosity_args, - get_user_config_dir) -from artiq.protocols.pc_rpc import AsyncioClient, Client -from artiq.protocols.broadcast import Receiver +from artiq.tools import get_user_config_dir from artiq.gui.models import ModelSubscriber from artiq.gui import state, log from artiq.dashboard import (experiments, shortcuts, explorer, @@ -22,6 +25,9 @@ from artiq.dashboard import (experiments, shortcuts, explorer, def get_argparser(): parser = argparse.ArgumentParser(description="ARTIQ Dashboard") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") parser.add_argument( "-s", "--server", default="::1", help="hostname or IP of the master to connect to") @@ -36,10 +42,8 @@ def get_argparser(): help="TCP port to connect to for broadcasts") parser.add_argument( "--db-file", default=None, - help="database file for local GUI settings, " - "by default in {} and dependant on master hostname".format( - get_user_config_dir())) - verbosity_args(parser) + help="database file for local GUI settings") + common_args.verbosity_args(parser) return parser @@ -213,6 +217,10 @@ def main(): smgr.start() atexit_register_coroutine(smgr.stop) + # work around for https://github.com/m-labs/artiq/issues/1307 + d_ttl_dds.ttl_dock.show() + d_ttl_dds.dds_dock.show() + # create first log dock if not already in state d_log0 = logmgr.first_log_dock() if d_log0 is not None: diff --git a/artiq/frontend/artiq_ddb_template.py b/artiq/frontend/artiq_ddb_template.py new file mode 100755 index 000000000..15d584fb1 --- /dev/null +++ b/artiq/frontend/artiq_ddb_template.py @@ -0,0 +1,592 @@ +#!/usr/bin/env python3 + +import argparse +import sys +import json +import textwrap +from collections import defaultdict +from itertools import count + +from artiq import __version__ as artiq_version + + +def process_header(output, description): + if description["target"] != "kasli": + raise NotImplementedError + + print(textwrap.dedent(""" + # Autogenerated for the {variant} variant + core_addr = "{core_addr}" + + device_db = {{ + "core": {{ + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {{"host": core_addr, "ref_period": {ref_period}}} + }}, + "core_log": {{ + "type": "controller", + "host": "::1", + "port": 1068, + "command": "aqctl_corelog -p {{port}} --bind {{bind}} " + 
core_addr + }}, + "core_cache": {{ + "type": "local", + "module": "artiq.coredevice.cache", + "class": "CoreCache" + }}, + "core_dma": {{ + "type": "local", + "module": "artiq.coredevice.dma", + "class": "CoreDMA" + }}, + + "i2c_switch0": {{ + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {{"address": 0xe0}} + }}, + "i2c_switch1": {{ + "type": "local", + "module": "artiq.coredevice.i2c", + "class": "PCA9548", + "arguments": {{"address": 0xe2}} + }}, + }} + """).format( + variant=description["variant"], + core_addr=description.get("core_addr", "192.168.1.70"), + ref_period=1/(8*description.get("rtio_frequency", 125e6))), + file=output) + + +class PeripheralManager: + def __init__(self, output, master_description): + self.counts = defaultdict(int) + self.output = output + self.master_description = master_description + + def get_name(self, ty): + count = self.counts[ty] + self.counts[ty] = count + 1 + return "{}{}".format(ty, count) + + def gen(self, string, **kwargs): + print(textwrap.dedent(string).format(**kwargs), file=self.output) + + def process_dio(self, rtio_offset, peripheral): + class_names = { + "input": "TTLInOut", + "output": "TTLOut" + } + classes = [ + class_names[peripheral["bank_direction_low"]], + class_names[peripheral["bank_direction_high"]] + ] + channel = count(0) + for i in range(8): + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "{class_name}", + "arguments": {{"channel": 0x{channel:06x}}}, + }} + """, + name=self.get_name("ttl"), + class_name=classes[i//4], + channel=rtio_offset+next(channel)) + if peripheral.get("edge_counter", False): + for i in range(8): + class_name = classes[i//4] + if class_name == "TTLInOut": + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.edge_counter", + "class": "EdgeCounter", + "arguments": {{"channel": 0x{channel:06x}}}, + }} + """, + name=self.get_name("ttl_counter"), + channel=rtio_offset+next(channel)) + return next(channel) + + def process_urukul(self, rtio_offset, peripheral): + urukul_name = self.get_name("urukul") + synchronization = peripheral.get("synchronization", False) + channel = count(0) + self.gen(""" + device_db["eeprom_{name}"]={{ + "type": "local", + "module": "artiq.coredevice.kasli_i2c", + "class": "KasliEEPROM", + "arguments": {{"port": "EEM{eem}"}} + }} + + device_db["spi_{name}"]={{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=urukul_name, + eem=peripheral["ports"][0], + channel=rtio_offset+next(channel)) + if synchronization: + self.gen(""" + device_db["ttl_{name}_sync"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLClockGen", + "arguments": {{"channel": 0x{channel:06x}, "acc_width": 4}} + }}""", + name=urukul_name, + channel=rtio_offset+next(channel)) + self.gen(""" + device_db["ttl_{name}_io_update"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=urukul_name, + channel=rtio_offset+next(channel)) + if len(peripheral["ports"]) > 1: + for i in range(4): + self.gen(""" + device_db["ttl_{name}_sw{uchn}"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=urukul_name, + uchn=i, + channel=rtio_offset+next(channel)) + self.gen(""" + device_db["{name}_cpld"] = {{ + 
"type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": {{ + "spi_device": "spi_{name}", + "sync_device": {sync_device}, + "io_update_device": "ttl_{name}_io_update", + "refclk": {refclk}, + "clk_sel": {clk_sel} + }} + }}""", + name=urukul_name, + sync_device="\"ttl_{name}_sync\"".format(name=urukul_name) if synchronization else "None", + refclk=peripheral.get("refclk", self.master_description.get("rtio_frequency", 125e6)), + clk_sel=peripheral["clk_sel"]) + dds = peripheral.get("dds", "ad9910") + pll_vco = peripheral.get("pll_vco", None) + for i in range(4): + if dds == "ad9910": + self.gen(""" + device_db["{name}_ch{uchn}"] = {{ + "type": "local", + "module": "artiq.coredevice.ad9910", + "class": "AD9910", + "arguments": {{ + "pll_n": {pll_n}, + "chip_select": {chip_select}, + "cpld_device": "{name}_cpld"{sw}{pll_vco}{sync_delay_seed}{io_update_delay} + }} + }}""", + name=urukul_name, + chip_select=4 + i, + uchn=i, + sw=",\n \"sw_device\": \"ttl_{name}_sw{uchn}\"".format(name=urukul_name, uchn=i) if len(peripheral["ports"]) > 1 else "", + pll_vco=",\n \"pll_vco\": {}".format(pll_vco) if pll_vco is not None else "", + pll_n=peripheral.get("pll_n", 32), + sync_delay_seed=",\n \"sync_delay_seed\": \"eeprom_{}:{}\"".format(urukul_name, 64 + 4*i) if synchronization else "", + io_update_delay=",\n \"io_update_delay\": \"eeprom_{}:{}\"".format(urukul_name, 64 + 4*i) if synchronization else "") + elif dds == "ad9912": + self.gen(""" + device_db["{name}_ch{uchn}"] = {{ + "type": "local", + "module": "artiq.coredevice.ad9912", + "class": "AD9912", + "arguments": {{ + "pll_n": {pll_n}, + "chip_select": {chip_select}, + "cpld_device": "{name}_cpld"{sw}{pll_vco} + }} + }}""", + name=urukul_name, + chip_select=4 + i, + uchn=i, + sw=",\n \"sw_device\": \"ttl_{name}_sw{uchn}\"".format(name=urukul_name, uchn=i) if len(peripheral["ports"]) > 1 else "", + pll_vco=",\n \"pll_vco\": {}".format(pll_vco) if pll_vco is not None else "", + pll_n=peripheral.get("pll_n", 8)) + else: + raise ValueError + return next(channel) + + def process_mirny(self, rtio_offset, peripheral): + mirny_name = self.get_name("mirny") + channel = count(0) + self.gen(""" + device_db["spi_{name}"]={{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=mirny_name, + channel=rtio_offset+next(channel)) + + for i in range(4): + self.gen(""" + device_db["ttl_{name}_sw{mchn}"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{ttl_channel:06x}}} + }}""", + name=mirny_name, + mchn=i, + ttl_channel=rtio_offset+next(channel)) + + for i in range(4): + self.gen(""" + device_db["{name}_ch{mchn}"] = {{ + "type": "local", + "module": "artiq.coredevice.adf5356", + "class": "ADF5356", + "arguments": {{ + "channel": {mchn}, + "sw_device": "ttl_{name}_sw{mchn}", + "cpld_device": "{name}_cpld", + }} + }}""", + name=mirny_name, + mchn=i) + + self.gen(""" + device_db["{name}_cpld"] = {{ + "type": "local", + "module": "artiq.coredevice.mirny", + "class": "Mirny", + "arguments": {{ + "spi_device": "spi_{name}", + "refclk": {refclk}, + "clk_sel": {clk_sel} + }}, + }}""", + name=mirny_name, + refclk=peripheral.get("refclk", 100e6), + clk_sel=peripheral.get("clk_sel", 0)) + + return next(channel) + + def process_novogorny(self, rtio_offset, peripheral): + self.gen(""" + device_db["spi_{name}_adc"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", 
+ "arguments": {{"channel": 0x{adc_channel:06x}}} + }} + device_db["ttl_{name}_cnv"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{cnv_channel:06x}}}, + }} + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.novogorny", + "class": "Novogorny", + "arguments": {{ + "spi_adc_device": "spi_{name}_adc", + "cnv_device": "ttl_{name}_cnv" + }} + }}""", + name=self.get_name("novogorny"), + adc_channel=rtio_offset, + cnv_channel=rtio_offset + 1) + return 2 + + def process_sampler(self, rtio_offset, peripheral): + self.gen(""" + device_db["spi_{name}_adc"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{adc_channel:06x}}} + }} + device_db["spi_{name}_pgia"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{pgia_channel:06x}}} + }} + device_db["ttl_{name}_cnv"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{cnv_channel:06x}}}, + }} + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.sampler", + "class": "Sampler", + "arguments": {{ + "spi_adc_device": "spi_{name}_adc", + "spi_pgia_device": "spi_{name}_pgia", + "cnv_device": "ttl_{name}_cnv" + }} + }}""", + name=self.get_name("sampler"), + adc_channel=rtio_offset, + pgia_channel=rtio_offset + 1, + cnv_channel=rtio_offset + 2) + return 3 + + def process_suservo(self, rtio_offset, peripheral): + suservo_name = self.get_name("suservo") + sampler_name = self.get_name("sampler") + urukul0_name = self.get_name("urukul") + urukul1_name = self.get_name("urukul") + channel = count(0) + for i in range(8): + self.gen(""" + device_db["{suservo_name}_ch{suservo_chn}"] = {{ + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "Channel", + "arguments": {{"channel": 0x{suservo_channel:06x}, "servo_device": "{suservo_name}"}} + }}""", + suservo_name=suservo_name, + suservo_chn=i, + suservo_channel=rtio_offset+next(channel)) + self.gen(""" + device_db["{suservo_name}"] = {{ + "type": "local", + "module": "artiq.coredevice.suservo", + "class": "SUServo", + "arguments": {{ + "channel": 0x{suservo_channel:06x}, + "pgia_device": "spi_{sampler_name}_pgia", + "cpld0_device": "{urukul0_name}_cpld", + "cpld1_device": "{urukul1_name}_cpld", + "dds0_device": "{urukul0_name}_dds", + "dds1_device": "{urukul1_name}_dds" + }} + }}""", + suservo_name=suservo_name, + sampler_name=sampler_name, + urukul0_name=urukul0_name, + urukul1_name=urukul1_name, + suservo_channel=rtio_offset+next(channel)) + self.gen(""" + device_db["spi_{sampler_name}_pgia"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{sampler_channel:06x}}} + }}""", + sampler_name=sampler_name, + sampler_channel=rtio_offset+next(channel)) + pll_vco = peripheral.get("pll_vco", None) + for urukul_name in (urukul0_name, urukul1_name): + self.gen(""" + device_db["spi_{urukul_name}"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{urukul_channel:06x}}} + }} + device_db["{urukul_name}_cpld"] = {{ + "type": "local", + "module": "artiq.coredevice.urukul", + "class": "CPLD", + "arguments": {{ + "spi_device": "spi_{urukul_name}", + "refclk": {refclk}, + "clk_sel": {clk_sel} + }} + }} + device_db["{urukul_name}_dds"] = {{ + "type": "local", + "module": "artiq.coredevice.ad9910", 
+ "class": "AD9910", + "arguments": {{ + "pll_n": {pll_n}, + "chip_select": 3, + "cpld_device": "{urukul_name}_cpld"{pll_vco} + }} + }}""", + urukul_name=urukul_name, + urukul_channel=rtio_offset+next(channel), + refclk=peripheral.get("refclk", self.master_description.get("rtio_frequency", 125e6)), + clk_sel=peripheral["clk_sel"], + pll_vco=",\n \"pll_vco\": {}".format(pll_vco) if pll_vco is not None else "", + pll_n=peripheral.get("pll_n", 32)) + return next(channel) + + def process_zotino(self, rtio_offset, peripheral): + self.gen(""" + device_db["spi_{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.spi2", + "class": "SPIMaster", + "arguments": {{"channel": 0x{spi_channel:06x}}} + }} + device_db["ttl_{name}_ldac"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{ldac_channel:06x}}} + }} + device_db["ttl_{name}_clr"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{clr_channel:06x}}} + }} + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.zotino", + "class": "Zotino", + "arguments": {{ + "spi_device": "spi_{name}", + "ldac_device": "ttl_{name}_ldac", + "clr_device": "ttl_{name}_clr" + }} + }}""", + name=self.get_name("zotino"), + spi_channel=rtio_offset, + ldac_channel=rtio_offset + 1, + clr_channel=rtio_offset + 2) + return 3 + + def process_grabber(self, rtio_offset, peripheral): + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.grabber", + "class": "Grabber", + "arguments": {{"channel_base": 0x{channel:06x}}} + }}""", + name=self.get_name("grabber"), + channel=rtio_offset) + return 2 + + def process_fastino(self, rtio_offset, peripheral): + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.fastino", + "class": "Fastino", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=self.get_name("fastino"), + channel=rtio_offset) + return 1 + + def process_phaser(self, rtio_offset, peripheral): + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.phaser", + "class": "Phaser", + "arguments": {{ + "channel_base": 0x{channel:06x}, + "miso_delay": 1, + }} + }}""", + name=self.get_name("phaser"), + channel=rtio_offset) + return 5 + + def process(self, rtio_offset, peripheral): + processor = getattr(self, "process_"+str(peripheral["type"])) + return processor(rtio_offset, peripheral) + + def add_sfp_leds(self, rtio_offset): + for i in range(2): + self.gen(""" + device_db["{name}"] = {{ + "type": "local", + "module": "artiq.coredevice.ttl", + "class": "TTLOut", + "arguments": {{"channel": 0x{channel:06x}}} + }}""", + name=self.get_name("led"), + channel=rtio_offset+i) + return 2 + + +def process(output, master_description, satellites): + base = master_description["base"] + if base not in ("standalone", "master"): + raise ValueError("Invalid master base") + + if base == "standalone" and satellites: + raise ValueError("A standalone system cannot have satellites") + + process_header(output, master_description) + + pm = PeripheralManager(output, master_description) + + print("# {} peripherals".format(base), file=output) + rtio_offset = 0 + for peripheral in master_description["peripherals"]: + n_channels = pm.process(rtio_offset, peripheral) + rtio_offset += n_channels + if base == "standalone" and master_description["hw_rev"] in ("v1.0", "v1.1"): + n_channels = pm.add_sfp_leds(rtio_offset) + rtio_offset += n_channels 
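    # DRTIO satellites are handled next: each destination's peripherals are numbered
    # in their own RTIO channel window, which starts at destination << 16.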
+ + for destination, description in satellites: + if description["base"] != "satellite": + raise ValueError("Invalid base for satellite at destination {}".format(destination)) + + print("# DEST#{} peripherals".format(destination), file=output) + rtio_offset = destination << 16 + for peripheral in description["peripherals"]: + n_channels = pm.process(rtio_offset, peripheral) + rtio_offset += n_channels + + +def main(): + parser = argparse.ArgumentParser( + description="ARTIQ device database template builder") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") + parser.add_argument("master_description", metavar="MASTER_DESCRIPTION", + help="JSON system description file for the standalone or master node") + parser.add_argument("-o", "--output", + help="output file, defaults to standard output if omitted") + parser.add_argument("-s", "--satellite", nargs=2, action="append", + default=[], metavar=("DESTINATION", "DESCRIPTION"), type=str, + help="add DRTIO satellite at the given destination number with " + "devices from the given JSON description") + + args = parser.parse_args() + + with open(args.master_description, "r") as f: + master_description = json.load(f) + + satellites = [] + for destination, description in args.satellite: + with open(description, "r") as f: + satellites.append((int(destination, 0), json.load(f))) + + if args.output is not None: + with open(args.output, "w") as f: + process(f, master_description, satellites) + else: + process(sys.stdout, master_description, satellites) + + +if __name__ == "__main__": + main() diff --git a/artiq/frontend/artiq_devtool.py b/artiq/frontend/artiq_devtool.py deleted file mode 100755 index e84b988f3..000000000 --- a/artiq/frontend/artiq_devtool.py +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env python3 - -# This script makes the following assumptions: -# * miniconda is installed remotely at ~/miniconda -# * misoc and artiq are installed remotely via conda - -import sys -import argparse -import logging -import subprocess -import socket -import select -import threading -import os -import shutil - -from artiq.tools import verbosity_args, init_logger, logger, SSHClient - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ core device development tool") - - verbosity_args(parser) - - parser.add_argument("-H", "--host", metavar="HOSTNAME", - type=str, default="lab.m-labs.hk", - help="SSH host where the development board is located") - parser.add_argument("-D", "--device", metavar="HOSTNAME", - type=str, default="kc705.lab.m-labs.hk", - help="address or domain corresponding to the development board") - parser.add_argument("-s", "--serial", metavar="PATH", - type=str, default="/dev/ttyUSB_kc705", - help="TTY device corresponding to the development board") - parser.add_argument("-l", "--lockfile", metavar="PATH", - type=str, default="/run/boards/kc705", - help="The lockfile to be acquired for the duration of the actions") - parser.add_argument("-w", "--wait", action="store_true", - help="Wait for the board to unlock instead of aborting the actions") - parser.add_argument("-t", "--target", metavar="TARGET", - type=str, default="kc705_dds", - help="Target to build, one of: " - "kc705_dds kc705_drtio_master kc705_drtio_satellite") - - parser.add_argument("actions", metavar="ACTION", - type=str, default=[], nargs="+", - help="actions to perform, sequence of: " - "build reset boot boot+log connect hotswap clean") - - return parser - - -def 
main(): - args = get_argparser().parse_args() - init_logger(args) - if args.verbose == args.quiet == 0: - logging.getLogger().setLevel(logging.INFO) - - if args.target == "kc705_dds" or args.target == "kc705_drtio_master": - firmware = "runtime" - elif args.target == "kc705_drtio_satellite": - firmware = "satman" - else: - raise NotImplementedError("unknown target {}".format(args.target)) - - client = SSHClient(args.host) - substs = { - "env": "bash -c 'export PATH=$HOME/miniconda/bin:$PATH; exec $0 $*' ", - "serial": args.serial, - "firmware": firmware, - } - - flock_acquired = False - flock_file = None # GC root - def lock(): - nonlocal flock_acquired - nonlocal flock_file - - if not flock_acquired: - logger.info("Acquiring device lock") - flock = client.spawn_command("flock --verbose {block} {lockfile} sleep 86400" - .format(block="" if args.wait else "--nonblock", - lockfile=args.lockfile), - get_pty=True) - flock_file = flock.makefile('r') - while not flock_acquired: - line = flock_file.readline() - if not line: - break - logger.debug(line.rstrip()) - if line.startswith("flock: executing"): - flock_acquired = True - elif line.startswith("flock: failed"): - logger.error("Failed to get lock") - sys.exit(1) - - for action in args.actions: - if action == "build": - logger.info("Building firmware") - try: - subprocess.check_call(["python3", - "-m", "artiq.gateware.targets." + args.target, - "--no-compile-gateware", - "--output-dir", - "/tmp/{target}".format(target=args.target)]) - except subprocess.CalledProcessError: - logger.error("Build failed") - sys.exit(1) - - elif action == "clean": - logger.info("Cleaning build directory") - target_dir = "/tmp/{target}".format(target=args.target) - if os.path.isdir(target_dir): - shutil.rmtree(target_dir) - - elif action == "reset": - lock() - - logger.info("Resetting device") - client.run_command( - "{env} artiq_flash start", - **substs) - - elif action == "boot" or action == "boot+log": - lock() - - logger.info("Uploading firmware") - client.get_sftp().put("/tmp/{target}/software/{firmware}/{firmware}.bin" - .format(target=args.target, firmware=firmware), - "{tmp}/{firmware}.bin" - .format(tmp=client.tmp, firmware=firmware)) - - logger.info("Booting firmware") - flterm = client.spawn_command( - "{env} python3 flterm.py {serial} " + - "--kernel {tmp}/{firmware}.bin " + - ("--upload-only" if action == "boot" else "--output-only"), - **substs) - artiq_flash = client.spawn_command( - "{env} artiq_flash start", - **substs) - client.drain(flterm) - - elif action == "connect": - lock() - - transport = client.get_transport() - - def forwarder(local_stream, remote_stream): - try: - while True: - r, _, _ = select.select([local_stream, remote_stream], [], []) - if local_stream in r: - data = local_stream.recv(65535) - if data == b"": - break - remote_stream.sendall(data) - if remote_stream in r: - data = remote_stream.recv(65535) - if data == b"": - break - local_stream.sendall(data) - except Exception as err: - logger.error("Cannot forward on port %s: %s", port, repr(err)) - local_stream.close() - remote_stream.close() - - def listener(port): - listener = socket.socket() - listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - listener.bind(('localhost', port)) - listener.listen(8) - while True: - local_stream, peer_addr = listener.accept() - logger.info("Accepting %s:%s and opening SSH channel to %s:%s", - *peer_addr, args.device, port) - try: - remote_stream = \ - transport.open_channel('direct-tcpip', (args.device, port), peer_addr) - except 
Exception: - logger.exception("Cannot open channel on port %s", port) - continue - - thread = threading.Thread(target=forwarder, args=(local_stream, remote_stream), - name="forward-{}".format(port), daemon=True) - thread.start() - - ports = [1380, 1381, 1382, 1383] - for port in ports: - thread = threading.Thread(target=listener, args=(port,), - name="listen-{}".format(port), daemon=True) - thread.start() - - logger.info("Forwarding ports {} to core device and logs from core device" - .format(", ".join(map(str, ports)))) - client.run_command( - "{env} python3 flterm.py {serial} --output-only", - **substs) - - elif action == "hotswap": - logger.info("Hotswapping firmware") - try: - subprocess.check_call(["python3", - "-m", "artiq.frontend.artiq_coreboot", "hotswap", - "/tmp/{target}/software/{firmware}/{firmware}.bin" - .format(target=args.target, firmware=firmware)]) - except subprocess.CalledProcessError: - logger.error("Build failed") - sys.exit(1) - - else: - logger.error("Unknown action {}".format(action)) - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_flash.py b/artiq/frontend/artiq_flash.py index db1a98f31..6e7319a1c 100755 --- a/artiq/frontend/artiq_flash.py +++ b/artiq/frontend/artiq_flash.py @@ -5,9 +5,16 @@ import os import subprocess import tempfile import shutil +import re +import atexit from functools import partial +from collections import defaultdict +from sipyco import common_args + +from artiq import __version__ as artiq_version from artiq import __artiq_dir__ as artiq_dir +from artiq.remoting import SSHClient, LocalClient from artiq.frontend.bit2bin import bit2bin @@ -18,12 +25,14 @@ def get_argparser(): epilog="""\ Valid actions: - * proxy: load the flash proxy gateware bitstream - * gateware: write gateware bitstream to flash - * bios: write bios to flash - * runtime: write runtime to flash + * gateware: write main gateware bitstream to flash + * rtm_gateware: write RTM gateware bitstream to flash + * bootloader: write bootloader to flash * storage: write storage image to flash - * load: load gateware bitstream into device (volatile but fast) + * firmware: write firmware to flash + * load: load main gateware bitstream into device (volatile but fast) + * rtm_load: load RTM gateware bitstream into device + * erase: erase flash memory * start: trigger the target to (re)load its gateware bitstream from flash Prerequisites: @@ -35,21 +44,39 @@ Prerequisites: and replug the device. Ensure you are member of the plugdev group: 'sudo adduser $USER plugdev' and re-login. 
""") - parser.add_argument("-t", "--target", default="kc705", - help="target board, default: %(default)s") - parser.add_argument("-m", "--variant", default=None, - help="board variant") - parser.add_argument("--preinit-command", default=[], action="append", + + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") + + common_args.verbosity_args(parser) + + parser.add_argument("-n", "--dry-run", + default=False, action="store_true", + help="only show the openocd script that would be run") + parser.add_argument("-H", "--host", metavar="HOSTNAME", + type=str, default=None, + help="SSH host where the board is located") + parser.add_argument("-J", "--jump", + type=str, default=None, + help="SSH host to jump through") + parser.add_argument("-t", "--target", default="kasli", + help="target board, default: %(default)s, one of: " + "kasli sayma metlino kc705") + parser.add_argument("-V", "--variant", default=None, + help="board variant. Autodetected if only one is installed.") + parser.add_argument("-I", "--preinit-command", default=[], action="append", help="add a pre-initialization OpenOCD command. " - "Useful for selecting a development board " - "when several are connected.") + "Useful for selecting a board when several are connected.") parser.add_argument("-f", "--storage", help="write file to storage area") - parser.add_argument("-d", "--dir", help="look for files in this directory") - parser.add_argument("--srcbuild", help="look for bitstream, BIOS and runtime in this " - "ARTIQ source build tree") + parser.add_argument("-d", "--dir", help="look for board binaries in this directory") + parser.add_argument("--srcbuild", help="board binaries directory is laid out as a source build tree", + default=False, action="store_true") + parser.add_argument("--no-rtm-jtag", help="do not attempt JTAG to the RTM", + default=False, action="store_true") parser.add_argument("action", metavar="ACTION", nargs="*", - default="proxy gateware bios runtime start".split(), - help="actions to perform, default: %(default)s") + default=[], + help="actions to perform, default: flash everything") return parser @@ -58,7 +85,7 @@ def scripts_path(): if os.name == "nt": p.insert(0, "Library") p = os.path.abspath(os.path.join( - os.path.dirname(shutil.which("openocd")), + os.path.dirname(os.path.realpath(shutil.which("openocd"))), "..", *p)) return p @@ -66,75 +93,155 @@ def scripts_path(): def proxy_path(): p = ["share", "bscan-spi-bitstreams"] p = os.path.abspath(os.path.join( - os.path.dirname(shutil.which("openocd")), + os.path.dirname(os.path.realpath(shutil.which("openocd"))), "..", *p)) return p +def find_proxy_bitfile(filename): + for p in [proxy_path(), os.path.expanduser("~/.migen"), + "/usr/local/share/migen", "/usr/share/migen"]: + full_path = os.path.join(p, filename) + if os.access(full_path, os.R_OK): + return full_path + raise FileNotFoundError("Cannot find proxy bitstream {}" + .format(filename)) + + +def add_commands(script, *commands, **substs): + script += [command.format(**substs) for command in commands] + + class Programmer: - def __init__(self, target_file, preinit_commands): - self.target_file = target_file - self.preinit_commands = preinit_commands - self.prog = [] + def __init__(self, client, preinit_script): + self._client = client + self._board_script = [] + self._preinit_script = [ + "gdb_port disabled", + "tcl_port disabled", + "telnet_port disabled" + ] + preinit_script + self._loaded = defaultdict(lambda: None) 
+ self._script = ["init"] - def init(self): - self.prog.extend(self.preinit_commands) - self.prog.append("init") + def _transfer_script(self, script): + if isinstance(self._client, LocalClient): + return "[find {}]".format(script) - def load(self, bitfile): + def rewriter(content): + def repl(match): + return self._transfer_script(match.group(1).decode()).encode() + return re.sub(rb"\[find (.+?)\]", repl, content, re.DOTALL) + + script = os.path.join(scripts_path(), script) + return self._client.upload(script, rewriter) + + def add_flash_bank(self, name, tap, index): + add_commands(self._board_script, + "target create {tap}.{name}.proxy testee -chain-position {tap}.tap", + "flash bank {name} jtagspi 0 0 0 0 {tap}.{name}.proxy {ir:#x}", + tap=tap, name=name, ir=0x02 + index) + + def erase_flash(self, bankname): + self.load_proxy() + add_commands(self._script, + "flash probe {bankname}", + "flash erase_sector {bankname} 0 last", + bankname=bankname) + + def load(self, bitfile, pld): + os.stat(bitfile) # check for existence + + if self._loaded[pld] == bitfile: + return + self._loaded[pld] = bitfile + + bitfile = self._client.upload(bitfile) + add_commands(self._script, + "pld load {pld} {{{filename}}}", + pld=pld, filename=bitfile) + + def load_proxy(self): raise NotImplementedError - def proxy(self, proxy_bitfile): - raise NotImplementedError + def write_binary(self, bankname, address, filename): + self.load_proxy() - def flash_binary(self, flashno, address, filename): - raise NotImplementedError + size = os.path.getsize(filename) + filename = self._client.upload(filename) + add_commands(self._script, + "flash probe {bankname}", + "flash erase_sector {bankname} {firstsector} {lastsector}", + "flash write_bank {bankname} {{{filename}}} {address:#x}", + "flash verify_bank {bankname} {{{filename}}} {address:#x}", + bankname=bankname, address=address, filename=filename, + firstsector=address // self._sector_size, + lastsector=(address + size - 1) // self._sector_size) + + def read_binary(self, bankname, address, length, filename): + self.load_proxy() + + filename = self._client.prepare_download(filename) + add_commands(self._script, + "flash probe {bankname}", + "flash read_bank {bankname} {{{filename}}} {address:#x} {length}", + bankname=bankname, filename=filename, address=address, length=length) def start(self): raise NotImplementedError - def do(self): - self.prog.append("exit") - cmdline = [ - "openocd", - "-s", scripts_path() + def script(self): + return [ + *self._board_script, + *self._preinit_script, + *self._script, + "exit" ] - if self.target_file is not None: - cmdline += ["-f", self.target_file] - cmdline += ["-c", "; ".join(self.prog)] - subprocess.check_call(cmdline) + + def run(self): + cmdline = ["openocd"] + if isinstance(self._client, LocalClient): + cmdline += ["-s", scripts_path()] + cmdline += ["-c", "; ".join(self.script())] + + cmdline = [arg.replace("{", "{{").replace("}", "}}") for arg in cmdline] + self._client.run_command(cmdline) + self._client.download() + + self._script = [] -class ProgrammerJtagSpi7(Programmer): - def __init__(self, target, preinit_commands): - Programmer.__init__(self, os.path.join("board", target + ".cfg"), - preinit_commands) - self.init() +class ProgrammerXC7(Programmer): + _sector_size = 0x10000 - def load(self, bitfile, pld=0): - self.prog.append("pld load {} {{{}}}".format(pld, bitfile)) + def __init__(self, client, preinit_script, board, proxy): + Programmer.__init__(self, client, preinit_script) + self._proxy = proxy - def proxy(self, 
proxy_bitfile, pld=0): - self.prog.append("jtagspi_init {} {{{}}}".format(pld, proxy_bitfile)) + add_commands(self._board_script, + "source {boardfile}", + boardfile=self._transfer_script("board/{}.cfg".format(board))) + self.add_flash_bank("spi0", "xc7", index=0) - def flash_binary(self, flashno, address, filename): - # jtagspi_program supports only one flash - assert flashno == 0 - self.prog.append("jtagspi_program {{{}}} 0x{:x}".format( - filename, address)) + add_commands(self._script, "xadc_report xc7.tap") + + def load_proxy(self): + self.load(find_proxy_bitfile(self._proxy), pld=0) def start(self): - self.prog.append("xc7_program xc7.tap") + add_commands(self._script, + "xc7_program xc7.tap") -class ProgrammerSayma(Programmer): - sector_size = 0x10000 +class ProgrammerAMCRTM(Programmer): + _sector_size = 0x10000 + + def __init__(self, client, preinit_script): + Programmer.__init__(self, client, preinit_script) + + add_commands(self._board_script, + "source {}".format(self._transfer_script("fpga/xilinx-xadc.cfg")), - def __init__(self, preinit_commands): - # TODO: support Sayma RTM - Programmer.__init__(self, None, preinit_commands) - self.proxy_loaded = False - self.prog += [ "interface ftdi", "ftdi_device_desc \"Quad RS232-HS\"", "ftdi_vid_pid 0x0403 0x6011", @@ -143,148 +250,229 @@ class ProgrammerSayma(Programmer): # nTRST on ADBUS4: out, high, but R46 is DNP "ftdi_layout_init 0x0098 0x008b", "reset_config none", - "adapter_khz 5000", "transport select jtag", - - "source [find cpld/xilinx-xc7.cfg]", # tap 0, pld 0 + # tap 0, pld 0 + "source {}".format(self._transfer_script("cpld/xilinx-xc7.cfg")), + # tap 1, pld 1 "set CHIP XCKU040", - "source [find cpld/xilinx-xcu.cfg]", # tap 1, pld 1 + "source {}".format(self._transfer_script("cpld/xilinx-xcu.cfg"))) + self.add_flash_bank("spi0", "xcu", index=0) + self.add_flash_bank("spi1", "xcu", index=1) - "target create xcu.proxy testee -chain-position xcu.tap", - "set XILINX_USER1 0x02", - "set XILINX_USER2 0x03", - "flash bank xcu.spi0 jtagspi 0 0 0 0 xcu.proxy $XILINX_USER1", - "flash bank xcu.spi1 jtagspi 0 0 0 0 xcu.proxy $XILINX_USER2" - ] - self.init() + add_commands(self._script, "echo \"RTM FPGA XADC:\"", "xadc_report xc7.tap") + add_commands(self._script, "echo \"AMC FPGA XADC:\"", "xadc_report xcu.tap") - def load(self, bitfile, pld=1): - self.prog.append("pld load {} {{{}}}".format(pld, bitfile)) - - def proxy(self, proxy_bitfile, pld=1): - self.load(proxy_bitfile, pld) - self.prog.append("reset halt") - - def flash_binary(self, flashno, address, filename): - sector_first = address // self.sector_size - size = os.path.getsize(filename) - assert size - sector_last = sector_first + (size - 1) // self.sector_size - assert sector_last >= sector_first - self.prog += [ - "flash probe xcu.spi{}".format(flashno), - "flash erase_sector {} {} {}".format(flashno, sector_first, sector_last), - "flash write_bank {} {{{}}} 0x{:x}".format(flashno, filename, address), - "flash verify_bank {} {{{}}} 0x{:x}".format(flashno, filename, address), - ] + def load_proxy(self): + self.load(find_proxy_bitfile("bscan_spi_xcku040.bit"), pld=1) def start(self): - self.proxy_loaded = False - self.prog.append("xcu_program xcu.tap") + add_commands(self._script, "xcu_program xcu.tap") + + +class ProgrammerAMC(Programmer): + _sector_size = 0x10000 + + def __init__(self, client, preinit_script): + Programmer.__init__(self, client, preinit_script) + + add_commands(self._board_script, + "source {}".format(self._transfer_script("fpga/xilinx-xadc.cfg")), + + 
"interface ftdi", + "ftdi_device_desc \"Quad RS232-HS\"", + "ftdi_vid_pid 0x0403 0x6011", + "ftdi_channel 0", + # EN_USB_JTAG on ADBUS7: out, high + # nTRST on ADBUS4: out, high, but R46 is DNP + "ftdi_layout_init 0x0098 0x008b", + "reset_config none", + "adapter_khz 5000", + "transport select jtag", + "set CHIP XCKU040", + "source {}".format(self._transfer_script("cpld/xilinx-xcu.cfg"))) + self.add_flash_bank("spi0", "xcu", index=0) + self.add_flash_bank("spi1", "xcu", index=1) + + add_commands(self._script, "echo \"AMC FPGA XADC:\"", "xadc_report xcu.tap") + + def load_proxy(self): + self.load(find_proxy_bitfile("bscan_spi_xcku040.bit"), pld=0) + + def start(self): + add_commands(self._script, "xcu_program xcu.tap") def main(): - parser = get_argparser() - opts = parser.parse_args() + args = get_argparser().parse_args() + common_args.init_logger_from_args(args) config = { - "kc705": { - "programmer_factory": partial(ProgrammerJtagSpi7, "kc705"), - "proxy_bitfile": "bscan_spi_xc7k325t.bit", - "variants": ["nist_clock", "nist_qc2"], - "gateware": (0, 0x000000), - "bios": (0, 0xaf0000), - "runtime": (0, 0xb00000), - "storage": (0, 0xb80000), + "kasli": { + "programmer": partial(ProgrammerXC7, board="kasli", proxy="bscan_spi_xc7a100t.bit"), + "gateware": ("spi0", 0x000000), + "bootloader": ("spi0", 0x400000), + "storage": ("spi0", 0x440000), + "firmware": ("spi0", 0x450000), }, "sayma": { - "programmer_factory": ProgrammerSayma, - "proxy_bitfile": "bscan_spi_xcku040-sayma.bit", - "variants": ["standalone"], - "gateware": (0, 0x000000), - "bios": (1, 0x000000), - "runtime": (1, 0x010000), - "storage": (1, 0x090000), + "programmer": ProgrammerAMCRTM, + "gateware": ("spi0", 0x000000), + "bootloader": ("spi1", 0x000000), + "storage": ("spi1", 0x040000), + "firmware": ("spi1", 0x050000), + "rtm_gateware": ("spi1", 0x200000), }, - }[opts.target] + "metlino": { + "programmer": ProgrammerAMC, + "gateware": ("spi0", 0x000000), + "bootloader": ("spi1", 0x000000), + "storage": ("spi1", 0x040000), + "firmware": ("spi1", 0x050000), + }, + "kc705": { + "programmer": partial(ProgrammerXC7, board="kc705", proxy="bscan_spi_xc7k325t.bit"), + "gateware": ("spi0", 0x000000), + "bootloader": ("spi0", 0xaf0000), + "storage": ("spi0", 0xb30000), + "firmware": ("spi0", 0xb40000), + }, + }[args.target] - variant = opts.variant - if variant is not None and variant not in config["variants"]: - raise SystemExit("Invalid variant for this board") - if variant is None and config["variants"]: - variant = config["variants"][0] - bin_dir = opts.dir + bin_dir = args.dir if bin_dir is None: - if variant is None: - bin_dir = os.path.join(artiq_dir, "binaries", - "{}".format(opts.target)) + bin_dir = os.path.join(artiq_dir, "board-support") + + needs_artifacts = not args.action or any( + action in args.action + for action in ["gateware", "rtm_gateware", "bootloader", "firmware", "load", "rtm_load"]) + variant = args.variant + if needs_artifacts and variant is None: + variants = [] + if args.srcbuild: + for entry in os.scandir(bin_dir): + if entry.is_dir(): + variants.append(entry.name) else: - bin_dir = os.path.join(artiq_dir, "binaries", - "{}-{}".format(opts.target, variant)) - if opts.srcbuild is None and not os.path.exists(bin_dir) and opts.action != ["start"]: - raise SystemExit("Binaries directory '{}' does not exist" - .format(bin_dir)) + prefix = args.target + "-" + for entry in os.scandir(bin_dir): + if entry.is_dir() and entry.name.startswith(prefix): + variants.append(entry.name[len(prefix):]) + if args.target == 
"sayma": + try: + variants.remove("rtm") + except ValueError: + pass + if len(variants) == 0: + raise FileNotFoundError("no variants found, did you install a board binary package?") + elif len(variants) == 1: + variant = variants[0] + else: + raise ValueError("more than one variant found for selected board, specify -V. " + "Found variants: {}".format(" ".join(sorted(variants)))) + if needs_artifacts: + if args.srcbuild: + variant_dir = variant + else: + variant_dir = args.target + "-" + variant + if args.target == "sayma": + if args.srcbuild: + rtm_variant_dir = "rtm" + else: + rtm_variant_dir = "sayma-rtm" - programmer = config["programmer_factory"](opts.preinit_command) + if not args.action: + if args.target == "sayma" and variant != "simplesatellite" and variant != "master": + args.action = "gateware rtm_gateware bootloader firmware start".split() + else: + args.action = "gateware bootloader firmware start".split() - conv = False - for action in opts.action: - if action == "proxy": - proxy_found = False - for p in [bin_dir, proxy_path(), os.path.expanduser("~/.migen"), - "/usr/local/share/migen", "/usr/share/migen"]: - proxy_bitfile = os.path.join(p, config["proxy_bitfile"]) - if os.access(proxy_bitfile, os.R_OK): - programmer.proxy(proxy_bitfile) - proxy_found = True - break - if not proxy_found: - raise SystemExit( - "proxy gateware bitstream {} not found".format(config["proxy_bitfile"])) - elif action == "gateware": - if opts.srcbuild is None: - path = bin_dir - else: - path = os.path.join(opts.srcbuild, "gateware") - bin = os.path.join(path, "top.bin") - if not os.access(bin, os.R_OK): - bin_handle, bin = tempfile.mkstemp() - bit = os.path.join(path, "top.bit") - with open(bit, "rb") as f, open(bin_handle, "wb") as g: - bit2bin(f, g) - conv = True - programmer.flash_binary(*config["gateware"], bin) - elif action == "bios": - if opts.srcbuild is None: - path = bin_dir - else: - path = os.path.join(opts.srcbuild, "software", "bios") - programmer.flash_binary(*config["bios"], os.path.join(path, "bios.bin")) - elif action == "runtime": - if opts.srcbuild is None: - path = bin_dir - else: - path = os.path.join(opts.srcbuild, "software", "runtime") - programmer.flash_binary(*config["runtime"], os.path.join(path, "runtime.fbi")) + if args.host is None: + client = LocalClient() + else: + client = SSHClient(args.host, args.jump) + + if args.target == "sayma" and args.no_rtm_jtag: + programmer_cls = ProgrammerAMC + else: + programmer_cls = config["programmer"] + programmer = programmer_cls(client, preinit_script=args.preinit_command) + + def artifact_path(this_variant_dir, *path_filename): + if args.srcbuild: + # source tree - use path elements to locate file + return os.path.join(bin_dir, this_variant_dir, *path_filename) + else: + # flat tree - all files in the same directory, discard path elements + *_, filename = path_filename + return os.path.join(bin_dir, this_variant_dir, filename) + + def convert_gateware(bit_filename, header=False): + bin_handle, bin_filename = tempfile.mkstemp( + prefix="artiq_", suffix="_" + os.path.basename(bit_filename)) + with open(bit_filename, "rb") as bit_file, \ + open(bin_handle, "wb") as bin_file: + if header: + bin_file.write(b"\x00"*8) + bit2bin(bit_file, bin_file) + if header: + magic = 0x5352544d # "SRTM", see sayma_rtm target + length = bin_file.tell() - 8 + bin_file.seek(0) + bin_file.write(magic.to_bytes(4, byteorder="big")) + bin_file.write(length.to_bytes(4, byteorder="big")) + atexit.register(lambda: os.unlink(bin_filename)) + return bin_filename 
+ + for action in args.action: + if action == "gateware": + gateware_bin = convert_gateware( + artifact_path(variant_dir, "gateware", "top.bit")) + programmer.write_binary(*config["gateware"], gateware_bin) + elif action == "rtm_gateware": + rtm_gateware_bin = convert_gateware( + artifact_path(rtm_variant_dir, "gateware", "top.bit"), header=True) + programmer.write_binary(*config["rtm_gateware"], + rtm_gateware_bin) + elif action == "bootloader": + bootloader_bin = artifact_path(variant_dir, "software", "bootloader", "bootloader.bin") + programmer.write_binary(*config["bootloader"], bootloader_bin) elif action == "storage": - programmer.flash_binary(*config["storage"], opts.storage) - elif action == "load": - if opts.srcbuild is None: - path = bin_dir + storage_img = args.storage + programmer.write_binary(*config["storage"], storage_img) + elif action == "firmware": + if variant.endswith("satellite"): + firmware = "satman" else: - path = os.path.join(opts.srcbuild, "gateware") - programmer.load(os.path.join(path, "top.bit")) + firmware = "runtime" + + firmware_fbi = artifact_path(variant_dir, "software", firmware, firmware + ".fbi") + programmer.write_binary(*config["firmware"], firmware_fbi) + elif action == "load": + if args.target == "sayma": + gateware_bit = artifact_path(variant_dir, "gateware", "top.bit") + programmer.load(gateware_bit, 1) + else: + gateware_bit = artifact_path(variant_dir, "gateware", "top.bit") + programmer.load(gateware_bit, 0) + elif action == "rtm_load": + rtm_gateware_bit = artifact_path(rtm_variant_dir, "gateware", "top.bit") + programmer.load(rtm_gateware_bit, 0) elif action == "start": programmer.start() + elif action == "erase": + if args.target == "sayma" or args.target == "metlino": + programmer.erase_flash("spi0") + programmer.erase_flash("spi1") + else: + programmer.erase_flash("spi0") else: raise ValueError("invalid action", action) - try: - programmer.do() - finally: - if conv: - os.unlink(bin) + if args.dry_run: + print("\n".join(programmer.script())) + else: + programmer.run() if __name__ == "__main__": diff --git a/artiq/frontend/artiq_influxdb.py b/artiq/frontend/artiq_influxdb.py deleted file mode 100755 index ca9c3f343..000000000 --- a/artiq/frontend/artiq_influxdb.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import logging -import asyncio -import atexit -import fnmatch -from functools import partial -import time - -import numpy as np -import aiohttp - -from artiq.tools import * -from artiq.protocols.sync_struct import Subscriber -from artiq.protocols.pc_rpc import Server -from artiq.protocols import pyon - - -logger = logging.getLogger(__name__) - - -def get_argparser(): - parser = argparse.ArgumentParser( - description="ARTIQ data to InfluxDB bridge", - epilog="Pattern matching works as follows. " - "The default action on a key (dataset name) is to log it. " - "Then the patterns are traversed in order and glob-matched " - "with the key. " - "Optional + and - pattern prefixes specify whether to ignore or " - "log keys matching the rest of the pattern. " - "Default (in the absence of prefix) is to ignore. 
Last matched " - "pattern takes precedence.") - group = parser.add_argument_group("master") - group.add_argument( - "--server-master", default="::1", - help="hostname or IP of the master to connect to") - group.add_argument( - "--port-master", default=3250, type=int, - help="TCP port to use to connect to the master") - group.add_argument( - "--retry-master", default=5.0, type=float, - help="retry timer for reconnecting to master") - group = parser.add_argument_group("database") - group.add_argument( - "--baseurl-db", default="http://localhost:8086", - help="base URL to access InfluxDB (default: %(default)s)") - group.add_argument( - "--user-db", default="", help="InfluxDB username") - group.add_argument( - "--password-db", default="", help="InfluxDB password") - group.add_argument( - "--database", default="db", help="database name to use") - group.add_argument( - "--table", default="lab", help="table name to use") - group = parser.add_argument_group("filter") - group.add_argument( - "--pattern-file", default="influxdb_patterns.cfg", - help="file to load the patterns from (default: %(default)s). " - "If the file is not found, no patterns are loaded " - "(everything is logged).") - simple_network_args(parser, [("control", "control", 3248)]) - verbosity_args(parser) - return parser - - -def format_influxdb(v): - if np.issubdtype(type(v), np.bool_): - return "bool={}".format(v) - if np.issubdtype(type(v), np.integer): - return "int={}i".format(v) - if np.issubdtype(type(v), np.floating): - return "float={}".format(v) - if np.issubdtype(type(v), np.str_): - return "str=\"{}\"".format(v.replace('"', '\\"')) - return "pyon=\"{}\"".format(pyon.encode(v).replace('"', '\\"')) - - -class DBWriter(TaskObject): - def __init__(self, base_url, user, password, database, table): - self.base_url = base_url - self.user = user - self.password = password - self.database = database - self.table = table - - self._queue = asyncio.Queue(100) - - def update(self, k, v): - try: - self._queue.put_nowait((k, v, time.time())) - except asyncio.QueueFull: - logger.warning("failed to update dataset '%s': " - "too many pending updates", k) - - async def _do(self): - async with aiohttp.ClientSession() as session: - while True: - k, v, t = await self._queue.get() - url = self.base_url + "/write" - params = {"u": self.user, "p": self.password, "db": self.database, - "precision": "ms"} - data = "{},dataset={} {} {}".format( - self.table, k, format_influxdb(v), round(t*1e3)) - try: - response = await session.post(url, params=params, data=data) - except: - logger.warning("got exception trying to update '%s'", - k, exc_info=True) - else: - if response.status not in (200, 204): - content = (await response.content.read()).decode().strip() - logger.warning("got HTTP status %d " - "trying to update '%s': %s", - response.status, k, content) - response.close() - - -class _Mock: - def __setitem__(self, k, v): - pass - - def __getitem__(self, k): - return self - - def __delitem__(self, k): - pass - - -class Datasets: - def __init__(self, filter_function, writer, init): - self.filter_function = filter_function - self.writer = writer - - def __setitem__(self, k, v): - if self.filter_function(k): - self.writer.update(k, v[1]) - - # ignore mutations - def __getitem__(self, k): - return _Mock() - - # ignore deletions - def __delitem__(self, k): - pass - - -class MasterReader(TaskObject): - def __init__(self, server, port, retry, filter_function, writer): - self.server = server - self.port = port - self.retry = retry - - self.filter_function = 
filter_function - self.writer = writer - - async def _do(self): - subscriber = Subscriber( - "datasets", - partial(Datasets, self.filter_function, self.writer)) - while True: - try: - await subscriber.connect(self.server, self.port) - try: - await asyncio.wait_for(subscriber.receive_task, None) - finally: - await subscriber.close() - except (ConnectionAbortedError, ConnectionError, - ConnectionRefusedError, ConnectionResetError) as e: - logger.warning("Connection to master failed (%s: %s)", - e.__class__.__name__, str(e)) - else: - logger.warning("Connection to master lost") - logger.warning("Retrying in %.1f seconds", self.retry) - await asyncio.sleep(self.retry) - - -class Filter: - def __init__(self, pattern_file): - self.pattern_file = pattern_file - self.scan_patterns() - - def scan_patterns(self): - """(Re)load the patterns file.""" - try: - with open(self.pattern_file, "r") as f: - self.patterns = [] - for line in f: - line = line.rstrip() - if line: - self.patterns.append(line) - except FileNotFoundError: - logger.info("no pattern file found, logging everything") - self.patterns = [] - - # Privatize so that it is not shown in artiq_rpctool list-methods. - def _filter(self, k): - take = "+" - for pattern in self.patterns: - sign = "-" - if pattern[0] in "+-": - sign, pattern = pattern[0], pattern[1:] - if fnmatch.fnmatchcase(k, pattern): - take = sign - return take == "+" - - def get_patterns(self): - """Show existing patterns.""" - return self.patterns - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - loop = asyncio.get_event_loop() - atexit.register(loop.close) - - writer = DBWriter(args.baseurl_db, - args.user_db, args.password_db, - args.database, args.table) - writer.start() - atexit_register_coroutine(writer.stop) - - filter = Filter(args.pattern_file) - rpc_server = Server({"influxdb_filter": filter}, builtin_terminate=True) - loop.run_until_complete(rpc_server.start(bind_address_from_args(args), - args.port_control)) - atexit_register_coroutine(rpc_server.stop) - - reader = MasterReader(args.server_master, args.port_master, - args.retry_master, filter._filter, writer) - reader.start() - atexit_register_coroutine(reader.stop) - - loop.run_until_complete(rpc_server.wait_terminate()) - - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_master.py b/artiq/frontend/artiq_master.py index 91672f9c6..1a5073692 100755 --- a/artiq/frontend/artiq_master.py +++ b/artiq/frontend/artiq_master.py @@ -6,16 +6,18 @@ import atexit import os import logging -from artiq.tools import (simple_network_args, atexit_register_coroutine, - bind_address_from_args) -from artiq.protocols.pc_rpc import Server as RPCServer -from artiq.protocols.sync_struct import Publisher -from artiq.protocols.logging import Server as LoggingServer -from artiq.protocols.broadcast import Broadcaster +from sipyco.pc_rpc import Server as RPCServer +from sipyco.sync_struct import Publisher +from sipyco.logging_tools import Server as LoggingServer +from sipyco.broadcast import Broadcaster +from sipyco import common_args +from sipyco.asyncio_tools import atexit_register_coroutine + +from artiq import __version__ as artiq_version from artiq.master.log import log_args, init_log from artiq.master.databases import DeviceDB, DatasetDB from artiq.master.scheduler import Scheduler -from artiq.master.worker_db import RIDCounter +from artiq.master.rid_counter import RIDCounter from artiq.master.experiments import (FilesystemBackend, GitBackend, ExperimentDB) @@ -24,8 +26,11 @@ logger = 
logging.getLogger(__name__) def get_argparser(): parser = argparse.ArgumentParser(description="ARTIQ master") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") - simple_network_args(parser, [ + common_args.simple_network_args(parser, [ ("notify", "notifications", 3250), ("control", "control", 3251), ("logging", "remote logging", 1066), @@ -72,7 +77,7 @@ def main(): else: loop = asyncio.get_event_loop() atexit.register(loop.close) - bind = bind_address_from_args(args) + bind = common_args.bind_address_from_args(args) server_broadcast = Broadcaster() loop.run_until_complete(server_broadcast.start( diff --git a/artiq/frontend/artiq_pcap.py b/artiq/frontend/artiq_pcap.py deleted file mode 100644 index 59f124734..000000000 --- a/artiq/frontend/artiq_pcap.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 - -# This script makes the following assumptions: -# * tcpdump has CAP_NET_RAW capabilities set -# use # setcap cap_net_raw+eip /usr/sbin/tcpdump - -import os -import argparse -import subprocess - -from artiq.tools import verbosity_args, init_logger, logger, SSHClient - - -def get_argparser(): - parser = argparse.ArgumentParser(description="ARTIQ core device " - "packet capture tool") - - verbosity_args(parser) - - parser.add_argument("-H", "--host", metavar="HOST", - type=str, default="lab.m-labs.hk", - help="SSH host where the development board is located") - parser.add_argument("-D", "--device", metavar="DEVICE", - type=str, default="kc705.lab.m-labs.hk", - help="address or domain corresponding to the development board") - parser.add_argument("-f", "--file", metavar="PCAP_FILE", - type=str, default="coredevice.pcap", - help="Location to retrieve the pcap file into") - - parser.add_argument("command", metavar="COMMAND", - type=str, default=[], nargs=argparse.REMAINDER, - help="command to execute while capturing") - - return parser - - -def main(): - args = get_argparser().parse_args() - init_logger(args) - - client = SSHClient(args.host) - - sftp = client.get_sftp() - tcpdump = client.spawn_command( - "/usr/sbin/tcpdump host {device} -w {tmp}/trace.pcap", get_pty=True, - device=args.device) - - try: - subprocess.check_call(args.command) - except subprocess.CalledProcessError: - logger.error("Command failed") - - tcpdump.close() - sftp.get("{tmp}/trace.pcap".format(tmp=client.tmp), - args.file + ".new") - os.rename(args.file + ".new", args.file) - logger.info("Pcap file {file} retrieved".format(file=args.file)) diff --git a/artiq/frontend/artiq_route.py b/artiq/frontend/artiq_route.py new file mode 100755 index 000000000..739a649e2 --- /dev/null +++ b/artiq/frontend/artiq_route.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import argparse + + +def get_argparser(): + parser = argparse.ArgumentParser(description="ARTIQ DRTIO routing table " + "manipulation tool") + + parser.add_argument("file", metavar="FILE", type=str, + help="target file") + + action = parser.add_subparsers(dest="action") + action.required = True + + action.add_parser("init", help="create a new empty routing table") + + action.add_parser("show", help="show contents of routing table") + + a_set = action.add_parser("set", help="set routing table entry") + a_set.add_argument("destination", metavar="DESTINATION", type=int, + help="destination to operate on") + a_set.add_argument("hop", metavar="HOP", type=int, nargs="*", + help="hop(s) to the destination") + + return parser + + +DEST_COUNT = 256 +MAX_HOPS = 32 + + +def init(filename): 
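    # The routing table is a flat binary file with one MAX_HOPS-byte row per
    # destination (DEST_COUNT*MAX_HOPS = 8192 bytes in total). Each byte is a
    # hop number and 0xff marks the end of a route / an unused slot, so an
    # all-0xff file is an empty table.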
+ with open(filename, "wb") as f: + f.write(b"\xff"*(DEST_COUNT*MAX_HOPS)) + + +def show_routes(filename): + routes = [] + with open(filename, "rb") as f: + for i in range(DEST_COUNT): + hops = [int.from_bytes(f.read(1), "big") for j in range(MAX_HOPS)] + routes.append(hops) + + for destination, route in enumerate(routes): + if route[0] != 0xff: + fmt = "{:3d}:".format(destination) + for hop in route: + if hop == 0xff: + break + fmt += " {:3d}".format(hop) + print(fmt) + + +def set_route(filename, destination, hops): + with open(filename, "r+b") as f: + if destination >= DEST_COUNT: + raise ValueError("destination must be less than {}".format(DEST_COUNT)) + f.seek(destination*MAX_HOPS) + + if len(hops) + 1 >= MAX_HOPS: + raise ValueError("too many hops") + for hop in hops: + if hop >= 0xff: + raise ValueError("all hops must be less than 255") + + hops = hops + [0xff]*(MAX_HOPS-len(hops)) + f.write(bytes(hops)) + + +def main(): + args = get_argparser().parse_args() + if args.action == "init": + init(args.file) + elif args.action == "show": + show_routes(args.file) + elif args.action == "set": + set_route(args.file, args.destination, args.hop) + else: + raise ValueError + +if __name__ == "__main__": + main() diff --git a/artiq/frontend/artiq_rpctool.py b/artiq/frontend/artiq_rpctool.py deleted file mode 100755 index 74cb8890a..000000000 --- a/artiq/frontend/artiq_rpctool.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import textwrap -import sys -import traceback -import numpy as np # Needed to use numpy in RPC call arguments on cmd line -import pprint - -from artiq.protocols.pc_rpc import AutoTarget, Client - - -def get_argparser(): - parser = argparse.ArgumentParser( - description="ARTIQ RPC tool") - parser.add_argument("server", metavar="SERVER", - help="hostname or IP of the controller to connect to") - parser.add_argument("port", metavar="PORT", type=int, - help="TCP port to use to connect to the controller") - subparsers = parser.add_subparsers(dest="action") - subparsers.add_parser("list-targets", help="list existing targets") - parser_list_methods = subparsers.add_parser("list-methods", - help="list target's methods") - parser_list_methods.add_argument("-t", "--target", help="target name") - parser_call = subparsers.add_parser("call", help="call a target's method") - parser_call.add_argument("-t", "--target", help="target name") - parser_call.add_argument("method", metavar="METHOD", help="method name") - parser_call.add_argument("args", metavar="ARGS", nargs=argparse.REMAINDER, - help="arguments") - parser_interactive = subparsers.add_parser("interactive", - help="enter interactive mode " - "(default)") - parser_interactive.add_argument("-t", "--target", help="target name") - return parser - - -def list_targets(target_names, description): - print("Target(s): " + ", ".join(target_names)) - if description is not None: - print("Description: " + description) - - -def list_methods(remote): - doc = remote.get_rpc_method_list() - if doc["docstring"] is not None: - print(doc["docstring"]) - print() - for name, (argspec, docstring) in sorted(doc["methods"].items()): - args = "" - for arg in argspec["args"]: - args += arg - if argspec["defaults"] is not None: - kword_index = len(argspec["defaults"]) - len(argspec["args"])\ - + argspec["args"].index(arg) - if kword_index >= 0: - if argspec["defaults"][kword_index] == Ellipsis: - args += "=..." 
- else: - args += "={}".format(argspec["defaults"][kword_index]) - if argspec["args"].index(arg) < len(argspec["args"]) - 1: - args += ", " - if argspec["varargs"] is not None: - args += ", *{}".format(argspec["varargs"]) - elif len(argspec["kwonlyargs"]) > 0: - args += ", *" - for kwonlyarg in argspec["kwonlyargs"]: - args += ", {}".format(kwonlyarg) - if kwonlyarg in argspec["kwonlydefaults"]: - if argspec["kwonlydefaults"][kwonlyarg] == Ellipsis: - args += "=..." - else: - args += "={}".format(argspec["kwonlydefaults"][kwonlyarg]) - if argspec["varkw"] is not None: - args += ", **{}".format(argspec["varkw"]) - print("{}({})".format(name, args)) - if docstring is not None: - print(textwrap.indent(docstring, " ")) - print() - - -def call_method(remote, method_name, args): - method = getattr(remote, method_name) - ret = method(*[eval(arg) for arg in args]) - if ret is not None: - pprint.pprint(ret) - - -def interactive(remote): - try: - import readline # This makes input() nicer - except ImportError: - print("Warning: readline not available. " - "Install it to add line editing capabilities.") - - while True: - try: - cmd = input("({}) ".format(remote.get_selected_target())) - except EOFError: - return - class RemoteDict: - def __getitem__(self, k): - if k == "np": - return np - else: - return getattr(remote, k) - try: - ret = eval(cmd, {}, RemoteDict()) - except Exception as e: - if hasattr(e, "parent_traceback"): - print("Remote exception:") - print(traceback.format_exception_only(type(e), e)[0].rstrip()) - for l in e.parent_traceback: - print(l.rstrip()) - else: - traceback.print_exc() - else: - if ret is not None: - pprint.pprint(ret) - - -def main(): - args = get_argparser().parse_args() - if not args.action: - args.target = None - - remote = Client(args.server, args.port, None) - targets, description = remote.get_rpc_id() - if args.action != "list-targets": - if not args.target: - remote.select_rpc_target(AutoTarget) - else: - remote.select_rpc_target(args.target) - - if args.action == "list-targets": - list_targets(targets, description) - elif args.action == "list-methods": - list_methods(remote) - elif args.action == "call": - call_method(remote, args.method, args.args) - elif args.action == "interactive" or not args.action: - interactive(remote) - else: - print("Unrecognized action: {}".format(args.action)) - -if __name__ == "__main__": - main() diff --git a/artiq/frontend/artiq_rtiomon.py b/artiq/frontend/artiq_rtiomon.py new file mode 100755 index 000000000..eed6a9074 --- /dev/null +++ b/artiq/frontend/artiq_rtiomon.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +import argparse +import asyncio + +from artiq.coredevice.comm_moninj import * + + +def get_argparser(): + parser = argparse.ArgumentParser( + description="ARTIQ RTIO monitor") + parser.add_argument("core_addr", metavar="CORE_ADDR", + help="hostname or IP address of the core device") + parser.add_argument("channel", metavar="CHANNEL", type=lambda x: int(x, 0), nargs="+", + help="channel(s) to monitor") + return parser + + +def main(): + args = get_argparser().parse_args() + + loop = asyncio.get_event_loop() + try: + comm = CommMonInj( + lambda channel, probe, value: print("0x{:06x}: {}".format(channel, value)), + lambda channel, override, value: None) + loop.run_until_complete(comm.connect(args.core_addr)) + try: + for channel in args.channel: + comm.monitor_probe(True, channel, 0) + loop.run_forever() + finally: + loop.run_until_complete(comm.close()) + finally: + loop.close() + + +if __name__ == "__main__": + main() diff 
--git a/artiq/frontend/artiq_run.py b/artiq/frontend/artiq_run.py index 962e7a5e5..4e6f2b7a3 100755 --- a/artiq/frontend/artiq_run.py +++ b/artiq/frontend/artiq_run.py @@ -12,6 +12,9 @@ import h5py from llvmlite_artiq import binding as llvm +from sipyco import common_args + +from artiq import __version__ as artiq_version from artiq.language.environment import EnvExperiment, ProcessArgumentManager from artiq.language.types import TBool from artiq.master.databases import DeviceDB, DatasetDB @@ -125,15 +128,18 @@ class DummyCCB: def get_argparser(with_file=True): parser = argparse.ArgumentParser( description="Local experiment running tool") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") - verbosity_args(parser) + common_args.verbosity_args(parser) parser.add_argument("--device-db", default="device_db.py", help="device database file (default: '%(default)s')") parser.add_argument("--dataset-db", default="dataset_db.pyon", help="dataset file (default: '%(default)s')") - parser.add_argument("-e", "--experiment", default=None, - help="experiment to run") + parser.add_argument("-c", "--class-name", default=None, + help="name of the class to run") parser.add_argument("-o", "--hdf5", default=None, help="write results to specified HDF5 file" " (default: print them)") @@ -149,7 +155,7 @@ def get_argparser(with_file=True): def _build_experiment(device_mgr, dataset_mgr, args): arguments = parse_arguments(args.arguments) argument_mgr = ProcessArgumentManager(arguments) - managers = (device_mgr, dataset_mgr, argument_mgr) + managers = (device_mgr, dataset_mgr, argument_mgr, {}) if hasattr(args, "file"): is_elf = args.file.endswith(".elf") is_ll = args.file.endswith(".ll") @@ -157,8 +163,8 @@ def _build_experiment(device_mgr, dataset_mgr, args): if is_elf or is_ll or is_bc: if args.arguments: raise ValueError("arguments not supported for precompiled kernels") - if args.experiment: - raise ValueError("experiment-by-name not supported " + if args.class_name: + raise ValueError("class-name not supported " "for precompiled kernels") if is_elf: return ELFRunner(managers, file=args.file) @@ -175,16 +181,16 @@ def _build_experiment(device_mgr, dataset_mgr, args): file = getattr(module, "__file__") expid = { "file": file, - "experiment": args.experiment, + "class_name": args.class_name, "arguments": arguments } device_mgr.virtual_devices["scheduler"].expid = expid - return get_experiment(module, args.experiment)(managers) + return get_experiment(module, args.class_name)(managers) def run(with_file=False): args = get_argparser(with_file).parse_args() - init_logger(args) + common_args.init_logger_from_args(args) device_mgr = DeviceManager(DeviceDB(args.device_db), virtual_devices={"scheduler": DummyScheduler(), diff --git a/artiq/frontend/artiq_session.py b/artiq/frontend/artiq_session.py index b04a83008..3a500c90f 100755 --- a/artiq/frontend/artiq_session.py +++ b/artiq/frontend/artiq_session.py @@ -3,14 +3,19 @@ import argparse import sys import subprocess - +from artiq import __version__ as artiq_version def get_argparser(): parser = argparse.ArgumentParser( description="ARTIQ session manager. " "Automatically runs the master, dashboard and " - "local controller manager on the current machine.") + "local controller manager on the current machine. 
" + "The latter requires the artiq-comtools package to " + "be installed.") + parser.add_argument("--version", action="version", + version="ARTIQ v{}".format(artiq_version), + help="print the ARTIQ version number") parser.add_argument("-m", action="append", default=[], help="add argument to the master command line") parser.add_argument("-d", action="append", default=[], @@ -25,7 +30,7 @@ def main(): master_cmd = [sys.executable, "-u", "-m", "artiq.frontend.artiq_master"] dashboard_cmd = [sys.executable, "-m", "artiq.frontend.artiq_dashboard"] - ctlmgr_cmd = [sys.executable, "-m", "artiq.frontend.artiq_ctlmgr"] + ctlmgr_cmd = [sys.executable, "-m", "artiq_comtools.artiq_ctlmgr"] master_cmd += args.m dashboard_cmd += args.d ctlmgr_cmd += args.c diff --git a/artiq/frontend/artiq_sinara_tester.py b/artiq/frontend/artiq_sinara_tester.py new file mode 100755 index 000000000..7c51e49f6 --- /dev/null +++ b/artiq/frontend/artiq_sinara_tester.py @@ -0,0 +1,522 @@ +#!/usr/bin/env python3 + +import sys +import os +import select + +from artiq.experiment import * +from artiq.coredevice.ad9910 import AD9910, SyncDataEeprom +from artiq.master.databases import DeviceDB +from artiq.master.worker_db import DeviceManager + + +if os.name == "nt": + import msvcrt + + +def chunker(seq, size): + res = [] + for el in seq: + res.append(el) + if len(res) == size: + yield res + res = [] + if res: + yield res + + +def is_enter_pressed() -> TBool: + if os.name == "nt": + if msvcrt.kbhit() and msvcrt.getch() == b"\r": + return True + else: + return False + else: + if select.select([sys.stdin, ], [], [], 0.0)[0]: + sys.stdin.read(1) + return True + else: + return False + + +class SinaraTester(EnvExperiment): + def build(self): + self.setattr_device("core") + + self.leds = dict() + self.ttl_outs = dict() + self.ttl_ins = dict() + self.urukul_cplds = dict() + self.urukuls = dict() + self.samplers = dict() + self.zotinos = dict() + self.fastinos = dict() + self.phasers = dict() + self.grabbers = dict() + + ddb = self.get_device_db() + for name, desc in ddb.items(): + if isinstance(desc, dict) and desc["type"] == "local": + module, cls = desc["module"], desc["class"] + if (module, cls) == ("artiq.coredevice.ttl", "TTLOut"): + dev = self.get_device(name) + if "led" in name: # guess + self.leds[name] = dev + else: + self.ttl_outs[name] = dev + elif (module, cls) == ("artiq.coredevice.ttl", "TTLInOut"): + self.ttl_ins[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.urukul", "CPLD"): + self.urukul_cplds[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.ad9910", "AD9910"): + self.urukuls[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.ad9912", "AD9912"): + self.urukuls[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.sampler", "Sampler"): + self.samplers[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.zotino", "Zotino"): + self.zotinos[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.fastino", "Fastino"): + self.fastinos[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.phaser", "Phaser"): + self.phasers[name] = self.get_device(name) + elif (module, cls) == ("artiq.coredevice.grabber", "Grabber"): + self.grabbers[name] = self.get_device(name) + + # Remove Urukul, Sampler and Zotino control signals + # from TTL outs (tested separately) + ddb = self.get_device_db() + for name, desc in ddb.items(): + if isinstance(desc, dict) and desc["type"] == "local": + module, 
cls = desc["module"], desc["class"] + if ((module, cls) == ("artiq.coredevice.ad9910", "AD9910") + or (module, cls) == ("artiq.coredevice.ad9912", "AD9912")): + if "sw_device" in desc["arguments"]: + sw_device = desc["arguments"]["sw_device"] + del self.ttl_outs[sw_device] + elif (module, cls) == ("artiq.coredevice.urukul", "CPLD"): + io_update_device = desc["arguments"]["io_update_device"] + del self.ttl_outs[io_update_device] + elif (module, cls) == ("artiq.coredevice.sampler", "Sampler"): + cnv_device = desc["arguments"]["cnv_device"] + del self.ttl_outs[cnv_device] + elif (module, cls) == ("artiq.coredevice.zotino", "Zotino"): + ldac_device = desc["arguments"]["ldac_device"] + clr_device = desc["arguments"]["clr_device"] + del self.ttl_outs[ldac_device] + del self.ttl_outs[clr_device] + + # Sort everything by RTIO channel number + self.leds = sorted(self.leds.items(), key=lambda x: x[1].channel) + self.ttl_outs = sorted(self.ttl_outs.items(), key=lambda x: x[1].channel) + self.ttl_ins = sorted(self.ttl_ins.items(), key=lambda x: x[1].channel) + self.urukuls = sorted(self.urukuls.items(), key=lambda x: (x[1].cpld.bus.channel, x[1].chip_select)) + self.samplers = sorted(self.samplers.items(), key=lambda x: x[1].cnv.channel) + self.zotinos = sorted(self.zotinos.items(), key=lambda x: x[1].bus.channel) + self.fastinos = sorted(self.fastinos.items(), key=lambda x: x[1].channel) + self.phasers = sorted(self.phasers.items(), key=lambda x: x[1].channel_base) + self.grabbers = sorted(self.grabbers.items(), key=lambda x: x[1].channel_base) + + @kernel + def test_led(self, led): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for i in range(3): + led.pulse(100*ms) + delay(100*ms) + + def test_leds(self): + print("*** Testing LEDs.") + print("Check for blinking. Press ENTER when done.") + + for led_name, led_dev in self.leds: + print("Testing LED: {}".format(led_name)) + self.test_led(led_dev) + + @kernel + def test_ttl_out_chunk(self, ttl_chunk): + while not is_enter_pressed(): + self.core.break_realtime() + for _ in range(50000): + i = 0 + for ttl in ttl_chunk: + i += 1 + for _ in range(i): + ttl.pulse(1*us) + delay(1*us) + delay(10*us) + + def test_ttl_outs(self): + print("*** Testing TTL outputs.") + print("Outputs are tested in groups of 4. 
Touch each TTL connector") + print("with the oscilloscope probe tip, and check that the number of") + print("pulses corresponds to its number in the group.") + print("Press ENTER when done.") + + for ttl_chunk in chunker(self.ttl_outs, 4): + print("Testing TTL outputs: {}.".format(", ".join(name for name, dev in ttl_chunk))) + self.test_ttl_out_chunk([dev for name, dev in ttl_chunk]) + + @kernel + def test_ttl_in(self, ttl_out, ttl_in): + n = 42 + self.core.break_realtime() + with parallel: + ttl_in.gate_rising(1*ms) + with sequential: + delay(50*us) + for _ in range(n): + ttl_out.pulse(2*us) + delay(2*us) + return ttl_in.count(now_mu()) == n + + def test_ttl_ins(self): + print("*** Testing TTL inputs.") + if not self.ttl_outs: + print("No TTL output channel available to use as stimulus.") + return + default_ttl_out_name, default_ttl_out_dev = next(iter(self.ttl_outs)) + ttl_out_name = input("TTL device to use as stimulus (default: {}): ".format(default_ttl_out_name)) + if ttl_out_name: + ttl_out_dev = self.get_device(ttl_out_name) + else: + ttl_out_name = default_ttl_out_name + ttl_out_dev = default_ttl_out_dev + for ttl_in_name, ttl_in_dev in self.ttl_ins: + print("Connect {} to {}. Press ENTER when done." + .format(ttl_out_name, ttl_in_name)) + input() + if self.test_ttl_in(ttl_out_dev, ttl_in_dev): + print("PASSED") + else: + print("FAILED") + + @kernel + def init_urukul(self, cpld): + self.core.break_realtime() + cpld.init() + + @kernel + def calibrate_urukul(self, channel): + self.core.break_realtime() + channel.init() + self.core.break_realtime() + sync_delay_seed, _ = channel.tune_sync_delay() + self.core.break_realtime() + io_update_delay = channel.tune_io_update_delay() + return sync_delay_seed, io_update_delay + + @kernel + def setup_urukul(self, channel, frequency): + self.core.break_realtime() + channel.init() + channel.set(frequency*MHz) + channel.cfg_sw(1) + channel.set_att(6.) + + @kernel + def cfg_sw_off_urukul(self, channel): + self.core.break_realtime() + channel.cfg_sw(0) + + @kernel + def rf_switch_wave(self, channels): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for channel in channels: + channel.pulse(100*ms) + delay(100*ms) + + # We assume that RTIO channels for switches are grouped by card. 
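The per-card grouping relies on the chunker() helper defined at the top of this file; the standalone sketch below (channel names are made up, chunker is copied verbatim, and the frequency formula mirrors test_urukuls() below) shows how four channels per card map to one 10 MHz frequency block per Urukul.

def chunker(seq, size):
    # same helper as at the top of this file
    res = []
    for el in seq:
        res.append(el)
        if len(res) == size:
            yield res
            res = []
    if res:
        yield res

channels = ["urukul0_ch0", "urukul0_ch1", "urukul0_ch2", "urukul0_ch3",
            "urukul1_ch0", "urukul1_ch1"]  # hypothetical channel names
for card_n, group in enumerate(chunker(channels, 4)):
    for channel_n, name in enumerate(group):
        # frequency = 10*(card_n + 1) + channel_n, as in test_urukuls()
        print("{}\t{}MHz".format(name, 10*(card_n + 1) + channel_n))
# urukul0 channels get 10..13 MHz, urukul1 channels get 20 and 21 MHz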
+ def test_urukuls(self): + print("*** Testing Urukul DDSes.") + + print("Initializing CPLDs...") + for name, cpld in sorted(self.urukul_cplds.items(), key=lambda x: x[0]): + print(name + "...") + self.init_urukul(cpld) + print("...done") + + print("Calibrating inter-device synchronization...") + for channel_name, channel_dev in self.urukuls: + if (not isinstance(channel_dev, AD9910) or + not isinstance(channel_dev.sync_data, SyncDataEeprom)): + print("{}\tno EEPROM synchronization".format(channel_name)) + else: + eeprom = channel_dev.sync_data.eeprom_device + offset = channel_dev.sync_data.eeprom_offset + sync_delay_seed, io_update_delay = self.calibrate_urukul(channel_dev) + print("{}\t{} {}".format(channel_name, sync_delay_seed, io_update_delay)) + eeprom_word = (sync_delay_seed << 24) | (io_update_delay << 16) + eeprom.write_i32(offset, eeprom_word) + print("...done") + + print("All urukul channels active.") + print("Check each channel amplitude (~1.6Vpp/8dbm at 50ohm) and frequency.") + print("Frequencies:") + for card_n, channels in enumerate(chunker(self.urukuls, 4)): + for channel_n, (channel_name, channel_dev) in enumerate(channels): + frequency = 10*(card_n + 1) + channel_n + print("{}\t{}MHz".format(channel_name, frequency)) + self.setup_urukul(channel_dev, frequency) + print("Press ENTER when done.") + input() + + sw = [channel_dev for channel_name, channel_dev in self.urukuls if hasattr(channel_dev, "sw")] + if sw: + print("Testing RF switch control. Check LEDs at urukul RF ports.") + print("Press ENTER when done.") + for swi in sw: + self.cfg_sw_off_urukul(swi) + self.rf_switch_wave([swi.sw for swi in sw]) + + @kernel + def get_sampler_voltages(self, sampler, cb): + self.core.break_realtime() + sampler.init() + delay(5*ms) + for i in range(8): + sampler.set_gain_mu(i, 0) + delay(100*us) + smp = [0.0]*8 + sampler.sample(smp) + cb(smp) + + def test_samplers(self): + print("*** Testing Sampler ADCs.") + for card_name, card_dev in self.samplers: + print("Testing: ", card_name) + + for channel in range(8): + print("Apply 1.5V to channel {}. 
Press ENTER when done.".format(channel)) + input() + + voltages = [] + def setv(x): + nonlocal voltages + voltages = x + self.get_sampler_voltages(card_dev, setv) + + passed = True + for n, voltage in enumerate(voltages): + if n == channel: + if abs(voltage - 1.5) > 0.2: + passed = False + else: + if abs(voltage) > 0.2: + passed = False + if passed: + print("PASSED") + else: + print("FAILED") + print(" ".join(["{:.1f}".format(x) for x in voltages])) + + @kernel + def set_zotino_voltages(self, zotino, voltages): + self.core.break_realtime() + zotino.init() + delay(200*us) + i = 0 + for voltage in voltages: + zotino.write_dac(i, voltage) + delay(100*us) + i += 1 + zotino.load() + + @kernel + def zotinos_led_wave(self, zotinos): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for zotino in zotinos: + for i in range(8): + zotino.set_leds(1 << i) + delay(100*ms) + zotino.set_leds(0) + delay(100*ms) + + def test_zotinos(self): + print("*** Testing Zotino DACs and USER LEDs.") + print("Voltages:") + for card_n, (card_name, card_dev) in enumerate(self.zotinos): + voltages = [(-1)**i*(2.*card_n + .1*(i//2 + 1)) for i in range(32)] + print(card_name, " ".join(["{:.1f}".format(x) for x in voltages])) + self.set_zotino_voltages(card_dev, voltages) + print("Press ENTER when done.") + # Test switching on/off USR_LEDs at the same time + self.zotinos_led_wave( + [card_dev for _, (__, card_dev) in enumerate(self.zotinos)] + ) + + @kernel + def set_fastino_voltages(self, fastino, voltages): + self.core.break_realtime() + fastino.init() + delay(200*us) + i = 0 + for voltage in voltages: + fastino.set_dac(i, voltage) + delay(100*us) + i += 1 + + @kernel + def fastinos_led_wave(self, fastinos): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(0.2) + while self.core.get_rtio_counter_mu() < t: + pass + for fastino in fastinos: + for i in range(8): + fastino.set_leds(1 << i) + delay(100*ms) + fastino.set_leds(0) + delay(100*ms) + + def test_fastinos(self): + print("*** Testing Fastino DACs and USER LEDs.") + print("Voltages:") + for card_n, (card_name, card_dev) in enumerate(self.fastinos): + voltages = [(-1)**i*(2.*card_n + .1*(i//2 + 1)) for i in range(32)] + print(card_name, " ".join(["{:.1f}".format(x) for x in voltages])) + self.set_fastino_voltages(card_dev, voltages) + print("Press ENTER when done.") + # Test switching on/off USR_LEDs at the same time + self.fastinos_led_wave( + [card_dev for _, (__, card_dev) in enumerate(self.fastinos)] + ) + + @kernel + def set_phaser_frequencies(self, phaser, duc, osc): + self.core.break_realtime() + phaser.init() + delay(1*ms) + phaser.channel[0].set_duc_frequency(duc) + phaser.channel[0].set_duc_cfg() + phaser.channel[0].set_att(6*dB) + phaser.channel[1].set_duc_frequency(-duc) + phaser.channel[1].set_duc_cfg() + phaser.channel[1].set_att(6*dB) + phaser.duc_stb() + delay(1*ms) + for i in range(len(osc)): + phaser.channel[0].oscillator[i].set_frequency(osc[i]) + phaser.channel[0].oscillator[i].set_amplitude_phase(.2) + phaser.channel[1].oscillator[i].set_frequency(-osc[i]) + phaser.channel[1].oscillator[i].set_amplitude_phase(.2) + delay(1*ms) + + @kernel + def phaser_led_wave(self, phasers): + while not is_enter_pressed(): + self.core.break_realtime() + # do not fill the FIFOs 
too much to avoid long response times + t = now_mu() - self.core.seconds_to_mu(.2) + while self.core.get_rtio_counter_mu() < t: + pass + for phaser in phasers: + for i in range(6): + phaser.set_leds(1 << i) + delay(100*ms) + phaser.set_leds(0) + delay(100*ms) + + def test_phasers(self): + print("*** Testing Phaser DACs and 6 USER LEDs.") + print("Frequencies:") + for card_n, (card_name, card_dev) in enumerate(self.phasers): + duc = (card_n + 1)*10*MHz + osc = [i*1*MHz for i in range(5)] + print(card_name, + " ".join(["{:.0f}+{:.0f}".format(duc/MHz, f/MHz) for f in osc]), + "MHz") + self.set_phaser_frequencies(card_dev, duc, osc) + print("Press ENTER when done.") + # Test switching on/off USR_LEDs at the same time + self.phaser_led_wave( + [card_dev for _, (__, card_dev) in enumerate(self.phasers)] + ) + + @kernel + def grabber_capture(self, card_dev, rois): + self.core.break_realtime() + delay(100*us) + mask = 0 + for i in range(len(rois)): + i = rois[i][0] + x0 = rois[i][1] + y0 = rois[i][2] + x1 = rois[i][3] + y1 = rois[i][4] + mask |= 1 << i + card_dev.setup_roi(i, x0, y0, x1, y1) + card_dev.gate_roi(mask) + n = [0]*len(rois) + card_dev.input_mu(n) + self.core.break_realtime() + card_dev.gate_roi(0) + print("ROI sums:", n) + + def test_grabbers(self): + print("*** Testing Grabber Frame Grabbers.") + print("Activate the camera's frame grabber output, type 'g', press " + "ENTER, and trigger the camera.") + print("Just press ENTER to skip the test.") + if input().strip().lower() != "g": + print("skipping...") + return + rois = [[0, 0, 0, 2, 2], [1, 0, 0, 2048, 2048]] + print("ROIs:", rois) + for card_n, (card_name, card_dev) in enumerate(self.grabbers): + print(card_name) + self.grabber_capture(card_dev, rois) + + def run(self): + print("****** Sinara system tester ******") + print("") + self.core.reset() + if self.leds: + self.test_leds() + if self.ttl_outs: + self.test_ttl_outs() + if self.ttl_ins: + self.test_ttl_ins() + if self.urukuls: + self.test_urukuls() + if self.samplers: + self.test_samplers() + if self.zotinos: + self.test_zotinos() + if self.fastinos: + self.test_fastinos() + if self.phasers: + self.test_phasers() + if self.grabbers: + self.test_grabbers() + + +def main(): + device_mgr = DeviceManager(DeviceDB("device_db.py")) + try: + experiment = SinaraTester((device_mgr, None, None, None)) + experiment.prepare() + experiment.run() + experiment.analyze() + finally: + device_mgr.close_devices() + + +if __name__ == "__main__": + main() diff --git a/artiq/frontend/bit2bin.py b/artiq/frontend/bit2bin.py index f511c0559..df3927415 100755 --- a/artiq/frontend/bit2bin.py +++ b/artiq/frontend/bit2bin.py @@ -32,7 +32,7 @@ def bit2bin(bit, bin, flip=False): if key in "abcd": d = bit.read(*struct.unpack(">H", bit.read(2))) assert d.endswith(b"\x00") - d = d.decode() + d = d[:-1].decode() name = { "a": "Design", "b": "Part name", diff --git a/artiq/gateware/amp/__init__.py b/artiq/gateware/amp/__init__.py index df70e9f90..6abb5b594 100644 --- a/artiq/gateware/amp/__init__.py +++ b/artiq/gateware/amp/__init__.py @@ -1 +1 @@ -from artiq.gateware.amp.soc import AMPSoC, build_artiq_soc +from artiq.gateware.amp.soc import AMPSoC diff --git a/artiq/gateware/amp/kernel_cpu.py b/artiq/gateware/amp/kernel_cpu.py index 62a1f6f16..b1c06517e 100644 --- a/artiq/gateware/amp/kernel_cpu.py +++ b/artiq/gateware/amp/kernel_cpu.py @@ -25,9 +25,7 @@ class KernelCPU(Module): self.submodules.cpu = ClockDomainsRenamer("sys_kernel")( mor1kx.MOR1KX( platform, - OPTION_RESET_PC=exec_address, - 
FEATURE_PERFCOUNTERS="ENABLED", - OPTION_PERFCOUNTERS_NUM=7)) + OPTION_RESET_PC=exec_address)) # DRAM access self.wb_sdram = wishbone.Interface() diff --git a/artiq/gateware/amp/soc.py b/artiq/gateware/amp/soc.py index d237822eb..76d79807c 100644 --- a/artiq/gateware/amp/soc.py +++ b/artiq/gateware/amp/soc.py @@ -1,13 +1,8 @@ -import os -import subprocess - from misoc.cores import timer from misoc.interconnect import wishbone -from misoc.integration.builder import * from artiq.gateware.amp.kernel_cpu import KernelCPU from artiq.gateware.amp.mailbox import Mailbox -from artiq import __artiq_dir__ as artiq_dir class AMPSoC: @@ -42,15 +37,3 @@ class AMPSoC: self.add_csr_region(name, self.mem_map[name] | 0x80000000, 32, csrs) - - -def build_artiq_soc(soc, argdict): - builder = Builder(soc, **argdict) - builder.add_software_package("libm") - builder.add_software_package("libunwind") - builder.add_software_package("ksupport", os.path.join(artiq_dir, "firmware", "ksupport")) - builder.add_software_package("runtime", os.path.join(artiq_dir, "firmware", "runtime")) - try: - builder.build() - except subprocess.CalledProcessError as e: - raise SystemExit("Command {} failed".format(" ".join(e.cmd))) diff --git a/artiq/gateware/drtio/__init__.py b/artiq/gateware/drtio/__init__.py index 7e3143e30..6efb44361 100644 --- a/artiq/gateware/drtio/__init__.py +++ b/artiq/gateware/drtio/__init__.py @@ -1,2 +1,2 @@ -from artiq.gateware.drtio.core import DRTIOSatellite, DRTIOMaster - +from artiq.gateware.drtio.core import SyncRTIO, DRTIOSatellite, DRTIOMaster, DRTIORepeater +from artiq.gateware.drtio.aux_controller import DRTIOAuxController diff --git a/artiq/gateware/drtio/aux_controller.py b/artiq/gateware/drtio/aux_controller.py index b60167dea..8effda67d 100644 --- a/artiq/gateware/drtio/aux_controller.py +++ b/artiq/gateware/drtio/aux_controller.py @@ -211,7 +211,7 @@ class Receiver(Module, AutoCSR): # TODO: FullMemoryWE should be applied by migen.build @FullMemoryWE() -class AuxController(Module): +class DRTIOAuxController(Module): def __init__(self, link_layer): self.bus = wishbone.Interface() self.submodules.transmitter = Transmitter(link_layer, len(self.bus.dat_w)) diff --git a/artiq/gateware/drtio/cdc.py b/artiq/gateware/drtio/cdc.py new file mode 100644 index 000000000..9edd8a1b4 --- /dev/null +++ b/artiq/gateware/drtio/cdc.py @@ -0,0 +1,55 @@ +from migen import * + +from migen.genlib.cdc import PulseSynchronizer + + +class CrossDomainRequest(Module): + def __init__(self, domain, + req_stb, req_ack, req_data, + srv_stb, srv_ack, srv_data): + dsync = getattr(self.sync, domain) + + request = PulseSynchronizer("sys", domain) + reply = PulseSynchronizer(domain, "sys") + self.submodules += request, reply + + ongoing = Signal() + self.comb += request.i.eq(~ongoing & req_stb) + self.sync += [ + req_ack.eq(reply.o), + If(req_stb, ongoing.eq(1)), + If(req_ack, ongoing.eq(0)) + ] + if req_data is not None: + req_data_r = Signal.like(req_data) + req_data_r.attr.add("no_retiming") + self.sync += If(req_stb, req_data_r.eq(req_data)) + dsync += [ + If(request.o, srv_stb.eq(1)), + If(srv_ack, srv_stb.eq(0)) + ] + if req_data is not None: + dsync += If(request.o, srv_data.eq(req_data_r)) + self.comb += reply.i.eq(srv_stb & srv_ack) + + +class CrossDomainNotification(Module): + def __init__(self, domain, rdomain, + emi_stb, emi_data, + rec_stb, rec_ack, rec_data): + emi_data_r = Signal(len(emi_data)) + emi_data_r.attr.add("no_retiming") + dsync = getattr(self.sync, domain) + dsync += If(emi_stb, 
emi_data_r.eq(emi_data)) + + ps = PulseSynchronizer(domain, rdomain) + self.submodules += ps + self.comb += ps.i.eq(emi_stb) + rsync = getattr(self.sync, rdomain) + rsync += [ + If(rec_ack, rec_stb.eq(0)), + If(ps.o, + rec_data.eq(emi_data_r), + rec_stb.eq(1) + ) + ] diff --git a/artiq/gateware/drtio/core.py b/artiq/gateware/drtio/core.py index c3da1cb51..6311456e4 100644 --- a/artiq/gateware/drtio/core.py +++ b/artiq/gateware/drtio/core.py @@ -1,12 +1,23 @@ from types import SimpleNamespace from migen import * -from migen.genlib.cdc import ElasticBuffer +from migen.genlib.resetsync import AsyncResetSynchronizer +from migen.genlib.cdc import PulseSynchronizer +from misoc.interconnect.csr import * -from artiq.gateware.drtio import (link_layer, aux_controller, - rt_packet_satellite, rt_ios_satellite, - rt_errors_satellite, - rt_packet_master, rt_controller_master) +from artiq.gateware.rtio import cri, rtlink +from artiq.gateware.rtio.sed.core import * +from artiq.gateware.rtio.input_collector import * +from artiq.gateware.drtio import (link_layer, + rt_packet_satellite, rt_errors_satellite, + rt_packet_master, rt_controller_master, + rt_packet_repeater, rt_controller_repeater) +from artiq.gateware.drtio.rx_synchronizer import GenericRXSynchronizer + + +__all__ = ["ChannelInterface", "TransceiverInterface", + "SyncRTIO", + "DRTIOSatellite", "DRTIOMaster", "DRTIORepeater"] class ChannelInterface: @@ -16,8 +27,10 @@ class ChannelInterface: self.decoders = decoders -class TransceiverInterface: +class TransceiverInterface(AutoCSR): def __init__(self, channel_interfaces): + self.stable_clkin = CSRStorage() + self.txenable = CSRStorage(len(channel_interfaces)) self.clock_domains.cd_rtio = ClockDomain() for i in range(len(channel_interfaces)): name = "rtio_rx" + str(i) @@ -25,39 +38,78 @@ class TransceiverInterface: self.channels = channel_interfaces -class GenericRXSynchronizer(Module): - """Simple RX synchronizer based on the portable Migen elastic buffer. +async_errors_layout = [ + ("sequence_error", 1), + ("sequence_error_channel", 16), + ("collision", 1), + ("collision_channel", 16), + ("busy", 1), + ("busy_channel", 16) +] - Introduces timing non-determinism in the satellite -> master path, - (and in the echo_request/echo_reply RTT) but useful for testing. 
- """ - def __init__(self): - self.signals = [] - def resync(self, signal): - synchronized = Signal.like(signal, related=signal) - self.signals.append((signal, synchronized)) - return synchronized +class SyncRTIO(Module): + def __init__(self, tsc, channels, lane_count=8, fifo_depth=128): + self.cri = cri.Interface() + self.async_errors = Record(async_errors_layout) - def do_finalize(self): - eb = ElasticBuffer(sum(len(s[0]) for s in self.signals), 4, "rtio_rx", "rtio") - self.submodules += eb - self.comb += [ - eb.din.eq(Cat(*[s[0] for s in self.signals])), - Cat(*[s[1] for s in self.signals]).eq(eb.dout) - ] + chan_fine_ts_width = max(max(rtlink.get_fine_ts_width(channel.interface.o) + for channel in channels), + max(rtlink.get_fine_ts_width(channel.interface.i) + for channel in channels)) + assert tsc.glbl_fine_ts_width >= chan_fine_ts_width + + self.submodules.outputs = ClockDomainsRenamer("rio")( + SED(channels, tsc.glbl_fine_ts_width, "sync", + lane_count=lane_count, fifo_depth=fifo_depth, + enable_spread=False, report_buffer_space=True, + interface=self.cri)) + self.comb += self.outputs.coarse_timestamp.eq(tsc.coarse_ts) + self.sync.rtio += self.outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts + 16) + + self.submodules.inputs = ClockDomainsRenamer("rio")( + InputCollector(tsc, channels, "sync", interface=self.cri)) + + for attr, _ in async_errors_layout: + self.comb += getattr(self.async_errors, attr).eq(getattr(self.outputs, attr)) class DRTIOSatellite(Module): - def __init__(self, chanif, channels, rx_synchronizer=None, fine_ts_width=3, full_ts_width=63): - if rx_synchronizer is None: - rx_synchronizer = GenericRXSynchronizer() - self.submodules += rx_synchronizer + def __init__(self, tsc, chanif, rx_synchronizer=None): + self.reset = CSRStorage(reset=1) + self.reset_phy = CSRStorage(reset=1) + self.tsc_loaded = CSR() + # master interface in the rtio domain + self.cri = cri.Interface() + self.async_errors = Record(async_errors_layout) + + self.clock_domains.cd_rio = ClockDomain() + self.clock_domains.cd_rio_phy = ClockDomain() + self.comb += [ + self.cd_rio.clk.eq(ClockSignal("rtio")), + self.cd_rio_phy.clk.eq(ClockSignal("rtio")) + ] + reset = Signal() + reset_phy = Signal() + reset.attr.add("no_retiming") + reset_phy.attr.add("no_retiming") + self.sync += [ + reset.eq(self.reset.storage), + reset_phy.eq(self.reset_phy.storage) + ] + self.specials += [ + AsyncResetSynchronizer(self.cd_rio, reset), + AsyncResetSynchronizer(self.cd_rio_phy, reset_phy) + ] self.submodules.link_layer = link_layer.LinkLayer( chanif.encoder, chanif.decoders) self.comb += self.link_layer.rx_ready.eq(chanif.rx_ready) + if rx_synchronizer is None: + rx_synchronizer = GenericRXSynchronizer() + self.submodules += rx_synchronizer + link_layer_sync = SimpleNamespace( tx_aux_frame=self.link_layer.tx_aux_frame, tx_aux_data=self.link_layer.tx_aux_data, @@ -75,33 +127,33 @@ class DRTIOSatellite(Module): ) self.submodules.link_stats = link_layer.LinkLayerStats(link_layer_sync, "rtio") self.submodules.rt_packet = ClockDomainsRenamer("rtio")( - rt_packet_satellite.RTPacketSatellite(link_layer_sync)) + rt_packet_satellite.RTPacketSatellite(link_layer_sync, interface=self.cri)) + self.comb += self.rt_packet.reset.eq(self.cd_rio.rst) - self.submodules.ios = rt_ios_satellite.IOS( - self.rt_packet, channels, fine_ts_width, full_ts_width) - - self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite( - self.rt_packet, self.ios) - - self.clock_domains.cd_rio = ClockDomain() - self.clock_domains.cd_rio_phy = 
ClockDomain() self.comb += [ - self.cd_rio.clk.eq(ClockSignal("rtio")), - self.cd_rio.rst.eq(self.rt_packet.reset), - self.cd_rio_phy.clk.eq(ClockSignal("rtio")), - self.cd_rio_phy.rst.eq(self.rt_packet.reset_phy), + tsc.load.eq(self.rt_packet.tsc_load), + tsc.load_value.eq(self.rt_packet.tsc_load_value) ] - self.submodules.aux_controller = aux_controller.AuxController( - self.link_layer) + ps_tsc_load = PulseSynchronizer("rtio", "sys") + self.submodules += ps_tsc_load + self.comb += ps_tsc_load.i.eq(self.rt_packet.tsc_load) + self.sync += [ + If(self.tsc_loaded.re, self.tsc_loaded.w.eq(0)), + If(ps_tsc_load.o, self.tsc_loaded.w.eq(1)) + ] + + self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite( + self.rt_packet, tsc, self.async_errors) def get_csrs(self): - return (self.link_layer.get_csrs() + self.link_stats.get_csrs() + - self.rt_errors.get_csrs() + self.aux_controller.get_csrs()) + return ([self.reset, self.reset_phy, self.tsc_loaded] + + self.link_layer.get_csrs() + self.link_stats.get_csrs() + + self.rt_errors.get_csrs()) class DRTIOMaster(Module): - def __init__(self, chanif, channel_count=1024, fine_ts_width=3): + def __init__(self, tsc, chanif): self.submodules.link_layer = link_layer.LinkLayer( chanif.encoder, chanif.decoders) self.comb += self.link_layer.rx_ready.eq(chanif.rx_ready) @@ -109,16 +161,33 @@ class DRTIOMaster(Module): self.submodules.link_stats = link_layer.LinkLayerStats(self.link_layer, "rtio_rx") self.submodules.rt_packet = rt_packet_master.RTPacketMaster(self.link_layer) self.submodules.rt_controller = rt_controller_master.RTController( - self.rt_packet, channel_count, fine_ts_width) - self.submodules.rt_manager = rt_controller_master.RTManager(self.rt_packet) - self.cri = self.rt_controller.cri - - self.submodules.aux_controller = aux_controller.AuxController( - self.link_layer) + tsc, self.rt_packet) def get_csrs(self): return (self.link_layer.get_csrs() + self.link_stats.get_csrs() + - self.rt_controller.get_csrs() + - self.rt_manager.get_csrs() + - self.aux_controller.get_csrs()) + self.rt_controller.get_csrs()) + + @property + def cri(self): + return self.rt_controller.cri + + +class DRTIORepeater(Module): + def __init__(self, tsc, chanif): + self.submodules.link_layer = link_layer.LinkLayer( + chanif.encoder, chanif.decoders) + self.comb += self.link_layer.rx_ready.eq(chanif.rx_ready) + + self.submodules.link_stats = link_layer.LinkLayerStats(self.link_layer, "rtio_rx") + self.submodules.rt_packet = rt_packet_repeater.RTPacketRepeater(tsc, self.link_layer) + self.submodules.rt_controller = rt_controller_repeater.RTController(self.rt_packet) + + def get_csrs(self): + return (self.link_layer.get_csrs() + + self.link_stats.get_csrs() + + self.rt_controller.get_csrs()) + + @property + def cri(self): + return self.rt_packet.cri diff --git a/artiq/gateware/drtio/link_layer.py b/artiq/gateware/drtio/link_layer.py index 50f2ca6a3..a4779e912 100644 --- a/artiq/gateware/drtio/link_layer.py +++ b/artiq/gateware/drtio/link_layer.py @@ -224,7 +224,7 @@ class LinkLayerRX(Module): class LinkLayer(Module, AutoCSR): def __init__(self, encoder, decoders): - self.link_status = CSRStatus() + self.rx_up = CSRStatus() self.rx_disable = CSRStorage() self.tx_force_aux_zero = CSRStorage() self.tx_force_rt_zero = CSRStorage() @@ -254,14 +254,14 @@ class LinkLayer(Module, AutoCSR): # # # - ready = Signal() - ready_r = Signal() - self.sync.rtio += ready_r.eq(ready) - ready_rx = Signal() - ready_r.attr.add("no_retiming") + rx_up = Signal() + rx_up_r = Signal() + 
self.sync.rtio += rx_up_r.eq(rx_up) + rx_up_rx = Signal() + rx_up_r.attr.add("no_retiming") self.specials += [ - MultiReg(ready_r, ready_rx, "rtio_rx"), - MultiReg(ready_r, self.link_status.status)] + MultiReg(rx_up_r, rx_up_rx, "rtio_rx"), + MultiReg(rx_up_r, self.rx_up.status)] tx_force_aux_zero_rtio = Signal() tx_force_rt_zero_rtio = Signal() @@ -286,11 +286,11 @@ class LinkLayer(Module, AutoCSR): # to be recaptured by RXSynchronizer. self.sync.rtio_rx += [ self.rx_aux_stb.eq(rx.aux_stb), - self.rx_aux_frame.eq(rx.aux_frame & ready_rx & ~rx_disable_rx), - self.rx_aux_frame_perm.eq(rx.aux_frame & ready_rx), + self.rx_aux_frame.eq(rx.aux_frame & rx_up_rx & ~rx_disable_rx), + self.rx_aux_frame_perm.eq(rx.aux_frame & rx_up_rx), self.rx_aux_data.eq(rx.aux_data), - self.rx_rt_frame.eq(rx.rt_frame & ready_rx & ~rx_disable_rx), - self.rx_rt_frame_perm.eq(rx.rt_frame & ready_rx), + self.rx_rt_frame.eq(rx.rt_frame & rx_up_rx & ~rx_disable_rx), + self.rx_rt_frame_perm.eq(rx.rt_frame & rx_up_rx), self.rx_rt_data.eq(rx.rt_data) ] @@ -308,7 +308,7 @@ class LinkLayer(Module, AutoCSR): If(wait_scrambler.done, NextState("READY")) ) fsm.act("READY", - ready.eq(1), + rx_up.eq(1), If(~self.rx_ready, NextState("WAIT_RX_READY")) ) diff --git a/artiq/gateware/drtio/rt_controller_master.py b/artiq/gateware/drtio/rt_controller_master.py index d97f418c6..fdd340be8 100644 --- a/artiq/gateware/drtio/rt_controller_master.py +++ b/artiq/gateware/drtio/rt_controller_master.py @@ -3,75 +3,56 @@ from migen import * from migen.genlib.cdc import MultiReg from migen.genlib.misc import WaitTimer -from migen.genlib.resetsync import AsyncResetSynchronizer from misoc.interconnect.csr import * -from artiq.gateware.rtio.cdc import RTIOCounter from artiq.gateware.rtio import cri class _CSRs(AutoCSR): def __init__(self): + self.reset = CSRStorage() + self.protocol_error = CSR(3) - self.chan_sel_override = CSRStorage(16) - self.chan_sel_override_en = CSRStorage() - - self.tsc_correction = CSRStorage(64) self.set_time = CSR() - self.underflow_margin = CSRStorage(16, reset=200) + self.underflow_margin = CSRStorage(16, reset=300) - self.reset = CSR() - self.reset_phy = CSR() + self.force_destination = CSRStorage() + self.destination = CSRStorage(8) - self.o_get_fifo_space = CSR() - self.o_dbg_fifo_space = CSRStatus(16) - self.o_dbg_last_timestamp = CSRStatus(64) - self.o_dbg_fifo_space_req_cnt = CSRStatus(32) - self.o_reset_channel_status = CSR() + self.o_get_buffer_space = CSR() + self.o_dbg_buffer_space = CSRStatus(16) + self.o_dbg_buffer_space_req_cnt = CSRStatus(32) self.o_wait = CSRStatus() class RTController(Module): - def __init__(self, rt_packet, channel_count, fine_ts_width): + def __init__(self, tsc, rt_packet): self.csrs = _CSRs() self.cri = cri.Interface() # protocol errors err_unknown_packet_type = Signal() err_packet_truncated = Signal() - signal_fifo_space_timeout = Signal() - err_fifo_space_timeout = Signal() - self.sync.sys_with_rst += [ + signal_buffer_space_timeout = Signal() + err_buffer_space_timeout = Signal() + self.sync += [ If(self.csrs.protocol_error.re, If(self.csrs.protocol_error.r[0], err_unknown_packet_type.eq(0)), If(self.csrs.protocol_error.r[1], err_packet_truncated.eq(0)), - If(self.csrs.protocol_error.r[2], err_fifo_space_timeout.eq(0)) + If(self.csrs.protocol_error.r[2], err_buffer_space_timeout.eq(0)) ), If(rt_packet.err_unknown_packet_type, err_unknown_packet_type.eq(1)), If(rt_packet.err_packet_truncated, err_packet_truncated.eq(1)), - If(signal_fifo_space_timeout, err_fifo_space_timeout.eq(1)) 
+ If(signal_buffer_space_timeout, err_buffer_space_timeout.eq(1)) ] self.comb += self.csrs.protocol_error.w.eq( - Cat(err_unknown_packet_type, err_packet_truncated, err_fifo_space_timeout)) + Cat(err_unknown_packet_type, err_packet_truncated, err_buffer_space_timeout)) - # channel selection - chan_sel = Signal(16) - self.comb += chan_sel.eq( - Mux(self.csrs.chan_sel_override_en.storage, - self.csrs.chan_sel_override.storage, - self.cri.chan_sel[:16])) - - # master RTIO counter and counter synchronization - self.submodules.counter = RTIOCounter(64-fine_ts_width) - self.comb += self.cri.counter.eq(self.counter.value_sys << fine_ts_width) - tsc_correction = Signal(64) - self.csrs.tsc_correction.storage.attr.add("no_retiming") - self.specials += MultiReg(self.csrs.tsc_correction.storage, tsc_correction) + # TSC synchronization self.comb += [ - rt_packet.tsc_value.eq( - self.counter.value_rtio + tsc_correction), + rt_packet.tsc_value.eq(tsc.coarse_ts), self.csrs.set_time.w.eq(rt_packet.set_time_stb) ] self.sync += [ @@ -79,51 +60,25 @@ class RTController(Module): If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1)) ] - # reset - self.sync += [ - If(rt_packet.reset_ack, rt_packet.reset_stb.eq(0)), - If(self.csrs.reset.re, - rt_packet.reset_stb.eq(1), - rt_packet.reset_phy.eq(0) - ), - If(self.csrs.reset_phy.re, - rt_packet.reset_stb.eq(1), - rt_packet.reset_phy.eq(1) - ), - ] - - local_reset = Signal(reset=1) - self.sync += local_reset.eq(self.csrs.reset.re) - local_reset.attr.add("no_retiming") - self.clock_domains.cd_sys_with_rst = ClockDomain() - self.clock_domains.cd_rtio_with_rst = ClockDomain() - self.comb += [ - self.cd_sys_with_rst.clk.eq(ClockSignal()), - self.cd_sys_with_rst.rst.eq(local_reset) - ] - self.comb += self.cd_rtio_with_rst.clk.eq(ClockSignal("rtio")) - self.specials += AsyncResetSynchronizer(self.cd_rtio_with_rst, local_reset) - - # remote channel status cache - fifo_spaces_mem = Memory(16, channel_count) - fifo_spaces = fifo_spaces_mem.get_port(write_capable=True) - self.specials += fifo_spaces_mem, fifo_spaces - last_timestamps_mem = Memory(64, channel_count) - last_timestamps = last_timestamps_mem.get_port(write_capable=True) - self.specials += last_timestamps_mem, last_timestamps + # chan_sel forcing + chan_sel = Signal(24) + self.comb += chan_sel.eq(Mux(self.csrs.force_destination.storage, + self.csrs.destination.storage << 16, + self.cri.chan_sel)) # common packet fields - rt_packet_fifo_request = Signal() + rt_packet_buffer_request = Signal() rt_packet_read_request = Signal() self.comb += [ - fifo_spaces.adr.eq(chan_sel), - last_timestamps.adr.eq(chan_sel), - last_timestamps.dat_w.eq(self.cri.timestamp), - rt_packet.sr_channel.eq(chan_sel), + rt_packet.sr_chan_sel.eq(chan_sel), rt_packet.sr_address.eq(self.cri.o_address), rt_packet.sr_data.eq(self.cri.o_data), - rt_packet.sr_timestamp.eq(self.cri.timestamp), - If(rt_packet_fifo_request, + If(rt_packet_read_request, + rt_packet.sr_timestamp.eq(self.cri.i_timeout) + ).Else( + rt_packet.sr_timestamp.eq(self.cri.o_timestamp) + ), + If(rt_packet_buffer_request, rt_packet.sr_notwrite.eq(1), rt_packet.sr_address.eq(0) ), @@ -136,29 +91,41 @@ class RTController(Module): # output status o_status_wait = Signal() o_status_underflow = Signal() - o_status_sequence_error = Signal() self.comb += [ - self.cri.o_status.eq(Cat( - o_status_wait, o_status_underflow, o_status_sequence_error)), + self.cri.o_status.eq(Cat(o_status_wait, o_status_underflow)), self.csrs.o_wait.status.eq(o_status_wait) ] - o_sequence_error_set = Signal() 
o_underflow_set = Signal() - self.sync.sys_with_rst += [ + self.sync += [ If(self.cri.cmd == cri.commands["write"], - o_status_underflow.eq(0), - o_status_sequence_error.eq(0), + o_status_underflow.eq(0) ), - If(o_underflow_set, o_status_underflow.eq(1)), - If(o_sequence_error_set, o_status_sequence_error.eq(1)) + If(o_underflow_set, o_status_underflow.eq(1)) ] timeout_counter = WaitTimer(8191) self.submodules += timeout_counter - cond_sequence_error = self.cri.timestamp < last_timestamps.dat_r - cond_underflow = ((self.cri.timestamp[fine_ts_width:] - - self.csrs.underflow_margin.storage[fine_ts_width:]) < self.counter.value_sys) + cond_underflow = Signal() + self.comb += cond_underflow.eq((self.cri.o_timestamp[tsc.glbl_fine_ts_width:] + - self.csrs.underflow_margin.storage[tsc.glbl_fine_ts_width:]) < tsc.coarse_ts_sys) + + # buffer space + buffer_space = Memory(16, 256) + buffer_space_port = buffer_space.get_port(write_capable=True) + self.specials += buffer_space, buffer_space_port + + buffer_space_load = Signal() + buffer_space_dec = Signal() + self.comb += [ + buffer_space_port.adr.eq(chan_sel[16:]), + buffer_space_port.we.eq(buffer_space_load | buffer_space_dec), + If(buffer_space_load, + buffer_space_port.dat_w.eq(rt_packet.buffer_space) + ).Else( + buffer_space_port.dat_w.eq(buffer_space_port.dat_r - 1) + ) + ] # input status i_status_wait_event = Signal() @@ -168,7 +135,7 @@ class RTController(Module): i_status_wait_event, i_status_overflow, i_status_wait_status)) load_read_reply = Signal() - self.sync.sys_with_rst += [ + self.sync += [ If(load_read_reply, i_status_wait_event.eq(0), i_status_overflow.eq(0), @@ -185,61 +152,56 @@ class RTController(Module): ] # FSM - fsm = ClockDomainsRenamer("sys_with_rst")(FSM()) + fsm = FSM() self.submodules += fsm fsm.act("IDLE", If(self.cri.cmd == cri.commands["write"], - If(cond_sequence_error, - o_sequence_error_set.eq(1) - ).Elif(cond_underflow, + If(cond_underflow, o_underflow_set.eq(1) ).Else( NextState("WRITE") ) ), If(self.cri.cmd == cri.commands["read"], NextState("READ")), - If(self.csrs.o_get_fifo_space.re, NextState("GET_FIFO_SPACE")) + If(self.csrs.o_get_buffer_space.re, NextState("GET_BUFFER_SPACE")) ) fsm.act("WRITE", o_status_wait.eq(1), rt_packet.sr_stb.eq(1), If(rt_packet.sr_ack, - fifo_spaces.we.eq(1), - fifo_spaces.dat_w.eq(fifo_spaces.dat_r - 1), - last_timestamps.we.eq(1), - If(fifo_spaces.dat_r <= 1, - NextState("GET_FIFO_SPACE") + buffer_space_dec.eq(1), + If(buffer_space_port.dat_r <= 1, + NextState("GET_BUFFER_SPACE") ).Else( NextState("IDLE") ) ) ) - fsm.act("GET_FIFO_SPACE", + fsm.act("GET_BUFFER_SPACE", o_status_wait.eq(1), - rt_packet.fifo_space_not_ack.eq(1), - rt_packet_fifo_request.eq(1), + rt_packet.buffer_space_not_ack.eq(1), + rt_packet_buffer_request.eq(1), rt_packet.sr_stb.eq(1), If(rt_packet.sr_ack, - NextState("GET_FIFO_SPACE_REPLY") + NextState("GET_BUFFER_SPACE_REPLY") ) ) - fsm.act("GET_FIFO_SPACE_REPLY", + fsm.act("GET_BUFFER_SPACE_REPLY", o_status_wait.eq(1), - fifo_spaces.dat_w.eq(rt_packet.fifo_space), - fifo_spaces.we.eq(1), - rt_packet.fifo_space_not_ack.eq(1), - If(rt_packet.fifo_space_not, - If(rt_packet.fifo_space != 0, + buffer_space_load.eq(1), + rt_packet.buffer_space_not_ack.eq(1), + If(rt_packet.buffer_space_not, + If(rt_packet.buffer_space != 0, NextState("IDLE") ).Else( - NextState("GET_FIFO_SPACE") + NextState("GET_BUFFER_SPACE") ) ), timeout_counter.wait.eq(1), If(timeout_counter.done, - signal_fifo_space_timeout.eq(1), - NextState("GET_FIFO_SPACE") + signal_buffer_space_timeout.eq(1), 
+ NextState("IDLE") ) ) fsm.act("READ", @@ -254,51 +216,19 @@ class RTController(Module): fsm.act("GET_READ_REPLY", i_status_wait_status.eq(1), rt_packet.read_not_ack.eq(1), - If(rt_packet.read_not, + If(self.csrs.reset.storage | rt_packet.read_not, load_read_reply.eq(1), NextState("IDLE") ) ) - # channel state access - self.comb += [ - self.csrs.o_dbg_fifo_space.status.eq(fifo_spaces.dat_r), - self.csrs.o_dbg_last_timestamp.status.eq(last_timestamps.dat_r), - If(self.csrs.o_reset_channel_status.re, - fifo_spaces.dat_w.eq(0), - fifo_spaces.we.eq(1), - last_timestamps.dat_w.eq(0), - last_timestamps.we.eq(1) - ) - ] + # debug CSRs + self.comb += self.csrs.o_dbg_buffer_space.status.eq(buffer_space_port.dat_r), self.sync += \ - If((rt_packet.sr_stb & rt_packet.sr_ack & rt_packet_fifo_request), - self.csrs.o_dbg_fifo_space_req_cnt.status.eq( - self.csrs.o_dbg_fifo_space_req_cnt.status + 1) + If((rt_packet.sr_stb & rt_packet.sr_ack & rt_packet_buffer_request), + self.csrs.o_dbg_buffer_space_req_cnt.status.eq( + self.csrs.o_dbg_buffer_space_req_cnt.status + 1) ) def get_csrs(self): return self.csrs.get_csrs() - - -class RTManager(Module, AutoCSR): - def __init__(self, rt_packet): - self.request_echo = CSR() - - self.update_packet_cnt = CSR() - self.packet_cnt_tx = CSRStatus(32) - self.packet_cnt_rx = CSRStatus(32) - - # # # - - self.comb += self.request_echo.w.eq(rt_packet.echo_stb) - self.sync += [ - If(rt_packet.echo_ack, rt_packet.echo_stb.eq(0)), - If(self.request_echo.re, rt_packet.echo_stb.eq(1)) - ] - - self.sync += \ - If(self.update_packet_cnt.re, - self.packet_cnt_tx.status.eq(rt_packet.packet_cnt_tx), - self.packet_cnt_rx.status.eq(rt_packet.packet_cnt_rx) - ) diff --git a/artiq/gateware/drtio/rt_controller_repeater.py b/artiq/gateware/drtio/rt_controller_repeater.py new file mode 100644 index 000000000..0fe10d1de --- /dev/null +++ b/artiq/gateware/drtio/rt_controller_repeater.py @@ -0,0 +1,61 @@ +from migen import * +from migen.genlib.cdc import MultiReg, BlindTransfer + +from misoc.interconnect.csr import * + +from artiq.gateware.drtio.cdc import CrossDomainRequest + + +class RTController(Module, AutoCSR): + def __init__(self, rt_packet): + self.reset = CSRStorage() + self.set_time = CSR() + self.protocol_error = CSR(4) + self.command_missed_cmd = CSRStatus(2) + self.command_missed_chan_sel = CSRStatus(24) + self.buffer_space_timeout_dest = CSRStatus(8) + + self.specials += MultiReg(self.reset.storage, rt_packet.reset, "rtio") + + set_time_stb = Signal() + set_time_ack = Signal() + self.submodules += CrossDomainRequest("rtio", + set_time_stb, set_time_ack, None, + rt_packet.set_time_stb, rt_packet.set_time_ack, None) + self.sync += [ + If(set_time_ack, set_time_stb.eq(0)), + If(self.set_time.re, set_time_stb.eq(1)) + ] + self.comb += self.set_time.w.eq(set_time_stb) + + errors = [ + (rt_packet.err_unknown_packet_type, "rtio_rx", None, None), + (rt_packet.err_packet_truncated, "rtio_rx", None, None), + (rt_packet.err_command_missed, "rtio", + Cat(rt_packet.command_missed_cmd, rt_packet.command_missed_chan_sel), + Cat(self.command_missed_cmd.status, self.command_missed_chan_sel.status)), + (rt_packet.err_buffer_space_timeout, "rtio", + rt_packet.buffer_space_destination, self.buffer_space_timeout_dest.status) + ] + + for n, (err_i, err_cd, din, dout) in enumerate(errors): + if din is not None: + data_width = len(din) + else: + data_width = 0 + + xfer = BlindTransfer(err_cd, "sys", data_width=data_width) + self.submodules += xfer + + self.comb += xfer.i.eq(err_i) + + err_pending = 
Signal() + self.sync += [ + If(self.protocol_error.re & self.protocol_error.r[n], err_pending.eq(0)), + If(xfer.o, err_pending.eq(1)) + ] + self.comb += self.protocol_error.w[n].eq(err_pending) + + if din is not None: + self.comb += xfer.data_i.eq(din) + self.sync += If(xfer.o & ~err_pending, dout.eq(xfer.data_o)) diff --git a/artiq/gateware/drtio/rt_errors_satellite.py b/artiq/gateware/drtio/rt_errors_satellite.py index e93a7bc9c..0fcbd024f 100644 --- a/artiq/gateware/drtio/rt_errors_satellite.py +++ b/artiq/gateware/drtio/rt_errors_satellite.py @@ -1,37 +1,86 @@ """Protocol error reporting for satellites.""" from migen import * -from misoc.interconnect.csr import * +from migen.genlib.cdc import BlindTransfer -from artiq.gateware.rtio.cdc import BlindTransfer +from misoc.interconnect.csr import * class RTErrorsSatellite(Module, AutoCSR): - def __init__(self, rt_packet, ios): + def __init__(self, rt_packet, tsc, async_errors): self.protocol_error = CSR(5) - self.rtio_error = CSR(2) + self.underflow_channel = CSRStatus(16) + self.underflow_timestamp_event = CSRStatus(64) + self.underflow_timestamp_counter = CSRStatus(64) + self.buffer_space_timeout_dest = CSRStatus(8) + + self.rtio_error = CSR(3) + self.sequence_error_channel = CSRStatus(16) + self.collision_channel = CSRStatus(16) + self.busy_channel = CSRStatus(16) def error_csr(csr, *sources): - for n, source in enumerate(sources): - pending = Signal(related=source) - xfer = BlindTransfer(odomain="sys") + for n, (source, detect_edges, din, dout) in enumerate(sources): + assert isinstance(source, Signal) + + if din is not None: + data_width = len(din) + else: + data_width = 0 + xfer = BlindTransfer("rio", "sys", data_width=data_width) self.submodules += xfer - self.comb += xfer.i.eq(source) + + if detect_edges: + source_r = Signal() + self.sync.rio += source_r.eq(source) + self.comb += xfer.i.eq(source & ~source_r) + else: + self.comb += xfer.i.eq(source) + + pending = Signal(related=source) self.sync += [ If(csr.re & csr.r[n], pending.eq(0)), If(xfer.o, pending.eq(1)) ] self.comb += csr.w[n].eq(pending) - # The master is normally responsible for avoiding output overflows, - # output underflows, and sequence errors. - # Error reports here are only for diagnosing internal ARTIQ bugs. - error_csr(self.protocol_error, - rt_packet.unknown_packet_type, - rt_packet.packet_truncated, - ios.write_underflow, - ios.write_overflow, - ios.write_sequence_error) + if din is not None: + self.comb += xfer.data_i.eq(din) + self.sync += If(xfer.o & ~pending, dout.eq(xfer.data_o)) + + cri = rt_packet.cri + + # The master is normally responsible for avoiding output overflows + # and output underflows. The error reports here are only for diagnosing + # internal ARTIQ bugs. 
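+        # On the satellite CRI, o_status bit 0 is overflow and bit 1 is
+        # underflow; both are edge-detected (detect_edges=True) before being
+        # latched into protocol_error. On an underflow, the offending channel,
+        # the event timestamp and the current TSC value are captured into the
+        # underflow_* status CSRs for diagnostics.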
+ underflow = Signal() + overflow = Signal() + underflow_error_cri = Signal(16+64+64) + underflow_error_csr = Signal(16+64+64) + self.comb += [ + underflow.eq(cri.o_status[1]), + overflow.eq(cri.o_status[0]), + underflow_error_cri.eq(Cat(cri.chan_sel[:16], + cri.o_timestamp, + tsc.full_ts_cri)), + Cat(self.underflow_channel.status, + self.underflow_timestamp_event.status, + self.underflow_timestamp_counter.status).eq(underflow_error_csr) + ] + error_csr(self.protocol_error, + (rt_packet.unknown_packet_type, False, None, None), + (rt_packet.packet_truncated, False, None, None), + (rt_packet.buffer_space_timeout, False, + cri.chan_sel[16:], self.buffer_space_timeout_dest.status), + (underflow, True, underflow_error_cri, underflow_error_csr), + (overflow, True, None, None) + ) + error_csr(self.rtio_error, - ios.collision, - ios.busy) + (async_errors.sequence_error, False, + async_errors.sequence_error_channel, self.sequence_error_channel.status), + (async_errors.collision, False, + async_errors.collision_channel, self.collision_channel.status), + (async_errors.busy, False, + async_errors.busy_channel, self.busy_channel.status) + ) diff --git a/artiq/gateware/drtio/rt_ios_satellite.py b/artiq/gateware/drtio/rt_ios_satellite.py deleted file mode 100644 index f9fc9096e..000000000 --- a/artiq/gateware/drtio/rt_ios_satellite.py +++ /dev/null @@ -1,246 +0,0 @@ -"""Real-time I/O scheduler for satellites""" - -from migen import * -from migen.genlib.fifo import SyncFIFOBuffered -from migen.genlib.record import * - -from artiq.gateware.rtio import rtlink - - -class IOS(Module): - def __init__(self, rt_packet, channels, max_fine_ts_width, full_ts_width): - self.write_underflow = Signal() - self.write_overflow = Signal() - self.write_sequence_error = Signal() - self.collision = Signal() - self.busy = Signal() - - self.rt_packet = rt_packet - self.max_fine_ts_width = max_fine_ts_width - - self.tsc = Signal(full_ts_width - max_fine_ts_width) - self.sync.rtio += \ - If(rt_packet.tsc_load, - self.tsc.eq(rt_packet.tsc_load_value) - ).Else( - self.tsc.eq(self.tsc + 1) - ) - self.comb += rt_packet.tsc_input.eq(self.tsc) - - self.sync.rio += [ - self.write_underflow.eq(0), - self.write_overflow.eq(0), - self.collision.eq(0), - self.busy.eq(0) - ] - for n, channel in enumerate(channels): - self.add_output(n, channel) - self.add_input(n, channel) - - def add_output(self, n, channel): - rt_packet = self.rt_packet - max_fine_ts_width = self.max_fine_ts_width - - interface = channel.interface.o - data_width = rtlink.get_data_width(interface) - address_width = rtlink.get_address_width(interface) - fine_ts_width = rtlink.get_fine_ts_width(interface) - assert fine_ts_width <= max_fine_ts_width - - we = Signal() - self.comb += we.eq(rt_packet.write_stb - & (rt_packet.write_channel == n)) - write_timestamp = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:] - write_timestamp_coarse = rt_packet.write_timestamp[max_fine_ts_width:] - write_timestamp_fine = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:max_fine_ts_width] - - # latency compensation - if interface.delay: - tsc_comp = Signal.like(self.tsc) - self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1) - else: - tsc_comp = self.tsc - - # FIFO - ev_layout = [] - if data_width: - ev_layout.append(("data", data_width)) - if address_width: - ev_layout.append(("address", address_width)) - ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width)) - - fifo = ClockDomainsRenamer("rio")( - SyncFIFOBuffered(layout_len(ev_layout), 
channel.ofifo_depth)) - self.submodules += fifo - fifo_in = Record(ev_layout) - fifo_out = Record(ev_layout) - self.comb += [ - fifo.din.eq(fifo_in.raw_bits()), - fifo_out.raw_bits().eq(fifo.dout) - ] - - # Buffer - buf_pending = Signal() - buf = Record(ev_layout) - buf_just_written = Signal() - - # Special cases - replace = Signal() - sequence_error = Signal() - collision = Signal() - any_error = Signal() - if interface.enable_replace: - # Note: replace may be asserted at the same time as collision - # when addresses are different. In that case, it is a collision. - self.sync.rio += replace.eq(write_timestamp == buf.timestamp) - # Detect sequence errors on coarse timestamps only - # so that they are mutually exclusive with collision errors. - self.sync.rio += sequence_error.eq(write_timestamp_coarse < buf.timestamp[fine_ts_width:]) - if interface.enable_replace: - if address_width: - different_addresses = rt_packet.write_address != buf.address - else: - different_addresses = 0 - if fine_ts_width: - self.sync.rio += collision.eq( - (write_timestamp_coarse == buf.timestamp[fine_ts_width:]) - & ((write_timestamp_fine != buf.timestamp[:fine_ts_width]) - |different_addresses)) - else: - self.sync.rio += collision.eq( - (write_timestamp == buf.timestamp) & different_addresses) - else: - self.sync.rio += collision.eq( - write_timestamp_coarse == buf.timestamp[fine_ts_width:]) - self.comb += any_error.eq(sequence_error | collision) - self.sync.rio += [ - If(we & sequence_error, self.write_sequence_error.eq(1)), - If(we & collision, self.collision.eq(1)) - ] - - # Buffer read and FIFO write - self.comb += fifo_in.eq(buf) - in_guard_time = Signal() - self.comb += in_guard_time.eq( - buf.timestamp[fine_ts_width:] < tsc_comp + 4) - self.sync.rio += If(in_guard_time, buf_pending.eq(0)) - report_underflow = Signal() - self.comb += \ - If(buf_pending, - If(in_guard_time, - If(buf_just_written, - report_underflow.eq(1) - ).Else( - fifo.we.eq(1) - ) - ), - If(we & ~replace & ~any_error, - fifo.we.eq(1) - ) - ) - self.sync.rio += If(report_underflow, self.write_underflow.eq(1)) - - # Buffer write - # Must come after read to handle concurrent read+write properly - self.sync.rio += [ - buf_just_written.eq(0), - If(we & ~any_error, - buf_just_written.eq(1), - buf_pending.eq(1), - buf.timestamp.eq(write_timestamp), - buf.data.eq(rt_packet.write_data) if data_width else [], - buf.address.eq(rt_packet.write_address) if address_width else [], - ), - If(we & ~fifo.writable, self.write_overflow.eq(1)) - ] - - # FIFO level - self.sync.rio += \ - If(rt_packet.fifo_space_update & - (rt_packet.fifo_space_channel == n), - rt_packet.fifo_space.eq(channel.ofifo_depth - fifo.level)) - - # FIFO read - self.sync.rio += [ - fifo.re.eq(0), - interface.stb.eq(0), - If(fifo.readable & - (fifo_out.timestamp[fine_ts_width:] == tsc_comp), - fifo.re.eq(1), - interface.stb.eq(1) - ) - ] - if data_width: - self.sync.rio += interface.data.eq(fifo_out.data) - if address_width: - self.sync.rio += interface.address.eq(fifo_out.address) - if fine_ts_width: - self.sync.rio += interface.fine_ts.eq(fifo_out.timestamp[:fine_ts_width]) - - self.sync.rio += If(interface.stb & interface.busy, self.busy.eq(1)) - - def add_input(self, n, channel): - rt_packet = self.rt_packet - - interface = channel.interface.i - if interface is None: - return - data_width = rtlink.get_data_width(interface) - fine_ts_width = rtlink.get_fine_ts_width(interface) - - selected = Signal() - self.comb += selected.eq(rt_packet.read_channel == n) - - # latency compensation 
- if interface.delay: - tsc_comp = Signal.like(self.tsc) - self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1) - else: - tsc_comp = self.tsc - - # FIFO - ev_layout = [] - if data_width: - ev_layout.append(("data", data_width)) - if interface.timestamped: - ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width)) - - fifo = ClockDomainsRenamer("rio")( - SyncFIFOBuffered(layout_len(ev_layout), channel.ififo_depth)) - self.submodules += fifo - fifo_in = Record(ev_layout) - fifo_out = Record(ev_layout) - self.comb += [ - fifo.din.eq(fifo_in.raw_bits()), - fifo_out.raw_bits().eq(fifo.dout) - ] - - # FIFO write - if data_width: - self.comb += fifo_in.data.eq(interface.data) - if interface.timestamped: - if fine_ts_width: - full_ts = Cat(interface.fine_ts, tsc_comp) - else: - full_ts = tsc_comp - self.comb += fifo_in.timestamp.eq(full_ts) - self.comb += fifo.we.eq(interface.stb) - - overflow = Signal() - self.comb += If(selected, rt_packet.read_overflow.eq(overflow)) - self.sync.rio += [ - If(selected & rt_packet.read_overflow_ack, overflow.eq(0)), - If(fifo.we & ~fifo.writable, overflow.eq(1)) - ] - - # FIFO read - if data_width: - self.comb += If(selected, rt_packet.read_data.eq(fifo_out.data)) - if interface.timestamped: - self.comb += If(selected, rt_packet.read_timestamp.eq(fifo_out.timestamp)) - self.comb += [ - If(selected, - rt_packet.read_readable.eq(fifo.readable), - fifo.re.eq(rt_packet.read_consume) - ) - ] diff --git a/artiq/gateware/drtio/rt_packet_master.py b/artiq/gateware/drtio/rt_packet_master.py index a32004ebd..4fd26f85d 100644 --- a/artiq/gateware/drtio/rt_packet_master.py +++ b/artiq/gateware/drtio/rt_packet_master.py @@ -3,88 +3,38 @@ from migen import * from migen.genlib.fsm import * from migen.genlib.fifo import AsyncFIFO -from migen.genlib.cdc import PulseSynchronizer +from migen.genlib.cdc import BlindTransfer -from artiq.gateware.rtio.cdc import GrayCodeTransfer, BlindTransfer +from artiq.gateware.rtio.cdc import GrayCodeTransfer +from artiq.gateware.drtio.cdc import CrossDomainRequest, CrossDomainNotification from artiq.gateware.drtio.rt_serializer import * -class _CrossDomainRequest(Module): - def __init__(self, domain, - req_stb, req_ack, req_data, - srv_stb, srv_ack, srv_data): - dsync = getattr(self.sync, domain) - - request = PulseSynchronizer("sys", domain) - reply = PulseSynchronizer(domain, "sys") - self.submodules += request, reply - - ongoing = Signal() - self.comb += request.i.eq(~ongoing & req_stb) - self.sync += [ - req_ack.eq(reply.o), - If(req_stb, ongoing.eq(1)), - If(req_ack, ongoing.eq(0)) - ] - if req_data is not None: - req_data_r = Signal.like(req_data) - req_data_r.attr.add("no_retiming") - self.sync += If(req_stb, req_data_r.eq(req_data)) - dsync += [ - If(request.o, srv_stb.eq(1)), - If(srv_ack, srv_stb.eq(0)) - ] - if req_data is not None: - dsync += If(request.o, srv_data.eq(req_data_r)) - self.comb += reply.i.eq(srv_stb & srv_ack) - - -class _CrossDomainNotification(Module): - def __init__(self, domain, - emi_stb, emi_data, - rec_stb, rec_ack, rec_data): - emi_data_r = Signal(len(emi_data)) - emi_data_r.attr.add("no_retiming") - dsync = getattr(self.sync, domain) - dsync += If(emi_stb, emi_data_r.eq(emi_data)) - - ps = PulseSynchronizer(domain, "sys") - self.submodules += ps - self.comb += ps.i.eq(emi_stb) - self.sync += [ - If(rec_ack, rec_stb.eq(0)), - If(ps.o, - rec_data.eq(emi_data_r), - rec_stb.eq(1) - ) - ] - - class RTPacketMaster(Module): def __init__(self, link_layer, sr_fifo_depth=4): # all interface signals in sys 
domain unless otherwise specified # standard request interface # - # notwrite=1 address=0 FIFO space request + # notwrite=1 address=0 buffer space request # notwrite=1 address=1 read request # # optimized for write throughput # requests are performed on the DRTIO link preserving their order of issue - # this is important for FIFO space requests, which have to be ordered + # this is important for buffer space requests, which have to be ordered # wrt writes. self.sr_stb = Signal() self.sr_ack = Signal() self.sr_notwrite = Signal() self.sr_timestamp = Signal(64) - self.sr_channel = Signal(16) - self.sr_address = Signal(16) + self.sr_chan_sel = Signal(24) + self.sr_address = Signal(8) self.sr_data = Signal(512) - # fifo space reply interface - self.fifo_space_not = Signal() - self.fifo_space_not_ack = Signal() - self.fifo_space = Signal(16) + # buffer space reply interface + self.buffer_space_not = Signal() + self.buffer_space_not_ack = Signal() + self.buffer_space = Signal(16) # read reply interface self.read_not = Signal() @@ -111,11 +61,6 @@ class RTPacketMaster(Module): # a set_time request pending self.tsc_value = Signal(64) - # reset interface - self.reset_stb = Signal() - self.reset_ack = Signal() - self.reset_phy = Signal() - # rx errors self.err_unknown_packet_type = Signal() self.err_packet_truncated = Signal() @@ -140,20 +85,20 @@ class RTPacketMaster(Module): self.submodules += rx_dp # Write FIFO and extra data count - sr_fifo = ClockDomainsRenamer({"write": "sys_with_rst", "read": "rtio_with_rst"})( - AsyncFIFO(1+64+16+16+512, sr_fifo_depth)) + sr_fifo = ClockDomainsRenamer({"write": "sys", "read": "rtio"})( + AsyncFIFO(1+64+24+8+512, sr_fifo_depth)) self.submodules += sr_fifo sr_notwrite_d = Signal() sr_timestamp_d = Signal(64) - sr_channel_d = Signal(16) - sr_address_d = Signal(16) + sr_chan_sel_d = Signal(24) + sr_address_d = Signal(8) sr_data_d = Signal(512) self.comb += [ sr_fifo.we.eq(self.sr_stb), self.sr_ack.eq(sr_fifo.writable), - sr_fifo.din.eq(Cat(self.sr_notwrite, self.sr_timestamp, self.sr_channel, + sr_fifo.din.eq(Cat(self.sr_notwrite, self.sr_timestamp, self.sr_chan_sel, self.sr_address, self.sr_data)), - Cat(sr_notwrite_d, sr_timestamp_d, sr_channel_d, + Cat(sr_notwrite_d, sr_timestamp_d, sr_chan_sel_d, sr_address_d, sr_data_d).eq(sr_fifo.dout) ] @@ -170,15 +115,15 @@ class RTPacketMaster(Module): sr_notwrite = Signal() sr_timestamp = Signal(64) - sr_channel = Signal(16) - sr_address = Signal(16) + sr_chan_sel = Signal(24) + sr_address = Signal(8) sr_extra_data_cnt = Signal(8) sr_data = Signal(512) self.sync.rtio += If(sr_fifo.re, sr_notwrite.eq(sr_notwrite_d), sr_timestamp.eq(sr_timestamp_d), - sr_channel.eq(sr_channel_d), + sr_chan_sel.eq(sr_chan_sel_d), sr_address.eq(sr_address_d), sr_data.eq(sr_data_d)) @@ -209,28 +154,21 @@ class RTPacketMaster(Module): ) # CDC - fifo_space_not = Signal() - fifo_space = Signal(16) - self.submodules += _CrossDomainNotification("rtio_rx", - fifo_space_not, fifo_space, - self.fifo_space_not, self.fifo_space_not_ack, self.fifo_space) + buffer_space_not = Signal() + buffer_space = Signal(16) + self.submodules += CrossDomainNotification("rtio_rx", "sys", + buffer_space_not, buffer_space, + self.buffer_space_not, self.buffer_space_not_ack, self.buffer_space) set_time_stb = Signal() set_time_ack = Signal() - self.submodules += _CrossDomainRequest("rtio", + self.submodules += CrossDomainRequest("rtio", self.set_time_stb, self.set_time_ack, None, set_time_stb, set_time_ack, None) - reset_stb = Signal() - reset_ack = Signal() - reset_phy 
= Signal() - self.submodules += _CrossDomainRequest("rtio", - self.reset_stb, self.reset_ack, self.reset_phy, - reset_stb, reset_ack, reset_phy) - echo_stb = Signal() echo_ack = Signal() - self.submodules += _CrossDomainRequest("rtio", + self.submodules += CrossDomainRequest("rtio", self.echo_stb, self.echo_ack, None, echo_stb, echo_ack, None) @@ -239,7 +177,7 @@ class RTPacketMaster(Module): read_is_overflow = Signal() read_data = Signal(32) read_timestamp = Signal(64) - self.submodules += _CrossDomainNotification("rtio_rx", + self.submodules += CrossDomainNotification("rtio_rx", "sys", read_not, Cat(read_no_event, read_is_overflow, read_data, read_timestamp), @@ -271,10 +209,14 @@ class RTPacketMaster(Module): self.sync.rtio += If(tsc_value_load, tsc_value.eq(self.tsc_value)) tx_fsm.act("IDLE", + # Ensure 2 cycles between frames on the link. + NextState("READY") + ) + tx_fsm.act("READY", If(sr_buf_readable, If(sr_notwrite, Case(sr_address[0], { - 0: NextState("FIFO_SPACE"), + 0: NextState("BUFFER_SPACE"), 1: NextState("READ") }), ).Else( @@ -287,15 +229,13 @@ class RTPacketMaster(Module): ).Elif(set_time_stb, tsc_value_load.eq(1), NextState("SET_TIME") - ).Elif(reset_stb, - NextState("RESET") ) ) ) tx_fsm.act("WRITE", tx_dp.send("write", timestamp=sr_timestamp, - channel=sr_channel, + chan_sel=sr_chan_sel, address=sr_address, extra_data_cnt=sr_extra_data_cnt, short_data=sr_data[:short_data_len]), @@ -316,15 +256,15 @@ class RTPacketMaster(Module): NextState("IDLE") ) ) - tx_fsm.act("FIFO_SPACE", - tx_dp.send("fifo_space_request", channel=sr_channel), + tx_fsm.act("BUFFER_SPACE", + tx_dp.send("buffer_space_request", destination=sr_chan_sel[16:]), If(tx_dp.packet_last, sr_buf_re.eq(1), NextState("IDLE") ) ) tx_fsm.act("READ", - tx_dp.send("read_request", channel=sr_channel, timeout=sr_timestamp), + tx_dp.send("read_request", chan_sel=sr_chan_sel, timeout=sr_timestamp), If(tx_dp.packet_last, sr_buf_re.eq(1), NextState("IDLE") @@ -344,13 +284,6 @@ class RTPacketMaster(Module): NextState("IDLE") ) ) - tx_fsm.act("RESET", - tx_dp.send("reset", phy=reset_phy), - If(tx_dp.packet_last, - reset_ack.eq(1), - NextState("IDLE") - ) - ) # RX FSM rx_fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="INPUT")) @@ -369,7 +302,7 @@ class RTPacketMaster(Module): If(rx_dp.packet_last, Case(rx_dp.packet_type, { rx_plm.types["echo_reply"]: echo_received_now.eq(1), - rx_plm.types["fifo_space_reply"]: NextState("FIFO_SPACE"), + rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), rx_plm.types["read_reply"]: NextState("READ_REPLY"), rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), "default": err_unknown_packet_type.i.eq(1) @@ -382,9 +315,9 @@ class RTPacketMaster(Module): err_packet_truncated.i.eq(1) ) ) - rx_fsm.act("FIFO_SPACE", - fifo_space_not.eq(1), - fifo_space.eq(rx_dp.packet_as["fifo_space_reply"].space), + rx_fsm.act("BUFFER_SPACE", + buffer_space_not.eq(1), + buffer_space.eq(rx_dp.packet_as["buffer_space_reply"].space), NextState("INPUT") ) rx_fsm.act("READ_REPLY", diff --git a/artiq/gateware/drtio/rt_packet_repeater.py b/artiq/gateware/drtio/rt_packet_repeater.py new file mode 100644 index 000000000..9374d1a18 --- /dev/null +++ b/artiq/gateware/drtio/rt_packet_repeater.py @@ -0,0 +1,334 @@ +from migen import * +from migen.genlib.fsm import * +from migen.genlib.misc import WaitTimer + + +from artiq.gateware.rtio import cri +from artiq.gateware.drtio.cdc import CrossDomainNotification +from artiq.gateware.drtio.rt_serializer import * + + +class RTPacketRepeater(Module): + 
def __init__(self, tsc, link_layer): + # in rtio domain + self.reset = Signal() + + # CRI target interface in rtio domain + self.cri = cri.Interface() + + # in rtio_rx domain + self.err_unknown_packet_type = Signal() + self.err_packet_truncated = Signal() + + # in rtio domain + self.err_command_missed = Signal() + self.command_missed_cmd = Signal(2) + self.command_missed_chan_sel = Signal(24) + self.err_buffer_space_timeout = Signal() + self.buffer_space_destination = Signal(8) + + # set_time interface, in rtio domain + self.set_time_stb = Signal() + self.set_time_ack = Signal() + + # # # + + # RX/TX datapath + assert len(link_layer.tx_rt_data) == len(link_layer.rx_rt_data) + assert len(link_layer.tx_rt_data) % 8 == 0 + ws = len(link_layer.tx_rt_data) + tx_plm = get_m2s_layouts(ws) + tx_dp = ClockDomainsRenamer("rtio")(TransmitDatapath( + link_layer.tx_rt_frame, link_layer.tx_rt_data, tx_plm)) + self.submodules += tx_dp + rx_plm = get_s2m_layouts(ws) + rx_dp = ClockDomainsRenamer("rtio_rx")(ReceiveDatapath( + link_layer.rx_rt_frame, link_layer.rx_rt_data, rx_plm)) + self.submodules += rx_dp + + # TSC sync + tsc_value = Signal(64) + tsc_value_load = Signal() + self.sync.rtio += If(tsc_value_load, tsc_value.eq(tsc.coarse_ts)) + + # CRI buffer stage 1 + cb0_loaded = Signal() + cb0_ack = Signal() + + cb0_cmd = Signal(2) + cb0_timestamp = Signal(64) + cb0_chan_sel = Signal(24) + cb0_o_address = Signal(8) + cb0_o_data = Signal(512) + self.sync.rtio += [ + If(self.reset | cb0_ack, + cb0_loaded.eq(0), + cb0_cmd.eq(cri.commands["nop"]) + ), + If(~self.reset & ~cb0_loaded & (self.cri.cmd != cri.commands["nop"]), + cb0_loaded.eq(1), + cb0_cmd.eq(self.cri.cmd), + If(self.cri.cmd == cri.commands["read"], + cb0_timestamp.eq(self.cri.i_timeout) + ).Else( + cb0_timestamp.eq(self.cri.o_timestamp) + ), + cb0_chan_sel.eq(self.cri.chan_sel), + cb0_o_address.eq(self.cri.o_address), + cb0_o_data.eq(self.cri.o_data) + ), + self.err_command_missed.eq(cb0_loaded & (self.cri.cmd != cri.commands["nop"])), + self.command_missed_chan_sel.eq(self.cri.chan_sel), + self.command_missed_cmd.eq(self.cri.cmd) + ] + + # CRI buffer stage 2 and write data slicer + cb_loaded = Signal() + cb_ack = Signal() + + cb_cmd = Signal(2) + cb_timestamp = Signal(64) + cb_chan_sel = Signal(24) + cb_o_address = Signal(8) + cb_o_data = Signal(512) + self.sync.rtio += [ + If(self.reset | cb_ack, + cb_loaded.eq(0), + cb_cmd.eq(cri.commands["nop"]) + ), + If(~self.reset & ~cb_loaded & cb0_loaded, + cb_loaded.eq(1), + cb_cmd.eq(cb0_cmd), + cb_timestamp.eq(cb0_timestamp), + cb_chan_sel.eq(cb0_chan_sel), + cb_o_address.eq(cb0_o_address), + cb_o_data.eq(cb0_o_data) + ) + ] + self.comb += cb0_ack.eq(~cb_loaded) + + wb_extra_data_cnt = Signal(8) + short_data_len = tx_plm.field_length("write", "short_data") + wb_extra_data_a = Signal(512) + self.comb += wb_extra_data_a.eq(self.cri.o_data[short_data_len:]) + for i in range(512//ws): + self.sync.rtio += If(self.cri.cmd == cri.commands["write"], + If(wb_extra_data_a[ws*i:ws*(i+1)] != 0, wb_extra_data_cnt.eq(i+1))) + + wb_extra_data = Signal(512) + self.sync.rtio += If(self.cri.cmd == cri.commands["write"], + wb_extra_data.eq(wb_extra_data_a)) + + extra_data_ce = Signal() + extra_data_last = Signal() + extra_data_counter = Signal(max=512//ws+1) + self.comb += [ + Case(extra_data_counter, + {i+1: tx_dp.raw_data.eq(wb_extra_data[i*ws:(i+1)*ws]) + for i in range(512//ws)}), + extra_data_last.eq(extra_data_counter == wb_extra_data_cnt) + ] + self.sync.rtio += \ + If(extra_data_ce, + 
extra_data_counter.eq(extra_data_counter + 1), + ).Else( + extra_data_counter.eq(1) + ) + + # Buffer space + self.sync.rtio += If(self.cri.cmd == cri.commands["get_buffer_space"], + self.buffer_space_destination.eq(self.cri.chan_sel[16:])) + + rx_buffer_space_not = Signal() + rx_buffer_space = Signal(16) + buffer_space_not = Signal() + buffer_space_not_ack = Signal() + self.submodules += CrossDomainNotification("rtio_rx", "rtio", + rx_buffer_space_not, rx_buffer_space, + buffer_space_not, buffer_space_not_ack, + self.cri.o_buffer_space) + + timeout_counter = ClockDomainsRenamer("rtio")(WaitTimer(8191)) + self.submodules += timeout_counter + + # Read + read_not = Signal() + read_no_event = Signal() + read_is_overflow = Signal() + read_data = Signal(32) + read_timestamp = Signal(64) + rtio_read_not = Signal() + rtio_read_not_ack = Signal() + rtio_read_no_event = Signal() + rtio_read_is_overflow = Signal() + rtio_read_data = Signal(32) + rtio_read_timestamp = Signal(64) + self.submodules += CrossDomainNotification("rtio_rx", "rtio", + read_not, + Cat(read_no_event, read_is_overflow, read_data, read_timestamp), + + rtio_read_not, rtio_read_not_ack, + Cat(rtio_read_no_event, rtio_read_is_overflow, + rtio_read_data, rtio_read_timestamp)) + self.comb += [ + read_is_overflow.eq(rx_dp.packet_as["read_reply_noevent"].overflow), + read_data.eq(rx_dp.packet_as["read_reply"].data), + read_timestamp.eq(rx_dp.packet_as["read_reply"].timestamp) + ] + + # input status + i_status_wait_event = Signal() + i_status_overflow = Signal() + self.comb += self.cri.i_status.eq(Cat( + i_status_wait_event, i_status_overflow, cb0_loaded | cb_loaded)) + + load_read_reply = Signal() + self.sync.rtio += [ + If(load_read_reply, + i_status_wait_event.eq(0), + i_status_overflow.eq(0), + If(rtio_read_no_event, + If(rtio_read_is_overflow, + i_status_overflow.eq(1) + ).Else( + i_status_wait_event.eq(1) + ) + ), + self.cri.i_data.eq(rtio_read_data), + self.cri.i_timestamp.eq(rtio_read_timestamp) + ) + ] + + # TX and CRI FSM + tx_fsm = ClockDomainsRenamer("rtio")(FSM(reset_state="IDLE")) + self.submodules += tx_fsm + + tx_fsm.act("IDLE", + # Ensure 2 cycles between frames on the link. 
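+ # Passing through IDLE and then READY keeps tx_rt_frame deasserted
+ # for at least two cycles before the next packet is sent; command
+ # dispatch itself happens in the READY state below.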
+ NextState("READY") + ) + tx_fsm.act("READY", + If(self.set_time_stb, + tsc_value_load.eq(1), + NextState("SET_TIME") + ).Else( + If(cb_cmd == cri.commands["write"], NextState("WRITE")), + If(cb_cmd == cri.commands["get_buffer_space"], NextState("BUFFER_SPACE")), + If(cb_cmd == cri.commands["read"], NextState("READ")) + ) + ) + + tx_fsm.act("SET_TIME", + tx_dp.send("set_time", timestamp=tsc_value), + If(tx_dp.packet_last, + self.set_time_ack.eq(1), + NextState("IDLE") + ) + ) + + tx_fsm.act("WRITE", + tx_dp.send("write", + timestamp=cb_timestamp, + chan_sel=cb_chan_sel, + address=cb_o_address, + extra_data_cnt=wb_extra_data_cnt, + short_data=cb_o_data[:short_data_len]), + If(tx_dp.packet_last, + If(wb_extra_data_cnt == 0, + cb_ack.eq(1), + NextState("IDLE") + ).Else( + NextState("WRITE_EXTRA") + ) + ) + ) + tx_fsm.act("WRITE_EXTRA", + tx_dp.raw_stb.eq(1), + extra_data_ce.eq(1), + If(extra_data_last, + cb_ack.eq(1), + NextState("IDLE") + ) + ) + + tx_fsm.act("BUFFER_SPACE", + tx_dp.send("buffer_space_request", destination=self.buffer_space_destination), + If(tx_dp.packet_last, + buffer_space_not_ack.eq(1), + NextState("GET_BUFFER_SPACE_REPLY") + ) + ) + tx_fsm.act("GET_BUFFER_SPACE_REPLY", + timeout_counter.wait.eq(1), + If(timeout_counter.done, + self.err_buffer_space_timeout.eq(1), + cb_ack.eq(1), + NextState("READY") + ).Else( + If(buffer_space_not, + self.cri.o_buffer_space_valid.eq(1), + cb_ack.eq(1), + NextState("READY") + ), + ) + ) + + tx_fsm.act("READ", + tx_dp.send("read_request", + chan_sel=cb_chan_sel, + timeout=cb_timestamp), + rtio_read_not_ack.eq(1), + If(tx_dp.packet_last, + NextState("GET_READ_REPLY") + ) + ) + tx_fsm.act("GET_READ_REPLY", + rtio_read_not_ack.eq(1), + If(self.reset | rtio_read_not, + load_read_reply.eq(1), + cb_ack.eq(1), + NextState("READY") + ) + ) + + # RX FSM + rx_fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="INPUT")) + self.submodules += rx_fsm + + ongoing_packet_next = Signal() + ongoing_packet = Signal() + self.sync.rtio_rx += ongoing_packet.eq(ongoing_packet_next) + + rx_fsm.act("INPUT", + If(rx_dp.frame_r, + rx_dp.packet_buffer_load.eq(1), + If(rx_dp.packet_last, + Case(rx_dp.packet_type, { + rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"), + rx_plm.types["read_reply"]: NextState("READ_REPLY"), + rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"), + "default": self.err_unknown_packet_type.eq(1) + }) + ).Else( + ongoing_packet_next.eq(1) + ) + ), + If(~rx_dp.frame_r & ongoing_packet, + self.err_packet_truncated.eq(1) + ) + ) + rx_fsm.act("BUFFER_SPACE", + rx_buffer_space_not.eq(1), + rx_buffer_space.eq(rx_dp.packet_as["buffer_space_reply"].space), + NextState("INPUT") + ) + rx_fsm.act("READ_REPLY", + read_not.eq(1), + read_no_event.eq(0), + NextState("INPUT") + ) + rx_fsm.act("READ_REPLY_NOEVENT", + read_not.eq(1), + read_no_event.eq(1), + NextState("INPUT") + ) diff --git a/artiq/gateware/drtio/rt_packet_satellite.py b/artiq/gateware/drtio/rt_packet_satellite.py index 249f8ca27..79a48f493 100644 --- a/artiq/gateware/drtio/rt_packet_satellite.py +++ b/artiq/gateware/drtio/rt_packet_satellite.py @@ -2,41 +2,26 @@ from migen import * from migen.genlib.fsm import * +from migen.genlib.misc import WaitTimer +from artiq.gateware.rtio import cri from artiq.gateware.drtio.rt_serializer import * class RTPacketSatellite(Module): - def __init__(self, link_layer): + def __init__(self, link_layer, interface=None): + self.reset = Signal() + self.unknown_packet_type = Signal() self.packet_truncated = Signal() + 
self.buffer_space_timeout = Signal() self.tsc_load = Signal() self.tsc_load_value = Signal(64) - self.tsc_input = Signal(64) - self.reset = Signal(reset=1) - self.reset_phy = Signal(reset=1) - - self.fifo_space_channel = Signal(16) - self.fifo_space_update = Signal() - self.fifo_space = Signal(16) - - # write parameters are stable one cycle before stb is asserted, - # and when stb is asserted. - self.write_stb = Signal() - self.write_timestamp = Signal(64) - self.write_channel = Signal(16) - self.write_address = Signal(16) - self.write_data = Signal(512) - - self.read_channel = Signal(16) - self.read_readable = Signal() - self.read_consume = Signal() - self.read_data = Signal(32) - self.read_timestamp = Signal(64) - self.read_overflow = Signal() - self.read_overflow_ack = Signal() + if interface is None: + interface = cri.Interface() + self.cri = interface # # # @@ -69,55 +54,54 @@ class RTPacketSatellite(Module): # RX->TX echo_req = Signal() - fifo_space_set = Signal() - fifo_space_req = Signal() - fifo_space_ack = Signal() + buffer_space_set = Signal() + buffer_space_req = Signal() + buffer_space_ack = Signal() self.sync += [ - If(fifo_space_ack, fifo_space_req.eq(0)), - If(fifo_space_set, fifo_space_req.eq(1)), + If(buffer_space_ack, buffer_space_req.eq(0)), + If(buffer_space_set, buffer_space_req.eq(1)), ] - # RX FSM - self.comb += [ - self.tsc_load_value.eq( - rx_dp.packet_as["set_time"].timestamp), - self.fifo_space_channel.eq( - rx_dp.packet_as["fifo_space_request"].channel), - self.write_timestamp.eq( - rx_dp.packet_as["write"].timestamp), - self.write_channel.eq( - rx_dp.packet_as["write"].channel), - self.write_address.eq( - rx_dp.packet_as["write"].address), - self.write_data.eq( - Cat(rx_dp.packet_as["write"].short_data, write_data_buffer)), - ] - - reset = Signal() - reset_phy = Signal() - self.sync += [ - self.reset.eq(reset), - self.reset_phy.eq(reset_phy) - ] + buffer_space_update = Signal() + buffer_space = Signal(16) + self.sync += If(buffer_space_update, buffer_space.eq(self.cri.o_buffer_space)) load_read_request = Signal() clear_read_request = Signal() read_request_pending = Signal() - read_request_time_limit = Signal(64) - read_request_timeout = Signal() - read_request_wait = Signal() # 1 cycle latency channel→(data,overflow) and time_limit→timeout self.sync += [ If(clear_read_request | self.reset, read_request_pending.eq(0) ), - read_request_wait.eq(0), If(load_read_request, read_request_pending.eq(1), - read_request_wait.eq(1), - self.read_channel.eq(rx_dp.packet_as["read_request"].channel), - read_request_time_limit.eq(rx_dp.packet_as["read_request"].timeout) + ) + ] + + # RX FSM + cri_read = Signal() + cri_buffer_space = Signal() + self.comb += [ + self.tsc_load_value.eq( + rx_dp.packet_as["set_time"].timestamp), + If(cri_read | read_request_pending, + self.cri.chan_sel.eq( + rx_dp.packet_as["read_request"].chan_sel), + ).Elif(cri_buffer_space, + self.cri.chan_sel.eq( + rx_dp.packet_as["buffer_space_request"].destination << 16) + ).Else( + self.cri.chan_sel.eq( + rx_dp.packet_as["write"].chan_sel), ), - read_request_timeout.eq(self.tsc_input >= read_request_time_limit), + self.cri.i_timeout.eq( + rx_dp.packet_as["read_request"].timeout), + self.cri.o_timestamp.eq( + rx_dp.packet_as["write"].timestamp), + self.cri.o_address.eq( + rx_dp.packet_as["write"].address), + self.cri.o_data.eq( + Cat(rx_dp.packet_as["write"].short_data, write_data_buffer)), ] rx_fsm = FSM(reset_state="INPUT") @@ -127,6 +111,9 @@ class RTPacketSatellite(Module): ongoing_packet = Signal() 
self.sync += ongoing_packet.eq(ongoing_packet_next) + timeout_counter = WaitTimer(8191) + self.submodules += timeout_counter + rx_fsm.act("INPUT", If(rx_dp.frame_r, rx_dp.packet_buffer_load.eq(1), @@ -136,9 +123,8 @@ class RTPacketSatellite(Module): # mechanism rx_plm.types["echo_request"]: echo_req.eq(1), rx_plm.types["set_time"]: NextState("SET_TIME"), - rx_plm.types["reset"]: NextState("RESET"), rx_plm.types["write"]: NextState("WRITE"), - rx_plm.types["fifo_space_request"]: NextState("FIFO_SPACE"), + rx_plm.types["buffer_space_request"]: NextState("BUFFER_SPACE_REQUEST"), rx_plm.types["read_request"]: NextState("READ_REQUEST"), "default": self.unknown_packet_type.eq(1) }) @@ -154,19 +140,11 @@ class RTPacketSatellite(Module): self.tsc_load.eq(1), NextState("INPUT") ) - rx_fsm.act("RESET", - If(rx_dp.packet_as["reset"].phy, - reset_phy.eq(1) - ).Else( - reset.eq(1) - ), - NextState("INPUT") - ) + # CRI mux defaults to write information rx_fsm.act("WRITE", If(write_data_buffer_cnt == rx_dp.packet_as["write"].extra_data_cnt, - self.write_stb.eq(1), - NextState("INPUT") + NextState("WRITE_CMD") ).Else( write_data_buffer_load.eq(1), If(~rx_dp.frame_r, @@ -175,14 +153,41 @@ class RTPacketSatellite(Module): ) ) ) - rx_fsm.act("FIFO_SPACE", - fifo_space_set.eq(1), - self.fifo_space_update.eq(1), + rx_fsm.act("WRITE_CMD", + self.cri.cmd.eq(cri.commands["write"]), NextState("INPUT") ) + rx_fsm.act("BUFFER_SPACE_REQUEST", + cri_buffer_space.eq(1), + NextState("BUFFER_SPACE_REQUEST_CMD") + ) + rx_fsm.act("BUFFER_SPACE_REQUEST_CMD", + cri_buffer_space.eq(1), + self.cri.cmd.eq(cri.commands["get_buffer_space"]), + NextState("BUFFER_SPACE") + ) + rx_fsm.act("BUFFER_SPACE", + cri_buffer_space.eq(1), + timeout_counter.wait.eq(1), + If(timeout_counter.done, + self.buffer_space_timeout.eq(1), + NextState("INPUT") + ).Elif(self.cri.o_buffer_space_valid, + buffer_space_set.eq(1), + buffer_space_update.eq(1), + NextState("INPUT") + ) + ) + rx_fsm.act("READ_REQUEST", + cri_read.eq(1), + NextState("READ_REQUEST_CMD") + ) + rx_fsm.act("READ_REQUEST_CMD", load_read_request.eq(1), + cri_read.eq(1), + self.cri.cmd.eq(cri.commands["read"]), NextState("INPUT") ) @@ -192,11 +197,11 @@ class RTPacketSatellite(Module): tx_fsm.act("IDLE", If(echo_req, NextState("ECHO")), - If(fifo_space_req, NextState("FIFO_SPACE")), - If(~read_request_wait & read_request_pending, - If(read_request_timeout, NextState("READ_TIMEOUT")), - If(self.read_overflow, NextState("READ_OVERFLOW")), - If(self.read_readable, NextState("READ")) + If(buffer_space_req, NextState("BUFFER_SPACE")), + If(read_request_pending & ~self.cri.i_status[2], + NextState("READ"), + If(self.cri.i_status[0], NextState("READ_TIMEOUT")), + If(self.cri.i_status[1], NextState("READ_OVERFLOW")) ) ) @@ -205,9 +210,9 @@ class RTPacketSatellite(Module): If(tx_dp.packet_last, NextState("IDLE")) ) - tx_fsm.act("FIFO_SPACE", - fifo_space_ack.eq(1), - tx_dp.send("fifo_space_reply", space=self.fifo_space), + tx_fsm.act("BUFFER_SPACE", + buffer_space_ack.eq(1), + tx_dp.send("buffer_space_reply", space=buffer_space), If(tx_dp.packet_last, NextState("IDLE")) ) @@ -219,18 +224,14 @@ class RTPacketSatellite(Module): tx_fsm.act("READ_OVERFLOW", tx_dp.send("read_reply_noevent", overflow=1), clear_read_request.eq(1), - If(tx_dp.packet_last, - self.read_overflow_ack.eq(1), - NextState("IDLE") - ) + If(tx_dp.packet_last, NextState("IDLE")) ) tx_fsm.act("READ", tx_dp.send("read_reply", - timestamp=self.read_timestamp, - data=self.read_data), - clear_read_request.eq(1), + 
timestamp=self.cri.i_timestamp, + data=self.cri.i_data), If(tx_dp.packet_last, - self.read_consume.eq(1), + clear_read_request.eq(1), NextState("IDLE") ) ) diff --git a/artiq/gateware/drtio/rt_serializer.py b/artiq/gateware/drtio/rt_serializer.py index df1e2e5c6..01e5cf19e 100644 --- a/artiq/gateware/drtio/rt_serializer.py +++ b/artiq/gateware/drtio/rt_serializer.py @@ -18,7 +18,7 @@ class PacketLayoutManager: self.layouts = dict() self.types = dict() self.type_names = dict() - + def add_type(self, name, *fields, pad=True): type_n = len(self.types) self.types[name] = type_n @@ -47,16 +47,15 @@ def get_m2s_layouts(alignment): plm.add_type("echo_request") plm.add_type("set_time", ("timestamp", 64)) - plm.add_type("reset", ("phy", 1)) plm.add_type("write", ("timestamp", 64), - ("channel", 16), - ("address", 16), + ("chan_sel", 24), + ("address", 8), ("extra_data_cnt", 8), ("short_data", short_data_len)) - plm.add_type("fifo_space_request", ("channel", 16)) + plm.add_type("buffer_space_request", ("destination", 8)) - plm.add_type("read_request", ("channel", 16), ("timeout", 64)) + plm.add_type("read_request", ("chan_sel", 24), ("timeout", 64)) return plm @@ -66,7 +65,7 @@ def get_s2m_layouts(alignment): plm.add_type("echo_reply") - plm.add_type("fifo_space_reply", ("space", 16)) + plm.add_type("buffer_space_reply", ("space", 16)) plm.add_type("read_reply", ("timestamp", 64), ("data", 32)) plm.add_type("read_reply_noevent", ("overflow", 1)) # overflow=0→timeout @@ -110,7 +109,7 @@ class ReceiveDatapath(Module): packet_buffer_count = Signal(max=w_in_packet+1) self.sync += \ If(self.packet_buffer_load, - Case(packet_buffer_count, + Case(packet_buffer_count, {i: packet_buffer[i*ws:(i+1)*ws].eq(self.data_r) for i in range(w_in_packet)}), packet_buffer_count.eq(packet_buffer_count + 1) diff --git a/artiq/gateware/drtio/rx_synchronizer.py b/artiq/gateware/drtio/rx_synchronizer.py new file mode 100644 index 000000000..90fddb97c --- /dev/null +++ b/artiq/gateware/drtio/rx_synchronizer.py @@ -0,0 +1,62 @@ +from migen import * +from migen.genlib.cdc import ElasticBuffer + + +class GenericRXSynchronizer(Module): + """Simple RX synchronizer based on the portable Migen elastic buffer. + + Introduces timing non-determinism in the satellite RX path, e.g. + echo_request/echo_reply RTT and TSC sync, but useful for testing. + """ + def __init__(self): + self.signals = [] + + def resync(self, signal): + synchronized = Signal.like(signal, related=signal) + self.signals.append((signal, synchronized)) + return synchronized + + def do_finalize(self): + eb = ElasticBuffer(sum(len(s[0]) for s in self.signals), 4, "rtio_rx", "rtio") + self.submodules += eb + self.comb += [ + eb.din.eq(Cat(*[s[0] for s in self.signals])), + Cat(*[s[1] for s in self.signals]).eq(eb.dout) + ] + + +class XilinxRXSynchronizer(Module): + """Deterministic RX synchronizer using a relatively placed macro + to put the clock-domain-crossing FFs right next to each other. + + To meet setup/hold constraints at the receiving FFs, adjust the phase shift + of the jitter cleaner. + + We assume that FPGA routing variations are small enough to be negligible. 
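+
+ Each resynchronized bit is registered twice, once in the rtio_rx domain
+ and once in the rtio domain; RLOC constraints keep each pair of FFs
+ adjacent so that the routing delay between them is fixed.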
+ """ + def __init__(self): + self.signals = [] + + def resync(self, signal): + synchronized = Signal.like(signal, related=signal) + self.signals.append((signal, synchronized)) + return synchronized + + def do_finalize(self): + l = sum(len(s[0]) for s in self.signals) + din = Signal(l) + inter = Signal(l) + dout = Signal(l) + self.comb += [ + din.eq(Cat(*[s[0] for s in self.signals])), + Cat(*[s[1] for s in self.signals]).eq(dout) + ] + + for i in range(l): + hu_set = ("HU_SET", "drtio_rx_synchronizer") + self.specials += [ + Instance("FD", i_C=ClockSignal("rtio_rx"), i_D=din[i], o_Q=inter[i], + attr={hu_set, ("RLOC", "X0Y{}".format(i))}), + Instance("FD", i_C=ClockSignal("rtio"), i_D=inter[i], o_Q=dout[i], + attr={hu_set, ("RLOC", "X1Y{}".format(i))}) + ] diff --git a/artiq/gateware/drtio/siphaser.py b/artiq/gateware/drtio/siphaser.py new file mode 100644 index 000000000..81dacaed0 --- /dev/null +++ b/artiq/gateware/drtio/siphaser.py @@ -0,0 +1,114 @@ +from migen import * +from migen.genlib.cdc import MultiReg, PulseSynchronizer + +from misoc.interconnect.csr import * + + +# This code assumes 125/62.5MHz reference clock and 125MHz or 150MHz RTIO +# frequency. + +class SiPhaser7Series(Module, AutoCSR): + def __init__(self, si5324_clkin, rx_synchronizer, + ref_clk=None, ref_div2=False, ultrascale=False, rtio_clk_freq=150e6): + self.switch_clocks = CSRStorage() + self.phase_shift = CSR() + self.phase_shift_done = CSRStatus(reset=1) + self.error = CSR() + + assert rtio_clk_freq in (125e6, 150e6) + + # 125MHz/62.5MHz reference clock to 125MHz/150MHz. VCO @ 750MHz. + # Used to provide a startup clock to the transceiver through the Si, + # we do not use the crystal reference so that the PFD (f3) frequency + # can be high. + mmcm_freerun_fb = Signal() + mmcm_freerun_output_raw = Signal() + self.specials += \ + Instance("MMCME2_BASE", + p_CLKIN1_PERIOD=16.0 if ref_div2 else 8.0, + i_CLKIN1=ClockSignal("sys") if ref_clk is None else ref_clk, + i_RST=ResetSignal("sys") if ref_clk is None else 0, + + p_CLKFBOUT_MULT_F=12.0 if ref_div2 else 6.0, + p_DIVCLK_DIVIDE=1, + + o_CLKFBOUT=mmcm_freerun_fb, i_CLKFBIN=mmcm_freerun_fb, + + p_CLKOUT0_DIVIDE_F=750e6/rtio_clk_freq, + o_CLKOUT0=mmcm_freerun_output_raw, + ) + if ultrascale: + mmcm_freerun_output = Signal() + self.specials += Instance("BUFG", i_I=mmcm_freerun_output_raw, o_O=mmcm_freerun_output) + else: + mmcm_freerun_output = mmcm_freerun_output_raw + + # 125MHz/150MHz to 125MHz/150MHz with controllable phase shift, + # VCO @ 1000MHz/1200MHz. + # Inserted between CDR and output to Si, used to correct + # non-determinstic skew of Si5324. 
+ mmcm_ps_fb = Signal() + mmcm_ps_output = Signal() + mmcm_ps_psdone = Signal() + self.specials += \ + Instance("MMCME2_ADV", + p_CLKIN1_PERIOD=1e9/rtio_clk_freq, + i_CLKIN1=ClockSignal("rtio_rx0"), + i_RST=ResetSignal("rtio_rx0"), + i_CLKINSEL=1, # yes, 1=CLKIN1 0=CLKIN2 + + p_CLKFBOUT_MULT_F=8.0, + p_CLKOUT0_DIVIDE_F=8.0, + p_DIVCLK_DIVIDE=1, + + o_CLKFBOUT=mmcm_ps_fb, i_CLKFBIN=mmcm_ps_fb, + + p_CLKOUT0_USE_FINE_PS="TRUE", + o_CLKOUT0=mmcm_ps_output, + + i_PSCLK=ClockSignal(), + i_PSEN=self.phase_shift.re, + i_PSINCDEC=self.phase_shift.r, + o_PSDONE=mmcm_ps_psdone, + ) + self.sync += [ + If(self.phase_shift.re, self.phase_shift_done.status.eq(0)), + If(mmcm_ps_psdone, self.phase_shift_done.status.eq(1)) + ] + + si5324_clkin_se = Signal() + self.specials += [ + Instance("BUFGMUX", + i_I0=mmcm_freerun_output, + i_I1=mmcm_ps_output, + i_S=self.switch_clocks.storage, + o_O=si5324_clkin_se + ), + Instance("OBUFDS", + i_I=si5324_clkin_se, + o_O=si5324_clkin.p, o_OB=si5324_clkin.n + ) + ] + + # The RX synchronizer is tested for setup/hold violations by feeding it a + # toggling pattern and checking that the same toggling pattern comes out. + toggle_in = Signal() + self.sync.rtio_rx0 += toggle_in.eq(~toggle_in) + toggle_out = rx_synchronizer.resync(toggle_in) + + toggle_out_expected = Signal() + self.sync.rtio += toggle_out_expected.eq(~toggle_out) + + error = Signal() + error_clear = PulseSynchronizer("sys", "rtio") + self.submodules += error_clear + self.sync.rtio += [ + If(toggle_out != toggle_out_expected, error.eq(1)), + If(error_clear.o, error.eq(0)) + ] + self.specials += MultiReg(error, self.error.w) + self.comb += error_clear.i.eq(self.error.re) + + # expose MMCM outputs - used for clock constraints + self.mmcm_freerun_output = mmcm_freerun_output + self.mmcm_ps_output = mmcm_ps_output diff --git a/artiq/gateware/drtio/transceiver/clock_aligner.py b/artiq/gateware/drtio/transceiver/clock_aligner.py new file mode 100644 index 000000000..9d17d2b77 --- /dev/null +++ b/artiq/gateware/drtio/transceiver/clock_aligner.py @@ -0,0 +1,114 @@ +from math import ceil +from functools import reduce +from operator import add + +from migen import * +from migen.genlib.cdc import MultiReg, PulseSynchronizer + + +# Changes the phase of the transceiver RX clock to align the comma to +# the LSBs of RXDATA, fixing the latency. +# +# This is implemented by repeatedly resetting the transceiver until it +# gives out the correct phase. Each reset gives a random phase. +# +# If Xilinx had designed the GTX transceiver correctly, RXSLIDE_MODE=PMA +# would achieve this faster and in a cleaner way. But: +# * the phase jumps are of 2 UI at every second RXSLIDE pulse, instead +# of 1 UI at every pulse. It is unclear what the latency becomes. +# * RXSLIDE_MODE=PMA cannot be used with the RX buffer bypassed. +# Those design flaws make RXSLIDE_MODE=PMA yet another broken and useless +# transceiver "feature". +# +# Warning: Xilinx transceivers are LSB first, and comma needs to be flipped +# compared to the usual 8b10b binary representation. 
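+# For example, the 10-bit K28.5 code group commonly written as
+# 0b0011111010 is passed to this module bit-reversed, as 0b0101111100.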
+class BruteforceClockAligner(Module): + def __init__(self, comma, tx_clk_freq, check_period=6e-3): + self.rxdata = Signal(20) + self.restart = Signal() + + self.ready = Signal() + + check_max_val = ceil(check_period*tx_clk_freq) + check_counter = Signal(max=check_max_val+1) + check = Signal() + reset_check_counter = Signal() + self.sync.rtio_tx += [ + check.eq(0), + If(reset_check_counter, + check_counter.eq(check_max_val) + ).Else( + If(check_counter == 0, + check.eq(1), + check_counter.eq(check_max_val) + ).Else( + check_counter.eq(check_counter-1) + ) + ) + ] + + checks_reset = PulseSynchronizer("rtio_tx", "rtio_rx") + self.submodules += checks_reset + + comma_n = ~comma & 0b1111111111 + comma_seen_rxclk = Signal() + comma_seen = Signal() + comma_seen_rxclk.attr.add("no_retiming") + self.specials += MultiReg(comma_seen_rxclk, comma_seen) + self.sync.rtio_rx += \ + If(checks_reset.o, + comma_seen_rxclk.eq(0) + ).Elif((self.rxdata[:10] == comma) | (self.rxdata[:10] == comma_n), + comma_seen_rxclk.eq(1) + ) + + error_seen_rxclk = Signal() + error_seen = Signal() + error_seen_rxclk.attr.add("no_retiming") + self.specials += MultiReg(error_seen_rxclk, error_seen) + rx1cnt = Signal(max=11) + self.sync.rtio_rx += [ + rx1cnt.eq(reduce(add, [self.rxdata[i] for i in range(10)])), + If(checks_reset.o, + error_seen_rxclk.eq(0) + ).Elif((rx1cnt != 4) & (rx1cnt != 5) & (rx1cnt != 6), + error_seen_rxclk.eq(1) + ) + ] + + fsm = ClockDomainsRenamer("rtio_tx")(FSM(reset_state="WAIT_COMMA")) + self.submodules += fsm + + fsm.act("WAIT_COMMA", + If(check, + # Errors are still OK at this stage, as the transceiver + # has just been reset and may output garbage data. + If(comma_seen, + NextState("WAIT_NOERROR") + ).Else( + self.restart.eq(1) + ), + checks_reset.i.eq(1) + ) + ) + fsm.act("WAIT_NOERROR", + If(check, + If(comma_seen & ~error_seen, + NextState("READY") + ).Else( + self.restart.eq(1), + NextState("WAIT_COMMA") + ), + checks_reset.i.eq(1) + ) + ) + fsm.act("READY", + reset_check_counter.eq(1), + self.ready.eq(1), + If(error_seen, + checks_reset.i.eq(1), + self.restart.eq(1), + NextState("WAIT_COMMA") + ) + ) + diff --git a/artiq/gateware/drtio/transceiver/gth_ultrascale.py b/artiq/gateware/drtio/transceiver/gth_ultrascale.py new file mode 100644 index 000000000..ddc88037a --- /dev/null +++ b/artiq/gateware/drtio/transceiver/gth_ultrascale.py @@ -0,0 +1,704 @@ +from functools import reduce +from operator import or_, and_ + +from migen import * +from migen.genlib.resetsync import AsyncResetSynchronizer + +from misoc.cores.code_8b10b import Encoder, Decoder + +from microscope import * + +from artiq.gateware.drtio.core import TransceiverInterface, ChannelInterface +from artiq.gateware.drtio.transceiver.clock_aligner import BruteforceClockAligner +from artiq.gateware.drtio.transceiver.gth_ultrascale_init import * + + +class GTHSingle(Module): + def __init__(self, refclk, pads, sys_clk_freq, rtio_clk_freq, rtiox_mul, dw, mode): + assert (dw == 20) or (dw == 40) + assert mode in ["single", "master", "slave"] + self.mode = mode + + # phase alignment + self.txsyncallin = Signal() + self.txphaligndone = Signal() + self.txsyncallin = Signal() + self.txsyncin = Signal() + self.txsyncout = Signal() + self.txdlysreset = Signal() + + # # # + + self.txenable = Signal() + nwords = dw//10 + self.submodules.encoder = encoder = ClockDomainsRenamer("rtio_tx")( + Encoder(nwords, True)) + self.submodules.decoders = decoders = [ClockDomainsRenamer("rtio_rx")( + (Decoder(True))) for _ in range(nwords)] + self.rx_ready = 
Signal() + + # transceiver direct clock outputs + # for OBUFDS_GTE3 + self.rxrecclkout = Signal() + # useful to specify clock constraints in a way palatable to Vivado + self.txoutclk = Signal() + self.rxoutclk = Signal() + + # # # + + # TX generates RTIO clock, init must be in system domain + self.submodules.tx_init = tx_init = GTHInit(sys_clk_freq, False, mode) + # RX receives restart commands from RTIO domain + rx_init = ClockDomainsRenamer("rtio_tx")(GTHInit(rtio_clk_freq, True)) + self.submodules += rx_init + + cpll_reset = Signal() + cpll_lock = Signal() + self.comb += [ + cpll_reset.eq(tx_init.pllreset), + tx_init.plllock.eq(cpll_lock), + rx_init.plllock.eq(cpll_lock) + ] + + txdata = Signal(dw) + rxdata = Signal(dw) + rxphaligndone = Signal() + gth_params = dict( + p_ACJTAG_DEBUG_MODE =0b0, + p_ACJTAG_MODE =0b0, + p_ACJTAG_RESET =0b0, + p_ADAPT_CFG0 =0b1111100000000000, + p_ADAPT_CFG1 =0b0000000000000000, + p_ALIGN_COMMA_DOUBLE ="FALSE", + p_ALIGN_COMMA_ENABLE =0b0000000000, + p_ALIGN_COMMA_WORD =1, + p_ALIGN_MCOMMA_DET ="FALSE", + p_ALIGN_MCOMMA_VALUE =0b1010000011, + p_ALIGN_PCOMMA_DET ="FALSE", + p_ALIGN_PCOMMA_VALUE =0b0101111100, + p_A_RXOSCALRESET =0b0, + p_A_RXPROGDIVRESET =0b0, + p_A_TXPROGDIVRESET =0b0, + p_CBCC_DATA_SOURCE_SEL ="ENCODED", + p_CDR_SWAP_MODE_EN =0b0, + p_CHAN_BOND_KEEP_ALIGN ="FALSE", + p_CHAN_BOND_MAX_SKEW =1, + p_CHAN_BOND_SEQ_1_1 =0b0000000000, + p_CHAN_BOND_SEQ_1_2 =0b0000000000, + p_CHAN_BOND_SEQ_1_3 =0b0000000000, + p_CHAN_BOND_SEQ_1_4 =0b0000000000, + p_CHAN_BOND_SEQ_1_ENABLE =0b1111, + p_CHAN_BOND_SEQ_2_1 =0b0000000000, + p_CHAN_BOND_SEQ_2_2 =0b0000000000, + p_CHAN_BOND_SEQ_2_3 =0b0000000000, + p_CHAN_BOND_SEQ_2_4 =0b0000000000, + p_CHAN_BOND_SEQ_2_ENABLE =0b1111, + p_CHAN_BOND_SEQ_2_USE ="FALSE", + p_CHAN_BOND_SEQ_LEN =1, + p_CLK_CORRECT_USE ="FALSE", + p_CLK_COR_KEEP_IDLE ="FALSE", + p_CLK_COR_MAX_LAT =20, + p_CLK_COR_MIN_LAT =18, + p_CLK_COR_PRECEDENCE ="TRUE", + p_CLK_COR_REPEAT_WAIT =0, + p_CLK_COR_SEQ_1_1 =0b0000000000, + p_CLK_COR_SEQ_1_2 =0b0000000000, + p_CLK_COR_SEQ_1_3 =0b0000000000, + p_CLK_COR_SEQ_1_4 =0b0000000000, + p_CLK_COR_SEQ_1_ENABLE =0b1111, + p_CLK_COR_SEQ_2_1 =0b0000000000, + p_CLK_COR_SEQ_2_2 =0b0000000000, + p_CLK_COR_SEQ_2_3 =0b0000000000, + p_CLK_COR_SEQ_2_4 =0b0000000000, + p_CLK_COR_SEQ_2_ENABLE =0b1111, + p_CLK_COR_SEQ_2_USE ="FALSE", + p_CLK_COR_SEQ_LEN =1, + p_CPLL_CFG0 =0b0110011111111000, + p_CPLL_CFG1 =0b1010010010101100, + p_CPLL_CFG2 =0b0000000000000111, + p_CPLL_CFG3 =0b000000, + p_CPLL_FBDIV =5, + p_CPLL_FBDIV_45 =4, + p_CPLL_INIT_CFG0 =0b0000001010110010, + p_CPLL_INIT_CFG1 =0b00000000, + p_CPLL_LOCK_CFG =0b0000000111101000, + p_CPLL_REFCLK_DIV =1, + p_DDI_CTRL =0b00, + p_DDI_REALIGN_WAIT =15, + p_DEC_MCOMMA_DETECT ="FALSE", + p_DEC_PCOMMA_DETECT ="FALSE", + p_DEC_VALID_COMMA_ONLY ="FALSE", + p_DFE_D_X_REL_POS =0b0, + p_DFE_VCM_COMP_EN =0b0, + p_DMONITOR_CFG0 =0b0000000000, + p_DMONITOR_CFG1 =0b00000000, + p_ES_CLK_PHASE_SEL =0b0, + p_ES_CONTROL =0b000000, + p_ES_ERRDET_EN ="FALSE", + p_ES_EYE_SCAN_EN ="FALSE", + p_ES_HORZ_OFFSET =0b000000000000, + p_ES_PMA_CFG =0b0000000000, + p_ES_PRESCALE =0b00000, + p_ES_QUALIFIER0 =0b0000000000000000, + p_ES_QUALIFIER1 =0b0000000000000000, + p_ES_QUALIFIER2 =0b0000000000000000, + p_ES_QUALIFIER3 =0b0000000000000000, + p_ES_QUALIFIER4 =0b0000000000000000, + p_ES_QUAL_MASK0 =0b0000000000000000, + p_ES_QUAL_MASK1 =0b0000000000000000, + p_ES_QUAL_MASK2 =0b0000000000000000, + p_ES_QUAL_MASK3 =0b0000000000000000, + p_ES_QUAL_MASK4 =0b0000000000000000, + p_ES_SDATA_MASK0 
=0b0000000000000000, + p_ES_SDATA_MASK1 =0b0000000000000000, + p_ES_SDATA_MASK2 =0b0000000000000000, + p_ES_SDATA_MASK3 =0b0000000000000000, + p_ES_SDATA_MASK4 =0b0000000000000000, + p_EVODD_PHI_CFG =0b00000000000, + p_EYE_SCAN_SWAP_EN =0b0, + p_FTS_DESKEW_SEQ_ENABLE =0b1111, + p_FTS_LANE_DESKEW_CFG =0b1111, + p_FTS_LANE_DESKEW_EN ="FALSE", + p_GEARBOX_MODE =0b00000, + p_GM_BIAS_SELECT =0b0, + p_LOCAL_MASTER =0b1, + p_OOBDIVCTL =0b00, + p_OOB_PWRUP =0b0, + p_PCI3_AUTO_REALIGN ="OVR_1K_BLK", + p_PCI3_PIPE_RX_ELECIDLE =0b0, + p_PCI3_RX_ASYNC_EBUF_BYPASS =0b00, + p_PCI3_RX_ELECIDLE_EI2_ENABLE =0b0, + p_PCI3_RX_ELECIDLE_H2L_COUNT =0b000000, + p_PCI3_RX_ELECIDLE_H2L_DISABLE =0b000, + p_PCI3_RX_ELECIDLE_HI_COUNT =0b000000, + p_PCI3_RX_ELECIDLE_LP4_DISABLE =0b0, + p_PCI3_RX_FIFO_DISABLE =0b0, + p_PCIE_BUFG_DIV_CTRL =0b0001000000000000, + p_PCIE_RXPCS_CFG_GEN3 =0b0000001010100100, + p_PCIE_RXPMA_CFG =0b0000000000001010, + p_PCIE_TXPCS_CFG_GEN3 =0b0010010010100100, + p_PCIE_TXPMA_CFG =0b0000000000001010, + p_PCS_PCIE_EN ="FALSE", + p_PCS_RSVD0 =0b0000000000000000, + p_PCS_RSVD1 =0b000, + p_PD_TRANS_TIME_FROM_P2 =0b000000111100, + p_PD_TRANS_TIME_NONE_P2 =0b00011001, + p_PD_TRANS_TIME_TO_P2 =0b01100100, + p_PLL_SEL_MODE_GEN12 =0b00, + p_PLL_SEL_MODE_GEN3 =0b11, + p_PMA_RSV1 =0b1111000000000000, + p_PROCESS_PAR =0b010, + p_RATE_SW_USE_DRP =0b1, + p_RESET_POWERSAVE_DISABLE =0b0, + ) + gth_params.update( + p_RXBUFRESET_TIME =0b00011, + p_RXBUF_ADDR_MODE ="FAST", + p_RXBUF_EIDLE_HI_CNT =0b1000, + p_RXBUF_EIDLE_LO_CNT =0b0000, + p_RXBUF_EN ="FALSE", + p_RXBUF_RESET_ON_CB_CHANGE ="TRUE", + p_RXBUF_RESET_ON_COMMAALIGN ="FALSE", + p_RXBUF_RESET_ON_EIDLE ="FALSE", + p_RXBUF_RESET_ON_RATE_CHANGE ="TRUE", + p_RXBUF_THRESH_OVFLW =0, + p_RXBUF_THRESH_OVRD ="FALSE", + p_RXBUF_THRESH_UNDFLW =0, + p_RXCDRFREQRESET_TIME =0b00001, + p_RXCDRPHRESET_TIME =0b00001, + p_RXCDR_CFG0 =0b0000000000000000, + p_RXCDR_CFG0_GEN3 =0b0000000000000000, + p_RXCDR_CFG1 =0b0000000000000000, + p_RXCDR_CFG1_GEN3 =0b0000000000000000, + p_RXCDR_CFG2 =0b0000011111010110, + p_RXCDR_CFG2_GEN3 =0b0000011111100110, + p_RXCDR_CFG3 =0b0000000000000000, + p_RXCDR_CFG3_GEN3 =0b0000000000000000, + p_RXCDR_CFG4 =0b0000000000000000, + p_RXCDR_CFG4_GEN3 =0b0000000000000000, + p_RXCDR_CFG5 =0b0000000000000000, + p_RXCDR_CFG5_GEN3 =0b0000000000000000, + p_RXCDR_FR_RESET_ON_EIDLE =0b0, + p_RXCDR_HOLD_DURING_EIDLE =0b0, + p_RXCDR_LOCK_CFG0 =0b0100010010000000, + p_RXCDR_LOCK_CFG1 =0b0101111111111111, + p_RXCDR_LOCK_CFG2 =0b0111011111000011, + p_RXCDR_PH_RESET_ON_EIDLE =0b0, + p_RXCFOK_CFG0 =0b0100000000000000, + p_RXCFOK_CFG1 =0b0000000001100101, + p_RXCFOK_CFG2 =0b0000000000101110, + p_RXDFELPMRESET_TIME =0b0001111, + p_RXDFELPM_KL_CFG0 =0b0000000000000000, + p_RXDFELPM_KL_CFG1 =0b0000000000000010, + p_RXDFELPM_KL_CFG2 =0b0000000000000000, + p_RXDFE_CFG0 =0b0000101000000000, + p_RXDFE_CFG1 =0b0000000000000000, + p_RXDFE_GC_CFG0 =0b0000000000000000, + p_RXDFE_GC_CFG1 =0b0111100001110000, + p_RXDFE_GC_CFG2 =0b0000000000000000, + p_RXDFE_H2_CFG0 =0b0000000000000000, + p_RXDFE_H2_CFG1 =0b0000000000000000, + p_RXDFE_H3_CFG0 =0b0100000000000000, + p_RXDFE_H3_CFG1 =0b0000000000000000, + p_RXDFE_H4_CFG0 =0b0010000000000000, + p_RXDFE_H4_CFG1 =0b0000000000000011, + p_RXDFE_H5_CFG0 =0b0010000000000000, + p_RXDFE_H5_CFG1 =0b0000000000000011, + p_RXDFE_H6_CFG0 =0b0010000000000000, + p_RXDFE_H6_CFG1 =0b0000000000000000, + p_RXDFE_H7_CFG0 =0b0010000000000000, + p_RXDFE_H7_CFG1 =0b0000000000000000, + p_RXDFE_H8_CFG0 =0b0010000000000000, + p_RXDFE_H8_CFG1 
=0b0000000000000000, + p_RXDFE_H9_CFG0 =0b0010000000000000, + p_RXDFE_H9_CFG1 =0b0000000000000000, + p_RXDFE_HA_CFG0 =0b0010000000000000, + p_RXDFE_HA_CFG1 =0b0000000000000000, + p_RXDFE_HB_CFG0 =0b0010000000000000, + p_RXDFE_HB_CFG1 =0b0000000000000000, + p_RXDFE_HC_CFG0 =0b0000000000000000, + p_RXDFE_HC_CFG1 =0b0000000000000000, + p_RXDFE_HD_CFG0 =0b0000000000000000, + p_RXDFE_HD_CFG1 =0b0000000000000000, + p_RXDFE_HE_CFG0 =0b0000000000000000, + p_RXDFE_HE_CFG1 =0b0000000000000000, + p_RXDFE_HF_CFG0 =0b0000000000000000, + p_RXDFE_HF_CFG1 =0b0000000000000000, + p_RXDFE_OS_CFG0 =0b1000000000000000, + p_RXDFE_OS_CFG1 =0b0000000000000000, + p_RXDFE_UT_CFG0 =0b1000000000000000, + p_RXDFE_UT_CFG1 =0b0000000000000011, + p_RXDFE_VP_CFG0 =0b1010101000000000, + p_RXDFE_VP_CFG1 =0b0000000000110011, + p_RXDLY_CFG =0b0000000000011111, + p_RXDLY_LCFG =0b0000000000110000, + p_RXELECIDLE_CFG ="SIGCFG_4", + p_RXGBOX_FIFO_INIT_RD_ADDR =4, + p_RXGEARBOX_EN ="FALSE", + p_RXISCANRESET_TIME =0b00001, + p_RXLPM_CFG =0b0000000000000000, + p_RXLPM_GC_CFG =0b0001000000000000, + p_RXLPM_KH_CFG0 =0b0000000000000000, + p_RXLPM_KH_CFG1 =0b0000000000000010, + p_RXLPM_OS_CFG0 =0b1000000000000000, + p_RXLPM_OS_CFG1 =0b0000000000000010, + p_RXOOB_CFG =0b000000110, + p_RXOOB_CLK_CFG ="PMA", + p_RXOSCALRESET_TIME =0b00011, + p_RXOUT_DIV =2, + p_RXPCSRESET_TIME =0b00011, + p_RXPHBEACON_CFG =0b0000000000000000, + p_RXPHDLY_CFG =0b0010000000100000, + p_RXPHSAMP_CFG =0b0010000100000000, + p_RXPHSLIP_CFG =0b0110011000100010, + p_RXPH_MONITOR_SEL =0b00000, + p_RXPI_CFG0 =0b00, + p_RXPI_CFG1 =0b00, + p_RXPI_CFG2 =0b00, + p_RXPI_CFG3 =0b00, + p_RXPI_CFG4 =0b1, + p_RXPI_CFG5 =0b1, + p_RXPI_CFG6 =0b000, + p_RXPI_LPM =0b0, + p_RXPI_VREFSEL =0b0, + p_RXPMACLK_SEL ="DATA", + p_RXPMARESET_TIME =0b00011, + p_RXPRBS_ERR_LOOPBACK =0b0, + p_RXPRBS_LINKACQ_CNT =15, + p_RXSLIDE_AUTO_WAIT =7, + p_RXSLIDE_MODE ="OFF", + p_RXSYNC_MULTILANE =0b0, + p_RXSYNC_OVRD =0b0, + p_RXSYNC_SKIP_DA =0b0, + p_RX_AFE_CM_EN =0b0, + p_RX_BIAS_CFG0 =0b0000101010110100, + p_RX_BUFFER_CFG =0b000000, + p_RX_CAPFF_SARC_ENB =0b0, + p_RX_CLK25_DIV =6, + p_RX_CLKMUX_EN =0b1, + p_RX_CLK_SLIP_OVRD =0b00000, + p_RX_CM_BUF_CFG =0b1010, + p_RX_CM_BUF_PD =0b0, + p_RX_CM_SEL =0b11, + p_RX_CM_TRIM =0b1010, + p_RX_CTLE3_LPF =0b00000001, + p_RX_DATA_WIDTH =dw, + p_RX_DDI_SEL =0b000000, + p_RX_DEFER_RESET_BUF_EN ="TRUE", + p_RX_DFELPM_CFG0 =0b0110, + p_RX_DFELPM_CFG1 =0b1, + p_RX_DFELPM_KLKH_AGC_STUP_EN =0b1, + p_RX_DFE_AGC_CFG0 =0b10, + p_RX_DFE_AGC_CFG1 =0b100, + p_RX_DFE_KL_LPM_KH_CFG0 =0b01, + p_RX_DFE_KL_LPM_KH_CFG1 =0b100, + p_RX_DFE_KL_LPM_KL_CFG0 =0b01, + p_RX_DFE_KL_LPM_KL_CFG1 =0b100, + p_RX_DFE_LPM_HOLD_DURING_EIDLE =0b0, + p_RX_DISPERR_SEQ_MATCH ="TRUE", + p_RX_DIVRESET_TIME =0b00001, + p_RX_EN_HI_LR =0b0, + p_RX_EYESCAN_VS_CODE =0b0000000, + p_RX_EYESCAN_VS_NEG_DIR =0b0, + p_RX_EYESCAN_VS_RANGE =0b00, + p_RX_EYESCAN_VS_UT_SIGN =0b0, + p_RX_FABINT_USRCLK_FLOP =0b0, + p_RX_INT_DATAWIDTH =dw==40, + p_RX_PMA_POWER_SAVE =0b0, + p_RX_PROGDIV_CFG =0.0, + p_RX_SAMPLE_PERIOD =0b111, + p_RX_SIG_VALID_DLY =11, + p_RX_SUM_DFETAPREP_EN =0b0, + p_RX_SUM_IREF_TUNE =0b0000, + p_RX_SUM_RES_CTRL =0b00, + p_RX_SUM_VCMTUNE =0b0000, + p_RX_SUM_VCM_OVWR =0b0, + p_RX_SUM_VREF_TUNE =0b000, + p_RX_TUNE_AFE_OS =0b10, + p_RX_WIDEMODE_CDR =0b0, + p_RX_XCLK_SEL ="RXUSR", + p_SAS_MAX_COM =64, + p_SAS_MIN_COM =36, + p_SATA_BURST_SEQ_LEN =0b1110, + p_SATA_CPLL_CFG ="VCO_3000MHZ", + p_SATA_MAX_BURST =8, + p_SATA_MAX_INIT =21, + p_SATA_MAX_WAKE =7, + p_SATA_MIN_BURST =4, + p_SATA_MIN_INIT =12, + 
p_SATA_MIN_WAKE =4, + p_SHOW_REALIGN_COMMA ="TRUE", + p_SIM_RECEIVER_DETECT_PASS ="TRUE", + p_SIM_RESET_SPEEDUP ="TRUE", + p_SIM_TX_EIDLE_DRIVE_LEVEL =0b0, + p_SIM_VERSION =2, + p_TAPDLY_SET_TX =0b00, + p_TEMPERATUR_PAR =0b0010, + p_TERM_RCAL_CFG =0b100001000010000, + p_TERM_RCAL_OVRD =0b000, + p_TRANS_TIME_RATE =0b00001110, + p_TST_RSV0 =0b00000000, + p_TST_RSV1 =0b00000000, + ) + gth_params.update( + p_TXBUF_EN ="FALSE", + p_TXBUF_RESET_ON_RATE_CHANGE ="TRUE", + p_TXDLY_CFG =0b0000000000001001, + p_TXDLY_LCFG =0b0000000001010000, + p_TXDRVBIAS_N =0b1010, + p_TXDRVBIAS_P =0b1010, + p_TXFIFO_ADDR_CFG ="LOW", + p_TXGBOX_FIFO_INIT_RD_ADDR =4, + p_TXGEARBOX_EN ="FALSE", + p_TXOUT_DIV =2, + p_TXPCSRESET_TIME =0b00011, + p_TXPHDLY_CFG0 =0b0010000000100000, + p_TXPHDLY_CFG1 =0b0000000001110101, + p_TXPH_CFG =0b0000100110000000, + p_TXPH_MONITOR_SEL =0b00000, + p_TXPI_CFG0 =0b00, + p_TXPI_CFG1 =0b00, + p_TXPI_CFG2 =0b00, + p_TXPI_CFG3 =0b1, + p_TXPI_CFG4 =0b1, + p_TXPI_CFG5 =0b000, + p_TXPI_GRAY_SEL =0b0, + p_TXPI_INVSTROBE_SEL =0b0, + p_TXPI_LPM =0b0, + p_TXPI_PPMCLK_SEL ="TXUSRCLK2", + p_TXPI_PPM_CFG =0b00000000, + p_TXPI_SYNFREQ_PPM =0b001, + p_TXPI_VREFSEL =0b0, + p_TXPMARESET_TIME =0b00011, + p_TXSYNC_MULTILANE =0 if mode == "single" else 1, + p_TXSYNC_OVRD =0b0, + p_TXSYNC_SKIP_DA =0b0, + p_TX_CLK25_DIV =6, + p_TX_CLKMUX_EN =0b1, + p_TX_DATA_WIDTH =dw, + p_TX_DCD_CFG =0b000010, + p_TX_DCD_EN =0b0, + p_TX_DEEMPH0 =0b000000, + p_TX_DEEMPH1 =0b000000, + p_TX_DIVRESET_TIME =0b00001, + p_TX_DRIVE_MODE ="DIRECT", + p_TX_EIDLE_ASSERT_DELAY =0b100, + p_TX_EIDLE_DEASSERT_DELAY =0b011, + p_TX_EML_PHI_TUNE =0b0, + p_TX_FABINT_USRCLK_FLOP =0b0, + p_TX_IDLE_DATA_ZERO =0b0, + p_TX_INT_DATAWIDTH =dw==40, + p_TX_LOOPBACK_DRIVE_HIZ ="FALSE", + p_TX_MAINCURSOR_SEL =0b0, + p_TX_MARGIN_FULL_0 =0b1001111, + p_TX_MARGIN_FULL_1 =0b1001110, + p_TX_MARGIN_FULL_2 =0b1001100, + p_TX_MARGIN_FULL_3 =0b1001010, + p_TX_MARGIN_FULL_4 =0b1001000, + p_TX_MARGIN_LOW_0 =0b1000110, + p_TX_MARGIN_LOW_1 =0b1000101, + p_TX_MARGIN_LOW_2 =0b1000011, + p_TX_MARGIN_LOW_3 =0b1000010, + p_TX_MARGIN_LOW_4 =0b1000000, + p_TX_MODE_SEL =0b000, + p_TX_PMADATA_OPT =0b0, + p_TX_PMA_POWER_SAVE =0b0, + p_TX_PROGCLK_SEL ="PREPI", + p_TX_PROGDIV_CFG =dw/rtiox_mul, + p_TX_QPI_STATUS_EN =0b0, + p_TX_RXDETECT_CFG =0b00000000110010, + p_TX_RXDETECT_REF =0b100, + p_TX_SAMPLE_PERIOD =0b111, + p_TX_SARC_LPBK_ENB =0b0, + p_TX_XCLK_SEL ="TXUSR", + p_USE_PCS_CLK_PHASE_SEL =0b0, + p_WB_MODE =0b00, + ) + gth_params.update( + # Reset modes + i_GTRESETSEL=0, + i_RESETOVRD=0, + + i_CPLLRESET=0, + i_CPLLPD=cpll_reset, + o_CPLLLOCK=cpll_lock, + i_CPLLLOCKEN=1, + i_CPLLREFCLKSEL=0b001, + i_TSTIN=2**20-1, + i_GTREFCLK0=refclk, + + # TX clock + o_TXOUTCLK=self.txoutclk, + i_TXSYSCLKSEL=0b00, + i_TXPLLCLKSEL=0b00, + i_TXOUTCLKSEL=0b101, + + # TX Startup/Reset + i_GTTXRESET=tx_init.gtXxreset, + o_TXRESETDONE=tx_init.Xxresetdone, + i_TXDLYSRESET=tx_init.Xxdlysreset if mode != "slave" else self.txdlysreset, + o_TXDLYSRESETDONE=tx_init.Xxdlysresetdone, + o_TXPHALIGNDONE=tx_init.Xxphaligndone, + i_TXUSERRDY=tx_init.Xxuserrdy, + i_TXSYNCMODE=mode != "slave", + + i_TXSYNCALLIN=self.txsyncallin, + i_TXSYNCIN=self.txsyncin, + o_TXSYNCOUT=self.txsyncout, + + # TX data + i_TXINHIBIT=~self.txenable, + i_TXCTRL0=Cat(*[txdata[10*i+8] for i in range(nwords)]), + i_TXCTRL1=Cat(*[txdata[10*i+9] for i in range(nwords)]), + i_TXDATA=Cat(*[txdata[10*i:10*i+8] for i in range(nwords)]), + i_TXUSRCLK=ClockSignal("rtio_tx"), + i_TXUSRCLK2=ClockSignal("rtio_tx"), + + # TX electrical + 
i_TXPD=0b00, + i_TXBUFDIFFCTRL=0b000, + i_TXDIFFCTRL=0b1100, + + # RX Startup/Reset + i_GTRXRESET=rx_init.gtXxreset, + o_RXRESETDONE=rx_init.Xxresetdone, + i_RXDLYSRESET=rx_init.Xxdlysreset, + o_RXPHALIGNDONE=rxphaligndone, + i_RXSYNCALLIN=rxphaligndone, + i_RXUSERRDY=rx_init.Xxuserrdy, + i_RXSYNCIN=0, + i_RXSYNCMODE=1, + o_RXSYNCDONE=rx_init.Xxsyncdone, + + # RX AFE + i_RXDFEAGCCTRL=1, + i_RXDFEXYDEN=1, + i_RXLPMEN=1, + i_RXOSINTCFG=0xd, + i_RXOSINTEN=1, + + # RX clock + i_RXRATE=0, + i_RXDLYBYPASS=0, + i_RXSYSCLKSEL=0b00, + i_RXOUTCLKSEL=0b010, + i_RXPLLCLKSEL=0b00, + o_RXRECCLKOUT=self.rxrecclkout, + o_RXOUTCLK=self.rxoutclk, + i_RXUSRCLK=ClockSignal("rtio_rx"), + i_RXUSRCLK2=ClockSignal("rtio_rx"), + + # RX data + o_RXCTRL0=Cat(*[rxdata[10*i+8] for i in range(nwords)]), + o_RXCTRL1=Cat(*[rxdata[10*i+9] for i in range(nwords)]), + o_RXDATA=Cat(*[rxdata[10*i:10*i+8] for i in range(nwords)]), + + # RX electrical + i_RXPD=Replicate(rx_init.restart, 2), + i_RXELECIDLEMODE=0b11, + + # Pads + i_GTHRXP=pads.rxp, + i_GTHRXN=pads.rxn, + o_GTHTXP=pads.txp, + o_GTHTXN=pads.txn + ) + self.specials += Instance("GTHE3_CHANNEL", **gth_params) + self.comb += self.txphaligndone.eq(tx_init.Xxphaligndone) + + self.submodules += [ + add_probe_async("drtio_gth", "cpll_lock", cpll_lock), + add_probe_async("drtio_gth", "txuserrdy", tx_init.Xxuserrdy), + add_probe_async("drtio_gth", "tx_init_done", tx_init.done), + add_probe_async("drtio_gth", "rxuserrdy", rx_init.Xxuserrdy), + add_probe_async("drtio_gth", "rx_init_done", rx_init.done), + add_probe_buffer("drtio_gth", "txdata", txdata, clock_domain="rtio_tx"), + add_probe_buffer("drtio_gth", "rxdata", rxdata, clock_domain="rtio_rx") + ] + + # tx clocking + tx_reset_deglitched = Signal() + tx_reset_deglitched.attr.add("no_retiming") + self.sync += tx_reset_deglitched.eq(~tx_init.done) + self.clock_domains.cd_rtio_tx = ClockDomain() + self.clock_domains.cd_rtiox_tx = ClockDomain() + if mode == "master" or mode == "single": + self.specials += [ + Instance("BUFG_GT", i_I=self.txoutclk, o_O=self.cd_rtiox_tx.clk, i_DIV=0), + Instance("BUFG_GT", i_I=self.txoutclk, o_O=self.cd_rtio_tx.clk, i_DIV=rtiox_mul-1) + ] + self.specials += AsyncResetSynchronizer(self.cd_rtio_tx, tx_reset_deglitched) + + # rx clocking + rx_reset_deglitched = Signal() + rx_reset_deglitched.attr.add("no_retiming") + self.sync.rtio_tx += rx_reset_deglitched.eq(~rx_init.done) + self.clock_domains.cd_rtio_rx = ClockDomain() + self.specials += [ + Instance("BUFG_GT", i_I=self.rxoutclk, o_O=self.cd_rtio_rx.clk), + AsyncResetSynchronizer(self.cd_rtio_rx, rx_reset_deglitched) + ] + + # tx data + self.comb += txdata.eq(Cat(*[encoder.output[i] for i in range(nwords)])) + + # rx data + for i in range(nwords): + self.comb += decoders[i].input.eq(rxdata[10*i:10*(i+1)]) + + # clock alignment + clock_aligner = BruteforceClockAligner(0b0101111100, rtio_clk_freq) + self.submodules += clock_aligner + self.comb += [ + clock_aligner.rxdata.eq(rxdata), + rx_init.restart.eq(clock_aligner.restart), + self.rx_ready.eq(clock_aligner.ready) + ] + self.submodules += add_probe_async("drtio_gth", "clock_aligner_ready", clock_aligner.ready) + + +class GTHTXPhaseAlignement(Module): + # TX Buffer Bypass in Single-Lane/Multi-Lane Auto Mode (ug576) + def __init__(self, gths): + txsyncallin = Signal() + txsync = Signal() + txphaligndone = Signal(len(gths)) + txdlysreset = Signal() + ready_for_align = Signal(len(gths)) + all_ready_for_align = Signal() + + for i, gth in enumerate(gths): + # Common to all transceivers + self.comb += 
[ + ready_for_align[i].eq(1), + gth.txsyncin.eq(txsync), + gth.txsyncallin.eq(txsyncallin), + txphaligndone[i].eq(gth.txphaligndone) + ] + # Specific to Master or Single transceivers + if gth.mode == "master" or gth.mode == "single": + self.comb += [ + gth.tx_init.all_ready_for_align.eq(all_ready_for_align), + txsync.eq(gth.txsyncout), + txdlysreset.eq(gth.tx_init.Xxdlysreset) + ] + # Specific to Slave transceivers + else: + self.comb += [ + ready_for_align[i].eq(gth.tx_init.ready_for_align), + gth.txdlysreset.eq(txdlysreset), + ] + + self.comb += [ + txsyncallin.eq(reduce(and_, [txphaligndone[i] for i in range(len(gths))])), + all_ready_for_align.eq(reduce(and_, [ready_for_align[i] for i in range(len(gths))])) + ] + + +class GTH(Module, TransceiverInterface): + def __init__(self, clock_pads, data_pads, sys_clk_freq, rtio_clk_freq, rtiox_mul=2, dw=20, master=0, clock_recout_pads=None): + self.nchannels = nchannels = len(data_pads) + self.gths = [] + + # # # + + create_buf = hasattr(clock_pads, "p") + if create_buf: + refclk = Signal() + ibufds_ceb = Signal() + self.specials += Instance("IBUFDS_GTE3", + i_CEB=ibufds_ceb, + i_I=clock_pads.p, + i_IB=clock_pads.n, + o_O=refclk) + else: + refclk = clock_pads + + rtio_tx_clk = Signal() + channel_interfaces = [] + for i in range(nchannels): + if nchannels == 1: + mode = "single" + else: + mode = "master" if i == master else "slave" + gth = GTHSingle(refclk, data_pads[i], sys_clk_freq, rtio_clk_freq, rtiox_mul, dw, mode) + if mode == "master": + self.comb += rtio_tx_clk.eq(gth.cd_rtio_tx.clk) + elif mode == "slave": + self.comb += gth.cd_rtio_tx.clk.eq(rtio_tx_clk) + self.gths.append(gth) + setattr(self.submodules, "gth"+str(i), gth) + channel_interface = ChannelInterface(gth.encoder, gth.decoders) + self.comb += channel_interface.rx_ready.eq(gth.rx_ready) + channel_interfaces.append(channel_interface) + + self.submodules.tx_phase_alignment = GTHTXPhaseAlignement(self.gths) + + TransceiverInterface.__init__(self, channel_interfaces) + for n, gth in enumerate(self.gths): + self.comb += gth.txenable.eq(self.txenable.storage[n]) + self.clock_domains.cd_rtiox = ClockDomain(reset_less=True) + if create_buf: + # GTH PLLs recover on their own from an interrupted clock input, + # but be paranoid about HMC7043 noise. 
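+ # stable_clkin is set by software once the upstream clock is known
+ # to be good; while it is cleared, CEB (active low) keeps the
+ # reference clock input buffer disabled.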
+ self.stable_clkin.storage.attr.add("no_retiming") + self.comb += ibufds_ceb.eq(~self.stable_clkin.storage) + + self.comb += [ + self.cd_rtio.clk.eq(self.gths[master].cd_rtio_tx.clk), + self.cd_rtiox.clk.eq(self.gths[master].cd_rtiox_tx.clk), + self.cd_rtio.rst.eq(reduce(or_, [gth.cd_rtio_tx.rst for gth in self.gths])) + ] + for i in range(nchannels): + self.comb += [ + getattr(self, "cd_rtio_rx" + str(i)).clk.eq(self.gths[i].cd_rtio_rx.clk), + getattr(self, "cd_rtio_rx" + str(i)).rst.eq(self.gths[i].cd_rtio_rx.rst) + ] + + if clock_recout_pads is not None: + self.specials += Instance("OBUFDS_GTE3", + p_REFCLK_EN_TX_PATH=0b1, + p_REFCLK_ICNTL_TX=0b00111, + i_I=self.gths[0].rxrecclkout, + i_CEB=0, + o_O=clock_recout_pads.p, o_OB=clock_recout_pads.n) diff --git a/artiq/gateware/drtio/transceiver/gth_ultrascale_init.py b/artiq/gateware/drtio/transceiver/gth_ultrascale_init.py new file mode 100644 index 000000000..30645b876 --- /dev/null +++ b/artiq/gateware/drtio/transceiver/gth_ultrascale_init.py @@ -0,0 +1,157 @@ +from math import ceil + +from migen import * +from migen.genlib.cdc import MultiReg +from migen.genlib.misc import WaitTimer + + +__all__ = ["GTHInit"] + + +class GTHInit(Module): + def __init__(self, sys_clk_freq, rx, mode="master"): + assert not (rx and mode != "master") + self.done = Signal() + self.restart = Signal() + + # GTH signals + self.plllock = Signal() + self.pllreset = Signal() + self.gtXxreset = Signal() + self.Xxresetdone = Signal() + self.Xxdlysreset = Signal() + self.Xxdlysresetdone = Signal() + self.Xxphaligndone = Signal() + self.Xxsyncdone = Signal() + self.Xxuserrdy = Signal() + + self.all_ready_for_align = Signal(reset=1) + self.ready_for_align = Signal() + + # # # + + # Double-latch transceiver asynch outputs + plllock = Signal() + Xxresetdone = Signal() + Xxdlysresetdone = Signal() + Xxphaligndone = Signal() + Xxsyncdone = Signal() + self.specials += [ + MultiReg(self.plllock, plllock), + MultiReg(self.Xxresetdone, Xxresetdone), + MultiReg(self.Xxdlysresetdone, Xxdlysresetdone), + MultiReg(self.Xxphaligndone, Xxphaligndone), + MultiReg(self.Xxsyncdone, Xxsyncdone) + ] + + # Deglitch FSM outputs driving transceiver asynch inputs + gtXxreset = Signal() + Xxdlysreset = Signal() + Xxuserrdy = Signal() + self.sync += [ + self.gtXxreset.eq(gtXxreset), + self.Xxdlysreset.eq(Xxdlysreset), + self.Xxuserrdy.eq(Xxuserrdy) + ] + + # PLL reset must be at least 2us + pll_reset_cycles = ceil(2000*sys_clk_freq/1000000000) + pll_reset_timer = WaitTimer(pll_reset_cycles) + self.submodules += pll_reset_timer + + startup_fsm = ResetInserter()(FSM(reset_state="RESET_ALL")) + self.submodules += startup_fsm + + ready_timer = WaitTimer(int(sys_clk_freq/1000)) + self.submodules += ready_timer + self.comb += [ + ready_timer.wait.eq(~self.done & ~startup_fsm.reset), + startup_fsm.reset.eq(self.restart | ready_timer.done) + ] + + if rx: + cdr_stable_timer = WaitTimer(1024) + self.submodules += cdr_stable_timer + + Xxphaligndone_r = Signal(reset=1) + Xxphaligndone_rising = Signal() + self.sync += Xxphaligndone_r.eq(Xxphaligndone) + self.comb += Xxphaligndone_rising.eq(Xxphaligndone & ~Xxphaligndone_r) + + startup_fsm.act("RESET_ALL", + gtXxreset.eq(1), + self.pllreset.eq(1), + pll_reset_timer.wait.eq(1), + If(pll_reset_timer.done, + NextState("RELEASE_PLL_RESET") + ) + ) + startup_fsm.act("RELEASE_PLL_RESET", + gtXxreset.eq(1), + If(plllock, NextState("RELEASE_GTH_RESET")) + ) + # Release GTH reset and wait for GTH resetdone + # (from UG476, GTH is reset on falling edge + # of 
gtXxreset) + if rx: + startup_fsm.act("RELEASE_GTH_RESET", + Xxuserrdy.eq(1), + cdr_stable_timer.wait.eq(1), + If(Xxresetdone & cdr_stable_timer.done, NextState("ALIGN")) + ) + else: + startup_fsm.act("RELEASE_GTH_RESET", + Xxuserrdy.eq(1), + If(Xxresetdone, + If(mode == "slave", + NextState("WAIT_ALIGN") + ).Else( + NextState("ALIGN") + ) + ) + ) + # Start delay alignment (pulse) + startup_fsm.act("ALIGN", + Xxuserrdy.eq(1), + If(self.all_ready_for_align, + Xxdlysreset.eq(1), + NextState("WAIT_ALIGN") + ) + ) + if rx: + # Wait for delay alignment + startup_fsm.act("WAIT_ALIGN", + Xxuserrdy.eq(1), + If(Xxsyncdone, + NextState("READY") + ) + ) + else: + # Wait for delay alignment + startup_fsm.act("WAIT_ALIGN", + Xxuserrdy.eq(1), + self.ready_for_align.eq(1), + If(Xxdlysresetdone, + If(mode == "slave", + NextState("WAIT_LAST_ALIGN_DONE") + ).Else( + NextState("WAIT_FIRST_ALIGN_DONE") + ) + ) + ) + + # Wait 2 rising edges of Xxphaligndone + # (from UG576 in TX Buffer Bypass in Single-Lane Auto Mode) + startup_fsm.act("WAIT_FIRST_ALIGN_DONE", + Xxuserrdy.eq(1), + If(Xxphaligndone_rising, NextState("WAIT_LAST_ALIGN_DONE")) + ) + startup_fsm.act("WAIT_LAST_ALIGN_DONE", + Xxuserrdy.eq(1), + If(Xxphaligndone_rising, NextState("READY")) + ) + startup_fsm.act("READY", + Xxuserrdy.eq(1), + self.done.eq(1), + If(self.restart, NextState("RESET_ALL")) + ) diff --git a/artiq/gateware/drtio/transceiver/gtp_7series.py b/artiq/gateware/drtio/transceiver/gtp_7series.py new file mode 100644 index 000000000..5da42b22b --- /dev/null +++ b/artiq/gateware/drtio/transceiver/gtp_7series.py @@ -0,0 +1,765 @@ +from functools import reduce +from operator import or_ + +from migen import * +from migen.genlib.resetsync import AsyncResetSynchronizer + +from misoc.cores.code_8b10b import Encoder, Decoder + +from artiq.gateware.drtio.core import TransceiverInterface, ChannelInterface +from artiq.gateware.drtio.transceiver.clock_aligner import BruteforceClockAligner +from artiq.gateware.drtio.transceiver.gtp_7series_init import * + + +class GTPSingle(Module): + def __init__(self, qpll_channel, pads, sys_clk_freq, rtio_clk_freq, mode): + assert mode in ["single", "master", "slave"] + self.mode = mode + + # # # + + self.stable_clkin = Signal() + self.txenable = Signal() + self.submodules.encoder = encoder = ClockDomainsRenamer("rtio_tx")( + Encoder(2, True)) + self.submodules.decoders = decoders = [ClockDomainsRenamer("rtio_rx")( + (Decoder(True))) for _ in range(2)] + self.rx_ready = Signal() + + # transceiver direct clock outputs + # useful to specify clock constraints in a way palatable to Vivado + self.txoutclk = Signal() + self.rxoutclk = Signal() + + # # # + + # TX generates RTIO clock, init must be in system domain + self.submodules.tx_init = tx_init = GTPTXInit(sys_clk_freq, mode) + # RX receives restart commands from RTIO domain + rx_init = ClockDomainsRenamer("rtio_tx")(GTPRXInit(rtio_clk_freq)) + self.submodules += rx_init + + self.comb += [ + tx_init.stable_clkin.eq(self.stable_clkin), + qpll_channel.reset.eq(tx_init.pllreset), + tx_init.plllock.eq(qpll_channel.lock) + ] + + txdata = Signal(20) + rxdata = Signal(20) + rxphaligndone = Signal() + gtp_params = dict( + # Simulation-Only Attributes + p_SIM_RECEIVER_DETECT_PASS ="TRUE", + p_SIM_TX_EIDLE_DRIVE_LEVEL ="X", + p_SIM_RESET_SPEEDUP ="FALSE", + p_SIM_VERSION ="2.0", + + # RX Byte and Word Alignment Attributes + p_ALIGN_COMMA_DOUBLE ="FALSE", + p_ALIGN_COMMA_ENABLE =0b1111111111, + p_ALIGN_COMMA_WORD =1, + p_ALIGN_MCOMMA_DET ="TRUE", + p_ALIGN_MCOMMA_VALUE 
=0b1010000011, + p_ALIGN_PCOMMA_DET ="TRUE", + p_ALIGN_PCOMMA_VALUE =0b0101111100, + p_SHOW_REALIGN_COMMA ="FALSE", + p_RXSLIDE_AUTO_WAIT =7, + p_RXSLIDE_MODE ="PCS", + p_RX_SIG_VALID_DLY =10, + + # RX 8B/10B Decoder Attributes + p_RX_DISPERR_SEQ_MATCH ="FALSE", + p_DEC_MCOMMA_DETECT ="TRUE", + p_DEC_PCOMMA_DETECT ="TRUE", + p_DEC_VALID_COMMA_ONLY ="FALSE", + + # RX Clock Correction Attributes + p_CBCC_DATA_SOURCE_SEL ="ENCODED", + p_CLK_COR_SEQ_2_USE ="FALSE", + p_CLK_COR_KEEP_IDLE ="FALSE", + p_CLK_COR_MAX_LAT =9, + p_CLK_COR_MIN_LAT =7, + p_CLK_COR_PRECEDENCE ="TRUE", + p_CLK_COR_REPEAT_WAIT =0, + p_CLK_COR_SEQ_LEN =1, + p_CLK_COR_SEQ_1_ENABLE =0b1111, + p_CLK_COR_SEQ_1_1 =0b0100000000, + p_CLK_COR_SEQ_1_2 =0b0000000000, + p_CLK_COR_SEQ_1_3 =0b0000000000, + p_CLK_COR_SEQ_1_4 =0b0000000000, + p_CLK_CORRECT_USE ="FALSE", + p_CLK_COR_SEQ_2_ENABLE =0b1111, + p_CLK_COR_SEQ_2_1 =0b0100000000, + p_CLK_COR_SEQ_2_2 =0b0000000000, + p_CLK_COR_SEQ_2_3 =0b0000000000, + p_CLK_COR_SEQ_2_4 =0b0000000000, + + # RX Channel Bonding Attributes + p_CHAN_BOND_KEEP_ALIGN ="FALSE", + p_CHAN_BOND_MAX_SKEW =1, + p_CHAN_BOND_SEQ_LEN =1, + p_CHAN_BOND_SEQ_1_1 =0b0000000000, + p_CHAN_BOND_SEQ_1_2 =0b0000000000, + p_CHAN_BOND_SEQ_1_3 =0b0000000000, + p_CHAN_BOND_SEQ_1_4 =0b0000000000, + p_CHAN_BOND_SEQ_1_ENABLE =0b1111, + p_CHAN_BOND_SEQ_2_1 =0b0000000000, + p_CHAN_BOND_SEQ_2_2 =0b0000000000, + p_CHAN_BOND_SEQ_2_3 =0b0000000000, + p_CHAN_BOND_SEQ_2_4 =0b0000000000, + p_CHAN_BOND_SEQ_2_ENABLE =0b1111, + p_CHAN_BOND_SEQ_2_USE ="FALSE", + p_FTS_DESKEW_SEQ_ENABLE =0b1111, + p_FTS_LANE_DESKEW_CFG =0b1111, + p_FTS_LANE_DESKEW_EN ="FALSE", + + # RX Margin Analysis Attributes + p_ES_CONTROL =0b000000, + p_ES_ERRDET_EN ="FALSE", + p_ES_EYE_SCAN_EN ="FALSE", + p_ES_HORZ_OFFSET =0x010, + p_ES_PMA_CFG =0b0000000000, + p_ES_PRESCALE =0b00000, + p_ES_QUALIFIER =0x00000000000000000000, + p_ES_QUAL_MASK =0x00000000000000000000, + p_ES_SDATA_MASK =0x00000000000000000000, + p_ES_VERT_OFFSET =0b000000000, + + # FPGA RX Interface Attributes + p_RX_DATA_WIDTH =20, + + # PMA Attributes + p_OUTREFCLK_SEL_INV =0b11, + p_PMA_RSV =0x00000333, + p_PMA_RSV2 =0x00002040, + p_PMA_RSV3 =0b00, + p_PMA_RSV4 =0b0000, + p_RX_BIAS_CFG =0b0000111100110011, + p_DMONITOR_CFG =0x000A00, + p_RX_CM_SEL =0b01, + p_RX_CM_TRIM =0b0000, + p_RX_DEBUG_CFG =0b00000000000000, + p_RX_OS_CFG =0b0000010000000, + p_TERM_RCAL_CFG =0b100001000010000, + p_TERM_RCAL_OVRD =0b000, + p_TST_RSV =0x00000000, + p_RX_CLK25_DIV =5, + p_TX_CLK25_DIV =5, + p_UCODEER_CLR =0b0, + + # PCI Express Attributes + p_PCS_PCIE_EN ="FALSE", + + # PCS Attributes + p_PCS_RSVD_ATTR =0x000000000000, + + # RX Buffer Attributes + p_RXBUF_ADDR_MODE ="FAST", + p_RXBUF_EIDLE_HI_CNT =0b1000, + p_RXBUF_EIDLE_LO_CNT =0b0000, + p_RXBUF_EN ="FALSE", + p_RX_BUFFER_CFG =0b000000, + p_RXBUF_RESET_ON_CB_CHANGE ="TRUE", + p_RXBUF_RESET_ON_COMMAALIGN ="FALSE", + p_RXBUF_RESET_ON_EIDLE ="FALSE", + p_RXBUF_RESET_ON_RATE_CHANGE ="TRUE", + p_RXBUFRESET_TIME =0b00001, + p_RXBUF_THRESH_OVFLW =61, + p_RXBUF_THRESH_OVRD ="FALSE", + p_RXBUF_THRESH_UNDFLW =4, + p_RXDLY_CFG =0x001F, + p_RXDLY_LCFG =0x030, + p_RXDLY_TAP_CFG =0x0000, + p_RXPH_CFG =0xC00002, + p_RXPHDLY_CFG =0x084020, + p_RXPH_MONITOR_SEL =0b00000, + p_RX_XCLK_SEL ="RXUSR", + p_RX_DDI_SEL =0b000000, + p_RX_DEFER_RESET_BUF_EN ="TRUE", + + # CDR Attributes + p_RXCDR_CFG =0x0001107FE206021081010, + p_RXCDR_FR_RESET_ON_EIDLE =0b0, + p_RXCDR_HOLD_DURING_EIDLE =0b0, + p_RXCDR_PH_RESET_ON_EIDLE =0b0, + p_RXCDR_LOCK_CFG =0b001001, + + # RX Initialization and Reset 
Attributes + p_RXCDRFREQRESET_TIME =0b00001, + p_RXCDRPHRESET_TIME =0b00001, + p_RXISCANRESET_TIME =0b00001, + p_RXPCSRESET_TIME =0b00001, + p_RXPMARESET_TIME =0b00011, + + # RX OOB Signaling Attributes + p_RXOOB_CFG =0b0000110, + + # RX Gearbox Attributes + p_RXGEARBOX_EN ="FALSE", + p_GEARBOX_MODE =0b000, + + # PRBS Detection Attribute + p_RXPRBS_ERR_LOOPBACK =0b0, + + # Power-Down Attributes + p_PD_TRANS_TIME_FROM_P2 =0x03c, + p_PD_TRANS_TIME_NONE_P2 =0x3c, + p_PD_TRANS_TIME_TO_P2 =0x64, + + # RX OOB Signaling Attributes + p_SAS_MAX_COM =64, + p_SAS_MIN_COM =36, + p_SATA_BURST_SEQ_LEN =0b0101, + p_SATA_BURST_VAL =0b100, + p_SATA_EIDLE_VAL =0b100, + p_SATA_MAX_BURST =8, + p_SATA_MAX_INIT =21, + p_SATA_MAX_WAKE =7, + p_SATA_MIN_BURST =4, + p_SATA_MIN_INIT =12, + p_SATA_MIN_WAKE =4, + + # RX Fabric Clock Output Control Attributes + p_TRANS_TIME_RATE =0x0E, + + # TX Buffer Attributes + p_TXBUF_EN ="FALSE", + p_TXBUF_RESET_ON_RATE_CHANGE ="TRUE", + p_TXDLY_CFG =0x001F, + p_TXDLY_LCFG =0x030, + p_TXDLY_TAP_CFG =0x0000, + p_TXPH_CFG =0x0780, + p_TXPHDLY_CFG =0x084020, + p_TXPH_MONITOR_SEL =0b00000, + p_TX_XCLK_SEL ="TXUSR", + + # FPGA TX Interface Attributes + p_TX_DATA_WIDTH =20, + + # TX Configurable Driver Attributes + p_TX_DEEMPH0 =0b000000, + p_TX_DEEMPH1 =0b000000, + p_TX_EIDLE_ASSERT_DELAY =0b110, + p_TX_EIDLE_DEASSERT_DELAY =0b100, + p_TX_LOOPBACK_DRIVE_HIZ ="FALSE", + p_TX_MAINCURSOR_SEL =0b0, + p_TX_DRIVE_MODE ="DIRECT", + p_TX_MARGIN_FULL_0 =0b1001110, + p_TX_MARGIN_FULL_1 =0b1001001, + p_TX_MARGIN_FULL_2 =0b1000101, + p_TX_MARGIN_FULL_3 =0b1000010, + p_TX_MARGIN_FULL_4 =0b1000000, + p_TX_MARGIN_LOW_0 =0b1000110, + p_TX_MARGIN_LOW_1 =0b1000100, + p_TX_MARGIN_LOW_2 =0b1000010, + p_TX_MARGIN_LOW_3 =0b1000000, + p_TX_MARGIN_LOW_4 =0b1000000, + + # TX Gearbox Attributes + p_TXGEARBOX_EN ="FALSE", + + # TX Initialization and Reset Attributes + p_TXPCSRESET_TIME =0b00001, + p_TXPMARESET_TIME =0b00001, + + # TX Receiver Detection Attributes + p_TX_RXDETECT_CFG =0x1832, + p_TX_RXDETECT_REF =0b100, + + # JTAG Attributes + p_ACJTAG_DEBUG_MODE =0b0, + p_ACJTAG_MODE =0b0, + p_ACJTAG_RESET =0b0, + + # CDR Attributes + p_CFOK_CFG =0x49000040E80, + p_CFOK_CFG2 =0b0100000, + p_CFOK_CFG3 =0b0100000, + p_CFOK_CFG4 =0b0, + p_CFOK_CFG5 =0x0, + p_CFOK_CFG6 =0b0000, + p_RXOSCALRESET_TIME =0b00011, + p_RXOSCALRESET_TIMEOUT =0b00000, + + # PMA Attributes + p_CLK_COMMON_SWING =0b0, + p_RX_CLKMUX_EN =0b1, + p_TX_CLKMUX_EN =0b1, + p_ES_CLK_PHASE_SEL =0b0, + p_USE_PCS_CLK_PHASE_SEL =0b0, + p_PMA_RSV6 =0b0, + p_PMA_RSV7 =0b0, + + # TX Configuration Driver Attributes + p_TX_PREDRIVER_MODE =0b0, + p_PMA_RSV5 =0b0, + p_SATA_PLL_CFG ="VCO_3000MHZ", + + # RX Fabric Clock Output Control Attributes + p_RXOUT_DIV =2, + + # TX Fabric Clock Output Control Attributes + p_TXOUT_DIV =2, + + # RX Phase Interpolator Attributes + p_RXPI_CFG0 =0b000, + p_RXPI_CFG1 =0b1, + p_RXPI_CFG2 =0b1, + + # RX Equalizer Attributes + p_ADAPT_CFG0 =0x00000, + p_RXLPMRESET_TIME =0b0001111, + p_RXLPM_BIAS_STARTUP_DISABLE =0b0, + p_RXLPM_CFG =0b0110, + p_RXLPM_CFG1 =0b0, + p_RXLPM_CM_CFG =0b0, + p_RXLPM_GC_CFG =0b111100010, + p_RXLPM_GC_CFG2 =0b001, + p_RXLPM_HF_CFG =0b00001111110000, + p_RXLPM_HF_CFG2 =0b01010, + p_RXLPM_HF_CFG3 =0b0000, + p_RXLPM_HOLD_DURING_EIDLE =0b0, + p_RXLPM_INCM_CFG =0b0, + p_RXLPM_IPCM_CFG =0b1, + p_RXLPM_LF_CFG =0b000000001111110000, + p_RXLPM_LF_CFG2 =0b01010, + p_RXLPM_OSINT_CFG =0b100, + + # TX Phase Interpolator PPM Controller Attributes + p_TXPI_CFG0 =0b00, + p_TXPI_CFG1 =0b00, + p_TXPI_CFG2 =0b00, + p_TXPI_CFG3 
=0b0, + p_TXPI_CFG4 =0b0, + p_TXPI_CFG5 =0b000, + p_TXPI_GREY_SEL =0b0, + p_TXPI_INVSTROBE_SEL =0b0, + p_TXPI_PPMCLK_SEL ="TXUSRCLK2", + p_TXPI_PPM_CFG =0x00, + p_TXPI_SYNFREQ_PPM =0b001, + + # LOOPBACK Attributes + p_LOOPBACK_CFG =0b0, + p_PMA_LOOPBACK_CFG =0b0, + + # RX OOB Signalling Attributes + p_RXOOB_CLK_CFG ="PMA", + + # TX OOB Signalling Attributes + p_TXOOB_CFG =0b0, + + # RX Buffer Attributes + p_RXSYNC_MULTILANE =0b0, + p_RXSYNC_OVRD =0b0, + p_RXSYNC_SKIP_DA =0b0, + + # TX Buffer Attributes + p_TXSYNC_MULTILANE =0b0, + p_TXSYNC_OVRD =0b1, + p_TXSYNC_SKIP_DA =0b0 + ) + gtp_params.update( + # CPLL Ports + i_GTRSVD =0b0000000000000000, + i_PCSRSVDIN =0b0000000000000000, + i_TSTIN =0b11111111111111111111, + + # Channel - DRP Ports + i_DRPADDR=rx_init.drpaddr, + i_DRPCLK=ClockSignal("rtio_tx"), + i_DRPDI=rx_init.drpdi, + o_DRPDO=rx_init.drpdo, + i_DRPEN=rx_init.drpen, + o_DRPRDY=rx_init.drprdy, + i_DRPWE=rx_init.drpwe, + # FPGA TX Interface Datapath Configuration + i_TX8B10BEN =0, + # Loopback Ports + i_LOOPBACK =0, + # PCI Express Ports + #o_PHYSTATUS =, + i_RXRATE =0, + #o_RXVALID =, + # PMA Reserved Ports + i_PMARSVDIN3 =0b0, + i_PMARSVDIN4 =0b0, + # Power-Down Ports + i_RXPD =Cat(rx_init.gtrxpd, rx_init.gtrxpd), + i_TXPD =0b00, + # RX 8B/10B Decoder Ports + i_SETERRSTATUS =0, + # RX Initialization and Reset Ports + i_EYESCANRESET =0, + i_RXUSERRDY =rx_init.rxuserrdy, + # RX Margin Analysis Ports + #o_EYESCANDATAERROR =, + i_EYESCANMODE =0, + i_EYESCANTRIGGER =0, + # Receive Ports + i_CLKRSVD0 =0, + i_CLKRSVD1 =0, + i_DMONFIFORESET =0, + i_DMONITORCLK =0, + o_RXPMARESETDONE =rx_init.rxpmaresetdone, + i_SIGVALIDCLK =0, + # Receive Ports - CDR Ports + i_RXCDRFREQRESET =0, + i_RXCDRHOLD =0, + #o_RXCDRLOCK =, + i_RXCDROVRDEN =0, + i_RXCDRRESET =0, + i_RXCDRRESETRSV =0, + i_RXOSCALRESET =0, + i_RXOSINTCFG =0b0010, + #o_RXOSINTDONE =, + i_RXOSINTHOLD =0, + i_RXOSINTOVRDEN =0, + i_RXOSINTPD =0, + #o_RXOSINTSTARTED =, + i_RXOSINTSTROBE =0, + #o_RXOSINTSTROBESTARTED =, + i_RXOSINTTESTOVRDEN =0, + # Receive Ports - Clock Correction Ports + #o_RXCLKCORCNT =, + # Receive Ports - FPGA RX Interface Datapath Configuration + i_RX8B10BEN =0, + # Receive Ports - FPGA RX Interface Ports + o_RXDATA =Cat(rxdata[:8], rxdata[10:18]), + i_RXUSRCLK =ClockSignal("rtio_rx"), + i_RXUSRCLK2 =ClockSignal("rtio_rx"), + # Receive Ports - Pattern Checker Ports + #o_RXPRBSERR =, + i_RXPRBSSEL =0, + # Receive Ports - Pattern Checker ports + i_RXPRBSCNTRESET =0, + # Receive Ports - RX 8B/10B Decoder Ports + #o_RXCHARISCOMMA =, + o_RXCHARISK =Cat(rxdata[8], rxdata[18]), + o_RXDISPERR =Cat(rxdata[9], rxdata[19]), + + #o_RXNOTINTABLE =, + # Receive Ports - RX AFE Ports + i_GTPRXN =pads.rxn, + i_GTPRXP =pads.rxp, + i_PMARSVDIN2 =0b0, + #o_PMARSVDOUT0 =, + #o_PMARSVDOUT1 =, + # Receive Ports - RX Buffer Bypass Ports + i_RXBUFRESET =0, + #o_RXBUFSTATUS =, + i_RXDDIEN =1, + i_RXDLYBYPASS =0, + i_RXDLYEN =1, + i_RXDLYOVRDEN =0, + i_RXDLYSRESET =rx_init.rxdlysreset, + o_RXDLYSRESETDONE =rx_init.rxdlysresetdone, + i_RXPHALIGN =0, + o_RXPHALIGNDONE =rxphaligndone, + i_RXPHALIGNEN =0, + i_RXPHDLYPD =0, + i_RXPHDLYRESET =0, + #o_RXPHMONITOR =, + i_RXPHOVRDEN =0, + #o_RXPHSLIPMONITOR =, + #o_RXSTATUS =, + i_RXSYNCALLIN =rxphaligndone, + o_RXSYNCDONE =rx_init.rxsyncdone, + i_RXSYNCIN =0, + i_RXSYNCMODE =1, + #o_RXSYNCOUT =, + # Receive Ports - RX Byte and Word Alignment Ports + #o_RXBYTEISALIGNED =, + #o_RXBYTEREALIGN =, + #o_RXCOMMADET =, + i_RXCOMMADETEN =1, + i_RXMCOMMAALIGNEN =0, + i_RXPCOMMAALIGNEN =0, + i_RXSLIDE =0, + # 
Receive Ports - RX Channel Bonding Ports + #o_RXCHANBONDSEQ =, + i_RXCHBONDEN =0, + i_RXCHBONDI =0b0000, + i_RXCHBONDLEVEL =0, + i_RXCHBONDMASTER =0, + #o_RXCHBONDO =, + i_RXCHBONDSLAVE =0, + # Receive Ports - RX Channel Bonding Ports + #o_RXCHANISALIGNED =, + #o_RXCHANREALIGN =, + # Receive Ports - RX Decision Feedback Equalizer + #o_DMONITOROUT =, + i_RXADAPTSELTEST =0, + i_RXDFEXYDEN =0, + i_RXOSINTEN =0b1, + i_RXOSINTID0 =0, + i_RXOSINTNTRLEN =0, + #o_RXOSINTSTROBEDONE =, + # Receive Ports - RX Driver,OOB signalling,Coupling and Eq.,CDR + i_RXLPMLFOVRDEN =0, + i_RXLPMOSINTNTRLEN =0, + # Receive Ports - RX Equalizer Ports + i_RXLPMHFHOLD =0, + i_RXLPMHFOVRDEN =0, + i_RXLPMLFHOLD =0, + i_RXOSHOLD =0, + i_RXOSOVRDEN =0, + # Receive Ports - RX Fabric ClocK Output Control Ports + #o_RXRATEDONE =, + # Receive Ports - RX Fabric Clock Output Control Ports + i_RXRATEMODE =0b0, + # Receive Ports - RX Fabric Output Control Ports + o_RXOUTCLK =self.rxoutclk, + #o_RXOUTCLKFABRIC =, + #o_RXOUTCLKPCS =, + i_RXOUTCLKSEL =0b010, + # Receive Ports - RX Gearbox Ports + #o_RXDATAVALID =, + #o_RXHEADER =, + #o_RXHEADERVALID =, + #o_RXSTARTOFSEQ =, + i_RXGEARBOXSLIP =0, + # Receive Ports - RX Initialization and Reset Ports + i_GTRXRESET =rx_init.gtrxreset, + i_RXLPMRESET =0, + i_RXOOBRESET =0, + i_RXPCSRESET =0, + i_RXPMARESET =0, + # Receive Ports - RX OOB Signaling ports + #o_RXCOMSASDET =, + #o_RXCOMWAKEDET =, + #o_RXCOMINITDET =, + #o_RXELECIDLE =, + i_RXELECIDLEMODE =0b11, + + # Receive Ports - RX Polarity Control Ports + i_RXPOLARITY =0, + # Receive Ports -RX Initialization and Reset Ports + o_RXRESETDONE =rx_init.rxresetdone, + # TX Buffer Bypass Ports + i_TXPHDLYTSTCLK =0, + # TX Configurable Driver Ports + i_TXPOSTCURSOR =0b00000, + i_TXPOSTCURSORINV =0, + i_TXPRECURSOR =0, + i_TXPRECURSORINV =0, + # TX Fabric Clock Output Control Ports + i_TXRATEMODE =0, + # TX Initialization and Reset Ports + i_CFGRESET =0, + i_GTTXRESET =tx_init.gttxreset, + #o_PCSRSVDOUT =, + i_TXUSERRDY =tx_init.txuserrdy, + # TX Phase Interpolator PPM Controller Ports + i_TXPIPPMEN =0, + i_TXPIPPMOVRDEN =0, + i_TXPIPPMPD =0, + i_TXPIPPMSEL =0, + i_TXPIPPMSTEPSIZE =0, + # Transceiver Reset Mode Operation + i_GTRESETSEL =0, + i_RESETOVRD =0, + # Transmit Ports + #o_TXPMARESETDONE =, + # Transmit Ports - Configurable Driver Ports + i_PMARSVDIN0 =0b0, + i_PMARSVDIN1 =0b0, + # Transmit Ports - FPGA TX Interface Ports + i_TXDATA =Cat(txdata[:8], txdata[10:18]), + i_TXUSRCLK =ClockSignal("rtio_tx"), + i_TXUSRCLK2 =ClockSignal("rtio_tx"), + + # Transmit Ports - PCI Express Ports + i_TXELECIDLE =0, + i_TXMARGIN =0, + i_TXRATE =0, + i_TXSWING =0, + # Transmit Ports - Pattern Generator Ports + i_TXPRBSFORCEERR =0, + # Transmit Ports - TX 8B/10B Encoder Ports + i_TX8B10BBYPASS =0, + i_TXCHARDISPMODE =Cat(txdata[9], txdata[19]), + i_TXCHARDISPVAL =Cat(txdata[8], txdata[18]), + i_TXCHARISK =0, + # Transmit Ports - TX Buffer Bypass Ports + i_TXDLYBYPASS =0, + i_TXDLYEN =tx_init.txdlyen, + i_TXDLYHOLD =0, + i_TXDLYOVRDEN =0, + i_TXDLYSRESET =tx_init.txdlysreset, + o_TXDLYSRESETDONE =tx_init.txdlysresetdone, + i_TXDLYUPDOWN =0, + i_TXPHALIGN =tx_init.txphalign, + o_TXPHALIGNDONE =tx_init.txphaligndone, + i_TXPHALIGNEN =1, + i_TXPHDLYPD =0, + i_TXPHDLYRESET =0, + i_TXPHINIT =tx_init.txphinit, + o_TXPHINITDONE =tx_init.txphinitdone, + i_TXPHOVRDEN =0, + # Transmit Ports - TX Buffer Ports + #o_TXBUFSTATUS =, + # Transmit Ports - TX Buffer and Phase Alignment Ports + i_TXSYNCALLIN =0, + #o_TXSYNCDONE =, + #i_TXSYNCIN =0, + #i_TXSYNCMODE =0, + 
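# Note: the TXSYNC* auto-alignment ports in this section are left unused; TX
# buffer bypass appears to be sequenced manually by GTPTXInit through the
# TXDLYSRESET/TXPHINIT/TXPHALIGN/TXDLYEN ports above, while the RX side relies
# on the single-lane auto mode (RXSYNCALLIN/RXSYNCMODE).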
#o_TXSYNCOUT =, + # Transmit Ports - TX Configurable Driver Ports + o_GTPTXN =pads.txn, + o_GTPTXP =pads.txp, + i_TXBUFDIFFCTRL =0b100, + i_TXDEEMPH =0, + i_TXDIFFCTRL =0b1000, + i_TXDIFFPD =0, + i_TXINHIBIT =~self.txenable, + i_TXMAINCURSOR =0b0000000, + i_TXPISOPD =0, + # Transmit Ports - TX Fabric Clock Output Control Ports + o_TXOUTCLK =self.txoutclk, + #o_TXOUTCLKFABRIC =, + #o_TXOUTCLKPCS =, + i_TXOUTCLKSEL =0b011, + #o_TXRATEDONE =, + # Transmit Ports - TX Gearbox Ports + #o_TXGEARBOXREADY =, + i_TXHEADER =0, + i_TXSEQUENCE =0, + i_TXSTARTSEQ =0, + # Transmit Ports - TX Initialization and Reset Ports + i_TXPCSRESET =0, + i_TXPMARESET =0, + o_TXRESETDONE =tx_init.txresetdone, + # Transmit Ports - TX OOB signalling Ports + #o_TXCOMFINISH =, + i_TXCOMINIT =0, + i_TXCOMSAS =0, + i_TXCOMWAKE =0, + i_TXPDELECIDLEMODE =0, + # Transmit Ports - TX Polarity Control Ports + i_TXPOLARITY =0, + # Transmit Ports - TX Receiver Detection Ports + i_TXDETECTRX =0, + # Transmit Ports - pattern Generator Ports + i_TXPRBSSEL =0 + ) + if qpll_channel.index == 0: + gtp_params.update( + i_RXSYSCLKSEL=0b00, + i_TXSYSCLKSEL=0b00, + i_PLL0CLK=qpll_channel.clk, + i_PLL0REFCLK=qpll_channel.refclk, + i_PLL1CLK=0, + i_PLL1REFCLK=0, + ) + elif qpll_channel.index == 1: + gtp_params.update( + i_RXSYSCLKSEL=0b11, + i_TXSYSCLKSEL=0b11, + i_PLL0CLK=0, + i_PLL0REFCLK=0, + i_PLL1CLK=qpll_channel.clk, + i_PLL1REFCLK=qpll_channel.refclk, + ) + else: + raise ValueError + self.specials += Instance("GTPE2_CHANNEL", **gtp_params) + + # tx clocking + tx_reset_deglitched = Signal() + tx_reset_deglitched.attr.add("no_retiming") + self.sync += tx_reset_deglitched.eq(~tx_init.done) + self.clock_domains.cd_rtio_tx = ClockDomain() + if mode == "master" or mode == "single": + self.specials += Instance("BUFG", i_I=self.txoutclk, o_O=self.cd_rtio_tx.clk) + self.specials += AsyncResetSynchronizer(self.cd_rtio_tx, tx_reset_deglitched) + + # rx clocking + rx_reset_deglitched = Signal() + rx_reset_deglitched.attr.add("no_retiming") + self.sync.rtio_tx += rx_reset_deglitched.eq(~rx_init.done) + self.clock_domains.cd_rtio_rx = ClockDomain() + self.specials += [ + Instance("BUFG", i_I=self.rxoutclk, o_O=self.cd_rtio_rx.clk), + AsyncResetSynchronizer(self.cd_rtio_rx, rx_reset_deglitched) + ] + + # tx data + self.comb += txdata.eq(Cat(*[encoder.output[i] for i in range(2)])) + + # rx data + for i in range(2): + self.comb += decoders[i].input.eq(rxdata[10*i:10*(i+1)]) + + # clock alignment + clock_aligner = BruteforceClockAligner(0b0101111100, rtio_clk_freq, check_period=12e-3) + self.submodules += clock_aligner + self.comb += [ + clock_aligner.rxdata.eq(rxdata), + rx_init.restart.eq(clock_aligner.restart), + self.rx_ready.eq(clock_aligner.ready) + ] + + +class GTPTXPhaseAlignement(Module): + # TX Buffer Bypass in Single-Lane/Multi-Lane Auto Mode (ug482) + def __init__(self, gtps): + master_phaligndone = Signal() + slaves_phaligndone = Signal(reset=1) + # Specific to Slave transceivers + for gtp in gtps: + if gtp.mode == "slave": + self.comb += gtp.tx_init.master_phaligndone.eq(master_phaligndone) + slaves_phaligndone = slaves_phaligndone & gtp.tx_init.done + # Specific to Master transceivers + for gtp in gtps: + if gtp.mode == "master": + self.comb += [ + master_phaligndone.eq(gtp.tx_init.master_phaligndone), + gtp.tx_init.slaves_phaligndone.eq(slaves_phaligndone) + ] + + +class GTP(Module, TransceiverInterface): + def __init__(self, qpll_channel, data_pads, sys_clk_freq, rtio_clk_freq, master=0): + self.nchannels = nchannels = len(data_pads) + 
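# One lane, selected by the `master` argument (or the only lane in "single"
# mode), provides the common rtio_tx clock; the loop below reuses that clock
# for every "slave" lane, and GTPTXPhaseAlignement coordinates the master/slave
# TX buffer bypass handshake so that all lanes complete phase alignment
# together.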
self.gtps = [] + + # # # + + rtio_tx_clk = Signal() + channel_interfaces = [] + for i in range(nchannels): + if nchannels == 1: + mode = "single" + else: + mode = "master" if i == master else "slave" + gtp = GTPSingle(qpll_channel, data_pads[i], sys_clk_freq, rtio_clk_freq, mode) + if mode == "slave": + self.comb += gtp.cd_rtio_tx.clk.eq(rtio_tx_clk) + else: + self.comb += rtio_tx_clk.eq(gtp.cd_rtio_tx.clk) + self.gtps.append(gtp) + setattr(self.submodules, "gtp"+str(i), gtp) + channel_interface = ChannelInterface(gtp.encoder, gtp.decoders) + self.comb += channel_interface.rx_ready.eq(gtp.rx_ready) + channel_interfaces.append(channel_interface) + + self.submodules.tx_phase_alignment = GTPTXPhaseAlignement(self.gtps) + + TransceiverInterface.__init__(self, channel_interfaces) + for n, gtp in enumerate(self.gtps): + self.comb += [ + gtp.stable_clkin.eq(self.stable_clkin.storage), + gtp.txenable.eq(self.txenable.storage[n]) + ] + + self.comb += [ + self.cd_rtio.clk.eq(self.gtps[master].cd_rtio_tx.clk), + self.cd_rtio.rst.eq(reduce(or_, [gtp.cd_rtio_tx.rst for gtp in self.gtps])) + ] + for i in range(nchannels): + self.comb += [ + getattr(self, "cd_rtio_rx" + str(i)).clk.eq(self.gtps[i].cd_rtio_rx.clk), + getattr(self, "cd_rtio_rx" + str(i)).rst.eq(self.gtps[i].cd_rtio_rx.rst) + ] diff --git a/artiq/gateware/drtio/transceiver/gtp_7series_init.py b/artiq/gateware/drtio/transceiver/gtp_7series_init.py new file mode 100644 index 000000000..8916c2c36 --- /dev/null +++ b/artiq/gateware/drtio/transceiver/gtp_7series_init.py @@ -0,0 +1,343 @@ +from math import ceil + +from migen import * +from migen.genlib.cdc import MultiReg, PulseSynchronizer +from migen.genlib.misc import WaitTimer + + +__all__ = ["GTPTXInit", "GTPRXInit"] + + +class GTPTXInit(Module): + def __init__(self, sys_clk_freq, mode="single"): + self.stable_clkin = Signal() + self.done = Signal() + self.restart = Signal() + + # GTP signals + self.plllock = Signal() + self.pllreset = Signal() + self.gttxreset = Signal() + self.gttxreset.attr.add("no_retiming") + self.txresetdone = Signal() + self.txdlysreset = Signal() + self.txdlysresetdone = Signal() + self.txphinit = Signal() + self.txphinitdone = Signal() + self.txphalign = Signal() + self.txphaligndone = Signal() + self.txdlyen = Signal() + self.txuserrdy = Signal() + + self.master_phaligndone = Signal() + self.slaves_phaligndone = Signal() + + # # # + + # Double-latch transceiver asynch outputs + plllock = Signal() + txresetdone = Signal() + txdlysresetdone = Signal() + txphinitdone = Signal() + txphaligndone = Signal() + self.specials += [ + MultiReg(self.plllock, plllock), + MultiReg(self.txresetdone, txresetdone), + MultiReg(self.txdlysresetdone, txdlysresetdone), + MultiReg(self.txphinitdone, txphinitdone), + MultiReg(self.txphaligndone, txphaligndone) + ] + + # Deglitch FSM outputs driving transceiver asynch inputs + gttxreset = Signal() + txdlysreset = Signal() + txphinit = Signal() + txphalign = Signal() + txdlyen = Signal() + txuserrdy = Signal() + self.sync += [ + self.gttxreset.eq(gttxreset), + self.txdlysreset.eq(txdlysreset), + self.txphinit.eq(txphinit), + self.txphalign.eq(txphalign), + self.txdlyen.eq(txdlyen), + self.txuserrdy.eq(txuserrdy) + ] + + # PLL reset must be at least 500us + pll_reset_cycles = ceil(500e-9*sys_clk_freq) + pll_reset_timer = WaitTimer(pll_reset_cycles) + self.submodules += pll_reset_timer + + startup_fsm = ResetInserter()(FSM(reset_state="PLL_RESET")) + self.submodules += startup_fsm + + ready_timer = WaitTimer(int(1e-3*sys_clk_freq)) + 
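# Watchdog: if the sequence below has not asserted `done` within 1 ms of sys
# clock (e.g. 125000 cycles at an assumed 125 MHz system clock), ready_timer
# expires and, together with `restart`, resets the FSM so that the whole TX
# initialization is retried.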
self.submodules += ready_timer + self.comb += [ + ready_timer.wait.eq(~self.done & ~startup_fsm.reset), + startup_fsm.reset.eq(self.restart | ready_timer.done) + ] + + txphaligndone_r = Signal(reset=1) + txphaligndone_rising = Signal() + self.sync += txphaligndone_r.eq(txphaligndone) + self.comb += txphaligndone_rising.eq(txphaligndone & ~txphaligndone_r) + + startup_fsm.act("PLL_RESET", + self.pllreset.eq(1), + pll_reset_timer.wait.eq(1), + If(pll_reset_timer.done & self.stable_clkin, + NextState("GTP_RESET") + ) + ) + startup_fsm.act("GTP_RESET", + gttxreset.eq(1), + If(plllock, + NextState("WAIT_GTP_RESET_DONE") + ) + ) + # Release GTP reset and wait for GTP resetdone + # (from UG482, GTP is reset on falling edge + # of gttxreset) + startup_fsm.act("WAIT_GTP_RESET_DONE", + txuserrdy.eq(1), + If(txresetdone, NextState("ALIGN")) + ) + # Start delay alignment + startup_fsm.act("ALIGN", + txuserrdy.eq(1), + txdlysreset.eq(1), + If(txdlysresetdone, + If(mode == "slave", + NextState("WAIT_MASTER") + ).Else( + NextState("PHALIGN") + ) + ) + ) + startup_fsm.act("WAIT_MASTER", + txuserrdy.eq(1), + If(self.master_phaligndone, + NextState("PHALIGN") + ) + ) + # Start phase alignment + startup_fsm.act("PHALIGN", + txuserrdy.eq(1), + txphinit.eq(1), + If(txphinitdone, + NextState("WAIT_FIRST_ALIGN_DONE") + ) + ) + # Wait N rising edges of Xxphaligndone + # N=2 for Single, 3 for Master, 1 for Slave + # (from UGB482 in TX Buffer Bypass in Multi/Single-Lane Auto Mode) + startup_fsm.act("WAIT_FIRST_ALIGN_DONE", + txuserrdy.eq(1), + txphalign.eq(1), + If(txphaligndone_rising, + If(mode == "slave", + NextState("READY") + ).Else( + NextState("WAIT_SECOND_ALIGN_DONE") + ) + ) + ) + startup_fsm.act("WAIT_SECOND_ALIGN_DONE", + txuserrdy.eq(1), + txdlyen.eq(1), + If(txphaligndone_rising, + If(mode == "master", + NextState("WAIT_SLAVES") + ).Else( + NextState("READY") + ) + ) + ) + startup_fsm.act("WAIT_SLAVES", + txuserrdy.eq(1), + self.master_phaligndone.eq(1), + If(self.slaves_phaligndone, + NextState("WAIT_THIRD_ALIGN_DONE") + ) + ) + startup_fsm.act("WAIT_THIRD_ALIGN_DONE", + txuserrdy.eq(1), + txdlyen.eq(1), + If(txphaligndone_rising, + NextState("READY") + ) + ) + startup_fsm.act("READY", + txuserrdy.eq(1), + self.done.eq(1), + If(self.restart, NextState("PLL_RESET")) + ) + + +class GTPRXInit(Module): + def __init__(self, sys_clk_freq): + self.done = Signal() + self.restart = Signal() + + # GTP signals + self.gtrxreset = Signal() + self.gtrxreset.attr.add("no_retiming") + self.gtrxpd = Signal() + self.rxresetdone = Signal() + self.rxdlysreset = Signal() + self.rxdlysresetdone = Signal() + self.rxphalign = Signal() + self.rxdlyen = Signal() + self.rxuserrdy = Signal() + self.rxsyncdone = Signal() + self.rxpmaresetdone = Signal() + + self.drpaddr = Signal(9) + self.drpen = Signal() + self.drpdi = Signal(16) + self.drprdy = Signal() + self.drpdo = Signal(16) + self.drpwe = Signal() + + # # # + + drpvalue = Signal(16) + drpmask = Signal() + self.comb += [ + self.drpaddr.eq(0x011), + If(drpmask, + self.drpdi.eq(drpvalue & 0xf7ff) + ).Else( + self.drpdi.eq(drpvalue) + ) + ] + + rxpmaresetdone = Signal() + self.specials += MultiReg(self.rxpmaresetdone, rxpmaresetdone) + rxpmaresetdone_r = Signal() + self.sync += rxpmaresetdone_r.eq(rxpmaresetdone) + + # Double-latch transceiver asynch outputs + rxresetdone = Signal() + rxdlysresetdone = Signal() + rxsyncdone = Signal() + self.specials += [ + MultiReg(self.rxresetdone, rxresetdone), + MultiReg(self.rxdlysresetdone, rxdlysresetdone), + MultiReg(self.rxsyncdone, 
rxsyncdone) + ] + + # Deglitch FSM outputs driving transceiver asynch inputs + gtrxreset = Signal() + gtrxpd = Signal() + rxdlysreset = Signal() + rxphalign = Signal() + rxdlyen = Signal() + rxuserrdy = Signal() + self.sync += [ + self.gtrxreset.eq(gtrxreset), + self.gtrxpd.eq(gtrxpd), + self.rxdlysreset.eq(rxdlysreset), + self.rxphalign.eq(rxphalign), + self.rxdlyen.eq(rxdlyen), + self.rxuserrdy.eq(rxuserrdy) + ] + + startup_fsm = ResetInserter()(FSM(reset_state="GTP_PD")) + self.submodules += startup_fsm + + ready_timer = WaitTimer(int(4e-3*sys_clk_freq)) + self.submodules += ready_timer + self.comb += [ + ready_timer.wait.eq(~self.done & ~startup_fsm.reset), + startup_fsm.reset.eq(self.restart | ready_timer.done) + ] + + cdr_stable_timer = WaitTimer(1024) + self.submodules += cdr_stable_timer + + startup_fsm.act("GTP_PD", + gtrxpd.eq(1), + NextState("GTP_RESET") + ) + startup_fsm.act("GTP_RESET", + gtrxreset.eq(1), + NextState("DRP_READ_ISSUE") + ) + startup_fsm.act("DRP_READ_ISSUE", + gtrxreset.eq(1), + self.drpen.eq(1), + NextState("DRP_READ_WAIT") + ) + startup_fsm.act("DRP_READ_WAIT", + gtrxreset.eq(1), + If(self.drprdy, + NextValue(drpvalue, self.drpdo), + NextState("DRP_MOD_ISSUE") + ) + ) + startup_fsm.act("DRP_MOD_ISSUE", + gtrxreset.eq(1), + drpmask.eq(1), + self.drpen.eq(1), + self.drpwe.eq(1), + NextState("DRP_MOD_WAIT") + ) + startup_fsm.act("DRP_MOD_WAIT", + gtrxreset.eq(1), + If(self.drprdy, + NextState("WAIT_PMARST_FALL") + ) + ) + startup_fsm.act("WAIT_PMARST_FALL", + rxuserrdy.eq(1), + If(rxpmaresetdone_r & ~rxpmaresetdone, + NextState("DRP_RESTORE_ISSUE") + ) + ) + startup_fsm.act("DRP_RESTORE_ISSUE", + rxuserrdy.eq(1), + self.drpen.eq(1), + self.drpwe.eq(1), + NextState("DRP_RESTORE_WAIT") + ) + startup_fsm.act("DRP_RESTORE_WAIT", + rxuserrdy.eq(1), + If(self.drprdy, + NextState("WAIT_GTP_RESET_DONE") + ) + ) + # Release GTP reset and wait for GTP resetdone + # (from UG482, GTP is reset on falling edge + # of gtrxreset) + startup_fsm.act("WAIT_GTP_RESET_DONE", + rxuserrdy.eq(1), + cdr_stable_timer.wait.eq(1), + If(rxresetdone & cdr_stable_timer.done, + NextState("ALIGN") + ) + ) + # Start delay alignment + startup_fsm.act("ALIGN", + rxuserrdy.eq(1), + rxdlysreset.eq(1), + If(rxdlysresetdone, + NextState("WAIT_ALIGN_DONE") + ) + ) + # Wait for delay alignment + startup_fsm.act("WAIT_ALIGN_DONE", + rxuserrdy.eq(1), + If(rxsyncdone, + NextState("READY") + ) + ) + startup_fsm.act("READY", + rxuserrdy.eq(1), + self.done.eq(1), + If(self.restart, + NextState("GTP_PD") + ) + ) diff --git a/artiq/gateware/drtio/wrpll/__init__.py b/artiq/gateware/drtio/wrpll/__init__.py new file mode 100644 index 000000000..25e510f4c --- /dev/null +++ b/artiq/gateware/drtio/wrpll/__init__.py @@ -0,0 +1,2 @@ +from artiq.gateware.drtio.wrpll.core import WRPLL +from artiq.gateware.drtio.wrpll.ddmtd import DDMTDSamplerExtFF, DDMTDSamplerGTP diff --git a/artiq/gateware/drtio/wrpll/core.py b/artiq/gateware/drtio/wrpll/core.py new file mode 100644 index 000000000..52bc91ab7 --- /dev/null +++ b/artiq/gateware/drtio/wrpll/core.py @@ -0,0 +1,156 @@ +from migen import * +from migen.genlib.resetsync import AsyncResetSynchronizer +from migen.genlib.cdc import MultiReg, PulseSynchronizer +from misoc.interconnect.csr import * + +from artiq.gateware.drtio.wrpll.si549 import Si549 +from artiq.gateware.drtio.wrpll.ddmtd import DDMTD, Collector +from artiq.gateware.drtio.wrpll import thls, filters + + +class FrequencyCounter(Module, AutoCSR): + def __init__(self, timer_width=23, counter_width=23, 
domains=["helper", "rtio", "rtio_rx0"]): + for domain in domains: + name = "counter_" + domain + counter = CSRStatus(counter_width, name=name) + setattr(self, name, counter) + self.update_en = CSRStorage() + + timer = Signal(timer_width) + timer_tick = Signal() + self.sync += Cat(timer, timer_tick).eq(timer + 1) + + for domain in domains: + sync_domain = getattr(self.sync, domain) + divider = Signal(2) + sync_domain += divider.eq(divider + 1) + + divided = Signal() + divided.attr.add("no_retiming") + sync_domain += divided.eq(divider[-1]) + divided_sys = Signal() + self.specials += MultiReg(divided, divided_sys) + + divided_sys_r = Signal() + divided_tick = Signal() + self.sync += divided_sys_r.eq(divided_sys) + self.comb += divided_tick.eq(divided_sys & ~divided_sys_r) + + counter = Signal(counter_width) + counter_csr = getattr(self, "counter_" + domain) + self.sync += [ + If(timer_tick, + If(self.update_en.storage, counter_csr.status.eq(counter)), + counter.eq(0), + ).Else( + If(divided_tick, counter.eq(counter + 1)) + ) + ] + + +class WRPLL(Module, AutoCSR): + def __init__(self, helper_clk_pads, main_dcxo_i2c, helper_dxco_i2c, ddmtd_inputs, N=15): + self.helper_reset = CSRStorage(reset=1) + self.collector_reset = CSRStorage(reset=1) + self.filter_reset = CSRStorage(reset=1) + self.adpll_offset_helper = CSRStorage(24) + self.adpll_offset_main = CSRStorage(24) + + self.tag_arm = CSR() + self.main_diff_tag = CSRStatus(32) + self.helper_diff_tag = CSRStatus(32) + self.ref_tag = CSRStatus(N) + self.main_tag = CSRStatus(N) + + main_diff_tag_32 = Signal((32, True)) + helper_diff_tag_32 = Signal((32, True)) + self.comb += [ + self.main_diff_tag.status.eq(main_diff_tag_32), + self.helper_diff_tag.status.eq(helper_diff_tag_32) + ] + + self.clock_domains.cd_helper = ClockDomain() + self.clock_domains.cd_collector = ClockDomain() + self.clock_domains.cd_filter = ClockDomain() + self.helper_reset.storage.attr.add("no_retiming") + self.filter_reset.storage.attr.add("no_retiming") + self.specials += Instance("IBUFGDS", + i_I=helper_clk_pads.p, i_IB=helper_clk_pads.n, + o_O=self.cd_helper.clk) + self.comb += [ + self.cd_collector.clk.eq(self.cd_collector.clk), + self.cd_filter.clk.eq(self.cd_helper.clk), + ] + self.specials += [ + AsyncResetSynchronizer(self.cd_helper, self.helper_reset.storage), + AsyncResetSynchronizer(self.cd_collector, self.collector_reset.storage), + AsyncResetSynchronizer(self.cd_filter, self.filter_reset.storage) + ] + + self.submodules.helper_dcxo = Si549(helper_dxco_i2c) + self.submodules.main_dcxo = Si549(main_dcxo_i2c) + + # for diagnostics and PLL initialization + self.submodules.frequency_counter = FrequencyCounter() + + ddmtd_counter = Signal(N) + self.sync.helper += ddmtd_counter.eq(ddmtd_counter + 1) + self.submodules.ddmtd_ref = DDMTD(ddmtd_counter, ddmtd_inputs.rec_clk) + self.submodules.ddmtd_main = DDMTD(ddmtd_counter, ddmtd_inputs.main_xo) + + collector_cd = ClockDomainsRenamer("collector") + filter_cd = ClockDomainsRenamer("filter") + self.submodules.collector = collector_cd(Collector(N)) + self.submodules.filter_helper = filter_cd( + thls.make(filters.helper, data_width=48)) + self.submodules.filter_main = filter_cd( + thls.make(filters.main, data_width=48)) + + self.comb += [ + self.collector.tag_ref.eq(self.ddmtd_ref.h_tag), + self.collector.ref_stb.eq(self.ddmtd_ref.h_tag_update), + self.collector.tag_main.eq(self.ddmtd_main.h_tag), + self.collector.main_stb.eq(self.ddmtd_main.h_tag_update) + ] + + collector_stb_ps = PulseSynchronizer("helper", "sys") + 
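# Tag snapshot handshake: the collector's out_stb pulse is brought from the
# helper clock domain into sys below; once software has set tag_arm, the next
# synchronized strobe latches the MultiReg'd tag values into the CSRs and
# clears the arm bit, so the host reads one coherent set of tags.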
self.submodules += collector_stb_ps + self.sync.helper += collector_stb_ps.i.eq(self.collector.out_stb) + collector_stb_sys = Signal() + self.sync += collector_stb_sys.eq(collector_stb_ps.o) + + main_diff_tag_sys = Signal((N+2, True)) + helper_diff_tag_sys = Signal((N+2, True)) + ref_tag_sys = Signal(N) + main_tag_sys = Signal(N) + self.specials += MultiReg(self.collector.out_main, main_diff_tag_sys) + self.specials += MultiReg(self.collector.out_helper, helper_diff_tag_sys) + self.specials += MultiReg(self.collector.tag_ref, ref_tag_sys) + self.specials += MultiReg(self.collector.tag_main, main_tag_sys) + + self.sync += [ + If(self.tag_arm.re & self.tag_arm.r, self.tag_arm.w.eq(1)), + If(collector_stb_sys, + self.tag_arm.w.eq(0), + If(self.tag_arm.w, + main_diff_tag_32.eq(main_diff_tag_sys), + helper_diff_tag_32.eq(helper_diff_tag_sys), + self.ref_tag.status.eq(ref_tag_sys), + self.main_tag.status.eq(main_tag_sys) + ) + ) + ] + + self.comb += [ + self.filter_helper.input.eq(self.collector.out_helper << 22), + self.filter_helper.input_stb.eq(self.collector.out_stb), + self.filter_main.input.eq(self.collector.out_main), + self.filter_main.input_stb.eq(self.collector.out_stb) + ] + + self.sync.helper += [ + self.helper_dcxo.adpll_stb.eq(self.filter_helper.output_stb), + self.helper_dcxo.adpll.eq(self.filter_helper.output + self.adpll_offset_helper.storage), + self.main_dcxo.adpll_stb.eq(self.filter_main.output_stb), + self.main_dcxo.adpll.eq(self.filter_main.output + self.adpll_offset_main.storage) + ] diff --git a/artiq/gateware/drtio/wrpll/ddmtd.py b/artiq/gateware/drtio/wrpll/ddmtd.py new file mode 100644 index 000000000..ddeeac54e --- /dev/null +++ b/artiq/gateware/drtio/wrpll/ddmtd.py @@ -0,0 +1,221 @@ +from migen import * +from migen.genlib.cdc import PulseSynchronizer, MultiReg +from migen.genlib.fsm import FSM +from misoc.interconnect.csr import * + + +class DDMTDSamplerExtFF(Module): + def __init__(self, ddmtd_inputs): + self.rec_clk = Signal() + self.main_xo = Signal() + + # # # + + # TODO: s/h timing at FPGA pads + if hasattr(ddmtd_inputs, "rec_clk"): + rec_clk_1 = ddmtd_inputs.rec_clk + else: + rec_clk_1 = Signal() + self.specials += Instance("IBUFDS", + i_I=ddmtd_inputs.rec_clk_p, i_IB=ddmtd_inputs.rec_clk_n, + o_O=rec_clk_1) + if hasattr(ddmtd_inputs, "main_xo"): + main_xo_1 = ddmtd_inputs.main_xo + else: + main_xo_1 = Signal() + self.specials += Instance("IBUFDS", + i_I=ddmtd_inputs.main_xo_p, i_IB=ddmtd_inputs.main_xo_n, + o_O=main_xo_1) + self.specials += [ + Instance("FD", i_C=ClockSignal("helper"), + i_D=rec_clk_1, o_Q=self.rec_clk, + attr={("IOB", "TRUE")}), + Instance("FD", i_C=ClockSignal("helper"), + i_D=main_xo_1, o_Q=self.main_xo, + attr={("IOB", "TRUE")}), + ] + + +class DDMTDSamplerGTP(Module): + def __init__(self, gtp, main_xo_pads): + self.rec_clk = Signal() + self.main_xo = Signal() + + # # # + + # Getting the main XO signal from IBUFDS_GTE2 is problematic because + # the transceiver PLL craps out if an improper clock signal is applied, + # so we are disabling the buffer until the clock is stable. 
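# The recovered clock is taken from the GTP RX clock domain (cd_rtio_rx0) and
# the main XO from a plain IBUFDS; each is then sampled by two cascaded
# flip-flops in the helper clock domain, with IOB/DONT_TOUCH attributes keeping
# the synthesizer from optimizing or relocating the sampling registers.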
+ main_xo_se = Signal() + rec_clk_1 = Signal() + main_xo_1 = Signal() + self.specials += [ + Instance("IBUFDS", + i_I=main_xo_pads.p, i_IB=main_xo_pads.n, + o_O=main_xo_se), + Instance("FD", i_C=ClockSignal("helper"), + i_D=gtp.cd_rtio_rx0.clk, o_Q=rec_clk_1, + attr={("DONT_TOUCH", "TRUE")}), + Instance("FD", i_C=ClockSignal("helper"), + i_D=rec_clk_1, o_Q=self.rec_clk, + attr={("DONT_TOUCH", "TRUE")}), + Instance("FD", i_C=ClockSignal("helper"), + i_D=main_xo_se, o_Q=main_xo_1, + attr={("IOB", "TRUE")}), + Instance("FD", i_C=ClockSignal("helper"), + i_D=main_xo_1, o_Q=self.main_xo, + attr={("DONT_TOUCH", "TRUE")}), + ] + + +class DDMTDDeglitcherFirstEdge(Module): + def __init__(self, input_signal, blind_period=128): + self.detect = Signal() + self.tag_correction = 0 + + rising = Signal() + input_signal_r = Signal() + self.sync.helper += [ + input_signal_r.eq(input_signal), + rising.eq(input_signal & ~input_signal_r) + ] + + blind_counter = Signal(max=blind_period) + self.sync.helper += [ + If(blind_counter != 0, blind_counter.eq(blind_counter - 1)), + If(input_signal_r, blind_counter.eq(blind_period - 1)), + self.detect.eq(rising & (blind_counter == 0)) + ] + + +class DDMTD(Module): + def __init__(self, counter, input_signal): + + # in helper clock domain + self.h_tag = Signal(len(counter)) + self.h_tag_update = Signal() + + # # # + + deglitcher = DDMTDDeglitcherFirstEdge(input_signal) + self.submodules += deglitcher + + self.sync.helper += [ + self.h_tag_update.eq(0), + If(deglitcher.detect, + self.h_tag_update.eq(1), + self.h_tag.eq(counter + deglitcher.tag_correction) + ) + ] + + +class Collector(Module): + """Generates loop filter inputs from DDMTD outputs. + + The input to the main DCXO lock loop filter is the difference between the + reference and main tags after unwrapping (see below). + + The input to the helper DCXO lock loop filter is the difference between the + current reference tag and the previous reference tag after unwrapping. + + When the WR PLL is locked, the following ideally (no noise/jitter) obtain: + - f_main = f_ref + - f_helper = f_ref * 2^N/(2^N+1) + - f_beat = f_ref - f_helper = f_ref / (2^N + 1) (cycle time is: dt=1/f_beat) + - the reference and main DCXO tags are equal to each other at every cycle + (the main DCXO lock drives this difference to 0) + - the reference and main DCXO tags both have the same value at each cycle + (the tag difference for each DDMTD is given by + f_helper*dt = f_helper/f_beat = 2^N, which causes the N-bit DDMTD counter + to wrap around and come back to its previous value) + + Note that we currently lock the frequency of the helper DCXO to the + reference clock, not it's phase. As a result, while the tag differences are + controlled, their absolute values are arbitrary. We could consider moving + the helper lock to a phase lock at some point in the future... + + Since the DDMTD counter is only N bits, it is possible for tag values to + wrap around. This will happen frequently if the locked tags happens to be + near the edges of the counter, so that jitter can easily cause a phase wrap. + But, it can also easily happen during lock acquisition or other transients. + To avoid glitches in the output, we unwrap the tag differences. Currently + we do this in hardware, but we should consider extending the processor to + allow us to do it inside the filters. Since the processor uses wider + signals, this would significantly extend the overall glitch-free + range of the PLL and may aid lock acquisition. 
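    For concreteness, a small numeric illustration (N=15 is the default used by
    WRPLL in this patch; the 125 MHz reference is an assumed example value):

        N = 15
        f_ref = 125e6
        f_helper = f_ref * 2**N / (2**N + 1)   # ~124.9962 MHz
        f_beat = f_ref / (2**N + 1)            # ~3.81 kHz, beat period ~262 us

        def unwrap(diff, prev):
            # mirrors the UNWRAP state below
            if diff - prev > 2**(N - 1):
                diff -= 2**N
            elif prev - diff > 2**(N - 1):
                diff += 2**N
            return diff

        # a raw tag difference of 32760 with a previous output near 0 is really
        # a small negative phase difference:
        assert unwrap(32760, 0) == -8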
+ """ + def __init__(self, N): + self.ref_stb = Signal() + self.main_stb = Signal() + self.tag_ref = Signal(N) + self.tag_main = Signal(N) + + self.out_stb = Signal() + self.out_main = Signal((N+2, True)) + self.out_helper = Signal((N+2, True)) + self.out_tag_ref = Signal(N) + self.out_tag_main = Signal(N) + + tag_ref_r = Signal(N) + tag_main_r = Signal(N) + main_tag_diff = Signal((N+2, True)) + helper_tag_diff = Signal((N+2, True)) + + # # # + + fsm = FSM(reset_state="IDLE") + self.submodules += fsm + + fsm.act("IDLE", + NextValue(self.out_stb, 0), + If(self.ref_stb & self.main_stb, + NextValue(tag_ref_r, self.tag_ref), + NextValue(tag_main_r, self.tag_main), + NextState("DIFF") + ).Elif(self.ref_stb, + NextValue(tag_ref_r, self.tag_ref), + NextState("WAITMAIN") + ).Elif(self.main_stb, + NextValue(tag_main_r, self.tag_main), + NextState("WAITREF") + ) + ) + fsm.act("WAITREF", + If(self.ref_stb, + NextValue(tag_ref_r, self.tag_ref), + NextState("DIFF") + ) + ) + fsm.act("WAITMAIN", + If(self.main_stb, + NextValue(tag_main_r, self.tag_main), + NextState("DIFF") + ) + ) + fsm.act("DIFF", + NextValue(main_tag_diff, tag_main_r - tag_ref_r), + NextValue(helper_tag_diff, tag_ref_r - self.out_tag_ref), + NextState("UNWRAP") + ) + fsm.act("UNWRAP", + If(main_tag_diff - self.out_main > 2**(N-1), + NextValue(main_tag_diff, main_tag_diff - 2**N) + ).Elif(self.out_main - main_tag_diff > 2**(N-1), + NextValue(main_tag_diff, main_tag_diff + 2**N) + ), + + If(helper_tag_diff - self.out_helper > 2**(N-1), + NextValue(helper_tag_diff, helper_tag_diff - 2**N) + ).Elif(self.out_helper - helper_tag_diff > 2**(N-1), + NextValue(helper_tag_diff, helper_tag_diff + 2**N) + ), + NextState("OUTPUT") + ) + fsm.act("OUTPUT", + NextValue(self.out_tag_ref, tag_ref_r), + NextValue(self.out_tag_main, tag_main_r), + NextValue(self.out_main, main_tag_diff), + NextValue(self.out_helper, helper_tag_diff), + NextValue(self.out_stb, 1), + NextState("IDLE") + ) diff --git a/artiq/gateware/drtio/wrpll/filters.py b/artiq/gateware/drtio/wrpll/filters.py new file mode 100644 index 000000000..470f17bf3 --- /dev/null +++ b/artiq/gateware/drtio/wrpll/filters.py @@ -0,0 +1,61 @@ +helper_xn1 = 0 +helper_xn2 = 0 +helper_yn0 = 0 +helper_yn1 = 0 +helper_yn2 = 0 +helper_out = 0 + +main_xn1 = 0 +main_xn2 = 0 +main_yn0 = 0 +main_yn1 = 0 +main_yn2 = 0 + + +def helper(tag_diff): + global helper_xn1, helper_xn2, helper_yn0, \ + helper_yn1, helper_yn2, helper_out + + helper_xn0 = 0 - tag_diff # *(2**22) + + helper_yr = 4294967296 + + helper_yn2 = helper_yn1 + helper_yn1 = helper_yn0 + + helper_yn0 = (284885690 * (helper_xn0 + + (217319150 * helper_xn1 >> 44) + - (17591968725108 * helper_xn2 >> 44) + ) >> 44 + ) + (35184372088832*helper_yn1 >> 44) - helper_yn2 + + helper_xn2 = helper_xn1 + helper_xn1 = helper_xn0 + + helper_out = 268435456*helper_yn0 >> 44 + helper_out = min(helper_out, helper_yr) + helper_out = max(helper_out, 0 - helper_yr) + + return helper_out + + +def main(main_xn0): + global main_xn1, main_xn2, main_yn0, main_yn1, main_yn2 + + main_yr = 4294967296 + + main_yn2 = main_yn1 + main_yn1 = main_yn0 + main_yn0 = ( + ((133450380908*(((35184372088832*main_xn0) >> 44) + + ((17592186044417*main_xn1) >> 44))) >> 44) + + ((29455872930889*main_yn1) >> 44) - + ((12673794781453*main_yn2) >> 44)) + + main_xn2 = main_xn1 + main_xn1 = main_xn0 + + main_yn0 = min(main_yn0, main_yr) + main_yn0 = max(main_yn0, 0 - main_yr) + + return main_yn0 diff --git a/artiq/gateware/drtio/wrpll/si549.py b/artiq/gateware/drtio/wrpll/si549.py new file mode 
100644 index 000000000..46ce0c138 --- /dev/null +++ b/artiq/gateware/drtio/wrpll/si549.py @@ -0,0 +1,340 @@ +from migen import * +from migen.genlib.fsm import * +from migen.genlib.cdc import MultiReg, PulseSynchronizer, BlindTransfer + +from misoc.interconnect.csr import * + + +class I2CClockGen(Module): + def __init__(self, width): + self.load = Signal(width) + self.clk2x = Signal() + + cnt = Signal.like(self.load) + self.comb += [ + self.clk2x.eq(cnt == 0), + ] + self.sync += [ + If(self.clk2x, + cnt.eq(self.load), + ).Else( + cnt.eq(cnt - 1), + ) + ] + + +class I2CMasterMachine(Module): + def __init__(self, clock_width): + self.scl = Signal(reset=1) + self.sda_o = Signal(reset=1) + self.sda_i = Signal() + + self.submodules.cg = CEInserter()(I2CClockGen(clock_width)) + self.start = Signal() + self.stop = Signal() + self.write = Signal() + self.ack = Signal() + self.data = Signal(8) + self.ready = Signal() + + ### + + bits = Signal(4) + data = Signal(8) + + fsm = CEInserter()(FSM("IDLE")) + self.submodules += fsm + + fsm.act("IDLE", + self.ready.eq(1), + If(self.start, + NextState("START0"), + ).Elif(self.stop, + NextState("STOP0"), + ).Elif(self.write, + NextValue(bits, 8), + NextValue(data, self.data), + NextState("WRITE0") + ) + ) + + fsm.act("START0", + NextValue(self.scl, 1), + NextState("START1") + ) + fsm.act("START1", + NextValue(self.sda_o, 0), + NextState("IDLE") + ) + + fsm.act("STOP0", + NextValue(self.scl, 0), + NextState("STOP1") + ) + fsm.act("STOP1", + NextValue(self.sda_o, 0), + NextState("STOP2") + ) + fsm.act("STOP2", + NextValue(self.scl, 1), + NextState("STOP3") + ) + fsm.act("STOP3", + NextValue(self.sda_o, 1), + NextState("IDLE") + ) + + fsm.act("WRITE0", + NextValue(self.scl, 0), + NextState("WRITE1") + ) + fsm.act("WRITE1", + If(bits == 0, + NextValue(self.sda_o, 1), + NextState("READACK0"), + ).Else( + NextValue(self.sda_o, data[7]), + NextState("WRITE2"), + ) + ) + fsm.act("WRITE2", + NextValue(self.scl, 1), + NextValue(data[1:], data[:-1]), + NextValue(bits, bits - 1), + NextState("WRITE0"), + ) + fsm.act("READACK0", + NextValue(self.scl, 1), + NextState("READACK1"), + ) + fsm.act("READACK1", + NextValue(self.ack, ~self.sda_i), + NextState("IDLE") + ) + + run = Signal() + idle = Signal() + self.comb += [ + run.eq((self.start | self.stop | self.write) & self.ready), + idle.eq(~run & fsm.ongoing("IDLE")), + self.cg.ce.eq(~idle), + fsm.ce.eq(run | self.cg.clk2x), + ] + + +class ADPLLProgrammer(Module): + def __init__(self): + self.i2c_divider = Signal(16) + self.i2c_address = Signal(7) + + self.adpll = Signal(24) + self.stb = Signal() + self.busy = Signal() + self.nack = Signal() + + self.scl = Signal() + self.sda_i = Signal() + self.sda_o = Signal() + + self.scl.attr.add("no_retiming") + self.sda_o.attr.add("no_retiming") + + # # # + + master = I2CMasterMachine(16) + self.submodules += master + + self.comb += [ + master.cg.load.eq(self.i2c_divider), + self.scl.eq(master.scl), + master.sda_i.eq(self.sda_i), + self.sda_o.eq(master.sda_o) + ] + + fsm = FSM() + self.submodules += fsm + + adpll = Signal.like(self.adpll) + + fsm.act("IDLE", + If(self.stb, + NextValue(adpll, self.adpll), + NextState("START") + ) + ) + fsm.act("START", + master.start.eq(1), + If(master.ready, NextState("DEVADDRESS")) + ) + fsm.act("DEVADDRESS", + master.data.eq(self.i2c_address << 1), + master.write.eq(1), + If(master.ready, NextState("REGADRESS")) + ) + fsm.act("REGADRESS", + master.data.eq(231), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA0") + ).Else( + 
self.nack.eq(1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA0", + master.data.eq(adpll[0:8]), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA1") + ).Else( + self.nack.eq(1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA1", + master.data.eq(adpll[8:16]), + master.write.eq(1), + If(master.ready, + If(master.ack, + NextState("DATA2") + ).Else( + self.nack.eq(1), + NextState("STOP") + ) + ) + ) + fsm.act("DATA2", + master.data.eq(adpll[16:24]), + master.write.eq(1), + If(master.ready, + If(~master.ack, self.nack.eq(1)), + NextState("STOP") + ) + ) + fsm.act("STOP", + master.stop.eq(1), + If(master.ready, + If(~master.ack, self.nack.eq(1)), + NextState("IDLE") + ) + ) + + self.comb += self.busy.eq(~fsm.ongoing("IDLE")) + + +def simulate_programmer(): + from migen.sim.core import run_simulation + + dut = ADPLLProgrammer() + + def generator(): + yield dut.i2c_divider.eq(4) + yield dut.i2c_address.eq(0x55) + yield + yield dut.adpll.eq(0x123456) + yield dut.stb.eq(1) + yield + yield dut.stb.eq(0) + yield + while (yield dut.busy): + yield + for _ in range(20): + yield + + run_simulation(dut, generator(), vcd_name="tb.vcd") + + +class Si549(Module, AutoCSR): + def __init__(self, pads): + self.gpio_enable = CSRStorage(reset=1) + self.gpio_in = CSRStatus(2) + self.gpio_out = CSRStorage(2) + self.gpio_oe = CSRStorage(2) + + self.i2c_divider = CSRStorage(16, reset=75) + self.i2c_address = CSRStorage(7) + self.errors = CSR(2) + + # in helper clock domain + self.adpll = Signal(24) + self.adpll_stb = Signal() + + # # # + + programmer = ClockDomainsRenamer("helper")(ADPLLProgrammer()) + self.submodules += programmer + + self.i2c_divider.storage.attr.add("no_retiming") + self.i2c_address.storage.attr.add("no_retiming") + self.specials += [ + MultiReg(self.i2c_divider.storage, programmer.i2c_divider, "helper"), + MultiReg(self.i2c_address.storage, programmer.i2c_address, "helper") + ] + self.comb += [ + programmer.adpll.eq(self.adpll), + programmer.stb.eq(self.adpll_stb) + ] + + self.gpio_enable.storage.attr.add("no_retiming") + self.gpio_out.storage.attr.add("no_retiming") + self.gpio_oe.storage.attr.add("no_retiming") + + # SCL GPIO and mux + ts_scl = TSTriple(1) + self.specials += ts_scl.get_tristate(pads.scl) + + status = Signal() + self.comb += self.gpio_in.status[0].eq(status) + + self.specials += MultiReg(ts_scl.i, status) + self.comb += [ + If(self.gpio_enable.storage, + ts_scl.o.eq(self.gpio_out.storage[0]), + ts_scl.oe.eq(self.gpio_oe.storage[0]) + ).Else( + ts_scl.o.eq(0), + ts_scl.oe.eq(~programmer.scl) + ) + ] + + # SDA GPIO and mux + ts_sda = TSTriple(1) + self.specials += ts_sda.get_tristate(pads.sda) + + status = Signal() + self.comb += self.gpio_in.status[1].eq(status) + + self.specials += MultiReg(ts_sda.i, status) + self.comb += [ + If(self.gpio_enable.storage, + ts_sda.o.eq(self.gpio_out.storage[1]), + ts_sda.oe.eq(self.gpio_oe.storage[1]) + ).Else( + ts_sda.o.eq(0), + ts_sda.oe.eq(~programmer.sda_o) + ) + ] + self.specials += MultiReg(ts_sda.i, programmer.sda_i, "helper") + + # Error reporting + collision_cdc = BlindTransfer("helper", "sys") + self.submodules += collision_cdc + self.comb += collision_cdc.i.eq(programmer.stb & programmer.busy) + + nack_cdc = PulseSynchronizer("helper", "sys") + self.submodules += nack_cdc + self.comb += nack_cdc.i.eq(programmer.nack) + + for n, trig in enumerate([collision_cdc.o, nack_cdc.o]): + self.sync += [ + If(self.errors.re & self.errors.r[n], self.errors.w[n].eq(0)), + If(trig, self.errors.w[n].eq(1)) + ] + + +if 
__name__ == "__main__": + simulate_programmer() diff --git a/artiq/gateware/drtio/wrpll/thls.py b/artiq/gateware/drtio/wrpll/thls.py new file mode 100644 index 000000000..b11459692 --- /dev/null +++ b/artiq/gateware/drtio/wrpll/thls.py @@ -0,0 +1,618 @@ +import inspect +import ast +from copy import copy +import operator +from functools import reduce +from collections import OrderedDict + +from migen import * +from migen.genlib.fsm import * + + +class Isn: + def __init__(self, immediate=None, inputs=None, outputs=None): + if inputs is None: + inputs = [] + if outputs is None: + outputs = [] + self.immediate = immediate + self.inputs = inputs + self.outputs = outputs + + def __repr__(self): + r = "<" + r += self.__class__.__name__ + if self.immediate is not None: + r += " (" + str(self.immediate) + ")" + for inp in self.inputs: + r += " r" + str(inp) + if self.outputs: + r += " ->" + for outp in self.outputs: + r += " r" + str(outp) + r += ">" + return r + + +class NopIsn(Isn): + opcode = 0 + +class AddIsn(Isn): + opcode = 1 + +class SubIsn(Isn): + opcode = 2 + +class MulShiftIsn(Isn): + opcode = 3 + +# opcode = 4: MulShift with alternate shift + +class MinIsn(Isn): + opcode = 5 + +class MaxIsn(Isn): + opcode = 6 + +class CopyIsn(Isn): + opcode = 7 + +class InputIsn(Isn): + opcode = 8 + +class OutputIsn(Isn): + opcode = 9 + +class EndIsn(Isn): + opcode = 10 + + +class ASTCompiler: + def __init__(self): + self.program = [] + self.data = [] + self.next_ssa_reg = -1 + self.constants = dict() + self.names = dict() + self.globals = OrderedDict() + + def get_ssa_reg(self): + r = self.next_ssa_reg + self.next_ssa_reg -= 1 + return r + + def add_global(self, name): + if name not in self.globals: + r = len(self.data) + self.data.append(0) + self.names[name] = r + self.globals[name] = r + + def input(self, name): + target = self.get_ssa_reg() + self.program.append(InputIsn(outputs=[target])) + self.names[name] = target + + def emit(self, node): + if isinstance(node, ast.BinOp): + if isinstance(node.op, ast.RShift): + if not isinstance(node.left, ast.BinOp) or not isinstance(node.left.op, ast.Mult): + raise NotImplementedError + if not isinstance(node.right, ast.Num): + raise NotImplementedError + left = self.emit(node.left.left) + right = self.emit(node.left.right) + cons = lambda **kwargs: MulShiftIsn(immediate=node.right.n, **kwargs) + else: + left = self.emit(node.left) + right = self.emit(node.right) + if isinstance(node.op, ast.Add): + cons = AddIsn + elif isinstance(node.op, ast.Sub): + cons = SubIsn + elif isinstance(node.op, ast.Mult): + cons = lambda **kwargs: MulShiftIsn(immediate=0, **kwargs) + else: + raise NotImplementedError + output = self.get_ssa_reg() + self.program.append(cons(inputs=[left, right], outputs=[output])) + return output + elif isinstance(node, ast.Call): + if not isinstance(node.func, ast.Name): + raise NotImplementedError + funcname = node.func.id + if node.keywords: + raise NotImplementedError + inputs = [self.emit(x) for x in node.args] + if funcname == "min": + cons = MinIsn + elif funcname == "max": + cons = MaxIsn + else: + raise NotImplementedError + output = self.get_ssa_reg() + self.program.append(cons(inputs=inputs, outputs=[output])) + return output + elif isinstance(node, (ast.Num, ast.UnaryOp)): + if isinstance(node, ast.UnaryOp): + if not isinstance(node.operand, ast.Num): + raise NotImplementedError + if isinstance(node.op, ast.UAdd): + transform = lambda x: x + elif isinstance(node.op, ast.USub): + transform = operator.neg + elif isinstance(node.op, 
ast.Invert): + transform = operator.invert + else: + raise NotImplementedError + node = node.operand + else: + transform = lambda x: x + n = transform(node.n) + if n in self.constants: + return self.constants[n] + else: + r = len(self.data) + self.data.append(n) + self.constants[n] = r + return r + elif isinstance(node, ast.Name): + return self.names[node.id] + elif isinstance(node, ast.Assign): + output = self.emit(node.value) + for target in node.targets: + assert isinstance(target, ast.Name) + self.names[target.id] = output + elif isinstance(node, ast.Return): + value = self.emit(node.value) + self.program.append(OutputIsn(inputs=[value])) + elif isinstance(node, ast.Global): + pass + else: + raise NotImplementedError + + +class Processor: + def __init__(self, data_width=32, multiplier_stages=2): + self.data_width = data_width + self.multiplier_stages = multiplier_stages + self.multiplier_shifts = [] + self.program_rom_size = None + self.data_ram_size = None + self.opcode_bits = 4 + self.reg_bits = None + + def get_instruction_latency(self, isn): + return { + AddIsn: 2, + SubIsn: 2, + MulShiftIsn: 1 + self.multiplier_stages, + MinIsn: 2, + MaxIsn: 2, + CopyIsn: 1, + InputIsn: 1 + }[isn.__class__] + + def encode_instruction(self, isn, exit): + opcode = isn.opcode + if isn.immediate is not None and not isinstance(isn, MulShiftIsn): + r0 = isn.immediate + if len(isn.inputs) >= 1: + r1 = isn.inputs[0] + else: + r1 = 0 + else: + if len(isn.inputs) >= 1: + r0 = isn.inputs[0] + else: + r0 = 0 + if len(isn.inputs) >= 2: + r1 = isn.inputs[1] + else: + r1 = 0 + r = 0 + for value, bits in ((exit, self.reg_bits), (r1, self.reg_bits), (r0, self.reg_bits), (opcode, self.opcode_bits)): + r <<= bits + r |= value + return r + + def instruction_bits(self): + return 3*self.reg_bits + self.opcode_bits + + def implement(self, program, data): + return ProcessorImpl(self, program, data) + + +class Scheduler: + def __init__(self, processor, reserved_data, program): + self.processor = processor + self.reserved_data = reserved_data + self.used_registers = set(range(self.reserved_data)) + self.exits = dict() + self.program = program + self.remaining = copy(program) + self.output = [] + + def allocate_register(self): + r = min(set(range(max(self.used_registers) + 2)) - self.used_registers) + self.used_registers.add(r) + return r + + def free_register(self, r): + assert r >= self.reserved_data + self.used_registers.discard(r) + + def find_inputs(self, cycle, isn): + mapped_inputs = [] + for inp in isn.inputs: + if inp >= 0: + mapped_inputs.append(inp) + else: + found = False + for i in range(cycle): + if i in self.exits: + r, rm = self.exits[i] + if r == inp: + mapped_inputs.append(rm) + found = True + break + if not found: + return None + return mapped_inputs + + def schedule_one(self, isn): + cycle = len(self.output) + mapped_inputs = self.find_inputs(cycle, isn) + if mapped_inputs is None: + return False + + if isn.outputs: + # check that exit slot is free + latency = self.processor.get_instruction_latency(isn) + exit = cycle + latency + if exit in self.exits: + return False + + # avoid RAW hazard with global writeback + for output in isn.outputs: + if output >= 0: + for risn in self.remaining: + for inp in risn.inputs: + if inp == output: + return False + + # Instruction can be scheduled + + self.remaining.remove(isn) + + for inp, minp in zip(isn.inputs, mapped_inputs): + can_free = inp < 0 and all(inp != rinp for risn in self.remaining for rinp in risn.inputs) + if can_free: + self.free_register(minp) + + if 
isn.outputs: + assert len(isn.outputs) == 1 + if isn.outputs[0] < 0: + output = self.allocate_register() + else: + output = isn.outputs[0] + self.exits[exit] = (isn.outputs[0], output) + self.output.append(isn.__class__(immediate=isn.immediate, inputs=mapped_inputs)) + + return True + + def schedule(self): + while self.remaining: + success = False + for isn in self.remaining: + if self.schedule_one(isn): + success = True + break + if not success: + self.output.append(NopIsn()) + self.output += [NopIsn()]*(max(self.exits.keys()) - len(self.output) + 1) + return self.output + + +class CompiledProgram: + def __init__(self, processor, program, exits, data, glbs): + self.processor = processor + self.program = program + self.exits = exits + self.data = data + self.globals = glbs + + def pretty_print(self): + for cycle, isn in enumerate(self.program): + l = "{:4d} {:15}".format(cycle, str(isn)) + if cycle in self.exits: + l += " -> r{}".format(self.exits[cycle]) + print(l) + + def dimension_processor(self): + self.processor.program_rom_size = len(self.program) + self.processor.data_ram_size = len(self.data) + self.processor.reg_bits = (self.processor.data_ram_size - 1).bit_length() + for isn in self.program: + if isinstance(isn, MulShiftIsn) and isn.immediate not in self.processor.multiplier_shifts: + self.processor.multiplier_shifts.append(isn.immediate) + + def encode(self): + r = [] + for i, isn in enumerate(self.program): + exit = self.exits.get(i, 0) + r.append(self.processor.encode_instruction(isn, exit)) + return r + + +def compile(processor, function): + node = ast.parse(inspect.getsource(function)) + assert isinstance(node, ast.Module) + assert len(node.body) == 1 + node = node.body[0] + assert isinstance(node, ast.FunctionDef) + assert len(node.args.args) == 1 + arg = node.args.args[0].arg + body = node.body + + astcompiler = ASTCompiler() + for node in body: + if isinstance(node, ast.Global): + for name in node.names: + astcompiler.add_global(name) + arg_r = astcompiler.input(arg) + for node in body: + astcompiler.emit(node) + if isinstance(node, ast.Return): + break + for glbl, location in astcompiler.globals.items(): + new_location = astcompiler.names[glbl] + if new_location != location: + astcompiler.program.append(CopyIsn(inputs=[new_location], outputs=[location])) + + scheduler = Scheduler(processor, len(astcompiler.data), astcompiler.program) + scheduler.schedule() + + program = copy(scheduler.output) + program.append(EndIsn()) + + max_reg = max(max(max(isn.inputs + [0]) for isn in program), max(v[1] for k, v in scheduler.exits.items())) + + return CompiledProgram( + processor=processor, + program=program, + exits={k: v[1] for k, v in scheduler.exits.items()}, + data=astcompiler.data + [0]*(max_reg - len(astcompiler.data) + 1), + glbs=astcompiler.globals) + + +class BaseUnit(Module): + def __init__(self, data_width): + self.stb_i = Signal() + self.i0 = Signal((data_width, True)) + self.i1 = Signal((data_width, True)) + self.stb_o = Signal() + self.o = Signal((data_width, True)) + + +class NopUnit(BaseUnit): + pass + + +class OpUnit(BaseUnit): + def __init__(self, op, data_width, stages, op_data_width=None): + BaseUnit.__init__(self, data_width) + # work around Migen's mishandling of Verilog's cretinous operator width rules + if op_data_width is None: + op_data_width = data_width + + if stages > 1: + # Vivado backward retiming for DSP does not work correctly if DSP inputs + # are not registered. 
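# When stages > 1, one pipeline stage is spent registering the operands and
# strobe ahead of the combinatorial operator, and the remaining stages - 1
# registers are placed after it with the "retiming_backward" attribute so the
# tools can retime them into the DSP block.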
+ i0 = Signal.like(self.i0) + i1 = Signal.like(self.i1) + stb_i = Signal() + self.sync += [ + i0.eq(self.i0), + i1.eq(self.i1), + stb_i.eq(self.stb_i) + ] + output_stages = stages - 1 + else: + i0, i1, stb_i = self.i0, self.i1, self.stb_i + output_stages = stages + + o = Signal((op_data_width, True)) + self.comb += o.eq(op(i0, i1)) + stb_o = stb_i + for i in range(output_stages): + n_o = Signal((data_width, True)) + if stages > 1: + n_o.attr.add(("retiming_backward", 1)) + n_stb_o = Signal() + self.sync += [ + n_o.eq(o), + n_stb_o.eq(stb_o) + ] + o = n_o + stb_o = n_stb_o + self.comb += [ + self.o.eq(o), + self.stb_o.eq(stb_o) + ] + + +class SelectUnit(BaseUnit): + def __init__(self, op, data_width): + BaseUnit.__init__(self, data_width) + + self.sync += [ + self.stb_o.eq(self.stb_i), + If(op(self.i0, self.i1), + self.o.eq(self.i0) + ).Else( + self.o.eq(self.i1) + ) + ] + + +class CopyUnit(BaseUnit): + def __init__(self, data_width): + BaseUnit.__init__(self, data_width) + + self.comb += [ + self.stb_o.eq(self.stb_i), + self.o.eq(self.i0) + ] + + +class InputUnit(BaseUnit): + def __init__(self, data_width, input_stb, input): + BaseUnit.__init__(self, data_width) + self.buffer = Signal(data_width) + + self.comb += [ + self.stb_o.eq(self.stb_i), + self.o.eq(self.buffer) + ] + + +class OutputUnit(BaseUnit): + def __init__(self, data_width, output_stb, output): + BaseUnit.__init__(self, data_width) + + self.sync += [ + output_stb.eq(self.stb_i), + output.eq(self.i0) + ] + + +class ProcessorImpl(Module): + def __init__(self, pd, program, data): + self.input_stb = Signal() + self.input = Signal((pd.data_width, True)) + + self.output_stb = Signal() + self.output = Signal((pd.data_width, True)) + + self.busy = Signal() + + # # # + + program_mem = Memory(pd.instruction_bits(), pd.program_rom_size, init=program) + data_mem0 = Memory(pd.data_width, pd.data_ram_size, init=data) + data_mem1 = Memory(pd.data_width, pd.data_ram_size, init=data) + self.specials += program_mem, data_mem0, data_mem1 + + pc = Signal(pd.instruction_bits()) + pc_next = Signal.like(pc) + pc_en = Signal() + self.sync += pc.eq(pc_next) + self.comb += [ + If(pc_en, + pc_next.eq(pc + 1) + ).Else( + pc_next.eq(0) + ) + ] + program_mem_port = program_mem.get_port() + self.specials += program_mem_port + self.comb += program_mem_port.adr.eq(pc_next) + + s = 0 + opcode = Signal(pd.opcode_bits) + self.comb += opcode.eq(program_mem_port.dat_r[s:s+pd.opcode_bits]) + s += pd.opcode_bits + r0 = Signal(pd.reg_bits) + self.comb += r0.eq(program_mem_port.dat_r[s:s+pd.reg_bits]) + s += pd.reg_bits + r1 = Signal(pd.reg_bits) + self.comb += r1.eq(program_mem_port.dat_r[s:s+pd.reg_bits]) + s += pd.reg_bits + exit = Signal(pd.reg_bits) + self.comb += exit.eq(program_mem_port.dat_r[s:s+pd.reg_bits]) + + data_read_port0 = data_mem0.get_port() + data_read_port1 = data_mem1.get_port() + self.specials += data_read_port0, data_read_port1 + self.comb += [ + data_read_port0.adr.eq(r0), + data_read_port1.adr.eq(r1) + ] + + data_write_port = data_mem0.get_port(write_capable=True) + data_write_port_dup = data_mem1.get_port(write_capable=True) + self.specials += data_write_port, data_write_port_dup + self.comb += [ + data_write_port_dup.we.eq(data_write_port.we), + data_write_port_dup.adr.eq(data_write_port.adr), + data_write_port_dup.dat_w.eq(data_write_port.dat_w), + data_write_port.adr.eq(exit) + ] + + nop = NopUnit(pd.data_width) + adder = OpUnit(operator.add, pd.data_width, 1) + subtractor = OpUnit(operator.sub, pd.data_width, 1) + if pd.multiplier_shifts: 
+ if len(pd.multiplier_shifts) != 1: + raise NotImplementedError + multiplier = OpUnit(lambda a, b: a * b >> pd.multiplier_shifts[0], + pd.data_width, pd.multiplier_stages, op_data_width=2*pd.data_width) + else: + multiplier = NopUnit(pd.data_width) + minu = SelectUnit(operator.lt, pd.data_width) + maxu = SelectUnit(operator.gt, pd.data_width) + copier = CopyUnit(pd.data_width) + inu = InputUnit(pd.data_width, self.input_stb, self.input) + outu = OutputUnit(pd.data_width, self.output_stb, self.output) + units = [nop, adder, subtractor, multiplier, minu, maxu, copier, inu, outu] + self.submodules += units + + for unit in units: + self.sync += unit.stb_i.eq(0) + self.comb += [ + unit.i0.eq(data_read_port0.dat_r), + unit.i1.eq(data_read_port1.dat_r), + If(unit.stb_o, + data_write_port.we.eq(1), + data_write_port.dat_w.eq(unit.o) + ) + ] + + decode_table = [ + (NopIsn.opcode, nop), + (AddIsn.opcode, adder), + (SubIsn.opcode, subtractor), + (MulShiftIsn.opcode, multiplier), + (MulShiftIsn.opcode + 1, multiplier), + (MinIsn.opcode, minu), + (MaxIsn.opcode, maxu), + (CopyIsn.opcode, copier), + (InputIsn.opcode, inu), + (OutputIsn.opcode, outu) + ] + for allocated_opcode, unit in decode_table: + self.sync += If(pc_en & (opcode == allocated_opcode), unit.stb_i.eq(1)) + + fsm = FSM() + self.submodules += fsm + fsm.act("IDLE", + pc_en.eq(0), + NextValue(inu.buffer, self.input), + If(self.input_stb, NextState("PROCESSING")) + ) + fsm.act("PROCESSING", + self.busy.eq(1), + pc_en.eq(1), + If(opcode == EndIsn.opcode, + pc_en.eq(0), + NextState("IDLE") + ) + ) + + +def make(function, **kwargs): + proc = Processor(**kwargs) + cp = compile(proc, function) + cp.dimension_processor() + return proc.implement(cp.encode(), cp.data) diff --git a/artiq/gateware/dsp/fir.py b/artiq/gateware/dsp/fir.py index 516ca5c7f..002487e32 100644 --- a/artiq/gateware/dsp/fir.py +++ b/artiq/gateware/dsp/fir.py @@ -69,7 +69,7 @@ class ParallelFIR(Module): n = len(coefficients) # input and output: old to new, decreasing delay self.i = [Signal((width, True)) for i in range(p)] - self.o = [Signal((width, True)) for i in range(p)] + self.o = [Signal((width, True), reset_less=True) for i in range(p)] self.latency = (n + 1)//2//p + 2 w = _widths[arch] diff --git a/artiq/gateware/dsp/sawg.py b/artiq/gateware/dsp/sawg.py index 2ac997ad2..e0e9d1db4 100644 --- a/artiq/gateware/dsp/sawg.py +++ b/artiq/gateware/dsp/sawg.py @@ -183,8 +183,8 @@ class Channel(Module, SatAddMixin): b.ce.eq(cfg.ce), u.o.ack.eq(cfg.ce), Cat(b.clr, a1.clr, a2.clr).eq(cfg.clr), - Cat(b.xi).eq(Cat(hbf[0].o)), - Cat(b.yi).eq(Cat(hbf[1].o)), + [i.eq(j) for i, j in zip(b.xi, hbf[0].o)], + [i.eq(j) for i, j in zip(b.yi, hbf[1].o)], ] hbf[0].i.reset_less = True hbf[1].i.reset_less = True @@ -194,7 +194,7 @@ class Channel(Module, SatAddMixin): limits=cfg.limits[1], clipped=cfg.clipped[1])), hbf[1].i.eq(self.sat_add((a1.yo[0], a2.yo[0]), width=len(hbf[1].i), - limits=cfg.limits[1], clipped=cfg.clipped[1])), + limits=cfg.limits[1])), ] # wire up outputs and q_{i,o} exchange for o, x, y in zip(self.o, b.xo, self.y_in): @@ -203,8 +203,12 @@ class Channel(Module, SatAddMixin): o_y = Signal.like(y) self.comb += [ o_offset.eq(u.o.a0[-len(o):]), - o_x.eq(Mux(cfg.iq_en[0], x, 0)), - o_y.eq(Mux(cfg.iq_en[1], y, 0)), + If(cfg.iq_en[0], + o_x.eq(x) + ), + If(cfg.iq_en[1], + o_y.eq(y) + ), ] self.sync += [ o.eq(self.sat_add((o_offset, o_x, o_y), @@ -213,4 +217,4 @@ class Channel(Module, SatAddMixin): ] def connect_y(self, buddy): - self.comb += Cat(buddy.y_in).eq(Cat(self.b.yo)) + 
self.comb += [i.eq(j) for i, j in zip(buddy.y_in, self.b.yo)] diff --git a/artiq/gateware/eem.py b/artiq/gateware/eem.py new file mode 100644 index 000000000..627bc29e8 --- /dev/null +++ b/artiq/gateware/eem.py @@ -0,0 +1,659 @@ +from migen import * +from migen.build.generic_platform import * +from migen.genlib.io import DifferentialOutput + +from artiq.gateware import rtio +from artiq.gateware.rtio.phy import spi2, ad53xx_monitor, grabber +from artiq.gateware.suservo import servo, pads as servo_pads +from artiq.gateware.rtio.phy import servo as rtservo, fastino, phaser + + +def _eem_signal(i): + n = "d{}".format(i) + if i == 0: + n += "_cc" + return n + + +def _eem_pin(eem, i, pol): + return "eem{}:{}_{}".format(eem, _eem_signal(i), pol) + + +class _EEM: + @classmethod + def add_extension(cls, target, eem, *args, **kwargs): + name = cls.__name__ + target.platform.add_extension(cls.io(eem, *args, **kwargs)) + print("{} (EEM{}) starting at RTIO channel 0x{:06x}" + .format(name, eem, len(target.rtio_channels))) + + +class DIO(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + return [("dio{}".format(eem), i, + Subsignal("p", Pins(_eem_pin(eem, i, "p"))), + Subsignal("n", Pins(_eem_pin(eem, i, "n"))), + IOStandard(iostandard)) + for i in range(8)] + + @classmethod + def add_std(cls, target, eem, ttl03_cls, ttl47_cls, iostandard="LVDS_25", + edge_counter_cls=None): + cls.add_extension(target, eem, iostandard=iostandard) + + phys = [] + for i in range(4): + pads = target.platform.request("dio{}".format(eem), i) + phy = ttl03_cls(pads.p, pads.n) + phys.append(phy) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + for i in range(4): + pads = target.platform.request("dio{}".format(eem), 4+i) + phy = ttl47_cls(pads.p, pads.n) + phys.append(phy) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + + if edge_counter_cls is not None: + for phy in phys: + state = getattr(phy, "input_state", None) + if state is not None: + counter = edge_counter_cls(state) + target.submodules += counter + target.rtio_channels.append(rtio.Channel.from_phy(counter)) + + +class Urukul(_EEM): + @staticmethod + def io(eem, eem_aux, iostandard="LVDS_25"): + ios = [ + ("urukul{}_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "p"))), + Subsignal("cs_n", Pins( + *(_eem_pin(eem, i + 3, "p") for i in range(3)))), + IOStandard(iostandard), + ), + ("urukul{}_spi_n".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "n"))), + Subsignal("cs_n", Pins( + *(_eem_pin(eem, i + 3, "n") for i in range(3)))), + IOStandard(iostandard), + ), + ] + ttls = [(6, eem, "io_update"), + (7, eem, "dds_reset_sync_in", Misc("IOB=TRUE"))] + if eem_aux is not None: + ttls += [(0, eem_aux, "sync_clk"), + (1, eem_aux, "sync_in"), + (2, eem_aux, "io_update_ret"), + (3, eem_aux, "nu_mosi3"), + (4, eem_aux, "sw0"), + (5, eem_aux, "sw1"), + (6, eem_aux, "sw2"), + (7, eem_aux, "sw3")] + for i, j, sig, *extra_args in ttls: + ios.append( + ("urukul{}_{}".format(eem, sig), 0, + Subsignal("p", Pins(_eem_pin(j, i, "p"))), + Subsignal("n", Pins(_eem_pin(j, i, "n"))), + IOStandard(iostandard), *extra_args + )) + return ios + + @staticmethod + def io_qspi(eem0, eem1, iostandard="LVDS_25"): + ios = [ + ("urukul{}_spi_p".format(eem0), 0, + Subsignal("clk", 
Pins(_eem_pin(eem0, 0, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem0, 1, "p"))), + Subsignal("cs_n", Pins( + _eem_pin(eem0, 3, "p"), _eem_pin(eem0, 4, "p"))), + IOStandard(iostandard), + ), + ("urukul{}_spi_n".format(eem0), 0, + Subsignal("clk", Pins(_eem_pin(eem0, 0, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem0, 1, "n"))), + Subsignal("cs_n", Pins( + _eem_pin(eem0, 3, "n"), _eem_pin(eem0, 4, "n"))), + IOStandard(iostandard), + ), + ] + ttls = [(6, eem0, "io_update"), + (7, eem0, "dds_reset_sync_in"), + (4, eem1, "sw0"), + (5, eem1, "sw1"), + (6, eem1, "sw2"), + (7, eem1, "sw3")] + for i, j, sig in ttls: + ios.append( + ("urukul{}_{}".format(eem0, sig), 0, + Subsignal("p", Pins(_eem_pin(j, i, "p"))), + Subsignal("n", Pins(_eem_pin(j, i, "n"))), + IOStandard(iostandard) + )) + ios += [ + ("urukul{}_qspi_p".format(eem0), 0, + Subsignal("cs", Pins(_eem_pin(eem0, 5, "p"))), + Subsignal("clk", Pins(_eem_pin(eem0, 2, "p"))), + Subsignal("mosi0", Pins(_eem_pin(eem1, 0, "p"))), + Subsignal("mosi1", Pins(_eem_pin(eem1, 1, "p"))), + Subsignal("mosi2", Pins(_eem_pin(eem1, 2, "p"))), + Subsignal("mosi3", Pins(_eem_pin(eem1, 3, "p"))), + IOStandard(iostandard), + ), + ("urukul{}_qspi_n".format(eem0), 0, + Subsignal("cs", Pins(_eem_pin(eem0, 5, "n"))), + Subsignal("clk", Pins(_eem_pin(eem0, 2, "n"))), + Subsignal("mosi0", Pins(_eem_pin(eem1, 0, "n"))), + Subsignal("mosi1", Pins(_eem_pin(eem1, 1, "n"))), + Subsignal("mosi2", Pins(_eem_pin(eem1, 2, "n"))), + Subsignal("mosi3", Pins(_eem_pin(eem1, 3, "n"))), + IOStandard(iostandard), + ), + ] + return ios + + @classmethod + def add_std(cls, target, eem, eem_aux, ttl_out_cls, sync_gen_cls=None, + iostandard="LVDS_25"): + cls.add_extension(target, eem, eem_aux, iostandard=iostandard) + + phy = spi2.SPIMaster(target.platform.request("urukul{}_spi_p".format(eem)), + target.platform.request("urukul{}_spi_n".format(eem))) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + + pads = target.platform.request("urukul{}_dds_reset_sync_in".format(eem)) + pad = Signal(reset=0) + target.specials += DifferentialOutput(pad, pads.p, pads.n) + if sync_gen_cls is not None: # AD9910 variant and SYNC_IN from EEM + phy = sync_gen_cls(pad, ftw_width=4) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + + pads = target.platform.request("urukul{}_io_update".format(eem)) + phy = ttl_out_cls(pads.p, pads.n) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + if eem_aux is not None: + for signal in "sw0 sw1 sw2 sw3".split(): + pads = target.platform.request("urukul{}_{}".format(eem, signal)) + phy = ttl_out_cls(pads.p, pads.n) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + +class Sampler(_EEM): + @staticmethod + def io(eem, eem_aux, iostandard="LVDS_25"): + ios = [ + ("sampler{}_adc_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 1, "p"))), + IOStandard(iostandard), + ), + ("sampler{}_adc_spi_n".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 1, "n"))), + IOStandard(iostandard), + ), + ("sampler{}_pgia_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 4, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem, 5, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 6, "p"))), + Subsignal("cs_n", Pins(_eem_pin(eem, 7, "p"))), + IOStandard(iostandard), + ), + ("sampler{}_pgia_spi_n".format(eem), 0, + 
Subsignal("clk", Pins(_eem_pin(eem, 4, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem, 5, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 6, "n"))), + Subsignal("cs_n", Pins(_eem_pin(eem, 7, "n"))), + IOStandard(iostandard), + ), + ] + [ + ("sampler{}_{}".format(eem, sig), 0, + Subsignal("p", Pins(_eem_pin(j, i, "p"))), + Subsignal("n", Pins(_eem_pin(j, i, "n"))), + IOStandard(iostandard) + ) for i, j, sig in [ + (2, eem, "sdr"), + (3, eem, "cnv") + ] + ] + if eem_aux is not None: + ios += [ + ("sampler{}_adc_data_p".format(eem), 0, + Subsignal("clkout", Pins(_eem_pin(eem_aux, 0, "p"))), + Subsignal("sdoa", Pins(_eem_pin(eem_aux, 1, "p"))), + Subsignal("sdob", Pins(_eem_pin(eem_aux, 2, "p"))), + Subsignal("sdoc", Pins(_eem_pin(eem_aux, 3, "p"))), + Subsignal("sdod", Pins(_eem_pin(eem_aux, 4, "p"))), + Misc("DIFF_TERM=TRUE"), + IOStandard(iostandard), + ), + ("sampler{}_adc_data_n".format(eem), 0, + Subsignal("clkout", Pins(_eem_pin(eem_aux, 0, "n"))), + Subsignal("sdoa", Pins(_eem_pin(eem_aux, 1, "n"))), + Subsignal("sdob", Pins(_eem_pin(eem_aux, 2, "n"))), + Subsignal("sdoc", Pins(_eem_pin(eem_aux, 3, "n"))), + Subsignal("sdod", Pins(_eem_pin(eem_aux, 4, "n"))), + Misc("DIFF_TERM=TRUE"), + IOStandard(iostandard), + ), + ] + return ios + + @classmethod + def add_std(cls, target, eem, eem_aux, ttl_out_cls, iostandard="LVDS_25"): + cls.add_extension(target, eem, eem_aux, iostandard=iostandard) + + phy = spi2.SPIMaster( + target.platform.request("sampler{}_adc_spi_p".format(eem)), + target.platform.request("sampler{}_adc_spi_n".format(eem))) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + phy = spi2.SPIMaster( + target.platform.request("sampler{}_pgia_spi_p".format(eem)), + target.platform.request("sampler{}_pgia_spi_n".format(eem))) + target.submodules += phy + + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + pads = target.platform.request("sampler{}_cnv".format(eem)) + phy = ttl_out_cls(pads.p, pads.n) + target.submodules += phy + + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + sdr = target.platform.request("sampler{}_sdr".format(eem)) + target.specials += DifferentialOutput(1, sdr.p, sdr.n) + + +class Novogorny(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + return [ + ("novogorny{}_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "p"))), + Subsignal("cs_n", Pins( + _eem_pin(eem, 3, "p"), _eem_pin(eem, 4, "p"))), + IOStandard(iostandard), + ), + ("novogorny{}_spi_n".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "n"))), + Subsignal("cs_n", Pins( + _eem_pin(eem, 3, "n"), _eem_pin(eem, 4, "n"))), + IOStandard(iostandard), + ), + ] + [ + ("novogorny{}_{}".format(eem, sig), 0, + Subsignal("p", Pins(_eem_pin(j, i, "p"))), + Subsignal("n", Pins(_eem_pin(j, i, "n"))), + IOStandard(iostandard) + ) for i, j, sig in [ + (5, eem, "cnv"), + (6, eem, "busy"), + (7, eem, "scko"), + ] + ] + + @classmethod + def add_std(cls, target, eem, ttl_out_cls, iostandard="LVDS_25"): + cls.add_extension(target, eem, iostandard=iostandard) + + phy = spi2.SPIMaster(target.platform.request("novogorny{}_spi_p".format(eem)), + target.platform.request("novogorny{}_spi_n".format(eem))) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=16)) + + pads = 
target.platform.request("novogorny{}_cnv".format(eem)) + phy = ttl_out_cls(pads.p, pads.n) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + + +class Zotino(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + return [ + ("zotino{}_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "p"))), + Subsignal("cs_n", Pins( + _eem_pin(eem, 3, "p"), _eem_pin(eem, 4, "p"))), + IOStandard(iostandard), + ), + ("zotino{}_spi_n".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "n"))), + Subsignal("cs_n", Pins( + _eem_pin(eem, 3, "n"), _eem_pin(eem, 4, "n"))), + IOStandard(iostandard), + ), + ] + [ + ("zotino{}_{}".format(eem, sig), 0, + Subsignal("p", Pins(_eem_pin(j, i, "p"))), + Subsignal("n", Pins(_eem_pin(j, i, "n"))), + IOStandard(iostandard) + ) for i, j, sig in [ + (5, eem, "ldac_n"), + (6, eem, "busy"), + (7, eem, "clr_n"), + ] + ] + + @classmethod + def add_std(cls, target, eem, ttl_out_cls, iostandard="LVDS_25"): + cls.add_extension(target, eem, iostandard=iostandard) + + spi_phy = spi2.SPIMaster(target.platform.request("zotino{}_spi_p".format(eem)), + target.platform.request("zotino{}_spi_n".format(eem))) + target.submodules += spi_phy + target.rtio_channels.append(rtio.Channel.from_phy(spi_phy, ififo_depth=4)) + + pads = target.platform.request("zotino{}_ldac_n".format(eem)) + ldac_phy = ttl_out_cls(pads.p, pads.n) + target.submodules += ldac_phy + target.rtio_channels.append(rtio.Channel.from_phy(ldac_phy)) + + pads = target.platform.request("zotino{}_clr_n".format(eem)) + clr_phy = ttl_out_cls(pads.p, pads.n) + target.submodules += clr_phy + target.rtio_channels.append(rtio.Channel.from_phy(clr_phy)) + + dac_monitor = ad53xx_monitor.AD53XXMonitor(spi_phy.rtlink, ldac_phy.rtlink) + target.submodules += dac_monitor + spi_phy.probes.extend(dac_monitor.probes) + + +class Grabber(_EEM): + @staticmethod + def io(eem, eem_aux, iostandard="LVDS_25"): + ios = [ + ("grabber{}_video".format(eem), 0, + Subsignal("clk_p", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("clk_n", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("sdi_p", Pins(*[_eem_pin(eem, i, "p") for i in range(1, 5)])), + Subsignal("sdi_n", Pins(*[_eem_pin(eem, i, "n") for i in range(1, 5)])), + IOStandard(iostandard), Misc("DIFF_TERM=TRUE") + ), + ("grabber{}_cc0".format(eem), 0, + Subsignal("p", Pins(_eem_pin(eem_aux, 5, "p"))), + Subsignal("n", Pins(_eem_pin(eem_aux, 5, "n"))), + IOStandard(iostandard) + ), + ("grabber{}_cc1".format(eem), 0, + Subsignal("p", Pins(_eem_pin(eem_aux, 6, "p"))), + Subsignal("n", Pins(_eem_pin(eem_aux, 6, "n"))), + IOStandard(iostandard) + ), + ("grabber{}_cc2".format(eem), 0, + Subsignal("p", Pins(_eem_pin(eem_aux, 7, "p"))), + Subsignal("n", Pins(_eem_pin(eem_aux, 7, "n"))), + IOStandard(iostandard) + ), + ] + if eem_aux is not None: + ios += [ + ("grabber{}_video_m".format(eem), 0, + Subsignal("clk_p", Pins(_eem_pin(eem_aux, 0, "p"))), + Subsignal("clk_n", Pins(_eem_pin(eem_aux, 0, "n"))), + Subsignal("sdi_p", Pins(*[_eem_pin(eem_aux, i, "p") for i in range(1, 5)])), + Subsignal("sdi_n", Pins(*[_eem_pin(eem_aux, i, "n") for i in range(1, 5)])), + IOStandard(iostandard), Misc("DIFF_TERM=TRUE") + ), + ("grabber{}_serrx".format(eem), 0, + Subsignal("p", Pins(_eem_pin(eem_aux, 5, "p"))), + Subsignal("n", Pins(_eem_pin(eem_aux, 5, "n"))), + 
IOStandard(iostandard), Misc("DIFF_TERM=TRUE")
+                ),
+                ("grabber{}_sertx".format(eem), 0,
+                    Subsignal("p", Pins(_eem_pin(eem_aux, 6, "p"))),
+                    Subsignal("n", Pins(_eem_pin(eem_aux, 6, "n"))),
+                    IOStandard(iostandard)
+                ),
+                ("grabber{}_cc3".format(eem), 0,
+                    Subsignal("p", Pins(_eem_pin(eem_aux, 7, "p"))),
+                    Subsignal("n", Pins(_eem_pin(eem_aux, 7, "n"))),
+                    IOStandard(iostandard)
+                ),
+            ]
+        return ios
+
+    @classmethod
+    def add_std(cls, target, eem, eem_aux=None, eem_aux2=None, ttl_out_cls=None, iostandard="LVDS_25"):
+        cls.add_extension(target, eem, eem_aux, iostandard=iostandard)
+
+        pads = target.platform.request("grabber{}_video".format(eem))
+        target.platform.add_period_constraint(pads.clk_p, 14.71)
+        phy = grabber.Grabber(pads)
+        name = "grabber{}".format(len(target.grabber_csr_group))
+        setattr(target.submodules, name, phy)
+
+        target.platform.add_false_path_constraints(
+            target.crg.cd_sys.clk, phy.deserializer.cd_cl.clk)
+        # Avoid bogus s/h violations at the clock input being sampled
+        # by the ISERDES. This uses dynamic calibration.
+        target.platform.add_false_path_constraints(
+            pads.clk_p, phy.deserializer.cd_cl7x.clk)
+
+        target.grabber_csr_group.append(name)
+        target.csr_devices.append(name)
+        target.rtio_channels += [
+            rtio.Channel(phy.config),
+            rtio.Channel(phy.gate_data)
+        ]
+
+        if ttl_out_cls is not None:
+            for signal in "cc0 cc1 cc2".split():
+                pads = target.platform.request("grabber{}_{}".format(eem, signal))
+                phy = ttl_out_cls(pads.p, pads.n)
+                target.submodules += phy
+                target.rtio_channels.append(rtio.Channel.from_phy(phy))
+            if eem_aux is not None:
+                pads = target.platform.request("grabber{}_cc3".format(eem))
+                phy = ttl_out_cls(pads.p, pads.n)
+                target.submodules += phy
+                target.rtio_channels.append(rtio.Channel.from_phy(phy))
+
+
+class SUServo(_EEM):
+    @staticmethod
+    def io(*eems, iostandard="LVDS_25"):
+        assert len(eems) in (4, 6)
+        io = (Sampler.io(*eems[0:2], iostandard=iostandard)
+                + Urukul.io_qspi(*eems[2:4], iostandard=iostandard))
+        if len(eems) == 6:  # two Urukuls
+            io += Urukul.io_qspi(*eems[4:6], iostandard=iostandard)
+        return io
+
+    @classmethod
+    def add_std(cls, target, eems_sampler, eems_urukul,
+                t_rtt=4, clk=1, shift=11, profile=5,
+                iostandard="LVDS_25"):
+        """Add an 8-channel Sampler-Urukul Servo
+
+        :param t_rtt: upper estimate for clock round-trip propagation time from
+            ``sck`` at the FPGA to ``clkout`` at the FPGA, measured in RTIO
+            coarse cycles (default: 4). This is the sum of the round-trip
+            cabling delay and the 8 ns max propagation delay on Sampler (ADC
+            and LVDS drivers). Increasing ``t_rtt`` increases servo latency.
+            With all other parameters at their default values, ``t_rtt`` values
+            above 4 also increase the servo period (reduce servo bandwidth).
+ :param clk: DDS SPI clock cycle half-width in RTIO coarse cycles + (default: 1) + :param shift: fixed-point scaling factor for IIR coefficients + (default: 11) + :param profile: log2 of the number of profiles for each DDS channel + (default: 5) + """ + cls.add_extension( + target, *(eems_sampler + sum(eems_urukul, [])), + iostandard=iostandard) + eem_sampler = "sampler{}".format(eems_sampler[0]) + eem_urukul = ["urukul{}".format(i[0]) for i in eems_urukul] + + sampler_pads = servo_pads.SamplerPads(target.platform, eem_sampler) + urukul_pads = servo_pads.UrukulPads( + target.platform, *eem_urukul) + target.submodules += sampler_pads, urukul_pads + # timings in units of RTIO coarse period + adc_p = servo.ADCParams(width=16, channels=8, lanes=4, t_cnvh=4, + # account for SCK DDR to CONV latency + # difference (4 cycles measured) + t_conv=57 - 4, t_rtt=t_rtt + 4) + iir_p = servo.IIRWidths(state=25, coeff=18, adc=16, asf=14, word=16, + accu=48, shift=shift, channel=3, + profile=profile, dly=8) + dds_p = servo.DDSParams(width=8 + 32 + 16 + 16, + channels=adc_p.channels, clk=clk) + su = servo.Servo(sampler_pads, urukul_pads, adc_p, iir_p, dds_p) + su = ClockDomainsRenamer("rio_phy")(su) + # explicitly name the servo submodule to enable the migen namer to derive + # a name for the adc return clock domain + setattr(target.submodules, "suservo_eem{}".format(eems_sampler[0]), su) + + ctrls = [rtservo.RTServoCtrl(ctrl) for ctrl in su.iir.ctrl] + target.submodules += ctrls + target.rtio_channels.extend( + rtio.Channel.from_phy(ctrl) for ctrl in ctrls) + mem = rtservo.RTServoMem(iir_p, su) + target.submodules += mem + target.rtio_channels.append(rtio.Channel.from_phy(mem, ififo_depth=4)) + + phy = spi2.SPIMaster( + target.platform.request("{}_pgia_spi_p".format(eem_sampler)), + target.platform.request("{}_pgia_spi_n".format(eem_sampler))) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + + for i in range(2): + if len(eem_urukul) > i: + spi_p, spi_n = ( + target.platform.request("{}_spi_p".format(eem_urukul[i])), + target.platform.request("{}_spi_n".format(eem_urukul[i]))) + else: # create a dummy bus + spi_p = Record([("clk", 1), ("cs_n", 1)]) # mosi, cs_n + spi_n = None + + phy = spi2.SPIMaster(spi_p, spi_n) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + + for j, eem_urukuli in enumerate(eem_urukul): + pads = target.platform.request("{}_dds_reset_sync_in".format(eem_urukuli)) + target.specials += DifferentialOutput(0, pads.p, pads.n) + + for i, signal in enumerate("sw0 sw1 sw2 sw3".split()): + pads = target.platform.request("{}_{}".format(eem_urukuli, signal)) + target.specials += DifferentialOutput( + su.iir.ctrl[j*4 + i].en_out, pads.p, pads.n) + + +class Mirny(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + ios = [ + ("mirny{}_spi_p".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "p"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "p"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "p"))), + Subsignal("cs_n", Pins(_eem_pin(eem, 3, "p"))), + IOStandard(iostandard), + ), + ("mirny{}_spi_n".format(eem), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, "n"))), + Subsignal("mosi", Pins(_eem_pin(eem, 1, "n"))), + Subsignal("miso", Pins(_eem_pin(eem, 2, "n"))), + Subsignal("cs_n", Pins(_eem_pin(eem, 3, "n"))), + IOStandard(iostandard), + ), + ] + for i in range(4): + ios.append( + ("mirny{}_io{}".format(eem, i), 0, + Subsignal("p", Pins(_eem_pin(eem, 4 + i, "p"))), + Subsignal("n", 
Pins(_eem_pin(eem, 4 + i, "n"))), + IOStandard(iostandard) + )) + return ios + + @classmethod + def add_std(cls, target, eem, ttl_out_cls, iostandard="LVDS_25"): + cls.add_extension(target, eem, iostandard=iostandard) + + phy = spi2.SPIMaster( + target.platform.request("mirny{}_spi_p".format(eem)), + target.platform.request("mirny{}_spi_n".format(eem))) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + + for i in range(4): + pads = target.platform.request("mirny{}_io{}".format(eem, i)) + phy = ttl_out_cls(pads.p, pads.n) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy)) + + +class Fastino(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + return [ + ("fastino{}_ser_{}".format(eem, pol), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, pol))), + Subsignal("mosi", Pins(*(_eem_pin(eem, i, pol) + for i in range(1, 7)))), + Subsignal("miso", Pins(_eem_pin(eem, 7, pol)), + Misc("DIFF_TERM=TRUE")), + IOStandard(iostandard), + ) for pol in "pn"] + + @classmethod + def add_std(cls, target, eem, log2_width, iostandard="LVDS_25"): + cls.add_extension(target, eem, iostandard=iostandard) + + phy = fastino.Fastino(target.platform.request("fastino{}_ser_p".format(eem)), + target.platform.request("fastino{}_ser_n".format(eem)), + log2_width=log2_width) + target.submodules += phy + target.rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) + + +class Phaser(_EEM): + @staticmethod + def io(eem, iostandard="LVDS_25"): + return [ + ("phaser{}_ser_{}".format(eem, pol), 0, + Subsignal("clk", Pins(_eem_pin(eem, 0, pol))), + Subsignal("mosi", Pins(*(_eem_pin(eem, i, pol) + for i in range(1, 7)))), + Subsignal("miso", Pins(_eem_pin(eem, 7, pol)), + Misc("DIFF_TERM=TRUE")), + IOStandard(iostandard), + ) for pol in "pn"] + + @classmethod + def add_std(cls, target, eem, iostandard="LVDS_25"): + cls.add_extension(target, eem, iostandard=iostandard) + + phy = phaser.Phaser( + target.platform.request("phaser{}_ser_p".format(eem)), + target.platform.request("phaser{}_ser_n".format(eem))) + target.submodules += phy + target.rtio_channels.extend([ + rtio.Channel.from_phy(phy, ififo_depth=4), + rtio.Channel.from_phy(phy.ch0.frequency), + rtio.Channel.from_phy(phy.ch0.phase_amplitude), + rtio.Channel.from_phy(phy.ch1.frequency), + rtio.Channel.from_phy(phy.ch1.phase_amplitude), + ]) diff --git a/artiq/gateware/fmcdio_vhdci_eem.py b/artiq/gateware/fmcdio_vhdci_eem.py new file mode 100644 index 000000000..0296efe8e --- /dev/null +++ b/artiq/gateware/fmcdio_vhdci_eem.py @@ -0,0 +1,29 @@ +from migen.build.generic_platform import * + +from artiq.coredevice.fmcdio_vhdci_eem import * + + +io = [ + ("fmcdio_dirctl", 0, + Subsignal("clk", Pins("LPC:LA32_N")), + Subsignal("ser", Pins("LPC:LA33_P")), + Subsignal("latch", Pins("LPC:LA32_P")), + IOStandard("LVCMOS18") + ), +] + +def _get_connectors(): + connectors = [] + for i in range(4): + connections = dict() + for j, pair in enumerate(eem_fmc_connections[i]): + for pn in "n", "p": + cc = "cc_" if j == 0 else "" + lpc_cc = "CC_" if eem_fmc_connections[i][j] in (0, 1, 17, 18) else "" + connections["d{}_{}{}".format(j, cc, pn)] = \ + "LPC:LA{:02d}_{}{}".format(pair, lpc_cc, pn.upper()) + connectors.append(("eem{}".format(i), connections)) + return connectors + + +connectors = _get_connectors() diff --git a/artiq/devices/__init__.py b/artiq/gateware/grabber/__init__.py similarity index 100% rename from artiq/devices/__init__.py rename to artiq/gateware/grabber/__init__.py diff --git 
a/artiq/gateware/grabber/core.py b/artiq/gateware/grabber/core.py new file mode 100644 index 000000000..4baaf98c3 --- /dev/null +++ b/artiq/gateware/grabber/core.py @@ -0,0 +1,181 @@ +from migen import * +from migen.genlib.cdc import MultiReg +from misoc.interconnect.csr import * + + +class FrequencyCounter(Module, AutoCSR): + def __init__(self, width=8): + self.freq_count = CSRStatus(width) + + # # # + + toggle = Signal(reset_less=True) + toggle_sys = Signal() + toggle.attr.add("no_retiming") + self.sync.cl += toggle.eq(~toggle) + self.specials += MultiReg(toggle, toggle_sys) + + timer = Signal(width+1) + tick = Signal(reset=1) + count = Signal(width) + toggle_sys_r = Signal() + self.sync += [ + Cat(timer, tick).eq(timer + 1), + toggle_sys_r.eq(toggle_sys), + If(tick, + self.freq_count.status.eq(count), + count.eq(0) + ).Else( + If(toggle_sys & ~toggle_sys_r, count.eq(count + 1)) + ) + ] + + +bitseq = [ + # 0 1 2 3 4 5 6 + 6, 5, 4, 3, 2, 1, 27, + + # 7 8 9 10 11 12 13 + 26, 0, 13, 12, 11, 10, 9, + + # 14 15 16 17 18 19 20 + 25, 24, 8, 7, 20, 19, 18, + + # 21 22 23 + 17, 23, 22 +] + +assert len(set(bitseq)) == 24 + + +class Parser(Module, AutoCSR): + """Parses 28 bit encoded words and track pixel coordinates.""" + def __init__(self, width): + self.cl = cl = Signal(28) + + self.last_x = CSRStatus(width) + self.last_y = CSRStatus(width) + + self.pix = pix = Record([ + ("x", width), + ("y", width), + ("a", 8), + ("b", 8), + ("c", 8), + ("stb", 1), + ("eop", 1), + ]) + + # # # + + last_x = Signal(width) + last_y = Signal(width) + + lval = Signal() + fval = Signal() + dval = Signal() + last_lval = Signal() + last_fval = Signal() + self.comb += [ + Cat(dval, fval, lval).eq(cl[14:17]), + pix.stb.eq(dval & fval & lval), + pix.eop.eq(~fval & last_fval), + Cat(pix.a, pix.b, pix.c).eq(Cat(cl[i] for i in bitseq)) + ] + self.sync.cl += [ + last_lval.eq(lval), + last_fval.eq(fval), + If(dval, + pix.x.eq(pix.x + 1), + ), + If(~lval, + If(last_lval, + last_x.eq(pix.x), + pix.y.eq(pix.y + 1) + ), + pix.x.eq(0) + ), + If(~fval, + If(last_fval, + last_y.eq(pix.y) + ), + pix.y.eq(0) + ) + ] + + last_x.attr.add("no_retiming") + last_y.attr.add("no_retiming") + self.specials += [ + MultiReg(last_x, self.last_x.status), + MultiReg(last_y, self.last_y.status) + ] + + +class ROI(Module): + """ROI Engine. 
For each frame, accumulates pixels values within a + rectangular region of interest, and reports the total.""" + + @staticmethod + def count_len(width, shift): + # limit width to 31 to avoid problems with CPUs and RTIO inputs + return min(31, 2*width + 16 - shift) + + def __init__(self, pix, shift): + count_len = ROI.count_len(len(pix.x), shift) + + self.cfg = cfg = Record([ + ("x0", len(pix.x)), + ("x1", len(pix.x)), + ("y0", len(pix.y)), + ("y1", len(pix.y)), + ]) + self.out = out = Record([ + ("update", 1), + # registered output - can be used as CDC input + ("count", count_len), + ]) + + # # # + + # stage 1 - generate "good" (in-ROI) signals + y_good = Signal() + x_good = Signal() + stb = Signal() + eop = Signal() + gray = Signal(16) + self.sync.cl += [ + If(pix.y == cfg.y0, + y_good.eq(1) + ), + If(pix.y == cfg.y1, + y_good.eq(0) + ), + If(pix.x == cfg.x0, + x_good.eq(1) + ), + If(pix.x == cfg.x1, + x_good.eq(0) + ), + If(pix.eop, + y_good.eq(0), + x_good.eq(0) + ), + gray.eq(Cat(pix.a, pix.b)[shift:]), + stb.eq(pix.stb), + eop.eq(pix.eop) + ] + + # stage 2 - accumulate + count = Signal(count_len) + self.sync.cl += [ + If(stb & x_good & y_good, + count.eq(count + gray), + ), + + out.update.eq(0), + If(eop, + count.eq(0), + out.update.eq(1), + out.count.eq(count) + ) + ] diff --git a/artiq/gateware/grabber/deserializer_7series.py b/artiq/gateware/grabber/deserializer_7series.py new file mode 100644 index 000000000..0224127fe --- /dev/null +++ b/artiq/gateware/grabber/deserializer_7series.py @@ -0,0 +1,118 @@ +from migen import * +from migen.genlib.cdc import MultiReg +from migen.genlib.resetsync import AsyncResetSynchronizer + +from misoc.interconnect.csr import * + + +# See: +# http://www.volkerschatz.com/hardware/clink.html + +class Deserializer(Module, AutoCSR): + def __init__(self, pins): + self.pll_reset = CSRStorage(reset=1) + self.pll_locked = CSRStatus() + self.phase_shift = CSR() + self.phase_shift_done = CSRStatus(reset=1) + self.clk_sampled = CSRStatus(7) + + self.q_clk = Signal(7) + self.q = Signal(7*len(pins.sdi_p)) + + self.clock_domains.cd_cl = ClockDomain() + self.clock_domains.cd_cl7x = ClockDomain() + + # # # + + clk_se = Signal() + self.specials += Instance("IBUFDS", + i_I=pins.clk_p, i_IB=pins.clk_n, o_O=clk_se) + + clk_se_iserdes = Signal() + self.specials += [ + Instance("ISERDESE2", + p_DATA_WIDTH=7, p_DATA_RATE="SDR", + p_SERDES_MODE="MASTER", p_INTERFACE_TYPE="NETWORKING", + p_NUM_CE=1, + + i_D=clk_se, + o_O=clk_se_iserdes, + i_CE1=1, + i_CLKDIV=ClockSignal("cl"), i_RST=ResetSignal("cl"), + i_CLK=ClockSignal("cl7x"), i_CLKB=~ClockSignal("cl7x"), + o_Q1=self.q_clk[6], + o_Q2=self.q_clk[5], o_Q3=self.q_clk[4], + o_Q4=self.q_clk[3], o_Q5=self.q_clk[2], + o_Q6=self.q_clk[1], o_Q7=self.q_clk[0] + ) + ] + + sdi_se = Signal(len(pins.sdi_p)) + for i in range(len(pins.sdi_p)): + self.specials += [ + Instance("IBUFDS", i_I=pins.sdi_p[i], i_IB=pins.sdi_n[i], + o_O=sdi_se[i]), + Instance("ISERDESE2", + p_DATA_WIDTH=7, p_DATA_RATE="SDR", + p_SERDES_MODE="MASTER", p_INTERFACE_TYPE="NETWORKING", + p_NUM_CE=1, + + i_D=sdi_se[i], + i_CE1=1, + i_CLKDIV=ClockSignal("cl"), i_RST=ResetSignal("cl"), + i_CLK=ClockSignal("cl7x"), i_CLKB=~ClockSignal("cl7x"), + o_Q1=self.q[7*i+6], + o_Q2=self.q[7*i+5], o_Q3=self.q[7*i+4], + o_Q4=self.q[7*i+3], o_Q5=self.q[7*i+2], + o_Q6=self.q[7*i+1], o_Q7=self.q[7*i+0] + ) + ] + + # CL clock frequency 40-85MHz + # A7-2 MMCM VCO frequency 600-1440MHz + # A7-2 PLL VCO frequency 800-1866MHz + # with current MMCM settings, CL frequency limited to 40-~68MHz 
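The "40-~68MHz" figure above follows from the MMCM settings in this block; as a quick illustrative check, using only the VCO limits quoted in these comments and the MMCME2_ADV parameters below:

    # f_VCO = f_CL * CLKFBOUT_MULT_F / DIVCLK_DIVIDE = 21 * f_CL
    f_cl_min = 600e6 / 21.0     # ~28.6 MHz, set by the 600 MHz VCO floor
    f_cl_max = 1440e6 / 21.0    # ~68.6 MHz, set by the 1440 MHz VCO ceiling
    # Intersecting with the 40-85 MHz Camera Link range gives the usable
    # 40 MHz to ~68 MHz window. CLKOUT1_DIVIDE=3 then produces the 7x bit
    # clock (21/3 = 7), which the BUFR below divides back by 7 for "cl".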
+ # TODO: switch to the PLL, whose VCO range better matches the CL + # clock frequencies. Needs DRP for dynamic phase shift, see XAPP888. + pll_reset = Signal(reset=1) + mmcm_fb = Signal() + mmcm_locked = Signal() + mmcm_ps_psdone = Signal() + cl7x_clk = Signal() + self.specials += [ + Instance("MMCME2_ADV", + p_CLKIN1_PERIOD=18.0, + i_CLKIN1=clk_se_iserdes, + i_RST=pll_reset, + i_CLKINSEL=1, # yes, 1=CLKIN1 0=CLKIN2 + + p_CLKFBOUT_MULT_F=21.0, + p_DIVCLK_DIVIDE=1, + o_LOCKED=mmcm_locked, + + o_CLKFBOUT=mmcm_fb, i_CLKFBIN=mmcm_fb, + + p_CLKOUT1_USE_FINE_PS="TRUE", + p_CLKOUT1_DIVIDE=3, + p_CLKOUT1_PHASE=0.0, + o_CLKOUT1=cl7x_clk, + + i_PSCLK=ClockSignal(), + i_PSEN=self.phase_shift.re, + i_PSINCDEC=self.phase_shift.r, + o_PSDONE=mmcm_ps_psdone, + ), + Instance("BUFR", p_BUFR_DIVIDE="7", i_CLR=~mmcm_locked, + i_I=cl7x_clk, o_O=self.cd_cl.clk), + Instance("BUFIO", i_I=cl7x_clk, o_O=self.cd_cl7x.clk), + AsyncResetSynchronizer(self.cd_cl, ~mmcm_locked), + ] + self.sync += [ + If(self.phase_shift.re, self.phase_shift_done.status.eq(0)), + If(mmcm_ps_psdone, self.phase_shift_done.status.eq(1)) + ] + self.specials += MultiReg(self.q_clk, self.clk_sampled.status) + + self.specials += MultiReg(mmcm_locked, self.pll_locked.status) + pll_reset.attr.add("no_retiming") + self.sync += pll_reset.eq(self.pll_reset.storage) diff --git a/artiq/gateware/jesd204_tools.py b/artiq/gateware/jesd204_tools.py new file mode 100644 index 000000000..d05d6432f --- /dev/null +++ b/artiq/gateware/jesd204_tools.py @@ -0,0 +1,290 @@ +from collections import namedtuple + +from migen import * +from migen.genlib.cdc import MultiReg, BusSynchronizer +from migen.genlib.resetsync import AsyncResetSynchronizer +from misoc.interconnect.csr import * + +from jesd204b.common import (JESD204BTransportSettings, + JESD204BPhysicalSettings, + JESD204BSettings) +from jesd204b.phy.gth import (GTHChannelPLL as JESD204BGTHChannelPLL, + GTHQuadPLL as JESD204BGTHQuadPLL, + GTHTransmitter as JESD204BGTHTransmitter, + GTHInit as JESD204BGTHInit, + GTHTransmitterInterconnect as JESD204BGTHTransmitterInterconnect) +from jesd204b.phy import JESD204BPhyTX +from jesd204b.core import JESD204BCoreTX +from jesd204b.core import JESD204BCoreTXControl + + +class UltrascaleCRG(Module, AutoCSR): + linerate = int(6e9) # linerate = 20*data_rate*4/8 = data_rate*10 + # data_rate = dac_rate/interp_factor + refclk_freq = int(150e6) + fabric_freq = int(125e6) + + def __init__(self, platform, use_rtio_clock=False): + self.jreset = CSRStorage(reset=1) + self.refclk = Signal() + self.clock_domains.cd_jesd = ClockDomain() + + refclk2 = Signal() + refclk_pads = platform.request("dac_refclk", 0) + platform.add_period_constraint(refclk_pads.p, 1e9/self.refclk_freq) + self.specials += [ + Instance("IBUFDS_GTE3", i_CEB=0, p_REFCLK_HROW_CK_SEL=0b00, + i_I=refclk_pads.p, i_IB=refclk_pads.n, + o_O=self.refclk, o_ODIV2=refclk2), + AsyncResetSynchronizer(self.cd_jesd, self.jreset.storage), + ] + + if use_rtio_clock: + self.cd_jesd.clk.attr.add("keep") + self.comb += self.cd_jesd.clk.eq(ClockSignal("rtio")) + else: + self.specials += Instance("BUFG_GT", i_I=refclk2, o_O=self.cd_jesd.clk) + + +PhyPads = namedtuple("PhyPads", "txp txn") + + +class UltrascaleTX(Module, AutoCSR): + def __init__(self, platform, sys_crg, jesd_crg, dac, pll_type="cpll", tx_half=False): + # Note: In general, the choice between channel and quad PLLs can be made based on the "nominal operating ranges", which are (see UG576, Ch.2): + # CPLL: 2.0 - 6.25 GHz + # QPLL0: 9.8 - 16.375 GHz + # QPLL1: 8.0 - 13.0 
GHz + # However, the exact frequency and/or linerate range should be checked according to the model and speed grade from their corresponding datasheets. + pll_cls = { + "cpll": JESD204BGTHChannelPLL, + "qpll": JESD204BGTHQuadPLL + }[pll_type] + ps = JESD204BPhysicalSettings(l=8, m=4, n=16, np=16) + ts = JESD204BTransportSettings(f=2, s=2, k=16, cs=0) + settings = JESD204BSettings(ps, ts, did=0x5a, bid=0x5) + + jesd_pads = platform.request("dac_jesd", dac) + plls = [] + phys = [] + for i in range(len(jesd_pads.txp)): + pll = pll_cls( + jesd_crg.refclk, jesd_crg.refclk_freq, jesd_crg.linerate) + self.submodules += pll + plls.append(pll) + # QPLL quads + if pll_type == "qpll": + gthe3_common_cfgs = [] + for i in range(0, len(plls), 4): + # GTHE3_COMMON common signals + qpll_clk = Signal() + qpll_refclk = Signal() + qpll_reset = Signal() + qpll_lock = Signal() + # GTHE3_COMMON + self.specials += pll_cls.get_gthe3_common( + jesd_crg.refclk, jesd_crg.refclk_freq, jesd_crg.linerate, + qpll_clk, qpll_refclk, qpll_reset, qpll_lock) + gthe3_common_cfgs.append({ + "clk": qpll_clk, + "refclk": qpll_refclk, + "reset": qpll_reset, + "lock": qpll_lock + }) + # Per-channel PLL phys + for i, pll in enumerate(plls): + # PhyTX + phy = JESD204BPhyTX( + pll, jesd_crg.refclk, PhyPads(jesd_pads.txp[i], jesd_pads.txn[i]), + jesd_crg.fabric_freq, transceiver="gth", tx_half=tx_half) + phys.append(phy) + if tx_half: + platform.add_period_constraint(phy.transmitter.cd_tx_half.clk, + 80*1e9/jesd_crg.linerate) + platform.add_false_path_constraints( + sys_crg.cd_sys.clk, + jesd_crg.cd_jesd.clk, + phy.transmitter.cd_tx_half.clk) + else: + platform.add_period_constraint(phy.transmitter.cd_tx.clk, + 40*1e9/jesd_crg.linerate) + platform.add_false_path_constraints( + sys_crg.cd_sys.clk, + jesd_crg.cd_jesd.clk, + phy.transmitter.cd_tx.clk) + # CHANNEL & init interconnects + for i, (pll, phy) in enumerate(zip(plls, phys)): + # CPLLs: 1 init per channel + if pll_type == "cpll": + phy_channel_cfg = {} + # Connect reset/lock to init + pll_reset = pll.reset + pll_lock = pll.lock + self.submodules += JESD204BGTHTransmitterInterconnect( + pll_reset, pll_lock, phy.transmitter, phy.transmitter.init) + # QPLL: 4 inits and 4 channels per quad + elif pll_type == "qpll": + # Connect clk/refclk to CHANNEL + phy_cfg = gthe3_common_cfgs[int(i//4)] + phy_channel_cfg = { + "qpll_clk": phy_cfg["clk"], + "qpll_refclk": phy_cfg["refclk"] + } + # Connect reset/lock to init + pll_reset = phy_cfg["reset"] + pll_lock = phy_cfg["lock"] + if i % 4 == 0: + self.submodules += JESD204BGTHTransmitterInterconnect( + pll_reset, pll_lock, phy.transmitter, + [phys[j].transmitter.init for j in range(i, min(len(phys), i+4))]) + # GTHE3_CHANNEL + self.specials += JESD204BGTHTransmitter.get_gthe3_channel( + pll, phy.transmitter, **phy_channel_cfg) + + self.submodules.core = JESD204BCoreTX( + phys, settings, converter_data_width=64) + self.submodules.control = JESD204BCoreTXControl(self.core) + self.core.register_jsync(platform.request("dac_sync", dac)) + + +class DDMTDEdgeDetector(Module): + def __init__(self, i): + self.rising = Signal() + + history = Signal(4) + deglitched = Signal() + self.sync.helper += history.eq(Cat(history[1:], i)) + self.comb += deglitched.eq(i | history[0] | history[1] | history[2] | history[3]) + + deglitched_r = Signal() + self.sync.helper += [ + deglitched_r.eq(deglitched), + self.rising.eq(deglitched & ~deglitched_r) + ] + + +# See "Digital femtosecond time difference circuit for CERN's timing system" +# by P. Moreira and I. 
Darwazeh +class DDMTD(Module, AutoCSR): + def __init__(self, input_pads, rtio_clk_freq=150e6): + N = 64 + self.reset = CSRStorage(reset=1) + self.locked = CSRStatus() + self.dt = CSRStatus(N.bit_length()) + + # # # + + self.clock_domains.cd_helper = ClockDomain(reset_less=True) + helper_locked = Signal() + helper_fb = Signal() + helper_output = Signal() + + input_se = Signal() + beat1 = Signal() + beat2 = Signal() + self.specials += [ + Instance("MMCME2_BASE", + p_CLKIN1_PERIOD=1e9/rtio_clk_freq, + i_CLKIN1=ClockSignal("rtio"), + i_RST=self.reset.storage, + o_LOCKED=helper_locked, + + # VCO at 1200MHz with 150MHz RTIO frequency + p_CLKFBOUT_MULT_F=8.0, + p_DIVCLK_DIVIDE=1, + + o_CLKFBOUT=helper_fb, i_CLKFBIN=helper_fb, + + # helper PLL ratio: 64/65 (N=64) + p_CLKOUT0_DIVIDE_F=8.125, + o_CLKOUT0=helper_output, + ), + MultiReg(helper_locked, self.locked.status), + Instance("BUFG", i_I=helper_output, o_O=self.cd_helper.clk), + Instance("IBUFDS", i_I=input_pads.p, i_IB=input_pads.n, o_O=input_se), + Instance("FD", i_C=self.cd_helper.clk, i_D=input_se, o_Q=beat1, attr={("IOB", "TRUE")}), + Instance("FD", i_C=self.cd_helper.clk, i_D=ClockSignal("rtio"), o_Q=beat2), + ] + + ed1 = DDMTDEdgeDetector(beat1) + ed2 = DDMTDEdgeDetector(beat2) + self.submodules += ed1, ed2 + + counting = Signal() + counter = Signal(N.bit_length()) + result = Signal.like(counter) + self.sync.helper += [ + If(counting, + counter.eq(counter + 1) + ).Else( + result.eq(counter) + ), + + If(ed1.rising, counting.eq(1), counter.eq(0)), + If(ed2.rising, counting.eq(0)) + ] + + bsync = BusSynchronizer(len(result), "helper", "sys") + self.submodules += bsync + self.comb += [ + bsync.i.eq(result), + self.dt.status.eq(bsync.o) + ] + + +# This assumes: +# * fine RTIO frequency (rtiox) = 2*RTIO frequency +# * JESD and coarse RTIO clocks are the same +# (only reset may differ). 
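A numeric reading of the helper-clock constants in the DDMTD above (an illustrative sketch, assuming the default rtio_clk_freq of 150e6):

    rtio = 150e6
    vco = 8.0 * rtio        # CLKFBOUT_MULT_F=8.0 -> 1200 MHz
    helper = vco / 8.125    # CLKOUT0_DIVIDE_F=8.125 -> ~147.69 MHz = rtio*64/65
    beat = rtio - helper    # ~2.31 MHz (= rtio/65) beat seen by the edge detectors
    step = 1 / rtio / 64    # roughly 104 ps of input phase per helper-cycle count (N=64)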
+class SysrefSampler(Module, AutoCSR): + def __init__(self, sysref_pads, coarse_ts, sysref_phase_bits=8): + self.sh_error = CSRStatus() + self.sh_error_reset = CSRStorage() + # Note: only the lower log2(RTIO frequency / SYSREF frequency) bits are stable + self.sysref_phase = CSRStatus(8) + + self.jref = Signal() + + # # # + + sysref_se = Signal() + sysref_oversample = Signal(4) + self.specials += [ + Instance("IBUFDS", i_I=sysref_pads.p, i_IB=sysref_pads.n, o_O=sysref_se), + Instance("ISERDESE3", + p_IS_CLK_INVERTED=0, + p_IS_CLK_B_INVERTED=1, + p_DATA_WIDTH=4, + + i_D=sysref_se, + i_RST=ResetSignal("rtio"), + i_FIFO_RD_EN=0, + i_CLK=ClockSignal("rtiox"), + i_CLK_B=ClockSignal("rtiox"), # locally inverted + i_CLKDIV=ClockSignal("rtio"), + o_Q=sysref_oversample) + ] + + self.comb += self.jref.eq(sysref_oversample[1]) + sh_error = Signal() + sh_error_reset = Signal() + self.sync.rtio += [ + If(~( (sysref_oversample[0] == sysref_oversample[1]) + & (sysref_oversample[1] == sysref_oversample[2])), + sh_error.eq(1) + ), + If(sh_error_reset, sh_error.eq(0)) + ] + self.specials += [ + MultiReg(self.sh_error_reset.storage, sh_error_reset, "rtio"), + MultiReg(sh_error, self.sh_error.status) + ] + + jref_r = Signal() + sysref_phase_rtio = Signal(sysref_phase_bits) + self.sync.rtio += [ + jref_r.eq(self.jref), + If(self.jref & ~jref_r, sysref_phase_rtio.eq(coarse_ts)) + ] + sysref_phase_rtio.attr.add("no_retiming") + self.specials += MultiReg(sysref_phase_rtio, self.sysref_phase.status) diff --git a/artiq/gateware/remote_csr.py b/artiq/gateware/remote_csr.py deleted file mode 100644 index 7acbba798..000000000 --- a/artiq/gateware/remote_csr.py +++ /dev/null @@ -1,42 +0,0 @@ -from collections import OrderedDict -from operator import itemgetter -import csv - -from misoc.interconnect.csr import CSRStatus, CSRStorage - - -def _get_csr_data(csv_file): - csr_data = OrderedDict() - with open(csv_file) as csv_file_f: - csv_reader = csv.reader(csv_file_f) - for name, address, length, ro in csv_reader: - region_name, csr_name = name.split(".") - address = int(address, 0) - length = int(length, 0) - ro = ro == "ro" - if region_name not in csr_data: - csr_data[region_name] = [] - csr_data[region_name].append((csr_name, address, length, ro)) - return csr_data - - -def get_remote_csr_regions(offset, csv_file): - busword = 32 - regions = [] - for region_name, csrs_info in _get_csr_data(csv_file).items(): - csrs_info = sorted(csrs_info, key=itemgetter(1)) - origin = csrs_info[0][1] - next_address = origin - csrs = [] - for csr_name, address, length, ro in csrs_info: - if address != next_address: - raise ValueError("CSRs are not contiguous") - nr = (length + busword - 1)//busword - next_address += nr*busword//8 - if ro: - csr = CSRStatus(length, name=csr_name) - else: - csr = CSRStorage(length, name=csr_name) - csrs.append(csr) - regions.append((region_name, offset + origin, busword, csrs)) - return regions diff --git a/artiq/gateware/rtio/__init__.py b/artiq/gateware/rtio/__init__.py index 18feec299..af4989f7c 100644 --- a/artiq/gateware/rtio/__init__.py +++ b/artiq/gateware/rtio/__init__.py @@ -1,5 +1,7 @@ -from artiq.gateware.rtio.cri import KernelInitiator, CRIInterconnectShared -from artiq.gateware.rtio.core import Channel, LogChannel, Core +from artiq.gateware.rtio.tsc import TSC +from artiq.gateware.rtio.cri import KernelInitiator, CRIInterconnectShared, RoutingTableAccess +from artiq.gateware.rtio.channel import Channel, LogChannel +from artiq.gateware.rtio.core import Core from artiq.gateware.rtio.analyzer 
import Analyzer from artiq.gateware.rtio.moninj import MonInj from artiq.gateware.rtio.dma import DMA diff --git a/artiq/gateware/rtio/analyzer.py b/artiq/gateware/rtio/analyzer.py index 2461f80c1..342da2acf 100644 --- a/artiq/gateware/rtio/analyzer.py +++ b/artiq/gateware/rtio/analyzer.py @@ -43,7 +43,7 @@ assert layout_len(stopped_layout) == message_len class MessageEncoder(Module, AutoCSR): - def __init__(self, cri, enable): + def __init__(self, tsc, cri, enable): self.source = stream.Endpoint([("data", message_len)]) self.overflow = CSRStatus() @@ -67,10 +67,10 @@ class MessageEncoder(Module, AutoCSR): self.comb += [ input_output.channel.eq(cri.chan_sel), input_output.address_padding.eq(cri.o_address), - input_output.rtio_counter.eq(cri.counter), + input_output.rtio_counter.eq(tsc.full_ts_cri), If(cri.cmd == cri_commands["write"], input_output.message_type.eq(MessageType.output.value), - input_output.timestamp.eq(cri.timestamp), + input_output.timestamp.eq(cri.o_timestamp), input_output.data.eq(cri.o_data) ).Else( input_output.message_type.eq(MessageType.input.value), @@ -85,7 +85,7 @@ class MessageEncoder(Module, AutoCSR): self.comb += [ exception.message_type.eq(MessageType.exception.value), exception.channel.eq(cri.chan_sel), - exception.rtio_counter.eq(cri.counter), + exception.rtio_counter.eq(tsc.full_ts_cri), ] just_written = Signal() self.sync += just_written.eq(cri.cmd == cri_commands["write"]) @@ -94,10 +94,6 @@ class MessageEncoder(Module, AutoCSR): exception_stb.eq(1), exception.exception_type.eq(ExceptionType.o_underflow.value) ), - If(just_written & cri.o_status[2], - exception_stb.eq(1), - exception.exception_type.eq(ExceptionType.o_sequence_error.value) - ), If(read_overflow, exception_stb.eq(1), exception.exception_type.eq(ExceptionType.i_overflow.value) @@ -107,7 +103,7 @@ class MessageEncoder(Module, AutoCSR): stopped = Record(stopped_layout) self.comb += [ stopped.message_type.eq(MessageType.stopped.value), - stopped.rtio_counter.eq(cri.counter), + stopped.rtio_counter.eq(tsc.full_ts_cri), ] enable_r = Signal() @@ -197,13 +193,13 @@ class DMAWriter(Module, AutoCSR): class Analyzer(Module, AutoCSR): - def __init__(self, cri, membus, fifo_depth=128): + def __init__(self, tsc, cri, membus, fifo_depth=128): # shutdown procedure: set enable to 0, wait until busy=0 self.enable = CSRStorage() self.busy = CSRStatus() self.submodules.message_encoder = MessageEncoder( - cri, self.enable.storage) + tsc, cri, self.enable.storage) self.submodules.fifo = stream.SyncFIFO( [("data", message_len)], fifo_depth, True) self.submodules.converter = stream.Converter( diff --git a/artiq/gateware/rtio/cdc.py b/artiq/gateware/rtio/cdc.py index af93b4105..bd0b11d37 100644 --- a/artiq/gateware/rtio/cdc.py +++ b/artiq/gateware/rtio/cdc.py @@ -2,19 +2,19 @@ from migen import * from migen.genlib.cdc import * -__all__ = ["GrayCodeTransfer", "RTIOCounter", "BlindTransfer"] +__all__ = ["GrayCodeTransfer"] # note: transfer is in rtio/sys domains and not affected by the reset CSRs class GrayCodeTransfer(Module): def __init__(self, width): self.i = Signal(width) # in rtio domain - self.o = Signal(width) # in sys domain + self.o = Signal(width, reset_less=True) # in sys domain # # # # convert to Gray code - value_gray_rtio = Signal(width) + value_gray_rtio = Signal(width, reset_less=True) self.sync.rtio += value_gray_rtio.eq(self.i ^ self.i[1:]) # transfer to system clock domain value_gray_sys = Signal(width) @@ -26,42 +26,3 @@ class GrayCodeTransfer(Module): for i in reversed(range(width-1)): self.comb 
+= value_sys[i].eq(value_sys[i+1] ^ value_gray_sys[i]) self.sync += self.o.eq(value_sys) - - -class RTIOCounter(Module): - def __init__(self, width): - self.width = width - # Timestamp counter in RTIO domain - self.value_rtio = Signal(width) - # Timestamp counter resynchronized to sys domain - # Lags behind value_rtio, monotonic and glitch-free - self.value_sys = Signal(width) - - # # # - - # note: counter is in rtio domain and never affected by the reset CSRs - self.sync.rtio += self.value_rtio.eq(self.value_rtio + 1) - gt = GrayCodeTransfer(width) - self.submodules += gt - self.comb += gt.i.eq(self.value_rtio), self.value_sys.eq(gt.o) - - -class BlindTransfer(Module): - def __init__(self, idomain="rio", odomain="rsys"): - self.i = Signal() - self.o = Signal() - - ps = PulseSynchronizer(idomain, odomain) - ps_ack = PulseSynchronizer(odomain, idomain) - self.submodules += ps, ps_ack - blind = Signal() - isync = getattr(self.sync, idomain) - isync += [ - If(self.i, blind.eq(1)), - If(ps_ack.o, blind.eq(0)) - ] - self.comb += [ - ps.i.eq(self.i & ~blind), - ps_ack.i.eq(ps.o), - self.o.eq(ps.o) - ] diff --git a/artiq/gateware/rtio/channel.py b/artiq/gateware/rtio/channel.py new file mode 100644 index 000000000..d7b9a60e4 --- /dev/null +++ b/artiq/gateware/rtio/channel.py @@ -0,0 +1,36 @@ +import warnings + +from artiq.gateware.rtio import rtlink + + +class Channel: + def __init__(self, interface, probes=None, overrides=None, + ofifo_depth=None, ififo_depth=64): + if probes is None: + probes = [] + if overrides is None: + overrides = [] + + self.interface = interface + self.probes = probes + self.overrides = overrides + if ofifo_depth is None: + ofifo_depth = 64 + else: + warnings.warn("ofifo_depth is deprecated", FutureWarning) + self.ofifo_depth = ofifo_depth + self.ififo_depth = ififo_depth + + @classmethod + def from_phy(cls, phy, **kwargs): + probes = getattr(phy, "probes", []) + overrides = getattr(phy, "overrides", []) + return cls(phy.rtlink, probes, overrides, **kwargs) + + +class LogChannel: + """A degenerate channel used to log messages into the analyzer.""" + def __init__(self): + self.interface = rtlink.Interface(rtlink.OInterface(32)) + self.probes = [] + self.overrides = [] diff --git a/artiq/gateware/rtio/core.py b/artiq/gateware/rtio/core.py index 76bcd09ee..0b26a1126 100644 --- a/artiq/gateware/rtio/core.py +++ b/artiq/gateware/rtio/core.py @@ -2,302 +2,30 @@ from functools import reduce from operator import and_ from migen import * -from migen.genlib.record import Record -from migen.genlib.fifo import AsyncFIFO from migen.genlib.resetsync import AsyncResetSynchronizer +from migen.genlib.cdc import BlindTransfer from misoc.interconnect.csr import * -from artiq.gateware.rtio import cri, rtlink -from artiq.gateware.rtio.cdc import * - - -# CHOOSING A GUARD TIME -# -# The buffer must be transferred to the FIFO soon enough to account for: -# * transfer of counter to sys domain: Tio + 2*Tsys + Tsys -# * FIFO latency: Tsys + 2*Tio -# * FIFO buffer latency: Tio -# Therefore we must choose: -# guard_io_cycles > (4*Tio + 4*Tsys)/Tio -# -# We are writing to the FIFO from the buffer when the guard time has been -# reached. This can fill the FIFO and deassert the writable flag. A race -# condition occurs that causes problems if the deassertion happens between -# the CPU checking the writable flag (and reading 1) and writing a new event. -# -# When the FIFO is about to be full, it contains fifo_depth-1 events of -# strictly increasing timestamps. 
-# -# Thus the FIFO-filling event's timestamp must satisfy: -# timestamp*Tio > (fifo_depth-1)*Tio + time -# We also have (guard time reached): -# timestamp*Tio < time + guard_io_cycles*Tio -# [NB: time > counter.value_sys*Tio] -# Thus we must have: -# guard_io_cycles > fifo_depth-1 -# -# We can prevent overflows by choosing instead: -# guard_io_cycles < fifo_depth-1 - -class _OutputManager(Module): - def __init__(self, interface, counter, fifo_depth, guard_io_cycles): - data_width = rtlink.get_data_width(interface) - address_width = rtlink.get_address_width(interface) - fine_ts_width = rtlink.get_fine_ts_width(interface) - - ev_layout = [] - if data_width: - ev_layout.append(("data", data_width)) - if address_width: - ev_layout.append(("address", address_width)) - ev_layout.append(("timestamp", counter.width + fine_ts_width)) - # ev must be valid 1 cycle before we to account for the latency in - # generating replace, sequence_error and collision - self.ev = Record(ev_layout) - - self.writable = Signal() - self.we = Signal() # maximum throughput 1/2 - - self.underflow = Signal() # valid 1 cycle after we, pulsed - self.sequence_error = Signal() - self.collision = Signal() - self.busy = Signal() # pulsed - - # # # - - # FIFO - fifo = ClockDomainsRenamer({"write": "rsys", "read": "rio"})( - AsyncFIFO(layout_len(ev_layout), fifo_depth)) - self.submodules += fifo - fifo_in = Record(ev_layout) - fifo_out = Record(ev_layout) - self.comb += [ - fifo.din.eq(fifo_in.raw_bits()), - fifo_out.raw_bits().eq(fifo.dout) - ] - - # Buffer - buf_pending = Signal() - buf = Record(ev_layout) - buf_just_written = Signal() - - # Special cases - replace = Signal(reset_less=True) - sequence_error = Signal(reset_less=True) - collision = Signal(reset_less=True) - any_error = Signal() - if interface.enable_replace: - # Note: replace may be asserted at the same time as collision - # when addresses are different. In that case, it is a collision. - self.sync.rsys += replace.eq(self.ev.timestamp == buf.timestamp) - # Detect sequence errors on coarse timestamps only - # so that they are mutually exclusive with collision errors. 
- self.sync.rsys += sequence_error.eq(self.ev.timestamp[fine_ts_width:] < - buf.timestamp[fine_ts_width:]) - if interface.enable_replace: - if address_width: - different_addresses = self.ev.address != buf.address - else: - different_addresses = 0 - if fine_ts_width: - self.sync.rsys += collision.eq( - (self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:]) - & ((self.ev.timestamp[:fine_ts_width] != buf.timestamp[:fine_ts_width]) - |different_addresses)) - else: - self.sync.rsys += collision.eq( - (self.ev.timestamp == buf.timestamp) & different_addresses) - else: - self.sync.rsys += collision.eq( - self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:]) - self.comb += [ - any_error.eq(sequence_error | collision), - self.sequence_error.eq(self.we & sequence_error), - self.collision.eq(self.we & collision) - ] - - # Buffer read and FIFO write - self.comb += fifo_in.eq(buf) - in_guard_time = Signal() - self.comb += in_guard_time.eq( - buf.timestamp[fine_ts_width:] - < counter.value_sys + guard_io_cycles) - self.sync.rsys += If(in_guard_time, buf_pending.eq(0)) - self.comb += \ - If(buf_pending, - If(in_guard_time, - If(buf_just_written, - self.underflow.eq(1) - ).Else( - fifo.we.eq(1) - ) - ), - If(self.we & ~replace & ~any_error, - fifo.we.eq(1) - ) - ) - - # Buffer write - # Must come after read to handle concurrent read+write properly - self.sync.rsys += [ - buf_just_written.eq(0), - If(self.we & ~any_error, - buf_just_written.eq(1), - buf_pending.eq(1), - buf.eq(self.ev) - ) - ] - self.comb += self.writable.eq(fifo.writable) - - # Buffer output of FIFO to improve timing - dout_stb = Signal() - dout_ack = Signal() - dout = Record(ev_layout) - self.sync.rio += \ - If(fifo.re, - dout_stb.eq(1), - dout.eq(fifo_out) - ).Elif(dout_ack, - dout_stb.eq(0) - ) - self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack)) - - # latency compensation - if interface.delay: - counter_rtio = Signal.like(counter.value_rtio, reset_less=True) - self.sync.rtio += counter_rtio.eq(counter.value_rtio - - (interface.delay + 1)) - else: - counter_rtio = counter.value_rtio - - # FIFO read through buffer - self.comb += [ - dout_ack.eq( - dout.timestamp[fine_ts_width:] == counter_rtio), - interface.stb.eq(dout_stb & dout_ack) - ] - - busy_transfer = BlindTransfer() - self.submodules += busy_transfer - self.comb += [ - busy_transfer.i.eq(interface.stb & interface.busy), - self.busy.eq(busy_transfer.o), - ] - - if data_width: - self.comb += interface.data.eq(dout.data) - if address_width: - self.comb += interface.address.eq(dout.address) - if fine_ts_width: - self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width]) - - -class _InputManager(Module): - def __init__(self, interface, counter, fifo_depth): - data_width = rtlink.get_data_width(interface) - fine_ts_width = rtlink.get_fine_ts_width(interface) - - ev_layout = [] - if data_width: - ev_layout.append(("data", data_width)) - if interface.timestamped: - ev_layout.append(("timestamp", counter.width + fine_ts_width)) - self.ev = Record(ev_layout) - - self.readable = Signal() - self.re = Signal() - - self.overflow = Signal() # pulsed - - # # # - - fifo = ClockDomainsRenamer({"read": "rsys", "write": "rio"})( - AsyncFIFO(layout_len(ev_layout), fifo_depth)) - self.submodules += fifo - fifo_in = Record(ev_layout) - fifo_out = Record(ev_layout) - self.comb += [ - fifo.din.eq(fifo_in.raw_bits()), - fifo_out.raw_bits().eq(fifo.dout) - ] - - # latency compensation - if interface.delay: - counter_rtio = Signal.like(counter.value_rtio, 
reset_less=True) - self.sync.rtio += counter_rtio.eq(counter.value_rtio - - (interface.delay + 1)) - else: - counter_rtio = counter.value_rtio - - # FIFO write - if data_width: - self.comb += fifo_in.data.eq(interface.data) - if interface.timestamped: - if fine_ts_width: - full_ts = Cat(interface.fine_ts, counter_rtio) - else: - full_ts = counter_rtio - self.comb += fifo_in.timestamp.eq(full_ts) - self.comb += fifo.we.eq(interface.stb) - - # FIFO read - self.comb += [ - self.ev.eq(fifo_out), - self.readable.eq(fifo.readable), - fifo.re.eq(self.re) - ] - - overflow_transfer = BlindTransfer() - self.submodules += overflow_transfer - self.comb += [ - overflow_transfer.i.eq(fifo.we & ~fifo.writable), - self.overflow.eq(overflow_transfer.o), - ] - - -class Channel: - def __init__(self, interface, probes=None, overrides=None, - ofifo_depth=64, ififo_depth=64): - if probes is None: - probes = [] - if overrides is None: - overrides = [] - - self.interface = interface - self.probes = probes - self.overrides = overrides - self.ofifo_depth = ofifo_depth - self.ififo_depth = ififo_depth - - @classmethod - def from_phy(cls, phy, **kwargs): - probes = getattr(phy, "probes", []) - overrides = getattr(phy, "overrides", []) - return cls(phy.rtlink, probes, overrides, **kwargs) - - -class LogChannel: - """A degenerate channel used to log messages into the analyzer.""" - def __init__(self): - self.interface = rtlink.Interface(rtlink.OInterface(32)) - self.probes = [] - self.overrides = [] +from artiq.gateware.rtio import cri +from artiq.gateware.rtio import rtlink +from artiq.gateware.rtio.channel import * +from artiq.gateware.rtio.sed.core import * +from artiq.gateware.rtio.input_collector import * class Core(Module, AutoCSR): - def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20): - if fine_ts_width is None: - fine_ts_width = max(rtlink.get_fine_ts_width(c.interface) - for c in channels) - + def __init__(self, tsc, channels, lane_count=8, fifo_depth=128): self.cri = cri.Interface() self.reset = CSR() self.reset_phy = CSR() - self.async_error = CSR(2) + self.async_error = CSR(3) + self.collision_channel = CSRStatus(16) + self.busy_channel = CSRStatus(16) + self.sequence_error_channel = CSRStatus(16) # Clocking/Reset # Create rsys, rio and rio_phy domains based on sys and rtio - # with reset controlled by CRI. + # with reset controlled by CSR. # # The `rio` CD contains logic that is reset with `core.reset()`. # That's state that could unduly affect subsequent experiments, @@ -327,125 +55,66 @@ class Core(Module, AutoCSR): self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset) self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy) - # Managers - self.submodules.counter = RTIOCounter(len(self.cri.timestamp) - fine_ts_width) + # TSC + chan_fine_ts_width = max(max(rtlink.get_fine_ts_width(channel.interface.o) + for channel in channels), + max(rtlink.get_fine_ts_width(channel.interface.i) + for channel in channels)) + assert tsc.glbl_fine_ts_width >= chan_fine_ts_width - # Collision is not an asynchronous error with local RTIO, but - # we treat it as such for consistency with DRTIO, where collisions - # are reported by the satellites. 
- o_underflow = Signal() - o_sequence_error = Signal() + # Outputs/Inputs + quash_channels = [n for n, c in enumerate(channels) if isinstance(c, LogChannel)] + + outputs = SED(channels, tsc.glbl_fine_ts_width, "async", + quash_channels=quash_channels, + lane_count=lane_count, fifo_depth=fifo_depth, + interface=self.cri) + self.submodules += outputs + self.comb += outputs.coarse_timestamp.eq(tsc.coarse_ts) + self.sync += outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts_sys + 16) + + inputs = InputCollector(tsc, channels, "async", + quash_channels=quash_channels, + interface=self.cri) + self.submodules += inputs + + # Asychronous output errors + o_collision_sync = BlindTransfer("rio", "rsys", data_width=16) + o_busy_sync = BlindTransfer("rio", "rsys", data_width=16) + self.submodules += o_collision_sync, o_busy_sync o_collision = Signal() o_busy = Signal() - self.sync.rsys += [ - If(self.cri.cmd == cri.commands["write"], - o_underflow.eq(0), - o_sequence_error.eq(0), - ) - ] + o_sequence_error = Signal() self.sync += [ If(self.async_error.re, If(self.async_error.r[0], o_collision.eq(0)), If(self.async_error.r[1], o_busy.eq(0)), + If(self.async_error.r[2], o_sequence_error.eq(0)), + ), + If(o_collision_sync.o, + o_collision.eq(1), + If(~o_collision, + self.collision_channel.status.eq(o_collision_sync.data_o) + ) + ), + If(o_busy_sync.o, + o_busy.eq(1), + If(~o_busy, + self.busy_channel.status.eq(o_busy_sync.data_o) + ) + ), + If(outputs.sequence_error, + o_sequence_error.eq(1), + If(~o_sequence_error, + self.sequence_error_channel.status.eq(outputs.sequence_error_channel) + ) ) ] + self.comb += self.async_error.w.eq(Cat(o_collision, o_busy, o_sequence_error)) - o_statuses, i_statuses = [], [] - i_datas, i_timestamps = [], [] - i_ack = Signal() - sel = self.cri.chan_sel[:16] - for n, channel in enumerate(channels): - if isinstance(channel, LogChannel): - o_statuses.append(1) - i_datas.append(0) - i_timestamps.append(0) - i_statuses.append(0) - continue - - selected = Signal() - self.comb += selected.eq(sel == n) - - o_manager = _OutputManager(channel.interface.o, self.counter, - channel.ofifo_depth, guard_io_cycles) - self.submodules += o_manager - - if hasattr(o_manager.ev, "data"): - self.comb += o_manager.ev.data.eq(self.cri.o_data) - if hasattr(o_manager.ev, "address"): - self.comb += o_manager.ev.address.eq(self.cri.o_address) - ts_shift = len(self.cri.timestamp) - len(o_manager.ev.timestamp) - self.comb += o_manager.ev.timestamp.eq(self.cri.timestamp[ts_shift:]) - - self.comb += o_manager.we.eq(selected & (self.cri.cmd == cri.commands["write"])) - - self.sync.rsys += [ - If(o_manager.underflow, o_underflow.eq(1)), - If(o_manager.sequence_error, o_sequence_error.eq(1)) - ] - self.sync += [ - If(o_manager.collision, o_collision.eq(1)), - If(o_manager.busy, o_busy.eq(1)) - ] - o_statuses.append(o_manager.writable) - - if channel.interface.i is not None: - i_manager = _InputManager(channel.interface.i, self.counter, - channel.ififo_depth) - self.submodules += i_manager - - if hasattr(i_manager.ev, "data"): - i_datas.append(i_manager.ev.data) - else: - i_datas.append(0) - if channel.interface.i.timestamped: - ts_shift = (len(self.cri.i_timestamp) - len(i_manager.ev.timestamp)) - i_timestamps.append(i_manager.ev.timestamp << ts_shift) - else: - i_timestamps.append(0) - - overflow = Signal() - self.sync.rsys += [ - If(selected & i_ack, - overflow.eq(0)), - If(i_manager.overflow, - overflow.eq(1)) - ] - self.comb += i_manager.re.eq(selected & i_ack & ~overflow) - 
i_statuses.append(Cat(i_manager.readable & ~overflow, overflow)) - - else: - i_datas.append(0) - i_timestamps.append(0) - i_statuses.append(0) - - o_status_raw = Signal() self.comb += [ - o_status_raw.eq(Array(o_statuses)[sel]), - self.cri.o_status.eq(Cat( - ~o_status_raw, o_underflow, o_sequence_error)), - self.async_error.w.eq(Cat(o_collision, o_busy)) + o_collision_sync.i.eq(outputs.collision), + o_collision_sync.data_i.eq(outputs.collision_channel), + o_busy_sync.i.eq(outputs.busy), + o_busy_sync.data_i.eq(outputs.busy_channel) ] - - i_status_raw = Signal(2) - self.comb += i_status_raw.eq(Array(i_statuses)[sel]) - input_timeout = Signal.like(self.cri.timestamp) - input_pending = Signal() - self.sync.rsys += [ - i_ack.eq(0), - If(i_ack, - self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1], 0)), - self.cri.i_data.eq(Array(i_datas)[sel]), - self.cri.i_timestamp.eq(Array(i_timestamps)[sel]), - ), - If((self.cri.counter >= input_timeout) | (i_status_raw != 0), - If(input_pending, i_ack.eq(1)), - input_pending.eq(0) - ), - If(self.cri.cmd == cri.commands["read"], - input_timeout.eq(self.cri.timestamp), - input_pending.eq(1), - self.cri.i_status.eq(0b100) - ) - ] - - self.comb += self.cri.counter.eq(self.counter.value_sys << fine_ts_width) diff --git a/artiq/gateware/rtio/cri.py b/artiq/gateware/rtio/cri.py index f282ae307..c735b9e5f 100644 --- a/artiq/gateware/rtio/cri.py +++ b/artiq/gateware/rtio/cri.py @@ -2,66 +2,83 @@ from migen import * from migen.genlib.record import * +from migen.genlib.cdc import MultiReg from misoc.interconnect.csr import * +# CRI write happens in 3 cycles: +# 1. set timestamp and channel +# 2. set other payload elements and issue write command +# 3. check status + commands = { "nop": 0, - "write": 1, # i_status should have the "wait for status" bit set until # an event is available, or timestamp is reached. - "read": 2 + "read": 2, + # targets must assert o_buffer_space_valid in response + # to this command + "get_buffer_space": 3 } layout = [ ("cmd", 2, DIR_M_TO_S), - # 8 MSBs of chan_sel are used to select core + # 8 MSBs of chan_sel = routing destination + # 16 LSBs of chan_sel = channel within the destination ("chan_sel", 24, DIR_M_TO_S), - ("timestamp", 64, DIR_M_TO_S), + ("o_timestamp", 64, DIR_M_TO_S), ("o_data", 512, DIR_M_TO_S), - ("o_address", 16, DIR_M_TO_S), + ("o_address", 8, DIR_M_TO_S), # o_status bits: - # <0:wait> <1:underflow> <2:sequence_error> + # <0:wait> <1:underflow> <2:destination unreachable> ("o_status", 3, DIR_S_TO_M), + # pessimistic estimate of the number of outputs events that can be + # written without waiting. + # this feature may be omitted on systems without DRTIO. + ("o_buffer_space_valid", 1, DIR_S_TO_M), + ("o_buffer_space", 16, DIR_S_TO_M), + + ("i_timeout", 64, DIR_M_TO_S), ("i_data", 32, DIR_S_TO_M), ("i_timestamp", 64, DIR_S_TO_M), # i_status bits: # <0:wait for event (command timeout)> <1:overflow> <2:wait for status> + # <3:destination unreachable> # <0> and <1> are mutually exclusive. <1> has higher priority. 
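# Illustrative sketch, not part of this patch: a minimal Migen FSM driving one
# event through the 3-cycle CRI write protocol described above. The module name
# and the start/channel/timestamp/data signals are assumptions made for the
# example; in ARTIQ itself the KernelInitiator and the DMA CRIMaster play this role.
from migen import *
from migen.genlib.fsm import FSM

from artiq.gateware.rtio import cri


class ExampleCRIWriter(Module):
    def __init__(self):
        self.cri = cri.Interface()
        self.start = Signal()
        self.channel = Signal(24)
        self.timestamp = Signal(64)
        self.data = Signal(512)

        fsm = FSM(reset_state="SETUP")
        self.submodules += fsm
        fsm.act("SETUP",
            # cycle 1: present channel and timestamp
            self.cri.chan_sel.eq(self.channel),
            self.cri.o_timestamp.eq(self.timestamp),
            If(self.start, NextState("WRITE"))
        )
        fsm.act("WRITE",
            # cycle 2: present the remaining payload and issue the write command
            self.cri.chan_sel.eq(self.channel),
            self.cri.o_timestamp.eq(self.timestamp),
            self.cri.o_data.eq(self.data),
            self.cri.cmd.eq(cri.commands["write"]),
            NextState("STATUS")
        )
        fsm.act("STATUS",
            # cycle 3: check o_status; bit 0 = wait, bit 1 = underflow,
            # bit 2 = destination unreachable
            If(~self.cri.o_status[0], NextState("SETUP"))
        )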
- ("i_status", 3, DIR_S_TO_M), - - ("counter", 64, DIR_S_TO_M) + ("i_status", 4, DIR_S_TO_M), ] class Interface(Record): - def __init__(self): - Record.__init__(self, layout) + def __init__(self, **kwargs): + Record.__init__(self, layout, **kwargs) class KernelInitiator(Module, AutoCSR): - def __init__(self, cri=None): - self.chan_sel = CSRStorage(24) - self.timestamp = CSRStorage(64) + def __init__(self, tsc, cri=None, now64=False): + self.target = CSRStorage(32) + if now64: + self.now = CSRStorage(64) + else: + # not using CSRStorage atomic_write feature here to make storage reset_less + self.now_hi = CSR(32) + self.now_lo = CSR(32) - # Writing timestamp clears o_data. This implements automatic + # Writing target clears o_data. This implements automatic # zero-extension of output event data by the gateware. When staging an - # output event, always write timestamp before o_data. + # output event, always write target before o_data. self.o_data = CSRStorage(512, write_from_dev=True) - self.o_address = CSRStorage(16) - self.o_we = CSR() self.o_status = CSRStatus(3) + self.i_timeout = CSRStorage(64) self.i_data = CSRStatus(32) self.i_timestamp = CSRStatus(64) - self.i_request = CSR() - self.i_status = CSRStatus(3) - self.i_overflow_reset = CSR() + self.i_status = CSRStatus(4) self.counter = CSRStatus(64) self.counter_update = CSR() @@ -72,30 +89,45 @@ class KernelInitiator(Module, AutoCSR): # # # + if now64: + now = self.now.storage + else: + now = Signal(64, reset_less=True) + now_hi_backing = Signal(32) + self.sync += [ + If(self.now_hi.re, now_hi_backing.eq(self.now_hi.r)), + If(self.now_lo.re, now.eq(Cat(self.now_lo.r, now_hi_backing))) + ] + self.comb += [ + self.now_hi.w.eq(now[32:]), + self.now_lo.w.eq(now[:32]) + ] + self.comb += [ self.cri.cmd.eq(commands["nop"]), - If(self.o_we.re, self.cri.cmd.eq(commands["write"])), - If(self.i_request.re, self.cri.cmd.eq(commands["read"])), + If(self.o_data.re, self.cri.cmd.eq(commands["write"])), + If(self.i_timeout.re, self.cri.cmd.eq(commands["read"])), - self.cri.chan_sel.eq(self.chan_sel.storage), - self.cri.timestamp.eq(self.timestamp.storage), + self.cri.chan_sel.eq(self.target.storage[8:]), + self.cri.o_timestamp.eq(now), self.cri.o_data.eq(self.o_data.storage), - self.cri.o_address.eq(self.o_address.storage), + self.cri.o_address.eq(self.target.storage[:8]), self.o_status.status.eq(self.cri.o_status), + self.cri.i_timeout.eq(self.i_timeout.storage), self.i_data.status.eq(self.cri.i_data), self.i_timestamp.status.eq(self.cri.i_timestamp), self.i_status.status.eq(self.cri.i_status), self.o_data.dat_w.eq(0), - self.o_data.we.eq(self.timestamp.re), + self.o_data.we.eq(self.target.re), ] - self.sync += If(self.counter_update.re, self.counter.status.eq(self.cri.counter)) + self.sync += If(self.counter_update.re, self.counter.status.eq(tsc.full_ts_cri)) class CRIDecoder(Module): - def __init__(self, slaves=2, master=None): + def __init__(self, slaves=2, master=None, mode="async", enable_routing=False): if isinstance(slaves, int): slaves = [Interface() for _ in range(slaves)] if master is None: @@ -105,8 +137,37 @@ class CRIDecoder(Module): # # # - selected = Signal(8) - self.sync += selected.eq(self.master.chan_sel[16:]) + # routing + if enable_routing: + destination_unreachable = Interface() + self.comb += [ + destination_unreachable.o_status.eq(4), + destination_unreachable.i_status.eq(8) + ] + slaves = slaves[:] + slaves.append(destination_unreachable) + target_len = 2**(len(slaves) - 1).bit_length() + slaves += 
[destination_unreachable]*(target_len - len(slaves)) + + slave_bits = bits_for(len(slaves)-1) + selected = Signal(slave_bits) + + if enable_routing: + self.specials.routing_table = Memory(slave_bits, 256) + + if mode == "async": + rtp_decoder = self.routing_table.get_port() + elif mode == "sync": + rtp_decoder = self.routing_table.get_port(clock_domain="rtio") + else: + raise ValueError + self.specials += rtp_decoder + self.comb += [ + rtp_decoder.adr.eq(self.master.chan_sel[16:]), + selected.eq(rtp_decoder.dat_r) + ] + else: + self.sync += selected.eq(self.master.chan_sel[16:]) # master -> slave for n, slave in enumerate(slaves): @@ -126,7 +187,7 @@ class CRIDecoder(Module): class CRISwitch(Module, AutoCSR): - def __init__(self, masters=2, slave=None): + def __init__(self, masters=2, slave=None, mode="async"): if isinstance(masters, int): masters = [Interface() for _ in range(masters)] if slave is None: @@ -138,6 +199,15 @@ class CRISwitch(Module, AutoCSR): # # # + if mode == "async": + selected = self.selected.storage + elif mode == "sync": + self.selected.storage.attr.add("no_retiming") + selected = Signal.like(self.selected.storage) + self.specials += MultiReg(self.selected.storage, selected, "rtio") + else: + raise ValueError + if len(masters) == 1: self.comb += masters[0].connect(slave) else: @@ -145,7 +215,7 @@ class CRISwitch(Module, AutoCSR): for name, size, direction in layout: if direction == DIR_M_TO_S: choices = Array(getattr(m, name) for m in masters) - self.comb += getattr(slave, name).eq(choices[self.selected.storage]) + self.comb += getattr(slave, name).eq(choices[selected]) # connect slave->master signals for name, size, direction in layout: @@ -155,11 +225,31 @@ class CRISwitch(Module, AutoCSR): dest = getattr(m, name) self.comb += dest.eq(source) + class CRIInterconnectShared(Module): - def __init__(self, masters=2, slaves=2): + def __init__(self, masters=2, slaves=2, mode="async", enable_routing=False): shared = Interface() - self.submodules.switch = CRISwitch(masters, shared) - self.submodules.decoder = CRIDecoder(slaves, shared) + self.submodules.switch = CRISwitch(masters, shared, mode) + self.submodules.decoder = CRIDecoder(slaves, shared, mode, enable_routing) def get_csrs(self): return self.switch.get_csrs() + + +class RoutingTableAccess(Module, AutoCSR): + def __init__(self, interconnect): + if isinstance(interconnect, CRIInterconnectShared): + interconnect = interconnect.decoder + + rtp_csr = interconnect.routing_table.get_port(write_capable=True) + self.specials += rtp_csr + + self.destination = CSRStorage(8) + self.hop = CSR(len(rtp_csr.dat_w)) + + self.comb += [ + rtp_csr.adr.eq(self.destination.storage), + rtp_csr.dat_w.eq(self.hop.r), + rtp_csr.we.eq(self.hop.re), + self.hop.w.eq(rtp_csr.dat_r) + ] diff --git a/artiq/gateware/rtio/dma.py b/artiq/gateware/rtio/dma.py index ecfb3b5f5..20d558672 100644 --- a/artiq/gateware/rtio/dma.py +++ b/artiq/gateware/rtio/dma.py @@ -12,9 +12,7 @@ def _reverse_bytes(s, g): class WishboneReader(Module): - def __init__(self, bus=None): - if bus is None: - bus = wishbone.Interface + def __init__(self, bus): self.bus = bus aw = len(bus.adr) @@ -148,7 +146,7 @@ record_layout = [ ("length", 8), # of whole record (header+data) ("channel", 24), ("timestamp", 64), - ("address", 16), + ("address", 8), ("data", 512) # variable length ] @@ -242,9 +240,7 @@ class TimeOffset(Module, AutoCSR): class CRIMaster(Module, AutoCSR): def __init__(self): - self.error_status = CSRStatus(3) # same encoding as RTIO status - 
self.error_underflow_reset = CSR() - self.error_sequence_error_reset = CSR() + self.error = CSR(2) self.error_channel = CSRStatus(24) self.error_timestamp = CSRStatus(64) @@ -256,23 +252,27 @@ class CRIMaster(Module, AutoCSR): # # # - error_set = Signal(2) - for i, rcsr in enumerate([self.error_underflow_reset, self.error_sequence_error_reset]): - # bit 0 is RTIO wait and always 0 here - bit = i + 1 - self.sync += [ - If(error_set[i], - self.error_status.status[bit].eq(1), - self.error_channel.status.eq(self.sink.channel), - self.error_timestamp.status.eq(self.sink.timestamp), - self.error_address.status.eq(self.sink.address) - ), - If(rcsr.re, self.error_status.status[bit].eq(0)) - ] + underflow_trigger = Signal() + link_error_trigger = Signal() + self.sync += [ + If(underflow_trigger, + self.error.w.eq(1), + self.error_channel.status.eq(self.sink.channel), + self.error_timestamp.status.eq(self.sink.timestamp), + self.error_address.status.eq(self.sink.address) + ), + If(link_error_trigger, + self.error.w.eq(2), + self.error_channel.status.eq(self.sink.channel), + self.error_timestamp.status.eq(self.sink.timestamp), + self.error_address.status.eq(self.sink.address) + ), + If(self.error.re, self.error.w.eq(0)) + ] self.comb += [ self.cri.chan_sel.eq(self.sink.channel), - self.cri.timestamp.eq(self.sink.timestamp), + self.cri.o_timestamp.eq(self.sink.timestamp), self.cri.o_address.eq(self.sink.address), self.cri.o_data.eq(self.sink.data) ] @@ -281,7 +281,7 @@ class CRIMaster(Module, AutoCSR): self.submodules += fsm fsm.act("IDLE", - If(self.error_status.status == 0, + If(self.error.w == 0, If(self.sink.stb, If(self.sink.eop, # last packet contains dummy data, discard it @@ -307,15 +307,20 @@ class CRIMaster(Module, AutoCSR): NextState("IDLE") ), If(self.cri.o_status[1], NextState("UNDERFLOW")), - If(self.cri.o_status[2], NextState("SEQUENCE_ERROR")) + If(self.cri.o_status[2], NextState("LINK_ERROR")) + ) + fsm.act("UNDERFLOW", + self.busy.eq(1), + underflow_trigger.eq(1), + self.sink.ack.eq(1), + NextState("IDLE") + ) + fsm.act("LINK_ERROR", + self.busy.eq(1), + link_error_trigger.eq(1), + self.sink.ack.eq(1), + NextState("IDLE") ) - for n, name in enumerate(["UNDERFLOW", "SEQUENCE_ERROR"]): - fsm.act(name, - self.busy.eq(1), - error_set.eq(1 << n), - self.sink.ack.eq(1), - NextState("IDLE") - ) class DMA(Module): diff --git a/artiq/gateware/rtio/input_collector.py b/artiq/gateware/rtio/input_collector.py new file mode 100644 index 000000000..a68f5a7ff --- /dev/null +++ b/artiq/gateware/rtio/input_collector.py @@ -0,0 +1,141 @@ +from migen import * +from migen.genlib.record import Record +from migen.genlib.fifo import * +from migen.genlib.cdc import BlindTransfer + +from artiq.gateware.rtio import cri +from artiq.gateware.rtio import rtlink + + +__all__ = ["InputCollector"] + + +def get_channel_layout(coarse_ts_width, interface): + data_width = rtlink.get_data_width(interface) + fine_ts_width = rtlink.get_fine_ts_width(interface) + + layout = [] + if data_width: + layout.append(("data", data_width)) + if interface.timestamped: + layout.append(("timestamp", coarse_ts_width + fine_ts_width)) + + return layout + + +class InputCollector(Module): + def __init__(self, tsc, channels, mode, quash_channels=[], interface=None): + if interface is None: + interface = cri.Interface() + self.cri = interface + + # # # + + if mode == "sync": + fifo_factory = SyncFIFOBuffered + sync_io = self.sync + sync_cri = self.sync + elif mode == "async": + fifo_factory = lambda *args: ClockDomainsRenamer({"write": 
"rio", "read": "rsys"})(AsyncFIFO(*args)) + sync_io = self.sync.rio + sync_cri = self.sync.rsys + else: + raise ValueError + + i_statuses, i_datas, i_timestamps = [], [], [] + i_ack = Signal() + sel = self.cri.chan_sel[:16] + for n, channel in enumerate(channels): + iif = channel.interface.i + if iif is None or n in quash_channels: + i_datas.append(0) + i_timestamps.append(0) + i_statuses.append(0) + continue + + # FIFO + layout = get_channel_layout(len(tsc.coarse_ts), iif) + fifo = fifo_factory(layout_len(layout), channel.ififo_depth) + self.submodules += fifo + fifo_in = Record(layout) + fifo_out = Record(layout) + self.comb += [ + fifo.din.eq(fifo_in.raw_bits()), + fifo_out.raw_bits().eq(fifo.dout) + ] + + # FIFO write + if iif.delay: + counter_rtio = Signal.like(tsc.coarse_ts, reset_less=True) + sync_io += counter_rtio.eq(tsc.coarse_ts - (iif.delay + 1)) + else: + counter_rtio = tsc.coarse_ts + if hasattr(fifo_in, "data"): + self.comb += fifo_in.data.eq(iif.data) + if hasattr(fifo_in, "timestamp"): + if hasattr(iif, "fine_ts"): + full_ts = Cat(iif.fine_ts, counter_rtio) + else: + full_ts = counter_rtio + self.comb += fifo_in.timestamp.eq(full_ts) + self.comb += fifo.we.eq(iif.stb) + + overflow_io = Signal() + self.comb += overflow_io.eq(fifo.we & ~fifo.writable) + if mode == "sync": + overflow_trigger = overflow_io + elif mode == "async": + overflow_transfer = BlindTransfer("rio", "rsys") + self.submodules += overflow_transfer + self.comb += overflow_transfer.i.eq(overflow_io) + overflow_trigger = overflow_transfer.o + else: + raise ValueError + + # FIFO read, CRI connection + if hasattr(fifo_out, "data"): + i_datas.append(fifo_out.data) + else: + i_datas.append(0) + if hasattr(fifo_out, "timestamp"): + ts_shift = 64 - len(fifo_out.timestamp) + i_timestamps.append(fifo_out.timestamp << ts_shift) + else: + i_timestamps.append(0) + + selected = Signal() + self.comb += selected.eq(sel == n) + + overflow = Signal() + sync_cri += [ + If(selected & i_ack, + overflow.eq(0)), + If(overflow_trigger, + overflow.eq(1)) + ] + self.comb += fifo.re.eq(selected & i_ack & ~overflow) + i_statuses.append(Cat(fifo.readable & ~overflow, overflow)) + + i_status_raw = Signal(2) + self.comb += i_status_raw.eq(Array(i_statuses)[sel]) + input_timeout = Signal.like(self.cri.i_timeout, reset_less=True) + input_pending = Signal() + self.cri.i_data.reset_less = True + self.cri.i_timestamp.reset_less = True + sync_cri += [ + i_ack.eq(0), + If(i_ack, + self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1], 0)), + self.cri.i_data.eq(Array(i_datas)[sel]), + self.cri.i_timestamp.eq(Array(i_timestamps)[sel]), + ), + If((tsc.full_ts_cri >= input_timeout) | (i_status_raw != 0), + If(input_pending, i_ack.eq(1)), + input_pending.eq(0) + ), + If(self.cri.cmd == cri.commands["read"], + input_timeout.eq(self.cri.i_timeout), + input_pending.eq(1), + self.cri.i_status.eq(0b100) + ) + ] diff --git a/artiq/gateware/rtio/phy/ad5360_monitor.py b/artiq/gateware/rtio/phy/ad53xx_monitor.py similarity index 65% rename from artiq/gateware/rtio/phy/ad5360_monitor.py rename to artiq/gateware/rtio/phy/ad53xx_monitor.py index fad34ecc6..267649734 100644 --- a/artiq/gateware/rtio/phy/ad5360_monitor.py +++ b/artiq/gateware/rtio/phy/ad53xx_monitor.py @@ -1,10 +1,10 @@ from migen import * -from artiq.coredevice.spi import SPI_XFER_ADDR, SPI_DATA_ADDR -from artiq.coredevice.ad5360 import _AD5360_CMD_DATA, _AD5360_WRITE_CHANNEL +from artiq.coredevice.spi2 import SPI_CONFIG_ADDR, SPI_DATA_ADDR +from artiq.coredevice.ad53xx import 
AD53XX_CMD_DATA, ad53xx_cmd_write_ch -class AD5360Monitor(Module): +class AD53XXMonitor(Module): def __init__(self, spi_rtlink, ldac_rtlink=None, cs_no=0, cs_onehot=False, nchannels=32): self.probes = [Signal(16) for i in range(nchannels)] @@ -22,25 +22,27 @@ class AD5360Monitor(Module): If(ldac_oif.stb & ttl_level_adr & ~ldac_oif.data[0], [probe.eq(write_target) for probe, write_target in zip(self.probes, write_targets)] ) - + spi_oif = spi_rtlink.o selected = Signal() if cs_onehot: self.sync.rio_phy += [ - If(spi_oif.stb & (spi_oif.address == SPI_XFER_ADDR), - selected.eq(spi_oif.data[cs_no]) + If(spi_oif.stb & (spi_oif.address == SPI_CONFIG_ADDR), + selected.eq(spi_oif.data[24 + cs_no]) ) ] else: self.sync.rio_phy += [ - If(spi_oif.stb & (spi_oif.address == SPI_XFER_ADDR), - selected.eq(spi_oif.data[:16] == cs_no) + If(spi_oif.stb & (spi_oif.address == SPI_CONFIG_ADDR), + selected.eq(spi_oif.data[24:] == cs_no) ) ] - writes = {(_AD5360_CMD_DATA | _AD5360_WRITE_CHANNEL(i)) >> 16: t.eq(spi_oif.data[8:24]) - for i, t in enumerate(write_targets)} + writes = { + ad53xx_cmd_write_ch(channel=i, value=0, op=AD53XX_CMD_DATA) >> 16: + t.eq(spi_oif.data[8:24]) + for i, t in enumerate(write_targets)} self.sync.rio_phy += [ If(spi_oif.stb & (spi_oif.address == SPI_DATA_ADDR), Case(spi_oif.data[24:], writes) diff --git a/artiq/gateware/rtio/phy/dds.py b/artiq/gateware/rtio/phy/dds.py index d0c58ccfa..c542937fa 100644 --- a/artiq/gateware/rtio/phy/dds.py +++ b/artiq/gateware/rtio/phy/dds.py @@ -4,11 +4,11 @@ from artiq.gateware import ad9_dds from artiq.gateware.rtio.phy.wishbone import RT2WB -class _AD9_DDS(Module): - def __init__(self, ftw_base, pads, nchannels, onehot=False, **kwargs): +class AD9914(Module): + def __init__(self, pads, nchannels, onehot=False, **kwargs): self.submodules._ll = ClockDomainsRenamer("rio_phy")( ad9_dds.AD9_DDS(pads, **kwargs)) - self.submodules._rt2wb = RT2WB(len(pads.a)+1, self._ll.bus) + self.submodules._rt2wb = RT2WB(len(pads.a)+1, self._ll.bus, write_only=True) self.rtlink = self._rt2wb.rtlink self.probes = [Signal(32) for i in range(nchannels)] @@ -38,13 +38,13 @@ class _AD9_DDS(Module): if len(pads.d) == 8: self.sync.rio_phy += \ If(selected(c), [ - If(current_address == ftw_base+i, + If(current_address == 0x11+i, ftw[i*8:(i+1)*8].eq(current_data)) for i in range(4)]) elif len(pads.d) == 16: self.sync.rio_phy += \ If(selected(c), [ - If(current_address == ftw_base+2*i, + If(current_address == 0x11+2*i, ftw[i*16:(i+1)*16].eq(current_data)) for i in range(2)]) else: @@ -54,8 +54,3 @@ class _AD9_DDS(Module): self.sync.rio_phy += If(current_address == 2**len(pads.a), [ If(selected(c), probe.eq(ftw)) for c, (probe, ftw) in enumerate(zip(self.probes, ftws))]) - - -class AD9914(_AD9_DDS): - def __init__(self, *args, **kwargs): - _AD9_DDS.__init__(self, 0x2d, *args, **kwargs) diff --git a/artiq/gateware/rtio/phy/edge_counter.py b/artiq/gateware/rtio/phy/edge_counter.py new file mode 100644 index 000000000..6c295abe8 --- /dev/null +++ b/artiq/gateware/rtio/phy/edge_counter.py @@ -0,0 +1,79 @@ +from migen import * +from artiq.gateware.rtio import rtlink + + +class SimpleEdgeCounter(Module): + """Counts rising/falling edges of an input signal. + + Control (sensitivity/zeroing) is done via a single RTIO output channel, + which is is also used to request an input event to be emitted with the + current counter value. + + :param input_state: The (scalar) input signal to detect edges of. This + should already be in the rio_phy clock domain. 
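# Illustrative kernel-side sketch, not part of this patch: exercising this PHY
# through the raw RTIO syscalls. The channel number is an assumption, and the
# control bits (0: count rising, 1: count falling, 2: emit count event,
# 3: zero the counter) follow the decoding further below; the coredevice
# edge_counter driver normally wraps this.
from artiq.experiment import EnvExperiment, kernel, delay_mu, TInt32
from artiq.coredevice.rtio import rtio_output, rtio_input_data


class RawEdgeCounterExample(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @kernel
    def count_rising_edges(self) -> TInt32:
        counter_ch = 0x20            # assumed RTIO channel of this PHY
        self.core.break_realtime()
        # zero the counter and start counting rising edges
        rtio_output(counter_ch << 8, 0b1001)
        delay_mu(100000)             # gate time in machine units
        # stop counting and request the accumulated count as an input event
        rtio_output(counter_ch << 8, 0b0100)
        return rtio_input_data(counter_ch)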
+ :param counter_width: The width of the counter register, in bits. Defaults + to 31 to match integers being signed in ARTIQ Python. + """ + + def __init__(self, input_state, counter_width=31): + assert counter_width >= 2 + + # RTIO interface: + # - output 0: 4 bits, + # - input 0: 32 bits, accumulated edge count + self.rtlink = rtlink.Interface( + rtlink.OInterface(4, enable_replace=False), + rtlink.IInterface(counter_width)) + + # # # + + current_count = Signal(counter_width) + + count_rising = Signal() + count_falling = Signal() + send_event_stb = Signal() + zero_counter_stb = Signal() + + # Read configuration from RTIO output events. + self.sync.rio += [ + If(self.rtlink.o.stb, + count_rising.eq(self.rtlink.o.data[0]), + count_falling.eq(self.rtlink.o.data[1]), + send_event_stb.eq(self.rtlink.o.data[2]), + zero_counter_stb.eq(self.rtlink.o.data[3]) + ).Else( + send_event_stb.eq(0), + zero_counter_stb.eq(0) + ) + ] + + # Generate RTIO input event with current count if requested. + event_data = Signal.like(current_count) + self.comb += [ + self.rtlink.i.stb.eq(send_event_stb), + self.rtlink.i.data.eq(event_data) + ] + + # Keep previous input state for edge detection. + input_state_d = Signal() + self.sync.rio_phy += input_state_d.eq(input_state) + + # Count input edges, saturating at the maximum. + new_count = Signal.like(current_count) + self.comb += new_count.eq( + current_count + Mux(current_count == 2**counter_width - 1, + 0, + (count_rising & (input_state & ~input_state_d)) | + (count_falling & (~input_state & input_state_d)) + ) + ) + + self.sync.rio += [ + event_data.eq(new_count), + current_count.eq(Mux(zero_counter_stb, 0, new_count)) + ] + + +if __name__ == '__main__': + input = Signal(name="input") + print(fhdl.verilog.convert(SimpleEdgeCounter(input))) diff --git a/artiq/gateware/rtio/phy/fastino.py b/artiq/gateware/rtio/phy/fastino.py new file mode 100644 index 000000000..ba8a7b5c9 --- /dev/null +++ b/artiq/gateware/rtio/phy/fastino.py @@ -0,0 +1,105 @@ +from migen import * +from migen.genlib.cdc import MultiReg +from migen.genlib.io import DifferentialOutput, DifferentialInput, DDROutput +from misoc.cores.liteeth_mini.mac.crc import LiteEthMACCRCEngine + +from artiq.gateware.rtio import rtlink +from .fastlink import SerDes, SerInterface + + +class Fastino(Module): + def __init__(self, pins, pins_n, log2_width=0): + width = 1 << log2_width + self.rtlink = rtlink.Interface( + rtlink.OInterface(data_width=max(16*width, 32), + address_width=8, + enable_replace=False), + rtlink.IInterface(data_width=14)) + + self.submodules.serializer = SerDes( + n_data=8, t_clk=7, d_clk=0b1100011, + n_frame=14, n_crc=12, poly=0x80f) + self.submodules.intf = SerInterface(pins, pins_n) + self.comb += [ + Cat(self.intf.data[:-1]).eq(Cat(self.serializer.data[:-1])), + self.serializer.data[-1].eq(self.intf.data[-1]), + ] + + # dac data words + dacs = [Signal(16) for i in range(32)] + header = Record([ + ("cfg", 4), + ("leds", 8), + ("reserved", 8), + ("addr", 4), + ("enable", len(dacs)), + ]) + body = Cat(header.raw_bits(), dacs) + assert len(body) == len(self.serializer.payload) + self.comb += self.serializer.payload.eq(body) + + # # # + + # Support staging DAC data (in `dacs`) by writing to the + # DAC RTIO addresses, if a channel is not "held" by its + # bit in `hold` the next frame will contain the update. + # For the DACs held, the update is triggered by setting the + # corresponding bit in `update`. Update is self-clearing. + # This enables atomic DAC updates synchronized to a frame edge. 
+ # + # The `log2_width=0` RTIO layout uses one DAC channel per RTIO address + # and a dense RTIO address space. The RTIO words are narrow. + # (32 bit compared to 512) and few-channel updates are efficient. + # There is the least amount of DAC state tracking in kernels, + # at the cost of more DMA and RTIO data ((n*(32+32+64) vs + # 32+32*16+64)) + # + # Other `log2_width` (up to `log2_width=5) settings pack multiple + # (in powers of two) DAC channels into one group and + # into one RTIO write. + # The RTIO data width increases accordingly. The `log2_width` + # LSBs of the RTIO address for a DAC channel write must be zero and the + # address space is sparse. + + hold = Signal.like(header.enable) + + read_regs = Array([Signal.like(self.serializer.readback) + for _ in range(1 << len(header.addr))]) + + cases = { + # update + 0x20: header.enable.eq(header.enable | self.rtlink.o.data), + # hold + 0x21: hold.eq(self.rtlink.o.data), + # cfg + 0x22: header.cfg.eq(self.rtlink.o.data), + # leds + 0x23: header.leds.eq(self.rtlink.o.data), + # reserved + 0x24: header.reserved.eq(self.rtlink.o.data), + } + for i in range(0, len(dacs), width): + cases[i] = [ + Cat(dacs[i:i + width]).eq(self.rtlink.o.data), + [If(~hold[i + j], + header.enable[i + j].eq(1), + ) for j in range(width)] + ] + + self.sync.rio_phy += [ + If(self.serializer.stb, + header.enable.eq(0), + read_regs[header.addr].eq(self.serializer.readback), + header.addr.eq(header.addr + 1), + ), + If(self.rtlink.o.stb & ~self.rtlink.o.address[-1], + Case(self.rtlink.o.address[:-1], cases), + ), + ] + + self.sync.rtio += [ + self.rtlink.i.stb.eq(self.rtlink.o.stb & + self.rtlink.o.address[-1]), + self.rtlink.i.data.eq( + read_regs[self.rtlink.o.address[:-1]]), + ] diff --git a/artiq/gateware/rtio/phy/fastlink.py b/artiq/gateware/rtio/phy/fastlink.py new file mode 100644 index 000000000..1a9317f77 --- /dev/null +++ b/artiq/gateware/rtio/phy/fastlink.py @@ -0,0 +1,133 @@ +from migen import * +from migen.genlib.io import (DifferentialOutput, DifferentialInput, + DDROutput, DDRInput) +from misoc.cores.liteeth_mini.mac.crc import LiteEthMACCRCEngine + +from artiq.gateware.rtio import rtlink + + +class SerDes(Module): + # crc-12 telco: 0x80f + def __init__(self, n_data=8, t_clk=7, d_clk=0b1100011, + n_frame=14, n_crc=12, poly=0x80f): + """DDR fast link. + + * One word clock lane with `t_clk` period. + * Multiple data lanes at DDR speed. + * One return data lane at slower speed. + * n_frame//2 - 1 marker bits are used to provide framing. 
+ + * `n_data` lanes + * `t_clk` bits per clk cycle with pattern `d_clk` + * `n_frame` words per frame + * `n_crc` CRC bits per frame for divisor poly `poly` + """ + # pins + self.data = [Signal(2, reset_less=True) for _ in range(n_data)] + n_mosi = n_data - 2 # mosi lanes + n_word = n_mosi*t_clk # bits per word + t_frame = t_clk*n_frame # frame duration + n_marker = n_frame//2 + 1 + n_body = n_word*n_frame - n_marker - n_crc + t_miso = 0 # miso sampling latency TODO + assert n_crc % n_mosi == 0 + + # frame data + self.payload = Signal(n_body) + # readback data + self.readback = Signal(n_frame, reset_less=True) + # data load synchronization event + self.stb = Signal() + + # # # + + self.submodules.crca = LiteEthMACCRCEngine( + data_width=n_mosi, width=n_crc, polynom=poly) + self.submodules.crcb = LiteEthMACCRCEngine( + data_width=n_mosi, width=n_crc, polynom=poly) + + words_ = [] + j = 0 + # build from LSB to MSB because MSB first + for i in range(n_frame): # iterate over words + if i == 0: # data and checksum + words_.append(C(0, n_crc)) + k = n_word - n_crc + elif i == 1: # marker + words_.append(C(1)) + k = n_word - 1 + elif i < n_frame//2 + 2: # marker + words_.append(C(0)) + k = n_word - 1 + else: # full word + k = n_word + # append corresponding frame body bits + words_.append(self.payload[j:j + k]) + j += k + words_ = Cat(words_) + assert len(words_) == n_frame*n_word + words = Signal(len(words_)) + self.comb += words.eq(words_) + + clk = Signal(t_clk, reset=d_clk) + i = Signal(max=t_frame//2) + # big shift register for mosi and + sr = [Signal(t_frame, reset_less=True) for i in range(n_mosi)] + assert len(Cat(sr)) == len(words) + crc_insert = Cat(([d[0] for d in self.data[1:-1]] + + [d[1] for d in self.data[1:-1]])[:n_crc]) + miso_sr = Signal(t_frame, reset_less=True) + miso_sr_next = Signal.like(miso_sr) + self.comb += [ + self.stb.eq(i == t_frame//2 - 1), + # LiteETHMACCRCEngine takes data LSB first + self.crca.data.eq(Cat([sri[-1] for sri in sr[::-1]])), + self.crcb.data.eq(Cat([sri[-2] for sri in sr[::-1]])), + self.crcb.last.eq(self.crca.next), + miso_sr_next.eq(Cat(self.data[-1], miso_sr)), + # unload miso + # TODO: align to marker + self.readback.eq(Cat([miso_sr_next[t_miso + i*t_clk] + for i in range(n_frame)])), + ] + self.sync.rio_phy += [ + # shift everything by two bits + [di.eq(sri[-2:]) for di, sri in zip(self.data, [clk] + sr)], + clk.eq(Cat(clk[-2:], clk)), + [sri.eq(Cat(C(0, 2), sri)) for sri in sr], + miso_sr.eq(miso_sr_next), + self.crca.last.eq(self.crcb.next), + i.eq(i + 1), + If(self.stb, + i.eq(0), + clk.eq(clk.reset), + self.crca.last.eq(0), + # transpose, load + [sri.eq(Cat(words[i::n_mosi])) for i, sri in enumerate(sr)], + # inject crc for the last cycle + crc_insert.eq(self.crca.next if n_crc // n_mosi <= 1 + else self.crca.last), + ), + ] + + +class SerInterface(Module): + def __init__(self, pins, pins_n): + self.data = [Signal(2) for _ in range(2 + len(pins.mosi))] + + for d, pp, pn in zip(self.data, + [pins.clk] + list(pins.mosi), + [pins_n.clk] + list(pins_n.mosi)): + ddr = Signal() + self.specials += [ + # d1 closer to q + DDROutput(d[1], d[0], ddr, ClockSignal("rio_phy")), + DifferentialOutput(ddr, pp, pn), + ] + ddr = Signal() + self.specials += [ + DifferentialInput(pins.miso, pins_n.miso, ddr), + # q1 closer to d + DDRInput(ddr, self.data[-1][0], self.data[-1][1], + ClockSignal("rio_phy")), + ] diff --git a/artiq/gateware/rtio/phy/grabber.py b/artiq/gateware/rtio/phy/grabber.py new file mode 100644 index 000000000..520450c76 --- /dev/null +++ 
b/artiq/gateware/rtio/phy/grabber.py @@ -0,0 +1,99 @@ +from migen import * +from migen.genlib.cdc import MultiReg, PulseSynchronizer +from migen.genlib.fsm import FSM + +from artiq.gateware.rtio import rtlink +from artiq.gateware.grabber import deserializer_7series +from artiq.gateware.grabber.core import * + + +__all__ = ["Grabber"] + + +class Synchronizer(Module): + def __init__(self, roi_engines): + counts_in = [roi_engine.out.count for roi_engine in roi_engines] + + # This assumes all ROI engines update at the same time. + self.update = Signal() + # stays valid until the next frame after self.update is pulsed. + self.counts = [Signal.like(count) for count in counts_in] + + # # # + + for count in counts_in: + count.attr.add("no_retiming") + self.specials += [MultiReg(i, o, "rtio") for i, o in zip(counts_in, self.counts)] + + ps = PulseSynchronizer("cl", "rtio") + self.submodules += ps + self.comb += ps.i.eq(roi_engines[0].out.update) + self.sync.rtio += self.update.eq(ps.o) + + +class Serializer(Module): + def __init__(self, update, counts, rtlink_i): + self.gate = Signal(len(counts)) + + # # # + + gate = Signal(len(counts)) + sentinel = 2**(len(rtlink_i.data) - 1) + + fsm = ClockDomainsRenamer("rio")(FSM()) + self.submodules += fsm + + fsm.act("INIT", + rtlink_i.data.eq(sentinel), + If(update & (self.gate != 0), + NextValue(gate, self.gate), + rtlink_i.stb.eq(1), + NextState(0) + ) + ) + for n, count in enumerate(counts): + last = n == len(counts)-1 + fsm.act(n, + rtlink_i.data.eq(count), + rtlink_i.stb.eq(gate[n]), + NextState("INIT" if last else n+1) + ) + + +class Grabber(Module): + def __init__(self, pins, roi_engine_count=16, res_width=12, count_shift=0): + self.config = rtlink.Interface( + rtlink.OInterface(res_width, + bits_for(4*roi_engine_count-1))) + self.gate_data = rtlink.Interface( + rtlink.OInterface(roi_engine_count), + rtlink.IInterface(1+ROI.count_len(res_width, count_shift), + timestamped=False)) + + self.submodules.deserializer = deserializer_7series.Deserializer(pins) + self.submodules.frequency_counter = FrequencyCounter() + self.submodules.parser = Parser(res_width) + self.comb += self.parser.cl.eq(self.deserializer.q) + self.roi_engines = [ROI(self.parser.pix, count_shift) for _ in range(roi_engine_count)] + self.submodules += self.roi_engines + self.submodules.synchronizer = Synchronizer(self.roi_engines) + self.submodules.serializer = Serializer(self.synchronizer.update, self.synchronizer.counts, + self.gate_data.i) + + for n, roi_engine in enumerate(self.roi_engines): + for offset, target in enumerate([roi_engine.cfg.x0, roi_engine.cfg.y0, + roi_engine.cfg.x1, roi_engine.cfg.y1]): + roi_boundary = Signal.like(target) + roi_boundary.attr.add("no_retiming") + self.sync.rtio += If(self.config.o.stb & (self.config.o.address == 4*n+offset), + roi_boundary.eq(self.config.o.data)) + self.specials += MultiReg(roi_boundary, target, "cl") + + self.sync.rio += If(self.gate_data.o.stb, + self.serializer.gate.eq(self.gate_data.o.data)) + + def get_csrs(self): + return ( + self.deserializer.get_csrs() + + self.frequency_counter.get_csrs() + + self.parser.get_csrs()) diff --git a/artiq/gateware/rtio/phy/phaser.py b/artiq/gateware/rtio/phy/phaser.py new file mode 100644 index 000000000..bb299ab0c --- /dev/null +++ b/artiq/gateware/rtio/phy/phaser.py @@ -0,0 +1,89 @@ +from migen import * +from misoc.cores.duc import MultiDDS + +from artiq.gateware.rtio import rtlink +from .fastlink import SerDes, SerInterface + + +class Phy(Module): + def __init__(self, regs): + self.rtlink = 
rtlink.Interface( + rtlink.OInterface(data_width=32, address_width=4, + enable_replace=True)) + self.sync.rtio += [ + If(self.rtlink.o.stb, + Array(regs)[self.rtlink.o.address].eq(self.rtlink.o.data) + ) + ] + + +class DDSChannel(Module): + def __init__(self, share_lut=None): + to_rio_phy = ClockDomainsRenamer("rio_phy") + self.submodules.dds = to_rio_phy(MultiDDS( + n=5, fwidth=32, xwidth=16, z=19, zl=10, share_lut=share_lut)) + self.submodules.frequency = Phy([i.f for i in self.dds.i]) + self.submodules.phase_amplitude = Phy( + [Cat(i.a, i.clr, i.p) for i in self.dds.i]) + + +class Phaser(Module): + def __init__(self, pins, pins_n): + self.rtlink = rtlink.Interface( + rtlink.OInterface(data_width=8, address_width=8, + enable_replace=False), + rtlink.IInterface(data_width=10)) + + # share a CosSinGen LUT between the two channels + self.submodules.ch0 = DDSChannel() + self.submodules.ch1 = DDSChannel(share_lut=self.ch0.dds.cs.lut) + n_channels = 2 + n_samples = 8 + n_bits = 14 + body = Signal(n_samples*n_channels*2*n_bits, reset_less=True) + self.sync.rio_phy += [ + If(self.ch0.dds.valid, # & self.ch1.dds.valid, + # recent:ch0:i as low order in body + Cat(body).eq(Cat(self.ch0.dds.o.i[2:], self.ch0.dds.o.q[2:], + self.ch1.dds.o.i[2:], self.ch1.dds.o.q[2:], + body)), + ), + ] + + self.submodules.serializer = SerDes( + n_data=8, t_clk=8, d_clk=0b00001111, + n_frame=10, n_crc=6, poly=0x2f) + self.submodules.intf = SerInterface(pins, pins_n) + self.comb += [ + Cat(self.intf.data[:-1]).eq(Cat(self.serializer.data[:-1])), + self.serializer.data[-1].eq(self.intf.data[-1]), + ] + + header = Record([ + ("we", 1), + ("addr", 7), + ("data", 8), + ("type", 4) + ]) + assert len(Cat(header.raw_bits(), body)) == \ + len(self.serializer.payload) + self.comb += self.serializer.payload.eq(Cat(header.raw_bits(), body)) + + re_dly = Signal(3) # stage, send, respond + self.sync.rtio += [ + header.type.eq(1), # body type is baseband data + If(self.serializer.stb, + self.ch0.dds.stb.eq(1), # synchronize + self.ch1.dds.stb.eq(1), # synchronize + header.we.eq(0), + re_dly.eq(re_dly[1:]), + ), + If(self.rtlink.o.stb, + re_dly[-1].eq(~self.rtlink.o.address[-1]), + header.we.eq(self.rtlink.o.address[-1]), + header.addr.eq(self.rtlink.o.address), + header.data.eq(self.rtlink.o.data), + ), + self.rtlink.i.stb.eq(re_dly[0] & self.serializer.stb), + self.rtlink.i.data.eq(self.serializer.readback), + ] diff --git a/artiq/gateware/rtio/phy/servo.py b/artiq/gateware/rtio/phy/servo.py new file mode 100644 index 000000000..9fa634521 --- /dev/null +++ b/artiq/gateware/rtio/phy/servo.py @@ -0,0 +1,178 @@ +from migen import * + +from artiq.gateware.rtio import rtlink + + +class RTServoCtrl(Module): + """Per channel RTIO control interface""" + def __init__(self, ctrl): + self.rtlink = rtlink.Interface( + rtlink.OInterface(len(ctrl.profile) + 2)) + + # # # + + self.comb += [ + ctrl.stb.eq(self.rtlink.o.stb), + self.rtlink.o.busy.eq(0) + ] + self.sync.rio_phy += [ + If(self.rtlink.o.stb, + Cat(ctrl.en_out, ctrl.en_iir, ctrl.profile).eq( + self.rtlink.o.data) + ) + ] + + +def _eq_sign_extend(t, s): + """Assign target signal `t` from source `s`, sign-extending `s` to the + full width. + """ + return t.eq(Cat(s, Replicate(s[-1], len(t) - len(s)))) + + +class RTServoMem(Module): + """All-channel all-profile coefficient and state RTIO control + interface. + + Servo internal addresses are internal_address_width wide, which is + typically longer than the 8-bit RIO address space. 
We pack the overflow + onto the RTIO data word after the data. + + Servo address space (from LSB): + - IIR coefficient/state memory address, (w.profile + w.channel + 2) bits. + If the state memory is selected, the lower bits are used directly as + the memory address. If the coefficient memory is selected, the LSB + (high_coeff) selects between the upper and lower halves of the memory + location, which is two coefficients wide, with the remaining bits used + as the memory address. + - config_sel (1 bit) + - state_sel (1 bit) + - we (1 bit) + + destination | config_sel | state_sel + ----------------|------------|---------- + IIR coeff mem | 0 | 0 + IIR coeff mem | 1 | 0 + IIR state mem | 0 | 1 + config (write) | 1 | 1 + status (read) | 1 | 1 + + Values returned to the user on the Python side of the RTIO interface are + 32 bit, so we sign-extend all values from w.coeff to that width. This works + (instead of having to decide whether to sign- or zero-extend per address), as + all unsigned values are less wide than w.coeff. + """ + def __init__(self, w, servo): + m_coeff = servo.iir.m_coeff.get_port(write_capable=True, + mode=READ_FIRST, + we_granularity=w.coeff, clock_domain="rio") + assert len(m_coeff.we) == 2 + m_state = servo.iir.m_state.get_port(write_capable=True, + # mode=READ_FIRST, + clock_domain="rio") + self.specials += m_state, m_coeff + + # just expose the w.coeff (18) MSBs of state + assert w.state >= w.coeff + # ensure that we can split the coefficient storage correctly + assert len(m_coeff.dat_w) == 2*w.coeff + # ensure that the DDS word data fits into the coefficient mem + assert w.coeff >= w.word + # ensure all unsigned values will be zero-extended on sign extension + assert w.word < w.coeff + assert 8 + w.dly < w.coeff + + # coeff, profile, channel, 2 mems, rw + internal_address_width = 3 + w.profile + w.channel + 1 + 1 + rtlink_address_width = min(8, internal_address_width) + overflow_address_width = internal_address_width - rtlink_address_width + self.rtlink = rtlink.Interface( + rtlink.OInterface( + data_width=overflow_address_width + w.coeff, + address_width=rtlink_address_width, + enable_replace=False), + rtlink.IInterface( + data_width=32, + timestamped=False) + ) + + # # # + + config = Signal(w.coeff, reset=0) + status = Signal(w.coeff) + pad = Signal(6) + self.comb += [ + Cat(servo.start).eq(config), + status.eq(Cat(servo.start, servo.done, pad, + [_.clip for _ in servo.iir.ctrl])) + ] + + assert len(self.rtlink.o.address) + len(self.rtlink.o.data) - w.coeff == ( + 1 + # we + 1 + # state_sel + 1 + # high_coeff + len(m_coeff.adr)) + # ensure that we can fit config/status into the state address space + assert len(self.rtlink.o.address) + len(self.rtlink.o.data) - w.coeff >= ( + 1 + # we + 1 + # state_sel + 1 + # config_sel + len(m_state.adr)) + + internal_address = Signal(internal_address_width) + self.comb += internal_address.eq(Cat(self.rtlink.o.address, + self.rtlink.o.data[w.coeff:])) + + coeff_data = Signal(w.coeff) + self.comb += coeff_data.eq(self.rtlink.o.data[:w.coeff]) + + we = internal_address[-1] + state_sel = internal_address[-2] + config_sel = internal_address[-3] + high_coeff = internal_address[0] + self.comb += [ + self.rtlink.o.busy.eq(0), + m_coeff.adr.eq(internal_address[1:]), + m_coeff.dat_w.eq(Cat(coeff_data, coeff_data)), + m_coeff.we[0].eq(self.rtlink.o.stb & ~high_coeff & + we & ~state_sel), + m_coeff.we[1].eq(self.rtlink.o.stb & high_coeff & + we & ~state_sel), + m_state.adr.eq(internal_address), + m_state.dat_w[w.state - 
w.coeff:].eq(self.rtlink.o.data), + m_state.we.eq(self.rtlink.o.stb & we & state_sel & ~config_sel), + ] + read = Signal() + read_state = Signal() + read_high = Signal() + read_config = Signal() + self.sync.rio += [ + If(read, + read.eq(0) + ), + If(self.rtlink.o.stb, + read.eq(~we), + read_state.eq(state_sel), + read_high.eq(high_coeff), + read_config.eq(config_sel), + ) + ] + self.sync.rio_phy += [ + If(self.rtlink.o.stb & we & state_sel & config_sel, + config.eq(self.rtlink.o.data) + ), + If(read & read_config & read_state, + [_.clip.eq(0) for _ in servo.iir.ctrl] + ) + ] + self.comb += [ + self.rtlink.i.stb.eq(read), + _eq_sign_extend(self.rtlink.i.data, + Mux(read_state, + Mux(read_config, + status, + m_state.dat_r[w.state - w.coeff:]), + Mux(read_high, + m_coeff.dat_r[w.coeff:], + m_coeff.dat_r[:w.coeff]))) + ] diff --git a/artiq/gateware/rtio/phy/spi.py b/artiq/gateware/rtio/phy/spi.py deleted file mode 100644 index 25d0d7703..000000000 --- a/artiq/gateware/rtio/phy/spi.py +++ /dev/null @@ -1,13 +0,0 @@ -from migen import * - -from artiq.gateware.spi import SPIMaster as SPIMasterWB -from artiq.gateware.rtio.phy.wishbone import RT2WB - - -class SPIMaster(Module): - def __init__(self, pads, pads_n=None, **kwargs): - self.submodules._ll = ClockDomainsRenamer("rio_phy")( - SPIMasterWB(pads, pads_n, **kwargs)) - self.submodules._rt2wb = RT2WB(2, self._ll.bus) - self.rtlink = self._rt2wb.rtlink - self.probes = [] diff --git a/artiq/gateware/rtio/phy/spi2.py b/artiq/gateware/rtio/phy/spi2.py new file mode 100644 index 000000000..c304d5b93 --- /dev/null +++ b/artiq/gateware/rtio/phy/spi2.py @@ -0,0 +1,116 @@ +from migen import * + +from misoc.cores.spi2 import SPIMachine, SPIInterfaceXC7Diff, SPIInterface +from artiq.gateware.rtio import rtlink + + +class SPIMaster(Module): + """ + RTIO SPI Master version 2. + + Register address and bit map: + + data (address 0): + 32 write/read data + + config (address 1): + 1 offline: all pins high-z (reset=1) + 1 end: end transaction with next transfer (reset=1) + 1 input: submit read data on RTIO input when readable (reset=0) + 1 cs_polarity: active level of chip select (reset=0) + 1 clk_polarity: idle level of clk (reset=0) + 1 clk_phase: first edge after cs assertion to sample data on (reset=0) + (clk_polarity, clk_phase) == (CPOL, CPHA) in Freescale language. + (0, 0): idle low, output on falling, input on rising + (0, 1): idle low, output on rising, input on falling + (1, 0): idle high, output on rising, input on falling + (1, 1): idle high, output on falling, input on rising + There is never a clk edge during a cs edge. 
+ 1 lsb_first: LSB is the first bit on the wire (reset=0) + 1 half_duplex: 3-wire SPI, in/out on mosi (reset=0) + 5 length: 1-32 bits = length + 1 (reset=0) + 3 padding + 8 div: counter load value to divide this module's clock + to generate the SPI write clk (reset=0) + f_clk/f_spi == div + 2 + 8 cs: active high bit pattern of chip selects (reset=0) + """ + def __init__(self, pads, pads_n=None): + to_rio_phy = ClockDomainsRenamer("rio_phy") + if pads_n is None: + interface = SPIInterface(pads) + else: + interface = SPIInterfaceXC7Diff(pads, pads_n) + interface = to_rio_phy(interface) + spi = to_rio_phy(SPIMachine(data_width=32, div_width=8)) + self.submodules += interface, spi + + self.rtlink = rtlink.Interface( + rtlink.OInterface(len(spi.reg.pdo), address_width=1, + enable_replace=False), + rtlink.IInterface(len(spi.reg.pdi), timestamped=False) + ) + + ### + + config = Record([ + ("offline", 1), + ("end", 1), + ("input", 1), + ("cs_polarity", 1), + ("clk_polarity", 1), + ("clk_phase", 1), + ("lsb_first", 1), + ("half_duplex", 1), + ("length", 5), + ("padding", 3), + ("div", 8), + ("cs", 8), + ]) + assert len(config) == len(spi.reg.pdo) == len(spi.reg.pdi) == 32 + + config.offline.reset = 1 + config.end.reset = 1 + read = Signal() + + self.sync.rio_phy += [ + If(self.rtlink.i.stb, + read.eq(0) + ), + If(self.rtlink.o.stb & spi.writable, + If(self.rtlink.o.address, + config.raw_bits().eq(self.rtlink.o.data) + ).Else( + read.eq(config.input) + ) + ), + ] + + self.comb += [ + spi.length.eq(config.length), + spi.end.eq(config.end), + spi.cg.div.eq(config.div), + spi.clk_phase.eq(config.clk_phase), + spi.reg.lsb_first.eq(config.lsb_first), + + interface.half_duplex.eq(config.half_duplex), + interface.cs.eq(config.cs), + interface.cs_polarity.eq(Replicate( + config.cs_polarity, len(interface.cs_polarity))), + interface.clk_polarity.eq(config.clk_polarity), + interface.offline.eq(config.offline), + interface.cs_next.eq(spi.cs_next), + interface.clk_next.eq(spi.clk_next), + interface.ce.eq(spi.ce), + interface.sample.eq(spi.reg.sample), + spi.reg.sdi.eq(interface.sdi), + interface.sdo.eq(spi.reg.sdo), + + spi.load.eq(self.rtlink.o.stb & spi.writable & + ~self.rtlink.o.address), + spi.reg.pdo.eq(self.rtlink.o.data), + self.rtlink.o.busy.eq(~spi.writable), + self.rtlink.i.stb.eq(spi.readable & read), + self.rtlink.i.data.eq(spi.reg.pdi) + ] + self.probes = [] diff --git a/artiq/gateware/rtio/phy/ttl_serdes_7series.py b/artiq/gateware/rtio/phy/ttl_serdes_7series.py index 266037e1e..8189258d6 100644 --- a/artiq/gateware/rtio/phy/ttl_serdes_7series.py +++ b/artiq/gateware/rtio/phy/ttl_serdes_7series.py @@ -4,7 +4,7 @@ from artiq.gateware.rtio.phy import ttl_serdes_generic class _OSERDESE2_8X(Module): - def __init__(self, pad, pad_n=None): + def __init__(self, pad, pad_n=None, invert=False): self.o = Signal(8) self.t_in = Signal() self.t_out = Signal() @@ -14,21 +14,29 @@ class _OSERDESE2_8X(Module): o = self.o pad_o = Signal() self.specials += Instance("OSERDESE2", - p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="BUF", - p_DATA_WIDTH=8, p_TRISTATE_WIDTH=1, - o_OQ=pad_o, o_TQ=self.t_out, - i_CLK=ClockSignal("rtiox4"), - i_CLKDIV=ClockSignal("rio_phy"), - i_D1=o[0], i_D2=o[1], i_D3=o[2], i_D4=o[3], - i_D5=o[4], i_D6=o[5], i_D7=o[6], i_D8=o[7], - i_TCE=1, i_OCE=1, i_RST=0, - i_T1=self.t_in) + p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="BUF", + p_DATA_WIDTH=8, p_TRISTATE_WIDTH=1, + p_INIT_OQ=0b11111111 if invert else 0b00000000, + o_OQ=pad_o, o_TQ=self.t_out, + i_RST=ResetSignal("rio_phy"), + 
i_CLK=ClockSignal("rtiox4"), + i_CLKDIV=ClockSignal("rio_phy"), + i_D1=o[0] ^ invert, i_D2=o[1] ^ invert, i_D3=o[2] ^ invert, i_D4=o[3] ^ invert, + i_D5=o[4] ^ invert, i_D6=o[5] ^ invert, i_D7=o[6] ^ invert, i_D8=o[7] ^ invert, + i_TCE=1, i_OCE=1, + i_T1=self.t_in) if pad_n is None: self.comb += pad.eq(pad_o) else: - self.specials += Instance("OBUFDS", - i_I=pad_o, - o_O=pad, o_OB=pad_n) + self.specials += Instance("IOBUFDS_INTERMDISABLE", + p_DIFF_TERM="FALSE", + p_IBUF_LOW_PWR="TRUE", + p_USE_IBUFDISABLE="TRUE", + i_IBUFDISABLE=1, + i_INTERMDISABLE=1, + i_I=pad_o, + i_T=self.t_out, + io_IO=pad, io_IOB=pad_n) class _ISERDESE2_8X(Module): @@ -42,19 +50,27 @@ class _ISERDESE2_8X(Module): pad_i = Signal() i = self.i self.specials += Instance("ISERDESE2", p_DATA_RATE="DDR", - p_DATA_WIDTH=8, - p_INTERFACE_TYPE="NETWORKING", p_NUM_CE=1, - o_Q1=i[7], o_Q2=i[6], o_Q3=i[5], o_Q4=i[4], - o_Q5=i[3], o_Q6=i[2], o_Q7=i[1], o_Q8=i[0], - i_D=pad_i, - i_CLK=ClockSignal("rtiox4"), - i_CLKB=~ClockSignal("rtiox4"), - i_CE1=1, i_RST=0, - i_CLKDIV=ClockSignal("rio_phy")) + p_DATA_WIDTH=8, + p_INTERFACE_TYPE="NETWORKING", p_NUM_CE=1, + o_Q1=i[7], o_Q2=i[6], o_Q3=i[5], o_Q4=i[4], + o_Q5=i[3], o_Q6=i[2], o_Q7=i[1], o_Q8=i[0], + i_D=pad_i, + i_CLK=ClockSignal("rtiox4"), + i_CLKB=~ClockSignal("rtiox4"), + i_CE1=1, + i_RST=ResetSignal("rio_phy"), + i_CLKDIV=ClockSignal("rio_phy")) if pad_n is None: self.comb += pad_i.eq(pad) else: - self.specials += Instance("IBUFDS", o_O=pad_i, i_I=pad, i_IB=pad_n) + self.specials += Instance("IBUFDS_INTERMDISABLE", + p_DIFF_TERM="TRUE", + p_IBUF_LOW_PWR="TRUE", + p_USE_IBUFDISABLE="TRUE", + i_IBUFDISABLE=0, + i_INTERMDISABLE=0, + o_O=pad_i, + io_I=pad, io_IB=pad_n) class _IOSERDESE2_8X(Module): @@ -67,36 +83,32 @@ class _IOSERDESE2_8X(Module): pad_i = Signal() pad_o = Signal() - i = self.i - self.specials += Instance("ISERDESE2", p_DATA_RATE="DDR", - p_DATA_WIDTH=8, - p_INTERFACE_TYPE="NETWORKING", p_NUM_CE=1, - o_Q1=i[7], o_Q2=i[6], o_Q3=i[5], o_Q4=i[4], - o_Q5=i[3], o_Q6=i[2], o_Q7=i[1], o_Q8=i[0], - i_D=pad_i, - i_CLK=ClockSignal("rtiox4"), - i_CLKB=~ClockSignal("rtiox4"), - i_CE1=1, i_RST=0, - i_CLKDIV=ClockSignal("rio_phy")) + iserdes = _ISERDESE2_8X(pad_i) oserdes = _OSERDESE2_8X(pad_o) - self.submodules += oserdes + self.submodules += iserdes, oserdes if pad_n is None: self.specials += Instance("IOBUF", - i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, - io_IO=pad) + i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, + io_IO=pad) else: - self.specials += Instance("IOBUFDS", - i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, - io_IO=pad, io_IOB=pad_n) + self.specials += Instance("IOBUFDS_INTERMDISABLE", + p_DIFF_TERM="TRUE", + p_IBUF_LOW_PWR="TRUE", + p_USE_IBUFDISABLE="TRUE", + i_IBUFDISABLE=~oserdes.t_out, + i_INTERMDISABLE=~oserdes.t_out, + i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, + io_IO=pad, io_IOB=pad_n) self.comb += [ + self.i.eq(iserdes.i), oserdes.t_in.eq(~self.oe), oserdes.o.eq(self.o) ] class Output_8X(ttl_serdes_generic.Output): - def __init__(self, pad, pad_n=None): - serdes = _OSERDESE2_8X(pad, pad_n) + def __init__(self, pad, pad_n=None, invert=False): + serdes = _OSERDESE2_8X(pad, pad_n, invert=invert) self.submodules += serdes ttl_serdes_generic.Output.__init__(self, serdes) diff --git a/artiq/gateware/rtio/phy/ttl_serdes_generic.py b/artiq/gateware/rtio/phy/ttl_serdes_generic.py index 0d88c4284..5c8456d93 100644 --- a/artiq/gateware/rtio/phy/ttl_serdes_generic.py +++ b/artiq/gateware/rtio/phy/ttl_serdes_generic.py @@ -7,7 +7,7 @@ from artiq.gateware.rtio import rtlink def 
_mk_edges(w, direction): l = [(1 << i) - 1 for i in range(w)] if direction == "rising": - l = [2**w - 1 ^ x for x in l] + l = [((1 << w) - 1) ^ x for x in l] elif direction == "falling": pass else: @@ -67,6 +67,12 @@ class InOut(Module): override_oe = Signal() self.overrides = [override_en, override_o, override_oe] + # Output enable, for interfacing to external buffers. + self.oe = Signal() + # input state exposed for edge_counter: latest serdes sample + # support for short pulses will need a more involved solution + self.input_state = Signal() + # # # # Output @@ -78,15 +84,17 @@ class InOut(Module): override_en=override_en, override_o=override_o) oe_k = Signal() + self.oe.attr.add("no_retiming") self.sync.rio_phy += [ If(self.rtlink.o.stb & (self.rtlink.o.address == 1), oe_k.eq(self.rtlink.o.data[0])), If(override_en, - serdes.oe.eq(override_oe) + self.oe.eq(override_oe) ).Else( - serdes.oe.eq(oe_k) + self.oe.eq(oe_k) ) ] + self.comb += serdes.oe.eq(self.oe) # Input sensitivity = Signal(2) @@ -100,181 +108,20 @@ class InOut(Module): ] i = serdes.i[-1] + self.comb += self.input_state.eq(i) i_d = Signal() self.sync.rio_phy += [ i_d.eq(i), - self.rtlink.i.stb.eq( - sample | - (sensitivity[0] & ( i & ~i_d)) | - (sensitivity[1] & (~i & i_d)) - ), self.rtlink.i.data.eq(i), ] pe = PriorityEncoder(serdes_width) self.submodules += pe - self.comb += pe.i.eq(serdes.i ^ Replicate(i_d, serdes_width)) - self.sync.rio_phy += self.rtlink.i.fine_ts.eq(pe.o) - - -class _FakeSerdes(Module): - def __init__(self): - self.o = Signal(8) - self.i = Signal(8) - self.oe = Signal() - - -class _OutputTB(Module): - def __init__(self): - serdes = _FakeSerdes() - self.submodules.dut = RenameClockDomains(Output(serdes), - {"rio_phy": "sys"}) - - def gen_simulation(self, selfp): - selfp.dut.rtlink.o.data = 1 - selfp.dut.rtlink.o.fine_ts = 1 - selfp.dut.rtlink.o.stb = 1 - yield - selfp.dut.rtlink.o.stb = 0 - yield - selfp.dut.rtlink.o.data = 0 - selfp.dut.rtlink.o.fine_ts = 2 - selfp.dut.rtlink.o.stb = 1 - yield - yield - selfp.dut.rtlink.o.data = 1 - selfp.dut.rtlink.o.fine_ts = 7 - selfp.dut.rtlink.o.stb = 1 - for _ in range(6): - # note that stb stays active; output should not change - yield - - -class _InOutTB(Module): - def __init__(self): - self.serdes = _FakeSerdes() - self.submodules.dut = RenameClockDomains(InOut(self.serdes), - {"rio_phy": "sys", - "rio": "sys"}) - - def check_input(self, selfp, stb, fine_ts=None): - if stb != selfp.dut.rtlink.i.stb: - print("KO rtlink.i.stb should be {} but is {}" - .format(stb, selfp.dut.rtlink.i.stb)) - elif fine_ts is not None and fine_ts != selfp.dut.rtlink.i.fine_ts: - print("KO rtlink.i.fine_ts should be {} but is {}" - .format(fine_ts, selfp.dut.rtlink.i.fine_ts)) - else: - print("OK") - - def check_output(self, selfp, data): - if selfp.serdes.o != data: - print("KO io.o should be {} but is {}".format(data, selfp.serdes.o)) - else: - print("OK") - - def check_output_enable(self, selfp, oe): - if selfp.serdes.oe != oe: - print("KO io.oe should be {} but is {}".format(oe, selfp.serdes.oe)) - else: - print("OK") - - def gen_simulation(self, selfp): - selfp.dut.rtlink.o.address = 2 - selfp.dut.rtlink.o.data = 0b11 - selfp.dut.rtlink.o.stb = 1 # set sensitivity to rising + falling - yield - selfp.dut.rtlink.o.stb = 0 - - self.check_output_enable(selfp, 0) - yield - - selfp.serdes.i = 0b11111110 # rising edge at fine_ts = 1 - yield - selfp.serdes.i = 0b11111111 - yield - self.check_input(selfp, stb=1, fine_ts=1) - - selfp.serdes.i = 0b01111111 # falling edge at fine_ts = 7 - 
yield - selfp.serdes.i = 0b00000000 - yield - self.check_input(selfp, stb=1, fine_ts=7) - - selfp.serdes.i = 0b11000000 # rising edge at fine_ts = 6 - yield - selfp.serdes.i = 0b11111111 - yield - self.check_input(selfp, stb=1, fine_ts=6) - - selfp.dut.rtlink.o.address = 2 - selfp.dut.rtlink.o.data = 0b11 - selfp.dut.rtlink.o.stb = 1 # set sensitivity to rising only - yield - selfp.dut.rtlink.o.stb = 0 - yield - - selfp.serdes.i = 0b00001111 # falling edge at fine_ts = 4 - yield - self.check_input(selfp, stb=0) # no strobe, sensitivity is rising edge - - selfp.serdes.i = 0b11110000 # rising edge at fine_ts = 4 - yield - self.check_input(selfp, stb=1, fine_ts=4) - - selfp.dut.rtlink.o.address = 1 - selfp.dut.rtlink.o.data = 1 - selfp.dut.rtlink.o.stb = 1 # set Output Enable to 1 - yield - selfp.dut.rtlink.o.stb = 0 - yield - yield - self.check_output_enable(selfp, 1) - - selfp.dut.rtlink.o.address = 0 - selfp.dut.rtlink.o.data = 1 - selfp.dut.rtlink.o.fine_ts = 3 - selfp.dut.rtlink.o.stb = 1 # rising edge at fine_ts = 3 - yield - selfp.dut.rtlink.o.stb = 0 - yield - self.check_output(selfp, data=0b11111000) - - yield - self.check_output(selfp, data=0xFF) # stays at 1 - - selfp.dut.rtlink.o.data = 0 - selfp.dut.rtlink.o.fine_ts = 0 - selfp.dut.rtlink.o.stb = 1 # falling edge at fine_ts = 0 - yield - selfp.dut.rtlink.o.stb = 0 - yield - self.check_output(selfp, data=0) - - yield - self.check_output(selfp, data=0) - - selfp.dut.rtlink.o.data = 1 - selfp.dut.rtlink.o.fine_ts = 7 - selfp.dut.rtlink.o.stb = 1 # rising edge at fine_ts = 7 - yield - selfp.dut.rtlink.o.stb = 0 - yield - self.check_output(selfp, data=0b10000000) - - -if __name__ == "__main__": - import sys - from migen.sim.generic import Simulator, TopLevel - - if len(sys.argv) != 2: - print("Incorrect command line") - sys.exit(1) - - cls = { - "output": _OutputTB, - "inout": _InOutTB - }[sys.argv[1]] - - with Simulator(cls(), TopLevel("top.vcd", clk_period=int(1/0.125))) as s: - s.run() + self.comb += pe.i.eq( + (serdes.i ^ Cat(i_d, serdes.i)) & ( + (serdes.i & Replicate(sensitivity[0], serdes_width)) | + (~serdes.i & Replicate(sensitivity[1], serdes_width)))) + self.sync.rio_phy += [ + self.rtlink.i.fine_ts.eq(pe.o), + self.rtlink.i.stb.eq(sample | ~pe.n), + ] diff --git a/artiq/gateware/rtio/phy/ttl_serdes_ultrascale.py b/artiq/gateware/rtio/phy/ttl_serdes_ultrascale.py new file mode 100644 index 000000000..4231a8940 --- /dev/null +++ b/artiq/gateware/rtio/phy/ttl_serdes_ultrascale.py @@ -0,0 +1,113 @@ +from migen import * + +from artiq.gateware.rtio.phy import ttl_serdes_generic + + +class _OSERDESE3(Module): + def __init__(self, dw, pad, pad_n=None): + self.o = Signal(dw) + self.t_in = Signal() + self.t_out = Signal() + + # # # + + pad_o = Signal() + self.specials += Instance("OSERDESE3", + p_DATA_WIDTH=dw, p_INIT=0, + p_IS_CLK_INVERTED=0, p_IS_CLKDIV_INVERTED=0, p_IS_RST_INVERTED=0, + + o_OQ=pad_o, o_T_OUT=self.t_out, + i_RST=ResetSignal("rtio"), + i_CLK=ClockSignal("rtiox"), i_CLKDIV=ClockSignal("rtio"), + i_D=self.o, i_T=self.t_in) + if pad_n is None: + self.comb += pad.eq(pad_o) + else: + self.specials += Instance("IOBUFDS_INTERMDISABLE", + i_IBUFDISABLE=1, + i_INTERMDISABLE=1, + i_I=pad_o, + i_T=self.t_out, + io_IO=pad, io_IOB=pad_n) + + +class _ISERDESE3(Module): + def __init__(self, dw, pad, pad_n=None): + self.o = Signal(dw) + self.i = Signal(dw) + self.oe = Signal() + + # # # + + pad_i = Signal() + self.specials += Instance("ISERDESE3", + p_IS_CLK_INVERTED=0, + p_IS_CLK_B_INVERTED=1, + p_DATA_WIDTH=dw, + + i_D=pad_i, + 
i_RST=ResetSignal("rtio"), + i_FIFO_RD_EN=0, + i_CLK=ClockSignal("rtiox"), + i_CLK_B=ClockSignal("rtiox"), # locally inverted + i_CLKDIV=ClockSignal("rtio"), + o_Q=Cat(*[self.i[i] for i in reversed(range(dw))])) + if pad_n is None: + self.comb += pad_i.eq(pad) + else: + self.specials += Instance("IBUFDS_INTERMDISABLE", + i_IBUFDISABLE=0, + i_INTERMDISABLE=0, + o_O=pad_i, + io_I=pad, io_IB=pad_n) + + +class _IOSERDESE3(Module): + def __init__(self, dw, pad, pad_n=None): + self.o = Signal(dw) + self.i = Signal(dw) + self.oe = Signal() + + # # # + + pad_i = Signal() + pad_o = Signal() + iserdes = _ISERDESE3(dw, pad_i) + oserdes = _OSERDESE3(dw, pad_o) + self.submodules += iserdes, oserdes + if pad_n is None: + self.specials += Instance("IOBUF", + i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, + io_IO=pad) + else: + self.specials += Instance("IOBUFDS_INTERMDISABLE", + i_IBUFDISABLE=~oserdes.t_out, + i_INTERMDISABLE=~oserdes.t_out, + i_I=pad_o, o_O=pad_i, i_T=oserdes.t_out, + io_IO=pad, io_IOB=pad_n) + self.comb += [ + self.i.eq(iserdes.i), + oserdes.t_in.eq(~self.oe), + oserdes.o.eq(self.o) + ] + + +class Output(ttl_serdes_generic.Output): + def __init__(self, dw, pad, pad_n=None): + serdes = _OSERDESE3(dw, pad, pad_n) + self.submodules += serdes + ttl_serdes_generic.Output.__init__(self, serdes) + + +class InOut(ttl_serdes_generic.InOut): + def __init__(self, dw, pad, pad_n=None): + serdes = _IOSERDESE3(dw, pad, pad_n) + self.submodules += serdes + ttl_serdes_generic.InOut.__init__(self, serdes) + + +class Input(ttl_serdes_generic.InOut): + def __init__(self, dw, pad, pad_n=None): + serdes = _ISERDESE3(dw, pad, pad_n) + self.submodules += serdes + ttl_serdes_generic.InOut.__init__(self, serdes) diff --git a/artiq/gateware/rtio/phy/ttl_simple.py b/artiq/gateware/rtio/phy/ttl_simple.py index d5129606a..4484ce3af 100644 --- a/artiq/gateware/rtio/phy/ttl_simple.py +++ b/artiq/gateware/rtio/phy/ttl_simple.py @@ -1,13 +1,15 @@ from migen import * from migen.genlib.cdc import MultiReg +from migen.genlib.io import DifferentialInput, DifferentialOutput from artiq.gateware.rtio import rtlink class Output(Module): - def __init__(self, pad): + def __init__(self, pad, pad_n=None): self.rtlink = rtlink.Interface(rtlink.OInterface(1)) - self.probes = [pad] + pad_o = Signal(reset_less=True) + self.probes = [pad_o] override_en = Signal() override_o = Signal() self.overrides = [override_en, override_o] @@ -20,21 +22,28 @@ class Output(Module): pad_k.eq(self.rtlink.o.data) ), If(override_en, - pad.eq(override_o) + pad_o.eq(override_o) ).Else( - pad.eq(pad_k) + pad_o.eq(pad_k) ) ] + if pad_n is None: + self.comb += pad.eq(pad_o) + else: + self.specials += DifferentialOutput(pad_o, pad, pad_n) class Input(Module): - def __init__(self, pad): + def __init__(self, pad, pad_n=None): self.rtlink = rtlink.Interface( rtlink.OInterface(2, 2), rtlink.IInterface(1)) self.overrides = [] self.probes = [] + #: Registered copy of the input state, in the rio_phy clock domain. 
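+ #: Exposed so that other gateware (e.g. an edge counter) can sample the input
+ #: without going through rtlink.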
+ self.input_state = Signal() + # # # sensitivity = Signal(2) @@ -49,8 +58,13 @@ class Input(Module): ] i = Signal() - i_d = Signal() - self.specials += MultiReg(pad, i, "rio_phy") + i_d = Signal(reset_less=True) + pad_i = Signal() + if pad_n is None: + self.comb += pad_i.eq(pad) + else: + self.specials += DifferentialInput(pad, pad_n, pad_i) + self.specials += MultiReg(pad_i, i, "rio_phy") self.sync.rio_phy += i_d.eq(i) self.comb += [ self.rtlink.i.stb.eq( @@ -58,7 +72,8 @@ class Input(Module): (sensitivity[0] & ( i & ~i_d)) | (sensitivity[1] & (~i & i_d)) ), - self.rtlink.i.data.eq(i) + self.rtlink.i.data.eq(i), + self.input_state.eq(i) ] self.probes += [i] @@ -75,6 +90,11 @@ class InOut(Module): self.overrides = [override_en, override_o, override_oe] self.probes = [] + # Output enable, for interfacing to external buffers. + self.oe = Signal() + # Registered copy of the input state, in the rio_phy clock domain. + self.input_state = Signal() + # # # ts = TSTriple() @@ -83,6 +103,7 @@ class InOut(Module): o_k = Signal() oe_k = Signal() + self.oe.attr.add("no_retiming") self.sync.rio_phy += [ If(self.rtlink.o.stb, If(self.rtlink.o.address == 0, o_k.eq(self.rtlink.o.data[0])), @@ -90,12 +111,13 @@ class InOut(Module): ), If(override_en, ts.o.eq(override_o), - ts.oe.eq(override_oe) + self.oe.eq(override_oe) ).Else( ts.o.eq(o_k), - ts.oe.eq(oe_k) + self.oe.eq(oe_k) ) ] + self.comb += ts.oe.eq(self.oe) sample = Signal() self.sync.rio += [ sample.eq(0), @@ -115,7 +137,8 @@ class InOut(Module): (sensitivity[0] & ( i & ~i_d)) | (sensitivity[1] & (~i & i_d)) ), - self.rtlink.i.data.eq(i) + self.rtlink.i.data.eq(i), + self.input_state.eq(i) ] self.probes += [i, ts.oe] diff --git a/artiq/gateware/rtio/phy/wishbone.py b/artiq/gateware/rtio/phy/wishbone.py index d8fa752c2..937e66a70 100644 --- a/artiq/gateware/rtio/phy/wishbone.py +++ b/artiq/gateware/rtio/phy/wishbone.py @@ -5,18 +5,18 @@ from artiq.gateware.rtio import rtlink class RT2WB(Module): - def __init__(self, address_width, wb=None, rtio_enable_replace=False): + def __init__(self, address_width, wb=None, rtio_enable_replace=False, write_only=False): if wb is None: wb = wishbone.Interface() self.wb = wb self.rtlink = rtlink.Interface( rtlink.OInterface( len(wb.dat_w), - address_width + 1, + address_width + 1 if not write_only else address_width, enable_replace=rtio_enable_replace), rtlink.IInterface( len(wb.dat_r), - timestamped=False) + timestamped=False) if not write_only else None ) # # # @@ -26,7 +26,7 @@ class RT2WB(Module): If(self.rtlink.o.stb, active.eq(1), wb.adr.eq(self.rtlink.o.address[:address_width]), - wb.we.eq(~self.rtlink.o.address[address_width]), + wb.we.eq(~self.rtlink.o.address[address_width] if not write_only else 1), wb.dat_w.eq(self.rtlink.o.data), wb.sel.eq(2**len(wb.sel) - 1) ), @@ -38,7 +38,10 @@ class RT2WB(Module): self.rtlink.o.busy.eq(active), wb.cyc.eq(active), wb.stb.eq(active), - - self.rtlink.i.stb.eq(wb.ack & ~wb.we), - self.rtlink.i.data.eq(wb.dat_r) ] + + if not write_only: + self.comb += [ + self.rtlink.i.stb.eq(wb.ack & ~wb.we), + self.rtlink.i.data.eq(wb.dat_r) + ] diff --git a/artiq/gateware/rtio/rtlink.py b/artiq/gateware/rtio/rtlink.py index a4fb3ebf9..f1c4e2fe0 100644 --- a/artiq/gateware/rtio/rtlink.py +++ b/artiq/gateware/rtio/rtlink.py @@ -8,12 +8,16 @@ class OInterface: self.stb = Signal() self.busy = Signal() + assert 0 <= data_width <= 512 + assert 0 <= address_width <= 8 + assert 0 <= fine_ts_width <= 4 + if data_width: - self.data = Signal(data_width) + self.data = Signal(data_width, 
reset_less=True) if address_width: - self.address = Signal(address_width) + self.address = Signal(address_width, reset_less=True) if fine_ts_width: - self.fine_ts = Signal(fine_ts_width) + self.fine_ts = Signal(fine_ts_width, reset_less=True) self.enable_replace = enable_replace @@ -35,10 +39,13 @@ class IInterface: timestamped=True, fine_ts_width=0, delay=0): self.stb = Signal() + assert 0 <= data_width <= 32 + assert 0 <= fine_ts_width <= 4 + if data_width: - self.data = Signal(data_width) + self.data = Signal(data_width, reset_less=True) if fine_ts_width: - self.fine_ts = Signal(fine_ts_width) + self.fine_ts = Signal(fine_ts_width, reset_less=True) assert(not fine_ts_width or timestamped) self.timestamped = timestamped @@ -69,14 +76,13 @@ class Interface: def _get_or_zero(interface, attr): - if isinstance(interface, Interface): - return max(_get_or_zero(interface.i, attr), - _get_or_zero(interface.o, attr)) + if interface is None: + return 0 + assert isinstance(interface, (OInterface, IInterface)) + if hasattr(interface, attr): + return len(getattr(interface, attr)) else: - if hasattr(interface, attr): - return len(getattr(interface, attr)) - else: - return 0 + return 0 def get_data_width(interface): diff --git a/artiq/gateware/rtio/sed/__init__.py b/artiq/gateware/rtio/sed/__init__.py new file mode 100644 index 000000000..d0055b52a --- /dev/null +++ b/artiq/gateware/rtio/sed/__init__.py @@ -0,0 +1,57 @@ +""" +The traditional RTIO system used one dedicated FIFO per output channel. While this architecture +is simple and appropriate for ARTIQ systems that were rather small and simple, it shows its limitations +on more complex ones. In decreasing order of importance: +* with DRTIO, the master needed to keep track of, for each FIFO in each satellite, a lower bound on +the number of available entries plus the last timestamp written. The timestamp is stored in order +to detect sequence errors rapidly (and allow precise exceptions without compromising performance). +When many satellites are involved, especially with DRTIO switches, the storage requirements become +prohibitive. +* with many channels in one device, the large muxes and the error detection logic that +can handle all the FIFOs make timing closure problematic. +* with many channels in one device, the FIFOs waste FPGA space, as they are never all filled at the +same time. + +The scalable event dispatcher (SED) addresses those issues: +* only one lower bound on the available entries needs to be stored per satellite device for flow +control purposes (called "buffer space"). Most sequence errors no longer exist (non-increasing +timestamps into one channel are permitted to an extent) so rapid detection of them is no longer +required. +* the events can be demultiplexed to the different channels using pipeline stages that ease timing. +* only a few FIFOs are required and they are shared between the channels. + +The SED core contains a configurable number of FIFOs that hold the usual information about RTIO +events (timestamp, address, data), the channel number, and a sequence number. The sequence number is +increased for each event submitted. + +When an event is submitted, it is written into the current FIFO if its timestamp is strictly +increasing. Otherwise, the current FIFO number is incremented by one (and wraps around, if the +current FIFO was the last) and the event is written there, unless that FIFO already contains an +event with a greater timestamp. In that case, an asynchronous error is reported.
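+
+As an illustration, a simplified sketch of this selection policy (pseudocode, not the
+actual gateware; it ignores flow control, latency compensation and the optional
+spreading described below):
+
+    if timestamp > last_timestamp[current]:
+        write the event to FIFO[current]
+    else:
+        current = (current + 1) mod fifo_count
+        if timestamp > last_timestamp[current]:
+            write the event to FIFO[current]
+        else:
+            report an asynchronous sequence error
+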
If the destination +FIFO is full, the submitter is blocked. + +In order to help spread events among FIFOs and maximize buffering, the SED core may optionally +also switch to the next FIFO after the current FIFO has been full. + +At the output of the FIFOs, the events are distributed to the channels and simultaneous events on +the same channel are handled using a structure similar to an odd-even merge-sort network that sorts +by channel. When there are simultaneous events on the same channel, the event with the highest +sequence number is kept and a flag is raised to indicate that a replacement occurred on that +channel. If a replacement was made on a channel that has replacements disabled, the final +event is dropped and a collision error is reported asynchronously. + +Underflow errors are detected as before by comparing the event timestamp with the current value of +the counter, and dropping events that do not have enough time to make it through the system. + +The sequence number is sized to be able to represent the combined capacity of all FIFOs, plus +2 bits that allow the detection of wrap-arounds. + +The maximum number of simultaneous events (on different channels), and the maximum number of active +timeline "rewinds", are equal to the number of FIFOs. + +The SED logic supports both synchronous and asynchronous FIFOs, which are used respectively for local +RTIO and DRTIO. + +To implement flow control in DRTIO, the master queries the satellite for buffer space. The satellite +reports, as its buffer space, the space available in its fullest FIFO. +""" diff --git a/artiq/gateware/rtio/sed/core.py b/artiq/gateware/rtio/sed/core.py new file mode 100644 index 000000000..7d0b0de4e --- /dev/null +++ b/artiq/gateware/rtio/sed/core.py @@ -0,0 +1,105 @@ +from migen import * + +from artiq.gateware.rtio.sed import layouts +from artiq.gateware.rtio.sed.lane_distributor import * +from artiq.gateware.rtio.sed.fifos import * +from artiq.gateware.rtio.sed.gates import * +from artiq.gateware.rtio.sed.output_driver import * + + +__all__ = ["SED"] + + +class SED(Module): + def __init__(self, channels, glbl_fine_ts_width, mode, + lane_count=8, fifo_depth=128, enable_spread=True, + quash_channels=[], report_buffer_space=False, interface=None): + if mode == "sync": + lane_dist_cdr = lambda x: x + fifos_cdr = lambda x: x + gates_cdr = lambda x: x + output_driver_cdr = lambda x: x + elif mode == "async": + lane_dist_cdr = ClockDomainsRenamer("rsys") + fifos_cdr = ClockDomainsRenamer({"write": "rsys", "read": "rio"}) + gates_cdr = ClockDomainsRenamer("rio") + output_driver_cdr = ClockDomainsRenamer("rio") + else: + raise ValueError + + seqn_width = layouts.seqn_width(lane_count, fifo_depth) + + self.submodules.lane_dist = lane_dist_cdr( + LaneDistributor(lane_count, seqn_width, + layouts.fifo_payload(channels), + [channel.interface.o.delay for channel in channels], + glbl_fine_ts_width, + enable_spread=enable_spread, + quash_channels=quash_channels, + interface=interface)) + self.submodules.fifos = fifos_cdr( + FIFOs(lane_count, fifo_depth, + layouts.fifo_payload(channels), mode, report_buffer_space)) + self.submodules.gates = gates_cdr( + Gates(lane_count, seqn_width, + layouts.fifo_payload(channels), + layouts.output_network_payload(channels, glbl_fine_ts_width))) + self.submodules.output_driver = output_driver_cdr( + OutputDriver(channels, glbl_fine_ts_width, lane_count, seqn_width)) + + for o, i in zip(self.lane_dist.output, self.fifos.input): + self.comb += o.connect(i) + for o, i in zip(self.fifos.output, 
self.gates.input): + self.comb += o.connect(i) + for o, i in zip(self.gates.output, self.output_driver.input): + self.comb += i.eq(o) + + if report_buffer_space: + self.comb += [ + self.cri.o_buffer_space_valid.eq(1), + self.cri.o_buffer_space.eq(self.fifos.buffer_space) + ] + + @property + def cri(self): + return self.lane_dist.cri + + # in CRI clock domain + @property + def minimum_coarse_timestamp(self): + return self.lane_dist.minimum_coarse_timestamp + + # in I/O clock domain + @property + def coarse_timestamp(self): + return self.gates.coarse_timestamp + + # in CRI clock domain + @property + def sequence_error(self): + return self.lane_dist.sequence_error + + # in CRI clock domain + @property + def sequence_error_channel(self): + return self.lane_dist.sequence_error_channel + + # in I/O clock domain + @property + def collision(self): + return self.output_driver.collision + + # in I/O clock domain + @property + def collision_channel(self): + return self.output_driver.collision_channel + + # in I/O clock domain + @property + def busy(self): + return self.output_driver.busy + + # in I/O clock domain + @property + def busy_channel(self): + return self.output_driver.busy_channel diff --git a/artiq/gateware/rtio/sed/fifos.py b/artiq/gateware/rtio/sed/fifos.py new file mode 100644 index 000000000..cbecccf07 --- /dev/null +++ b/artiq/gateware/rtio/sed/fifos.py @@ -0,0 +1,84 @@ +from operator import or_ +from functools import reduce + +from migen import * +from migen.genlib.fifo import * + +from artiq.gateware.rtio.sed import layouts + + +__all__ = ["FIFOs"] + + +class FIFOs(Module): + def __init__(self, lane_count, fifo_depth, layout_payload, mode, report_buffer_space=False): + seqn_width = layouts.seqn_width(lane_count, fifo_depth) + self.input = [Record(layouts.fifo_ingress(seqn_width, layout_payload)) + for _ in range(lane_count)] + self.output = [Record(layouts.fifo_egress(seqn_width, layout_payload)) + for _ in range(lane_count)] + + if report_buffer_space: + self.buffer_space = Signal(max=fifo_depth+1) + + # # # + + if mode == "sync": + fifo_cls = SyncFIFOBuffered + elif mode == "async": + fifo_cls = AsyncFIFOBuffered + else: + raise ValueError + + fifos = [] + for input, output in zip(self.input, self.output): + fifo = fifo_cls(seqn_width + layout_len(layout_payload), fifo_depth) + self.submodules += fifo + fifos.append(fifo) + + self.comb += [ + fifo.din.eq(Cat(input.seqn, input.payload.raw_bits())), + fifo.we.eq(input.we), + input.writable.eq(fifo.writable), + + Cat(output.seqn, output.payload.raw_bits()).eq(fifo.dout), + output.readable.eq(fifo.readable), + fifo.re.eq(output.re) + ] + + if report_buffer_space: + if mode != "sync": + raise NotImplementedError + + def compute_max(elts): + l = len(elts) + if l == 1: + return elts[0], 0 + else: + maximum1, latency1 = compute_max(elts[:l//2]) + maximum2, latency2 = compute_max(elts[l//2:]) + maximum = Signal(max(len(maximum1), len(maximum2))) + self.sync += [ + If(maximum1 > maximum2, + maximum.eq(maximum1) + ).Else( + maximum.eq(maximum2) + ) + ] + latency = max(latency1, latency2) + 1 + return maximum, latency + + max_level, latency = compute_max([fifo.level for fifo in fifos]) + max_level_valid = Signal() + max_level_valid_counter = Signal(max=latency) + self.sync += [ + If(reduce(or_, [fifo.we for fifo in fifos]), + max_level_valid.eq(0), + max_level_valid_counter.eq(latency - 1) + ).Elif(max_level_valid_counter == 0, + max_level_valid.eq(1) + ).Else( + max_level_valid_counter.eq(max_level_valid_counter - 1) + ) + ] + self.comb += 
If(max_level_valid, self.buffer_space.eq(fifo_depth - max_level)) diff --git a/artiq/gateware/rtio/sed/gates.py b/artiq/gateware/rtio/sed/gates.py new file mode 100644 index 000000000..bf4a99d25 --- /dev/null +++ b/artiq/gateware/rtio/sed/gates.py @@ -0,0 +1,40 @@ +from migen import * + +from artiq.gateware.rtio.sed import layouts + + +__all__ = ["Gates"] + + +class Gates(Module): + def __init__(self, lane_count, seqn_width, layout_fifo_payload, layout_output_network_payload): + self.input = [Record(layouts.fifo_egress(seqn_width, layout_fifo_payload)) + for _ in range(lane_count)] + self.output = [Record(layouts.output_network_node(seqn_width, layout_output_network_payload), + reset_less=True) + for _ in range(lane_count)] + + if hasattr(self.output[0].payload, "fine_ts"): + glbl_fine_ts_width = len(self.output[0].payload.fine_ts) + else: + glbl_fine_ts_width = 0 + + self.coarse_timestamp = Signal(64-glbl_fine_ts_width) + + # # # + + for input, output in zip(self.input, self.output): + for field, _ in output.payload.layout: + if field == "fine_ts": + self.sync += output.payload.fine_ts.eq(input.payload.timestamp[:glbl_fine_ts_width]) + else: + self.sync += getattr(output.payload, field).eq(getattr(input.payload, field)) + self.sync += output.seqn.eq(input.seqn) + self.comb += [ + output.replace_occured.eq(0), + output.nondata_replace_occured.eq(0) + ] + + self.comb += input.re.eq(input.payload.timestamp[glbl_fine_ts_width:] == self.coarse_timestamp) + output.valid.reset_less = False + self.sync += output.valid.eq(input.re & input.readable) diff --git a/artiq/gateware/rtio/sed/lane_distributor.py b/artiq/gateware/rtio/sed/lane_distributor.py new file mode 100644 index 000000000..08a3fa716 --- /dev/null +++ b/artiq/gateware/rtio/sed/lane_distributor.py @@ -0,0 +1,184 @@ +from migen import * + +from artiq.gateware.rtio import cri +from artiq.gateware.rtio.sed import layouts + + +__all__ = ["LaneDistributor"] + + +class LaneDistributor(Module): + def __init__(self, lane_count, seqn_width, layout_payload, + compensation, glbl_fine_ts_width, + enable_spread=True, quash_channels=[], interface=None): + if lane_count & (lane_count - 1): + raise NotImplementedError("lane count must be a power of 2") + + if interface is None: + interface = cri.Interface() + self.cri = interface + self.sequence_error = Signal() + self.sequence_error_channel = Signal(16, reset_less=True) + # The minimum timestamp that an event must have to avoid triggering + # an underflow, at the time when the CRI write happens, and to a channel + # with zero latency compensation. This is synchronous to the system clock + # domain. + us_timestamp_width = 64 - glbl_fine_ts_width + self.minimum_coarse_timestamp = Signal(us_timestamp_width) + self.output = [Record(layouts.fifo_ingress(seqn_width, layout_payload)) + for _ in range(lane_count)] + + # # # + + o_status_wait = Signal() + o_status_underflow = Signal() + self.comb += self.cri.o_status.eq(Cat(o_status_wait, o_status_underflow)) + + # The core keeps writing events into the current lane as long as timestamps + # (after compensation) are strictly increasing, otherwise it switches to + # the next lane. + # If spread is enabled, it also switches to the next lane after the current + # lane has been full, in order to maximize lane utilization. + # The current lane is called lane "A". The next lane (which may be chosen + # at a later stage by the core) is called lane "B". + # Computations for both lanes are prepared in advance to increase performance. 
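+ # Informal summary of the decision implemented below, in terms of
+ # latency-compensated coarse timestamps:
+ # - underflow if the timestamp is not strictly above the minimum coarse timestamp;
+ # - otherwise, the target lane is B if the timestamp is not strictly above the
+ #   last timestamp written (or if spread forces a switch), else A;
+ # - sequence error if the timestamp is not strictly above the target lane's
+ #   last timestamp, otherwise the event is written to the target lane.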
+ + current_lane = Signal(max=lane_count) + # The last coarse timestamp received from the CRI, after compensation. + # Used to determine when to switch lanes. + last_coarse_timestamp = Signal(us_timestamp_width) + # The last coarse timestamp written to each lane. Used to detect + # sequence errors. + last_lane_coarse_timestamps = Array(Signal(us_timestamp_width) + for _ in range(lane_count)) + # Sequence number counter. The sequence number is used to determine which + # event wins during a replace. + seqn = Signal(seqn_width) + + # distribute data to lanes + for lio in self.output: + self.comb += [ + lio.seqn.eq(seqn), + lio.payload.channel.eq(self.cri.chan_sel[:16]), + lio.payload.timestamp.eq(self.cri.o_timestamp), + ] + if hasattr(lio.payload, "address"): + self.comb += lio.payload.address.eq(self.cri.o_address) + if hasattr(lio.payload, "data"): + self.comb += lio.payload.data.eq(self.cri.o_data) + + # when timestamp and channel arrive in cycle #1, prepare computations + coarse_timestamp = Signal(us_timestamp_width) + self.comb += coarse_timestamp.eq(self.cri.o_timestamp[glbl_fine_ts_width:]) + min_minus_timestamp = Signal((us_timestamp_width + 1, True), + reset_less=True) + laneAmin_minus_timestamp = Signal.like(min_minus_timestamp) + laneBmin_minus_timestamp = Signal.like(min_minus_timestamp) + last_minus_timestamp = Signal.like(min_minus_timestamp) + current_lane_plus_one = Signal(max=lane_count) + self.comb += current_lane_plus_one.eq(current_lane + 1) + self.sync += [ + min_minus_timestamp.eq(self.minimum_coarse_timestamp - coarse_timestamp), + laneAmin_minus_timestamp.eq(last_lane_coarse_timestamps[current_lane] - coarse_timestamp), + laneBmin_minus_timestamp.eq(last_lane_coarse_timestamps[current_lane_plus_one] - coarse_timestamp), + last_minus_timestamp.eq(last_coarse_timestamp - coarse_timestamp) + ] + + # Quash channels are "dummy" channels to which writes are completely ignored. + # This is used by the RTIO log channel, which is taken into account + # by the analyzer but does not enter the lanes. 
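+ # Like the other values prepared in cycle #1, quash is registered so that it
+ # lines up with the write decision made in cycle #2 below.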
+ quash = Signal() + self.sync += quash.eq(0) + for channel in quash_channels: + self.sync += If(self.cri.chan_sel[:16] == channel, quash.eq(1)) + + assert all(abs(c) < 1 << 14 - 1 for c in compensation) + latency_compensation = Memory(14, len(compensation), init=compensation) + latency_compensation_port = latency_compensation.get_port() + self.specials += latency_compensation, latency_compensation_port + self.comb += latency_compensation_port.adr.eq(self.cri.chan_sel[:16]) + + # cycle #2, write + compensation = Signal((14, True)) + self.comb += compensation.eq(latency_compensation_port.dat_r) + timestamp_above_min = Signal() + timestamp_above_last = Signal() + timestamp_above_laneA_min = Signal() + timestamp_above_laneB_min = Signal() + timestamp_above_lane_min = Signal() + force_laneB = Signal() + use_laneB = Signal() + use_lanen = Signal(max=lane_count) + + do_write = Signal() + do_underflow = Signal() + do_sequence_error = Signal() + self.comb += [ + timestamp_above_min.eq(min_minus_timestamp - compensation < 0), + timestamp_above_laneA_min.eq(laneAmin_minus_timestamp - compensation < 0), + timestamp_above_laneB_min.eq(laneBmin_minus_timestamp - compensation < 0), + timestamp_above_last.eq(last_minus_timestamp - compensation < 0), + If(force_laneB | ~timestamp_above_last, + use_lanen.eq(current_lane_plus_one), + use_laneB.eq(1) + ).Else( + use_lanen.eq(current_lane), + use_laneB.eq(0) + ), + + timestamp_above_lane_min.eq(Mux(use_laneB, timestamp_above_laneB_min, timestamp_above_laneA_min)), + If(~quash & (self.cri.cmd == cri.commands["write"]), + If(timestamp_above_min, + If(timestamp_above_lane_min, + do_write.eq(1) + ).Else( + do_sequence_error.eq(1) + ) + ).Else( + do_underflow.eq(1) + ) + ), + Array(lio.we for lio in self.output)[use_lanen].eq(do_write) + ] + compensated_timestamp = Signal(64) + self.comb += compensated_timestamp.eq(self.cri.o_timestamp + (compensation << glbl_fine_ts_width)) + self.sync += [ + If(do_write, + current_lane.eq(use_lanen), + last_coarse_timestamp.eq(compensated_timestamp[glbl_fine_ts_width:]), + last_lane_coarse_timestamps[use_lanen].eq(compensated_timestamp[glbl_fine_ts_width:]), + seqn.eq(seqn + 1), + ) + ] + for lio in self.output: + self.comb += lio.payload.timestamp.eq(compensated_timestamp) + + # cycle #3, read status + current_lane_writable = Signal() + self.comb += [ + current_lane_writable.eq(Array(lio.writable for lio in self.output)[current_lane]), + o_status_wait.eq(~current_lane_writable) + ] + self.sync += [ + If(self.cri.cmd == cri.commands["write"], + o_status_underflow.eq(0) + ), + If(do_underflow, + o_status_underflow.eq(1) + ), + self.sequence_error.eq(do_sequence_error), + self.sequence_error_channel.eq(self.cri.chan_sel[:16]) + ] + + # current lane has been full, spread events by switching to the next. 
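+ # force_laneB is set when the current lane becomes writable again after having
+ # been full, and is cleared by the next write, so that write lands in the next lane.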
+ if enable_spread: + current_lane_writable_r = Signal(reset=1) + self.sync += [ + current_lane_writable_r.eq(current_lane_writable), + If(~current_lane_writable_r & current_lane_writable, + force_laneB.eq(1) + ), + If(do_write, + force_laneB.eq(0) + ) + ] diff --git a/artiq/gateware/rtio/sed/layouts.py b/artiq/gateware/rtio/sed/layouts.py new file mode 100644 index 000000000..1fbb8f6ec --- /dev/null +++ b/artiq/gateware/rtio/sed/layouts.py @@ -0,0 +1,77 @@ +from migen import * + +from artiq.gateware.rtio import rtlink + + +def fifo_payload(channels): + address_width = max(rtlink.get_address_width(channel.interface.o) + for channel in channels) + data_width = max(rtlink.get_data_width(channel.interface.o) + for channel in channels) + + layout = [ + ("channel", bits_for(len(channels)-1)), + ("timestamp", 64) + ] + if address_width: + layout.append(("address", address_width)) + if data_width: + layout.append(("data", data_width)) + + return layout + + +def seqn_width(lane_count, fifo_depth): + # There must be a unique sequence number for every possible event in every FIFO. + # Plus 2 bits to detect and handle wraparounds. + return bits_for(lane_count*fifo_depth-1) + 2 + + +def fifo_ingress(seqn_width, layout_payload): + return [ + ("we", 1, DIR_M_TO_S), + ("writable", 1, DIR_S_TO_M), + ("seqn", seqn_width, DIR_M_TO_S), + ("payload", [(a, b, DIR_M_TO_S) for a, b in layout_payload]) + ] + + +def fifo_egress(seqn_width, layout_payload): + return [ + ("re", 1, DIR_S_TO_M), + ("readable", 1, DIR_M_TO_S), + ("seqn", seqn_width, DIR_M_TO_S), + ("payload", [(a, b, DIR_M_TO_S) for a, b in layout_payload]) + ] + + +# We use glbl_fine_ts_width in the output network so that collisions due +# to insufficiently increasing timestamps are always reliably detected. +# We can still have undetected collisions on the address by making it wrap +# around, but those are more rare and easier to debug, and addresses are +# not normally exposed directly to the ARTIQ user. 
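+# For example (hypothetical widths): with 8 channels, glbl_fine_ts_width=3,
+# 8-bit addresses and 32-bit data, output_network_payload() returns
+# [("channel", 3), ("fine_ts", 3), ("address", 8), ("data", 32)].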
+def output_network_payload(channels, glbl_fine_ts_width): + address_width = max(rtlink.get_address_width(channel.interface.o) + for channel in channels) + data_width = max(rtlink.get_data_width(channel.interface.o) + for channel in channels) + + layout = [("channel", bits_for(len(channels)-1))] + if glbl_fine_ts_width: + layout.append(("fine_ts", glbl_fine_ts_width)) + if address_width: + layout.append(("address", address_width)) + if data_width: + layout.append(("data", data_width)) + + return layout + + +def output_network_node(seqn_width, layout_payload): + return [ + ("valid", 1), + ("seqn", seqn_width), + ("replace_occured", 1), + ("nondata_replace_occured", 1), + ("payload", layout_payload) + ] diff --git a/artiq/gateware/rtio/sed/output_driver.py b/artiq/gateware/rtio/sed/output_driver.py new file mode 100644 index 000000000..e98c87227 --- /dev/null +++ b/artiq/gateware/rtio/sed/output_driver.py @@ -0,0 +1,110 @@ +from functools import reduce +from operator import or_ + +from migen import * + +from artiq.gateware.rtio.sed import layouts +from artiq.gateware.rtio.sed.output_network import OutputNetwork + + +__all__ = ["OutputDriver"] + + +class OutputDriver(Module): + def __init__(self, channels, glbl_fine_ts_width, lane_count, seqn_width): + self.collision = Signal() + self.collision_channel = Signal(max=len(channels), reset_less=True) + self.busy = Signal() + self.busy_channel = Signal(max=len(channels), reset_less=True) + + # output network + layout_on_payload = layouts.output_network_payload(channels, glbl_fine_ts_width) + output_network = OutputNetwork(lane_count, seqn_width, layout_on_payload) + self.submodules += output_network + self.input = output_network.input + + # detect collisions (adds one pipeline stage) + layout_lane_data = [ + ("valid", 1), + ("collision", 1), + ("payload", layout_on_payload) + ] + lane_datas = [Record(layout_lane_data, reset_less=True) for _ in range(lane_count)] + en_replaces = [channel.interface.o.enable_replace for channel in channels] + for lane_data, on_output in zip(lane_datas, output_network.output): + lane_data.valid.reset_less = False + lane_data.collision.reset_less = False + replace_occured_r = Signal() + nondata_replace_occured_r = Signal() + self.sync += [ + lane_data.valid.eq(on_output.valid), + lane_data.payload.eq(on_output.payload), + replace_occured_r.eq(on_output.replace_occured), + nondata_replace_occured_r.eq(on_output.nondata_replace_occured) + ] + + en_replaces_rom = Memory(1, len(en_replaces), init=en_replaces) + en_replaces_rom_port = en_replaces_rom.get_port() + self.specials += en_replaces_rom, en_replaces_rom_port + self.comb += [ + en_replaces_rom_port.adr.eq(on_output.payload.channel), + lane_data.collision.eq(replace_occured_r & (~en_replaces_rom_port.dat_r | nondata_replace_occured_r)) + ] + + self.sync += [ + self.collision.eq(0), + self.collision_channel.eq(0) + ] + for lane_data in lane_datas: + self.sync += [ + If(lane_data.valid & lane_data.collision, + self.collision.eq(1), + self.collision_channel.eq(lane_data.payload.channel) + ) + ] + + # demultiplex channels (adds one pipeline stage) + for n, channel in enumerate(channels): + oif = channel.interface.o + + onehot_stb = [] + onehot_fine_ts = [] + onehot_address = [] + onehot_data = [] + for lane_data in lane_datas: + selected = Signal() + self.comb += selected.eq(lane_data.valid & ~lane_data.collision & (lane_data.payload.channel == n)) + onehot_stb.append(selected) + if hasattr(lane_data.payload, "fine_ts") and hasattr(oif, "fine_ts"): + ts_shift = 
len(lane_data.payload.fine_ts) - len(oif.fine_ts) + onehot_fine_ts.append(Mux(selected, lane_data.payload.fine_ts[ts_shift:], 0)) + if hasattr(lane_data.payload, "address"): + onehot_address.append(Mux(selected, lane_data.payload.address, 0)) + if hasattr(lane_data.payload, "data"): + onehot_data.append(Mux(selected, lane_data.payload.data, 0)) + + self.sync += oif.stb.eq(reduce(or_, onehot_stb)) + if hasattr(oif, "fine_ts"): + self.sync += oif.fine_ts.eq(reduce(or_, onehot_fine_ts)) + if hasattr(oif, "address"): + self.sync += oif.address.eq(reduce(or_, onehot_address)) + if hasattr(oif, "data"): + self.sync += oif.data.eq(reduce(or_, onehot_data)) + + # detect busy errors, at lane level to reduce muxing + self.sync += [ + self.busy.eq(0), + self.busy_channel.eq(0) + ] + for lane_data in lane_datas: + stb_r = Signal() + channel_r = Signal(max=len(channels), reset_less=True) + self.sync += [ + stb_r.eq(lane_data.valid & ~lane_data.collision), + channel_r.eq(lane_data.payload.channel), + + If(stb_r & Array(channel.interface.o.busy for channel in channels)[channel_r], + self.busy.eq(1), + self.busy_channel.eq(channel_r) + ) + ] diff --git a/artiq/gateware/rtio/sed/output_network.py b/artiq/gateware/rtio/sed/output_network.py new file mode 100644 index 000000000..e37f7f29c --- /dev/null +++ b/artiq/gateware/rtio/sed/output_network.py @@ -0,0 +1,105 @@ +from migen import * + +from artiq.gateware.rtio.sed import layouts + + +__all__ = ["latency", "OutputNetwork"] + + +# Based on: https://github.com/Bekbolatov/SortingNetworks/blob/master/src/main/js/gr.js +def boms_get_partner(n, l, p): + if p == 1: + return n ^ (1 << (l - 1)) + scale = 1 << (l - p) + box = 1 << p + sn = n//scale - n//scale//box*box + if sn == 0 or sn == (box - 1): + return n + if (sn % 2) == 0: + return n - scale + return n + scale + + +def boms_steps_pairs(lane_count): + d = log2_int(lane_count) + steps = [] + for l in range(1, d+1): + for p in range(1, l+1): + pairs = [] + for n in range(2**d): + partner = boms_get_partner(n, l, p) + if partner != n: + if partner > n: + pair = (n, partner) + else: + pair = (partner, n) + if pair not in pairs: + pairs.append(pair) + steps.append(pairs) + return steps + + +def latency(lane_count): + d = log2_int(lane_count) + return sum(l for l in range(1, d+1)) + + +def cmp_wrap(a, b): + return Mux((a[-2] == a[-1]) & (b[-2] == b[-1]) & (a[-1] != b[-1]), a[-1], a < b) + + +class OutputNetwork(Module): + def __init__(self, lane_count, seqn_width, layout_payload): + self.input = [Record(layouts.output_network_node(seqn_width, layout_payload)) + for _ in range(lane_count)] + self.output = None + + step_input = self.input + for step in boms_steps_pairs(lane_count): + step_output = [] + for i in range(lane_count): + rec = Record(layouts.output_network_node(seqn_width, layout_payload), + reset_less=True) + rec.valid.reset_less = False + step_output.append(rec) + + for node1, node2 in step: + nondata_difference = Signal() + for field, _ in layout_payload: + if field != "data": + f1 = getattr(step_input[node1].payload, field) + f2 = getattr(step_input[node2].payload, field) + self.comb += If(f1 != f2, nondata_difference.eq(1)) + + k1 = Cat(step_input[node1].payload.channel, ~step_input[node1].valid) + k2 = Cat(step_input[node2].payload.channel, ~step_input[node2].valid) + self.sync += [ + If(k1 == k2, + If(cmp_wrap(step_input[node1].seqn, step_input[node2].seqn), + step_output[node1].eq(step_input[node2]), + step_output[node2].eq(step_input[node1]) + ).Else( + step_output[node1].eq(step_input[node1]), 
+ step_output[node2].eq(step_input[node2]) + ), + step_output[node1].replace_occured.eq(1), + step_output[node1].nondata_replace_occured.eq(nondata_difference), + step_output[node2].valid.eq(0), + ).Elif(k1 < k2, + step_output[node1].eq(step_input[node1]), + step_output[node2].eq(step_input[node2]) + ).Else( + step_output[node1].eq(step_input[node2]), + step_output[node2].eq(step_input[node1]) + ) + ] + + unchanged = list(range(lane_count)) + for node1, node2 in step: + unchanged.remove(node1) + unchanged.remove(node2) + for node in unchanged: + self.sync += step_output[node].eq(step_input[node]) + + self.output = step_output + step_input = step_output diff --git a/artiq/gateware/rtio/tsc.py b/artiq/gateware/rtio/tsc.py new file mode 100644 index 000000000..e93744553 --- /dev/null +++ b/artiq/gateware/rtio/tsc.py @@ -0,0 +1,48 @@ +from migen import * + +from artiq.gateware.rtio.cdc import GrayCodeTransfer + + +class TSC(Module): + def __init__(self, mode, glbl_fine_ts_width=0): + self.glbl_fine_ts_width = glbl_fine_ts_width + + # in rtio domain + self.coarse_ts = Signal(64 - glbl_fine_ts_width) + self.full_ts = Signal(64) + + # in sys domain + # monotonic, may lag behind the counter in the IO clock domain, but + # not be ahead of it. + self.coarse_ts_sys = Signal.like(self.coarse_ts) + self.full_ts_sys = Signal(64) + + # in rtio domain + self.load = Signal() + self.load_value = Signal.like(self.coarse_ts) + + if mode == "async": + self.full_ts_cri = self.full_ts_sys + elif mode == "sync": + self.full_ts_cri = self.full_ts + else: + raise ValueError + + # # # + + self.sync.rtio += If(self.load, + self.coarse_ts.eq(self.load_value) + ).Else( + self.coarse_ts.eq(self.coarse_ts + 1) + ) + coarse_ts_cdc = GrayCodeTransfer(len(self.coarse_ts)) # from rtio to sys + self.submodules += coarse_ts_cdc + self.comb += [ + coarse_ts_cdc.i.eq(self.coarse_ts), + self.coarse_ts_sys.eq(coarse_ts_cdc.o) + ] + + self.comb += [ + self.full_ts.eq(self.coarse_ts << glbl_fine_ts_width), + self.full_ts_sys.eq(self.coarse_ts_sys << glbl_fine_ts_width) + ] diff --git a/artiq/gateware/serwb/__init__.py b/artiq/gateware/serwb/__init__.py deleted file mode 100644 index 3ebf3f028..000000000 --- a/artiq/gateware/serwb/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from artiq.gateware.serwb import s7phy, kusphy, phy, core, packet, etherbone diff --git a/artiq/gateware/serwb/core.py b/artiq/gateware/serwb/core.py deleted file mode 100644 index 61c76a884..000000000 --- a/artiq/gateware/serwb/core.py +++ /dev/null @@ -1,39 +0,0 @@ -from migen import * - -from misoc.interconnect import stream - -from artiq.gateware.serwb.packet import Depacketizer, Packetizer -from artiq.gateware.serwb.etherbone import Etherbone - - -class SERWBCore(Module): - def __init__(self, phy, clk_freq, mode): - self.submodules.etherbone = etherbone = Etherbone(mode) - depacketizer = Depacketizer(clk_freq) - packetizer = Packetizer() - self.submodules += depacketizer, packetizer - tx_cdc = stream.AsyncFIFO([("data", 32)], 32) - tx_cdc = ClockDomainsRenamer({"write": "sys", "read": "serwb_serdes"})(tx_cdc) - self.submodules += tx_cdc - rx_cdc = stream.AsyncFIFO([("data", 32)], 32) - rx_cdc = ClockDomainsRenamer({"write": "serwb_serdes", "read": "sys"})(rx_cdc) - self.submodules += rx_cdc - self.comb += [ - # core <--> etherbone - depacketizer.source.connect(etherbone.sink), - etherbone.source.connect(packetizer.sink), - - # core --> serdes - packetizer.source.connect(tx_cdc.sink), - If(phy.init.ready, - If(tx_cdc.source.stb, - 
phy.serdes.tx_data.eq(tx_cdc.source.data) - ), - tx_cdc.source.ack.eq(1) - ), - - # serdes --> core - rx_cdc.sink.stb.eq(phy.init.ready), - rx_cdc.sink.data.eq(phy.serdes.rx_data), - rx_cdc.source.connect(depacketizer.sink), - ] diff --git a/artiq/gateware/serwb/etherbone.py b/artiq/gateware/serwb/etherbone.py deleted file mode 100644 index d69baf923..000000000 --- a/artiq/gateware/serwb/etherbone.py +++ /dev/null @@ -1,741 +0,0 @@ -""" -Etherbone - -CERN's Etherbone protocol is initially used to run a Wishbone bus over an -ethernet network. This re-implementation is meant to be run over serdes -and introduces some limitations: -- no probing (pf/pr) -- no address spaces (rca/bca/wca/wff) -- 32bits data and address -- 1 record per frame -""" - -from migen import * - -from misoc.interconnect import stream -from misoc.interconnect import wishbone - -from artiq.gateware.serwb.packet import * - - -class _Packetizer(Module): - def __init__(self, sink_description, source_description, header): - self.sink = sink = stream.Endpoint(sink_description) - self.source = source = stream.Endpoint(source_description) - self.header = Signal(header.length*8) - - # # # - - dw = len(self.sink.data) - - header_reg = Signal(header.length*8, reset_less=True) - header_words = (header.length*8)//dw - load = Signal() - shift = Signal() - counter = Signal(max=max(header_words, 2)) - counter_reset = Signal() - counter_ce = Signal() - self.sync += \ - If(counter_reset, - counter.eq(0) - ).Elif(counter_ce, - counter.eq(counter + 1) - ) - - self.comb += header.encode(sink, self.header) - if header_words == 1: - self.sync += [ - If(load, - header_reg.eq(self.header) - ) - ] - else: - self.sync += [ - If(load, - header_reg.eq(self.header) - ).Elif(shift, - header_reg.eq(Cat(header_reg[dw:], Signal(dw))) - ) - ] - - fsm = FSM(reset_state="IDLE") - self.submodules += fsm - - if header_words == 1: - idle_next_state = "COPY" - else: - idle_next_state = "SEND_HEADER" - - fsm.act("IDLE", - sink.ack.eq(1), - counter_reset.eq(1), - If(sink.stb, - sink.ack.eq(0), - source.stb.eq(1), - source.eop.eq(0), - source.data.eq(self.header[:dw]), - If(source.stb & source.ack, - load.eq(1), - NextState(idle_next_state) - ) - ) - ) - if header_words != 1: - fsm.act("SEND_HEADER", - source.stb.eq(1), - source.eop.eq(0), - source.data.eq(header_reg[dw:2*dw]), - If(source.stb & source.ack, - shift.eq(1), - counter_ce.eq(1), - If(counter == header_words-2, - NextState("COPY") - ) - ) - ) - if hasattr(sink, "error"): - self.comb += source.error.eq(sink.error) - fsm.act("COPY", - source.stb.eq(sink.stb), - source.eop.eq(sink.eop), - source.data.eq(sink.data), - If(source.stb & source.ack, - sink.ack.eq(1), - If(source.eop, - NextState("IDLE") - ) - ) - ) - - -class _Depacketizer(Module): - def __init__(self, sink_description, source_description, header): - self.sink = sink = stream.Endpoint(sink_description) - self.source = source = stream.Endpoint(source_description) - self.header = Signal(header.length*8) - - # # # - - dw = len(sink.data) - - header_reg = Signal(header.length*8, reset_less=True) - header_words = (header.length*8)//dw - - shift = Signal() - counter = Signal(max=max(header_words, 2)) - counter_reset = Signal() - counter_ce = Signal() - self.sync += \ - If(counter_reset, - counter.eq(0) - ).Elif(counter_ce, - counter.eq(counter + 1) - ) - - if header_words == 1: - self.sync += \ - If(shift, - header_reg.eq(sink.data) - ) - else: - self.sync += \ - If(shift, - header_reg.eq(Cat(header_reg[dw:], sink.data)) - ) - self.comb += 
self.header.eq(header_reg) - - fsm = FSM(reset_state="IDLE") - self.submodules += fsm - - if header_words == 1: - idle_next_state = "COPY" - else: - idle_next_state = "RECEIVE_HEADER" - - fsm.act("IDLE", - sink.ack.eq(1), - counter_reset.eq(1), - If(sink.stb, - shift.eq(1), - NextState(idle_next_state) - ) - ) - if header_words != 1: - fsm.act("RECEIVE_HEADER", - sink.ack.eq(1), - If(sink.stb, - counter_ce.eq(1), - shift.eq(1), - If(counter == header_words-2, - NextState("COPY") - ) - ) - ) - no_payload = Signal() - self.sync += \ - If(fsm.before_entering("COPY"), - no_payload.eq(sink.eop) - ) - - if hasattr(sink, "error"): - self.comb += source.error.eq(sink.error) - self.comb += [ - source.eop.eq(sink.eop | no_payload), - source.data.eq(sink.data), - header.decode(self.header, source) - ] - fsm.act("COPY", - sink.ack.eq(source.ack), - source.stb.eq(sink.stb | no_payload), - If(source.stb & source.ack & source.eop, - NextState("IDLE") - ) - ) - - -etherbone_magic = 0x4e6f -etherbone_version = 1 -etherbone_packet_header_length = 8 -etherbone_packet_header_fields = { - "magic": HeaderField(0, 0, 16), - - "version": HeaderField(2, 4, 4), - "nr": HeaderField(2, 2, 1), - "pr": HeaderField(2, 1, 1), # unused - "pf": HeaderField(2, 0, 1), # unused - - "addr_size": HeaderField(3, 4, 4), # static - "port_size": HeaderField(3, 0, 4) # static -} -etherbone_packet_header = Header(etherbone_packet_header_fields, - etherbone_packet_header_length, - swap_field_bytes=True) - -etherbone_record_header_length = 4 -etherbone_record_header_fields = { - "bca": HeaderField(0, 0, 1), # unused - "rca": HeaderField(0, 1, 1), # unused - "rff": HeaderField(0, 2, 1), # unused - "cyc": HeaderField(0, 4, 1), # unused - "wca": HeaderField(0, 5, 1), # unused - "wff": HeaderField(0, 6, 1), # unused - - "byte_enable": HeaderField(1, 0, 8), - - "wcount": HeaderField(2, 0, 8), - - "rcount": HeaderField(3, 0, 8) -} -etherbone_record_header = Header(etherbone_record_header_fields, - etherbone_record_header_length, - swap_field_bytes=True) - -def _remove_from_layout(layout, *args): - r = [] - for f in layout: - remove = False - for arg in args: - if f[0] == arg: - remove = True - if not remove: - r.append(f) - return r - -def etherbone_packet_description(dw): - layout = etherbone_packet_header.get_layout() - layout += [("data", dw)] - return stream.EndpointDescription(layout) - -def etherbone_packet_user_description(dw): - layout = etherbone_packet_header.get_layout() - layout = _remove_from_layout(layout, - "magic", - "portsize", - "addrsize", - "version") - layout += user_description(dw).payload_layout - return stream.EndpointDescription(layout) - -def etherbone_record_description(dw): - layout = etherbone_record_header.get_layout() - layout += [("data", dw)] - return stream.EndpointDescription(layout) - -def etherbone_mmap_description(dw): - layout = [ - ("we", 1), - ("count", 8), - ("base_addr", 32), - ("be", dw//8), - ("addr", 32), - ("data", dw) - ] - return stream.EndpointDescription(layout) - - -# etherbone packet - -class _EtherbonePacketPacketizer(_Packetizer): - def __init__(self): - _Packetizer.__init__(self, - etherbone_packet_description(32), - user_description(32), - etherbone_packet_header) - - -class _EtherbonePacketTX(Module): - def __init__(self): - self.sink = sink = stream.Endpoint(etherbone_packet_user_description(32)) - self.source = source = stream.Endpoint(user_description(32)) - - # # # - - self.submodules.packetizer = packetizer = _EtherbonePacketPacketizer() - self.comb += [ - 
packetizer.sink.stb.eq(sink.stb), - packetizer.sink.eop.eq(sink.eop), - sink.ack.eq(packetizer.sink.ack), - - packetizer.sink.magic.eq(etherbone_magic), - packetizer.sink.port_size.eq(32//8), - packetizer.sink.addr_size.eq(32//8), - packetizer.sink.nr.eq(sink.nr), - packetizer.sink.version.eq(etherbone_version), - - packetizer.sink.data.eq(sink.data) - ] - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - packetizer.source.ack.eq(1), - If(packetizer.source.stb, - packetizer.source.ack.eq(0), - NextState("SEND") - ) - ) - fsm.act("SEND", - packetizer.source.connect(source), - source.length.eq(sink.length + etherbone_packet_header.length), - If(source.stb & source.eop & source.ack, - NextState("IDLE") - ) - ) - - -class _EtherbonePacketDepacketizer(_Depacketizer): - def __init__(self): - _Depacketizer.__init__(self, - user_description(32), - etherbone_packet_description(32), - etherbone_packet_header) - - -class _EtherbonePacketRX(Module): - def __init__(self): - self.sink = sink = stream.Endpoint(user_description(32)) - self.source = source = stream.Endpoint(etherbone_packet_user_description(32)) - - # # # - - self.submodules.depacketizer = depacketizer = _EtherbonePacketDepacketizer() - self.comb += sink.connect(depacketizer.sink) - - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - depacketizer.source.ack.eq(1), - If(depacketizer.source.stb, - depacketizer.source.ack.eq(0), - NextState("CHECK") - ) - ) - stb = Signal() - self.sync += stb.eq( - depacketizer.source.stb & - (depacketizer.source.magic == etherbone_magic) - ) - fsm.act("CHECK", - If(stb, - NextState("PRESENT") - ).Else( - NextState("DROP") - ) - ) - self.comb += [ - source.eop.eq(depacketizer.source.eop), - - source.nr.eq(depacketizer.source.nr), - - source.data.eq(depacketizer.source.data), - - source.length.eq(sink.length - etherbone_packet_header.length) - ] - fsm.act("PRESENT", - source.stb.eq(depacketizer.source.stb), - depacketizer.source.ack.eq(source.ack), - If(source.stb & source.eop & source.ack, - NextState("IDLE") - ) - ) - fsm.act("DROP", - depacketizer.source.ack.eq(1), - If(depacketizer.source.stb & - depacketizer.source.eop & - depacketizer.source.ack, - NextState("IDLE") - ) - ) - - -class _EtherbonePacket(Module): - def __init__(self, port_sink, port_source): - self.submodules.tx = tx = _EtherbonePacketTX() - self.submodules.rx = rx = _EtherbonePacketRX() - self.comb += [ - tx.source.connect(port_sink), - port_source.connect(rx.sink) - ] - self.sink, self.source = self.tx.sink, self.rx.source - -# etherbone record - -class _EtherboneRecordPacketizer(_Packetizer): - def __init__(self): - _Packetizer.__init__(self, - etherbone_record_description(32), - etherbone_packet_user_description(32), - etherbone_record_header) - - -class _EtherboneRecordDepacketizer(_Depacketizer): - def __init__(self): - _Depacketizer.__init__(self, - etherbone_packet_user_description(32), - etherbone_record_description(32), - etherbone_record_header) - - -class _EtherboneRecordReceiver(Module): - def __init__(self, buffer_depth=256): - self.sink = sink = stream.Endpoint(etherbone_record_description(32)) - self.source = source = stream.Endpoint(etherbone_mmap_description(32)) - - # # # - - fifo = stream.SyncFIFO(etherbone_record_description(32), buffer_depth, - buffered=True) - self.submodules += fifo - self.comb += sink.connect(fifo.sink) - - base_addr = Signal(32) - base_addr_update = Signal() - self.sync += If(base_addr_update, base_addr.eq(fifo.source.data)) - - counter = Signal(max=512) 
- counter_reset = Signal() - counter_ce = Signal() - self.sync += \ - If(counter_reset, - counter.eq(0) - ).Elif(counter_ce, - counter.eq(counter + 1) - ) - - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - fifo.source.ack.eq(1), - counter_reset.eq(1), - If(fifo.source.stb, - base_addr_update.eq(1), - If(fifo.source.wcount, - NextState("RECEIVE_WRITES") - ).Elif(fifo.source.rcount, - NextState("RECEIVE_READS") - - ) - ) - ) - fsm.act("RECEIVE_WRITES", - source.stb.eq(fifo.source.stb), - source.eop.eq(counter == fifo.source.wcount-1), - source.count.eq(fifo.source.wcount), - source.be.eq(fifo.source.byte_enable), - source.addr.eq(base_addr[2:] + counter), - source.we.eq(1), - source.data.eq(fifo.source.data), - fifo.source.ack.eq(source.ack), - If(source.stb & source.ack, - counter_ce.eq(1), - If(source.eop, - If(fifo.source.rcount, - NextState("RECEIVE_BASE_RET_ADDR") - ).Else( - NextState("IDLE") - ) - ) - ) - ) - fsm.act("RECEIVE_BASE_RET_ADDR", - counter_reset.eq(1), - If(fifo.source.stb, - base_addr_update.eq(1), - NextState("RECEIVE_READS") - ) - ) - fsm.act("RECEIVE_READS", - source.stb.eq(fifo.source.stb), - source.eop.eq(counter == fifo.source.rcount-1), - source.count.eq(fifo.source.rcount), - source.base_addr.eq(base_addr), - source.addr.eq(fifo.source.data[2:]), - fifo.source.ack.eq(source.ack), - If(source.stb & source.ack, - counter_ce.eq(1), - If(source.eop, - NextState("IDLE") - ) - ) - ) - - -class _EtherboneRecordSender(Module): - def __init__(self, buffer_depth=256): - self.sink = sink = stream.Endpoint(etherbone_mmap_description(32)) - self.source = source = stream.Endpoint(etherbone_record_description(32)) - - # # # - - pbuffer = stream.SyncFIFO(etherbone_mmap_description(32), buffer_depth, - buffered=True) - self.submodules += pbuffer - self.comb += sink.connect(pbuffer.sink) - - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - pbuffer.source.ack.eq(1), - If(pbuffer.source.stb, - pbuffer.source.ack.eq(0), - NextState("SEND_BASE_ADDRESS") - ) - ) - self.comb += [ - source.byte_enable.eq(pbuffer.source.be), - If(pbuffer.source.we, - source.wcount.eq(pbuffer.source.count) - ).Else( - source.rcount.eq(pbuffer.source.count) - ) - ] - - fsm.act("SEND_BASE_ADDRESS", - source.stb.eq(pbuffer.source.stb), - source.eop.eq(0), - source.data.eq(pbuffer.source.base_addr), - If(source.ack, - NextState("SEND_DATA") - ) - ) - fsm.act("SEND_DATA", - source.stb.eq(pbuffer.source.stb), - source.eop.eq(pbuffer.source.eop), - source.data.eq(pbuffer.source.data), - If(source.stb & source.ack, - pbuffer.source.ack.eq(1), - If(source.eop, - NextState("IDLE") - ) - ) - ) - - -class _EtherboneRecord(Module): - def __init__(self): - self.sink = sink = stream.Endpoint(etherbone_packet_user_description(32)) - self.source = source = stream.Endpoint(etherbone_packet_user_description(32)) - - # # # - - # receive record, decode it and generate mmap stream - self.submodules.depacketizer = depacketizer = _EtherboneRecordDepacketizer() - self.submodules.receiver = receiver = _EtherboneRecordReceiver() - self.comb += [ - sink.connect(depacketizer.sink), - depacketizer.source.connect(receiver.sink) - ] - - # receive mmap stream, encode it and send records - self.submodules.sender = sender = _EtherboneRecordSender() - self.submodules.packetizer = packetizer = _EtherboneRecordPacketizer() - self.comb += [ - sender.source.connect(packetizer.sink), - packetizer.source.connect(source), - source.length.eq(etherbone_record_header.length + - (sender.source.wcount != 
0)*4 + sender.source.wcount*4 + - (sender.source.rcount != 0)*4 + sender.source.rcount*4) - ] - - -# etherbone wishbone - -class _EtherboneWishboneMaster(Module): - def __init__(self): - self.sink = sink = stream.Endpoint(etherbone_mmap_description(32)) - self.source = source = stream.Endpoint(etherbone_mmap_description(32)) - self.bus = bus = wishbone.Interface() - - # # # - - data = Signal(32) - data_update = Signal() - self.sync += If(data_update, data.eq(bus.dat_r)) - - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - sink.ack.eq(1), - If(sink.stb, - sink.ack.eq(0), - If(sink.we, - NextState("WRITE_DATA") - ).Else( - NextState("READ_DATA") - ) - ) - ) - fsm.act("WRITE_DATA", - bus.adr.eq(sink.addr), - bus.dat_w.eq(sink.data), - bus.sel.eq(sink.be), - bus.stb.eq(sink.stb), - bus.we.eq(1), - bus.cyc.eq(1), - If(bus.stb & bus.ack, - sink.ack.eq(1), - If(sink.eop, - NextState("IDLE") - ) - ) - ) - fsm.act("READ_DATA", - bus.adr.eq(sink.addr), - bus.sel.eq(sink.be), - bus.stb.eq(sink.stb), - bus.cyc.eq(1), - If(bus.stb & bus.ack, - data_update.eq(1), - NextState("SEND_DATA") - ) - ) - fsm.act("SEND_DATA", - source.stb.eq(sink.stb), - source.eop.eq(sink.eop), - source.base_addr.eq(sink.base_addr), - source.addr.eq(sink.addr), - source.count.eq(sink.count), - source.be.eq(sink.be), - source.we.eq(1), - source.data.eq(data), - If(source.stb & source.ack, - sink.ack.eq(1), - If(source.eop, - NextState("IDLE") - ).Else( - NextState("READ_DATA") - ) - ) - ) - - -class _EtherboneWishboneSlave(Module): - def __init__(self): - self.bus = bus = wishbone.Interface() - self.ready = Signal(reset=1) - self.sink = sink = stream.Endpoint(etherbone_mmap_description(32)) - self.source = source = stream.Endpoint(etherbone_mmap_description(32)) - - # # # - - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - sink.ack.eq(1), - If(bus.stb & bus.cyc, - If(self.ready, - If(bus.we, - NextState("SEND_WRITE") - ).Else( - NextState("SEND_READ") - ) - ).Else( - NextState("SEND_ERROR") - ) - ) - ) - fsm.act("SEND_WRITE", - If(~self.ready, - NextState("SEND_ERROR") - ).Else( - source.stb.eq(1), - source.eop.eq(1), - source.base_addr[2:].eq(bus.adr), - source.count.eq(1), - source.be.eq(bus.sel), - source.we.eq(1), - source.data.eq(bus.dat_w), - If(source.stb & source.ack, - bus.ack.eq(1), - NextState("IDLE") - ) - ) - ) - fsm.act("SEND_READ", - If(~self.ready, - NextState("SEND_ERROR") - ).Else( - source.stb.eq(1), - source.eop.eq(1), - source.base_addr.eq(0), - source.count.eq(1), - source.be.eq(bus.sel), - source.we.eq(0), - source.data[2:].eq(bus.adr), - If(source.stb & source.ack, - NextState("WAIT_READ") - ) - ) - ) - fsm.act("WAIT_READ", - sink.ack.eq(1), - If(~self.ready, - NextState("SEND_ERROR") - ).Elif(sink.stb & sink.we, - bus.ack.eq(1), - bus.dat_r.eq(sink.data), - NextState("IDLE") - ) - ) - fsm.act("SEND_ERROR", - bus.ack.eq(1), - bus.err.eq(1) - ) - -# etherbone - -class Etherbone(Module): - def __init__(self, mode="master"): - self.sink = sink = stream.Endpoint(user_description(32)) - self.source = source = stream.Endpoint(user_description(32)) - - # # # - - self.submodules.packet = _EtherbonePacket(source, sink) - self.submodules.record = _EtherboneRecord() - if mode == "master": - self.submodules.wishbone = _EtherboneWishboneMaster() - elif mode == "slave": - self.submodules.wishbone = _EtherboneWishboneSlave() - else: - raise ValueError - - self.comb += [ - self.packet.source.connect(self.record.sink), - self.record.source.connect(self.packet.sink), - 
self.record.receiver.source.connect(self.wishbone.sink), - self.wishbone.source.connect(self.record.sender.sink) - ] diff --git a/artiq/gateware/serwb/kusphy.py b/artiq/gateware/serwb/kusphy.py deleted file mode 100644 index f4fd6cd6c..000000000 --- a/artiq/gateware/serwb/kusphy.py +++ /dev/null @@ -1,227 +0,0 @@ -from migen import * -from migen.genlib.resetsync import AsyncResetSynchronizer -from migen.genlib.cdc import MultiReg, PulseSynchronizer, Gearbox -from migen.genlib.misc import BitSlip - -from misoc.cores.code_8b10b import Encoder, Decoder - - -class KUSSerdes(Module): - def __init__(self, pll, pads, mode="master"): - self.tx_data = Signal(32) - self.rx_data = Signal(32) - - self.tx_idle = Signal() - self.tx_comma = Signal() - self.rx_idle = Signal() - self.rx_comma = Signal() - - self.rx_bitslip_value = Signal(6) - self.rx_delay_rst = Signal() - self.rx_delay_inc = Signal() - self.rx_delay_ce = Signal() - self.rx_delay_en_vtc = Signal() - - # # # - - self.submodules.encoder = ClockDomainsRenamer("serwb_serdes")( - Encoder(4, True)) - self.decoders = [ClockDomainsRenamer("serwb_serdes")( - Decoder(True)) for _ in range(4)] - self.submodules += self.decoders - - # clocking: - - # In master mode: - # - linerate/10 pll refclk provided by user - # - linerate/10 slave refclk generated on clk_pads - # In Slave mode: - # - linerate/10 pll refclk provided by clk_pads - self.clock_domains.cd_serwb_serdes = ClockDomain() - self.clock_domains.cd_serwb_serdes_5x = ClockDomain() - self.clock_domains.cd_serwb_serdes_20x = ClockDomain(reset_less=True) - self.comb += [ - self.cd_serwb_serdes.clk.eq(pll.serwb_serdes_clk), - self.cd_serwb_serdes_5x.clk.eq(pll.serwb_serdes_5x_clk), - self.cd_serwb_serdes_20x.clk.eq(pll.serwb_serdes_20x_clk) - ] - self.specials += AsyncResetSynchronizer(self.cd_serwb_serdes, ~pll.lock) - self.specials += AsyncResetSynchronizer(self.cd_serwb_serdes_5x, ~pll.lock) - - # control/status cdc - tx_idle = Signal() - tx_comma = Signal() - rx_idle = Signal() - rx_comma = Signal() - rx_bitslip_value = Signal(6) - rx_delay_rst = Signal() - rx_delay_inc = Signal() - rx_delay_en_vtc = Signal() - rx_delay_ce = Signal() - self.specials += [ - MultiReg(self.tx_idle, tx_idle, "serwb_serdes"), - MultiReg(self.tx_comma, tx_comma, "serwb_serdes"), - MultiReg(rx_idle, self.rx_idle, "sys"), - MultiReg(rx_comma, self.rx_comma, "sys"), - MultiReg(self.rx_bitslip_value, rx_bitslip_value, "serwb_serdes"), - MultiReg(self.rx_delay_inc, rx_delay_inc, "serwb_serdes_5x"), - MultiReg(self.rx_delay_en_vtc, rx_delay_en_vtc, "serwb_serdes_5x") - ] - self.submodules.do_rx_delay_rst = PulseSynchronizer("sys", "serwb_serdes_5x") - self.comb += [ - rx_delay_rst.eq(self.do_rx_delay_rst.o), - self.do_rx_delay_rst.i.eq(self.rx_delay_rst) - ] - self.submodules.do_rx_delay_ce = PulseSynchronizer("sys", "serwb_serdes_5x") - self.comb += [ - rx_delay_ce.eq(self.do_rx_delay_ce.o), - self.do_rx_delay_ce.i.eq(self.rx_delay_ce) - ] - - # tx clock (linerate/10) - if mode == "master": - self.submodules.tx_clk_gearbox = Gearbox(40, "serwb_serdes", 8, "serwb_serdes_5x") - self.comb += self.tx_clk_gearbox.i.eq((0b1111100000 << 30) | - (0b1111100000 << 20) | - (0b1111100000 << 10) | - (0b1111100000 << 0)) - clk_o = Signal() - self.specials += [ - Instance("OSERDESE3", - p_DATA_WIDTH=8, p_INIT=0, - p_IS_CLK_INVERTED=0, p_IS_CLKDIV_INVERTED=0, p_IS_RST_INVERTED=0, - - o_OQ=clk_o, - i_RST=ResetSignal("serwb_serdes"), - i_CLK=ClockSignal("serwb_serdes_20x"), i_CLKDIV=ClockSignal("serwb_serdes_5x"), - 
i_D=self.tx_clk_gearbox.o - ), - Instance("OBUFDS", - i_I=clk_o, - o_O=pads.clk_p, - o_OB=pads.clk_n - ) - ] - - # tx datapath - # tx_data -> encoders -> gearbox -> serdes - self.submodules.tx_gearbox = Gearbox(40, "serwb_serdes", 8, "serwb_serdes_5x") - self.comb += [ - If(tx_comma, - self.encoder.k[0].eq(1), - self.encoder.d[0].eq(0xbc) - ).Else( - self.encoder.d[0].eq(self.tx_data[0:8]), - self.encoder.d[1].eq(self.tx_data[8:16]), - self.encoder.d[2].eq(self.tx_data[16:24]), - self.encoder.d[3].eq(self.tx_data[24:32]) - ) - ] - self.sync.serwb_serdes += \ - If(tx_idle, - self.tx_gearbox.i.eq(0) - ).Else( - self.tx_gearbox.i.eq(Cat(*[self.encoder.output[i] for i in range(4)])) - ) - - serdes_o = Signal() - self.specials += [ - Instance("OSERDESE3", - p_DATA_WIDTH=8, p_INIT=0, - p_IS_CLK_INVERTED=0, p_IS_CLKDIV_INVERTED=0, p_IS_RST_INVERTED=0, - - o_OQ=serdes_o, - i_RST=ResetSignal("serwb_serdes"), - i_CLK=ClockSignal("serwb_serdes_20x"), i_CLKDIV=ClockSignal("serwb_serdes_5x"), - i_D=self.tx_gearbox.o - ), - Instance("OBUFDS", - i_I=serdes_o, - o_O=pads.tx_p, - o_OB=pads.tx_n - ) - ] - - # rx clock - use_bufr = True - if mode == "slave": - clk_i = Signal() - clk_i_bufg = Signal() - self.specials += [ - Instance("IBUFDS", - i_I=pads.clk_p, - i_IB=pads.clk_n, - o_O=clk_i - ) - ] - if use_bufr: - clk_i_bufr = Signal() - self.specials += [ - Instance("BUFR", i_I=clk_i, o_O=clk_i_bufr), - Instance("BUFG", i_I=clk_i_bufr, o_O=clk_i_bufg) - ] - else: - self.specials += Instance("BUFG", i_I=clk_i, o_O=clk_i_bufg) - self.comb += pll.refclk.eq(clk_i_bufg) - - # rx datapath - # serdes -> gearbox -> bitslip -> decoders -> rx_data - self.submodules.rx_gearbox = Gearbox(8, "serwb_serdes_5x", 40, "serwb_serdes") - self.submodules.rx_bitslip = ClockDomainsRenamer("serwb_serdes")(BitSlip(40)) - - serdes_i_nodelay = Signal() - self.specials += [ - Instance("IBUFDS_DIFF_OUT", - i_I=pads.rx_p, - i_IB=pads.rx_n, - o_O=serdes_i_nodelay - ) - ] - - serdes_i_delayed = Signal() - serdes_q = Signal(8) - self.specials += [ - Instance("IDELAYE3", - p_CASCADE="NONE", p_UPDATE_MODE="ASYNC", p_REFCLK_FREQUENCY=200.0, - p_IS_CLK_INVERTED=0, p_IS_RST_INVERTED=0, - p_DELAY_FORMAT="COUNT", p_DELAY_SRC="IDATAIN", - p_DELAY_TYPE="VARIABLE", p_DELAY_VALUE=0, - - i_CLK=ClockSignal("serwb_serdes_5x"), - i_RST=rx_delay_rst, i_LOAD=0, - i_INC=rx_delay_inc, i_EN_VTC=rx_delay_en_vtc, - i_CE=rx_delay_ce, - - i_IDATAIN=serdes_i_nodelay, o_DATAOUT=serdes_i_delayed - ), - Instance("ISERDESE3", - p_IS_CLK_INVERTED=0, - p_IS_CLK_B_INVERTED=1, - p_DATA_WIDTH=8, - - i_D=serdes_i_delayed, - i_RST=ResetSignal("serwb_serdes"), - i_FIFO_RD_CLK=0, i_FIFO_RD_EN=0, - i_CLK=ClockSignal("serwb_serdes_20x"), - i_CLK_B=ClockSignal("serwb_serdes_20x"), # locally inverted - i_CLKDIV=ClockSignal("serwb_serdes_5x"), - o_Q=serdes_q - ) - ] - - self.comb += [ - self.rx_gearbox.i.eq(serdes_q), - self.rx_bitslip.value.eq(rx_bitslip_value), - self.rx_bitslip.i.eq(self.rx_gearbox.o), - self.decoders[0].input.eq(self.rx_bitslip.o[0:10]), - self.decoders[1].input.eq(self.rx_bitslip.o[10:20]), - self.decoders[2].input.eq(self.rx_bitslip.o[20:30]), - self.decoders[3].input.eq(self.rx_bitslip.o[30:40]), - self.rx_data.eq(Cat(*[self.decoders[i].d for i in range(4)])), - rx_idle.eq(self.rx_bitslip.o == 0), - rx_comma.eq(((self.decoders[0].d == 0xbc) & (self.decoders[0].k == 1)) & - ((self.decoders[1].d == 0x00) & (self.decoders[1].k == 0)) & - ((self.decoders[2].d == 0x00) & (self.decoders[2].k == 0)) & - ((self.decoders[3].d == 0x00) & (self.decoders[3].k == 0))) - 
- ] diff --git a/artiq/gateware/serwb/packet.py b/artiq/gateware/serwb/packet.py deleted file mode 100644 index c7650087a..000000000 --- a/artiq/gateware/serwb/packet.py +++ /dev/null @@ -1,172 +0,0 @@ -from math import ceil - -from migen import * -from migen.genlib.misc import WaitTimer - -from misoc.interconnect import stream - - -def reverse_bytes(signal): - n = ceil(len(signal)/8) - return Cat(iter([signal[i*8:(i+1)*8] for i in reversed(range(n))])) - - -class HeaderField: - def __init__(self, byte, offset, width): - self.byte = byte - self.offset = offset - self.width = width - - -class Header: - def __init__(self, fields, length, swap_field_bytes=True): - self.fields = fields - self.length = length - self.swap_field_bytes = swap_field_bytes - - def get_layout(self): - layout = [] - for k, v in sorted(self.fields.items()): - layout.append((k, v.width)) - return layout - - def get_field(self, obj, name, width): - if "_lsb" in name: - field = getattr(obj, name.replace("_lsb", ""))[:width] - elif "_msb" in name: - field = getattr(obj, name.replace("_msb", ""))[width:2*width] - else: - field = getattr(obj, name) - if len(field) != width: - raise ValueError("Width mismatch on " + name + " field") - return field - - def encode(self, obj, signal): - r = [] - for k, v in sorted(self.fields.items()): - start = v.byte*8 + v.offset - end = start + v.width - field = self.get_field(obj, k, v.width) - if self.swap_field_bytes: - field = reverse_bytes(field) - r.append(signal[start:end].eq(field)) - return r - - def decode(self, signal, obj): - r = [] - for k, v in sorted(self.fields.items()): - start = v.byte*8 + v.offset - end = start + v.width - field = self.get_field(obj, k, v.width) - if self.swap_field_bytes: - r.append(field.eq(reverse_bytes(signal[start:end]))) - else: - r.append(field.eq(signal[start:end])) - return r - -def phy_description(dw): - layout = [("data", dw)] - return stream.EndpointDescription(layout) - - -def user_description(dw): - layout = [ - ("data", 32), - ("length", 32) - ] - return stream.EndpointDescription(layout) - - -class Packetizer(Module): - def __init__(self): - self.sink = sink = stream.Endpoint(user_description(32)) - self.source = source = stream.Endpoint(phy_description(32)) - - # # # - - # Packet description - # - preamble : 4 bytes - # - length : 4 bytes - # - payload - - fsm = FSM(reset_state="IDLE") - self.submodules += fsm - - fsm.act("IDLE", - If(sink.stb, - NextState("INSERT_PREAMBLE") - ) - ) - fsm.act("INSERT_PREAMBLE", - source.stb.eq(1), - source.data.eq(0x5aa55aa5), - If(source.ack, - NextState("INSERT_LENGTH") - ) - ) - fsm.act("INSERT_LENGTH", - source.stb.eq(1), - source.data.eq(sink.length), - If(source.ack, - NextState("COPY") - ) - ) - fsm.act("COPY", - source.stb.eq(sink.stb), - source.data.eq(sink.data), - sink.ack.eq(source.ack), - If(source.ack & sink.eop, - NextState("IDLE") - ) - ) - - -class Depacketizer(Module): - def __init__(self, clk_freq, timeout=10): - self.sink = sink = stream.Endpoint(phy_description(32)) - self.source = source = stream.Endpoint(user_description(32)) - - # # # - - # Packet description - # - preamble : 4 bytes - # - length : 4 bytes - # - payload - - fsm = FSM(reset_state="IDLE") - self.submodules += fsm - - self.submodules.timer = WaitTimer(clk_freq*timeout) - self.comb += self.timer.wait.eq(~fsm.ongoing("IDLE")) - - fsm.act("IDLE", - sink.ack.eq(1), - If(sink.stb & (sink.data == 0x5aa55aa5), - NextState("RECEIVE_LENGTH") - ) - ) - fsm.act("RECEIVE_LENGTH", - sink.ack.eq(1), - If(sink.stb, - 
NextValue(source.length, sink.data), - NextState("COPY") - ) - ) - eop = Signal() - cnt = Signal(32) - fsm.act("COPY", - source.stb.eq(sink.stb), - source.eop.eq(eop), - source.data.eq(sink.data), - sink.ack.eq(source.ack), - If((source.stb & source.ack & eop) | self.timer.done, - NextState("IDLE") - ) - ) - self.sync += \ - If(fsm.ongoing("IDLE"), - cnt.eq(0) - ).Elif(source.stb & source.ack, - cnt.eq(cnt + 1) - ) - self.comb += eop.eq(cnt == source.length[2:] - 1) diff --git a/artiq/gateware/serwb/phy.py b/artiq/gateware/serwb/phy.py deleted file mode 100644 index d48084116..000000000 --- a/artiq/gateware/serwb/phy.py +++ /dev/null @@ -1,393 +0,0 @@ -from migen import * -from migen.genlib.cdc import MultiReg, PulseSynchronizer -from migen.genlib.misc import WaitTimer - -from misoc.interconnect.csr import * - -from artiq.gateware.serwb.kusphy import KUSSerdes -from artiq.gateware.serwb.s7phy import S7Serdes - - -# Master <--> Slave synchronization: -# 1) Master sends idle pattern (zeroes) to reset Slave. -# 2) Master sends K28.5 commas to allow Slave to calibrate, Slave sends idle pattern. -# 3) Slave sends K28.5 commas to allow Master to calibrate, Master sends K28.5 commas. -# 4) Master stops sending K28.5 commas. -# 5) Slave stops sending K28.5 commas. -# 6) Link is ready. - -class _SerdesMasterInit(Module): - def __init__(self, serdes, taps, timeout=1024): - self.reset = Signal() - self.ready = Signal() - self.error = Signal() - - # # # - - self.delay = delay = Signal(max=taps) - self.delay_min = delay_min = Signal(max=taps) - self.delay_min_found = delay_min_found = Signal() - self.delay_max = delay_max = Signal(max=taps) - self.delay_max_found = delay_max_found = Signal() - self.bitslip = bitslip = Signal(max=40) - - timer = WaitTimer(timeout) - self.submodules += timer - - self.submodules.fsm = fsm = ResetInserter()(FSM(reset_state="IDLE")) - self.comb += self.fsm.reset.eq(self.reset) - - self.comb += serdes.rx_delay_inc.eq(1) - - fsm.act("IDLE", - NextValue(delay, 0), - NextValue(delay_min, 0), - NextValue(delay_min_found, 0), - NextValue(delay_max, 0), - NextValue(delay_max_found, 0), - serdes.rx_delay_rst.eq(1), - NextValue(bitslip, 0), - NextState("RESET_SLAVE"), - serdes.tx_idle.eq(1) - ) - fsm.act("RESET_SLAVE", - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextState("SEND_PATTERN") - ), - serdes.tx_idle.eq(1) - ) - fsm.act("SEND_PATTERN", - If(~serdes.rx_idle, - NextState("WAIT_STABLE") - ), - serdes.tx_comma.eq(1) - ) - fsm.act("WAIT_STABLE", - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextState("CHECK_PATTERN") - ), - serdes.tx_comma.eq(1) - ) - fsm.act("CHECK_PATTERN", - If(~delay_min_found, - If(serdes.rx_comma, - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextValue(delay_min, delay), - NextValue(delay_min_found, 1) - ) - ).Else( - NextState("INC_DELAY_BITSLIP") - ), - ).Else( - If(~serdes.rx_comma, - NextValue(delay_max, delay), - NextValue(delay_max_found, 1), - NextState("CHECK_SAMPLING_WINDOW") - ).Else( - NextState("INC_DELAY_BITSLIP") - ) - ), - serdes.tx_comma.eq(1) - ) - self.comb += serdes.rx_bitslip_value.eq(bitslip) - fsm.act("INC_DELAY_BITSLIP", - NextState("WAIT_STABLE"), - If(delay == (taps - 1), - If(bitslip == (40 - 1), - NextState("ERROR") - ).Else( - NextValue(delay_min_found, 0), - NextValue(bitslip, bitslip + 1) - ), - NextValue(delay, 0), - serdes.rx_delay_rst.eq(1) - ).Else( - NextValue(delay, delay + 1), - serdes.rx_delay_ce.eq(1) - ), - serdes.tx_comma.eq(1) - ) - fsm.act("CHECK_SAMPLING_WINDOW", - 
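        # Editor's sketch (not part of the original patch): a host-side model
        # of the framing handled by Packetizer/Depacketizer above -- a
        # 0x5aa55aa5 preamble word, a length word, then the payload. The
        # length is interpreted as the payload size in bytes (the
        # Depacketizer counts length[2:], i.e. length//4 words); little-endian
        # packing of the 32-bit words is an assumption of this sketch.
        import struct

        def frame_packet(payload_words):
            length = 4*len(payload_words)          # payload length in bytes
            words = [0x5aa55aa5, length] + list(payload_words)
            return struct.pack("<%dI" % len(words), *words)

        def unframe_packet(data):
            words = struct.unpack("<%dI" % (len(data)//4), data)
            assert words[0] == 0x5aa55aa5, "bad preamble"
            return list(words[2:2 + words[1]//4])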
If((delay_min == 0) | - (delay_max == (taps - 1)) | - ((delay_max - delay_min) < taps//16), - NextValue(delay_min_found, 0), - NextValue(delay_max_found, 0), - NextState("WAIT_STABLE") - ).Else( - NextState("CONFIGURE_SAMPLING_WINDOW") - ), - serdes.tx_comma.eq(1) - ) - fsm.act("CONFIGURE_SAMPLING_WINDOW", - If(delay == (delay_min + (delay_max - delay_min)[1:]), - NextState("READY") - ).Else( - NextValue(delay, delay + 1), - serdes.rx_delay_inc.eq(1), - serdes.rx_delay_ce.eq(1), - NextState("WAIT_SAMPLING_WINDOW") - ), - serdes.tx_comma.eq(1) - ) - fsm.act("WAIT_SAMPLING_WINDOW", - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextState("CONFIGURE_SAMPLING_WINDOW") - ), - serdes.tx_comma.eq(1) - ) - fsm.act("READY", - self.ready.eq(1) - ) - fsm.act("ERROR", - self.error.eq(1) - ) - - -class _SerdesSlaveInit(Module, AutoCSR): - def __init__(self, serdes, taps, timeout=1024): - self.reset = Signal() - self.ready = Signal() - self.error = Signal() - - # # # - - self.delay = delay = Signal(max=taps) - self.delay_min = delay_min = Signal(max=taps) - self.delay_min_found = delay_min_found = Signal() - self.delay_max = delay_max = Signal(max=taps) - self.delay_max_found = delay_max_found = Signal() - self.bitslip = bitslip = Signal(max=40) - - timer = WaitTimer(timeout) - self.submodules += timer - - self.comb += self.reset.eq(serdes.rx_idle) - - self.comb += serdes.rx_delay_inc.eq(1) - - self.submodules.fsm = fsm = ResetInserter()(FSM(reset_state="IDLE")) - fsm.act("IDLE", - NextValue(delay, 0), - NextValue(delay_min, 0), - NextValue(delay_min_found, 0), - NextValue(delay_max, 0), - NextValue(delay_max_found, 0), - serdes.rx_delay_rst.eq(1), - NextValue(bitslip, 0), - NextState("WAIT_STABLE"), - serdes.tx_idle.eq(1) - ) - fsm.act("WAIT_STABLE", - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextState("CHECK_PATTERN") - ), - serdes.tx_idle.eq(1) - ) - fsm.act("CHECK_PATTERN", - If(~delay_min_found, - If(serdes.rx_comma, - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextValue(delay_min, delay), - NextValue(delay_min_found, 1) - ) - ).Else( - NextState("INC_DELAY_BITSLIP") - ), - ).Else( - If(~serdes.rx_comma, - NextValue(delay_max, delay), - NextValue(delay_max_found, 1), - NextState("CHECK_SAMPLING_WINDOW") - ).Else( - NextState("INC_DELAY_BITSLIP") - ) - ), - serdes.tx_idle.eq(1) - ) - self.comb += serdes.rx_bitslip_value.eq(bitslip) - fsm.act("INC_DELAY_BITSLIP", - NextState("WAIT_STABLE"), - If(delay == (taps - 1), - If(bitslip == (40 - 1), - NextState("ERROR") - ).Else( - NextValue(delay_min_found, 0), - NextValue(bitslip, bitslip + 1) - ), - NextValue(delay, 0), - serdes.rx_delay_rst.eq(1) - ).Else( - NextValue(delay, delay + 1), - serdes.rx_delay_ce.eq(1) - ), - serdes.tx_idle.eq(1) - ) - fsm.act("CHECK_SAMPLING_WINDOW", - If((delay_min == 0) | - (delay_max == (taps - 1)) | - ((delay_max - delay_min) < taps//16), - NextValue(delay_min_found, 0), - NextValue(delay_max_found, 0), - NextState("WAIT_STABLE") - ).Else( - NextState("CONFIGURE_SAMPLING_WINDOW") - ), - serdes.tx_idle.eq(1) - ) - fsm.act("CONFIGURE_SAMPLING_WINDOW", - If(delay == (delay_min + (delay_max - delay_min)[1:]), - NextState("SEND_PATTERN") - ).Else( - NextValue(delay, delay + 1), - serdes.rx_delay_inc.eq(1), - serdes.rx_delay_ce.eq(1), - NextState("WAIT_SAMPLING_WINDOW") - ) - ) - fsm.act("WAIT_SAMPLING_WINDOW", - timer.wait.eq(1), - If(timer.done, - timer.wait.eq(0), - NextState("CONFIGURE_SAMPLING_WINDOW") - ) - ) - fsm.act("SEND_PATTERN", - timer.wait.eq(1), - If(timer.done, - 
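        # Editor's sketch (not part of the original patch): the sampling
        # window check and final delay choice performed by the
        # CHECK_SAMPLING_WINDOW/CONFIGURE_SAMPLING_WINDOW states, in plain
        # Python. Returning None means "window unusable, rescan".
        def choose_sampling_delay(delay_min, delay_max, taps):
            window = delay_max - delay_min
            if delay_min == 0 or delay_max == taps - 1 or window < taps//16:
                return None
            # (delay_max - delay_min)[1:] drops the LSB, i.e. halves the window
            return delay_min + window//2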
If(~serdes.rx_comma, - NextState("READY") - ) - ), - serdes.tx_comma.eq(1) - ) - fsm.act("READY", - self.ready.eq(1) - ) - fsm.act("ERROR", - self.error.eq(1) - ) - - -class _SerdesControl(Module, AutoCSR): - def __init__(self, init, mode="master"): - if mode == "master": - self.reset = CSR() - self.ready = CSRStatus() - self.error = CSRStatus() - - self.delay = CSRStatus(9) - self.delay_min_found = CSRStatus() - self.delay_min = CSRStatus(9) - self.delay_max_found = CSRStatus() - self.delay_max = CSRStatus(9) - self.bitslip = CSRStatus(6) - - # # # - - if mode == "master": - self.comb += init.reset.eq(self.reset.re) - self.comb += [ - self.ready.status.eq(init.ready), - self.error.status.eq(init.error), - self.delay.status.eq(init.delay), - self.delay_min_found.status.eq(init.delay_min_found), - self.delay_min.status.eq(init.delay_min), - self.delay_max_found.status.eq(init.delay_max_found), - self.delay_max.status.eq(init.delay_max), - self.bitslip.status.eq(init.bitslip) - ] - - -class SERWBPLL(Module): - def __init__(self, refclk_freq, linerate, vco_div=1): - assert refclk_freq == 125e6 - assert linerate == 1.25e9 - - self.lock = Signal() - self.refclk = Signal() - self.serwb_serdes_clk = Signal() - self.serwb_serdes_20x_clk = Signal() - self.serwb_serdes_5x_clk = Signal() - - # # # - - #---------------------------- - # refclk: 125MHz - # vco: 1250MHz - #---------------------------- - # serwb_serdes: 31.25MHz - # serwb_serdes_20x: 625MHz - # serwb_serdes_5x: 156.25MHz - #---------------------------- - self.linerate = linerate - - pll_locked = Signal() - pll_fb = Signal() - pll_serwb_serdes_clk = Signal() - pll_serwb_serdes_20x_clk = Signal() - pll_serwb_serdes_5x_clk = Signal() - self.specials += [ - Instance("PLLE2_BASE", - p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked, - - # VCO @ 1.25GHz / vco_div - p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=8.0, - p_CLKFBOUT_MULT=10, p_DIVCLK_DIVIDE=vco_div, - i_CLKIN1=self.refclk, i_CLKFBIN=pll_fb, - o_CLKFBOUT=pll_fb, - - # 31.25MHz: serwb_serdes - p_CLKOUT0_DIVIDE=40//vco_div, p_CLKOUT0_PHASE=0.0, - o_CLKOUT0=pll_serwb_serdes_clk, - - # 625MHz: serwb_serdes_20x - p_CLKOUT1_DIVIDE=2//vco_div, p_CLKOUT1_PHASE=0.0, - o_CLKOUT1=pll_serwb_serdes_20x_clk, - - # 156.25MHz: serwb_serdes_5x - p_CLKOUT2_DIVIDE=8//vco_div, p_CLKOUT2_PHASE=0.0, - o_CLKOUT2=pll_serwb_serdes_5x_clk - ), - Instance("BUFG", - i_I=pll_serwb_serdes_clk, - o_O=self.serwb_serdes_clk), - Instance("BUFG", - i_I=pll_serwb_serdes_20x_clk, - o_O=self.serwb_serdes_20x_clk), - Instance("BUFG", - i_I=pll_serwb_serdes_5x_clk, - o_O=self.serwb_serdes_5x_clk) - ] - self.specials += MultiReg(pll_locked, self.lock) - - - -class SERWBPHY(Module, AutoCSR): - def __init__(self, device, pll, pads, mode="master"): - assert mode in ["master", "slave"] - if device[:4] == "xcku": - taps = 512 - self.submodules.serdes = KUSSerdes(pll, pads, mode) - elif device[:4] == "xc7a": - taps = 32 - self.submodules.serdes = S7Serdes(pll, pads, mode) - else: - raise NotImplementedError - if mode == "master": - self.submodules.init = _SerdesMasterInit(self.serdes, taps) - else: - self.submodules.init = _SerdesSlaveInit(self.serdes, taps) - self.submodules.control = _SerdesControl(self.init, mode) diff --git a/artiq/gateware/serwb/s7phy.py b/artiq/gateware/serwb/s7phy.py deleted file mode 100644 index d64f5bbb5..000000000 --- a/artiq/gateware/serwb/s7phy.py +++ /dev/null @@ -1,223 +0,0 @@ -from migen import * -from migen.genlib.resetsync import AsyncResetSynchronizer -from migen.genlib.cdc import MultiReg, Gearbox -from 
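# Editor's sketch (not part of the original patch): sanity check of the
# SERWBPLL settings above using the standard 7-series PLL relations
# (VCO = refclk*CLKFBOUT_MULT/DIVCLK_DIVIDE, output = VCO/CLKOUTx_DIVIDE).
def serwb_pll_frequencies(refclk=125e6, vco_div=1):
    vco = refclk*10/vco_div
    return {
        "serwb_serdes":     vco/(40//vco_div),  # 31.25 MHz
        "serwb_serdes_20x": vco/(2//vco_div),   # 625 MHz
        "serwb_serdes_5x":  vco/(8//vco_div),   # 156.25 MHz
    }

assert serwb_pll_frequencies() == {"serwb_serdes": 31.25e6,
                                   "serwb_serdes_20x": 625e6,
                                   "serwb_serdes_5x": 156.25e6}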
migen.genlib.misc import BitSlip - -from misoc.cores.code_8b10b import Encoder, Decoder - - -class S7Serdes(Module): - def __init__(self, pll, pads, mode="master"): - self.tx_data = Signal(32) - self.rx_data = Signal(32) - - self.tx_idle = Signal() - self.tx_comma = Signal() - self.rx_idle = Signal() - self.rx_comma = Signal() - - self.rx_bitslip_value = Signal(6) - self.rx_delay_rst = Signal() - self.rx_delay_inc = Signal() - self.rx_delay_ce = Signal() - - # # # - - self.submodules.encoder = ClockDomainsRenamer("serwb_serdes")( - Encoder(4, True)) - self.decoders = [ClockDomainsRenamer("serwb_serdes")( - Decoder(True)) for _ in range(4)] - self.submodules += self.decoders - - # clocking: - - # In master mode: - # - linerate/10 pll refclk provided by user - # - linerate/10 slave refclk generated on clk_pads - # In Slave mode: - # - linerate/10 pll refclk provided by clk_pads - self.clock_domains.cd_serwb_serdes = ClockDomain() - self.clock_domains.cd_serwb_serdes_5x = ClockDomain() - self.clock_domains.cd_serwb_serdes_20x = ClockDomain(reset_less=True) - self.comb += [ - self.cd_serwb_serdes.clk.eq(pll.serwb_serdes_clk), - self.cd_serwb_serdes_5x.clk.eq(pll.serwb_serdes_5x_clk), - self.cd_serwb_serdes_20x.clk.eq(pll.serwb_serdes_20x_clk) - ] - self.specials += AsyncResetSynchronizer(self.cd_serwb_serdes, ~pll.lock) - self.comb += self.cd_serwb_serdes_5x.rst.eq(self.cd_serwb_serdes.rst) - - # control/status cdc - tx_idle = Signal() - tx_comma = Signal() - rx_idle = Signal() - rx_comma = Signal() - rx_bitslip_value = Signal(6) - self.specials += [ - MultiReg(self.tx_idle, tx_idle, "serwb_serdes"), - MultiReg(self.tx_comma, tx_comma, "serwb_serdes"), - MultiReg(rx_idle, self.rx_idle, "sys"), - MultiReg(rx_comma, self.rx_comma, "sys") - ] - self.specials += MultiReg(self.rx_bitslip_value, rx_bitslip_value, "serwb_serdes"), - - # tx clock (linerate/10) - if mode == "master": - self.submodules.tx_clk_gearbox = Gearbox(40, "serwb_serdes", 8, "serwb_serdes_5x") - self.comb += self.tx_clk_gearbox.i.eq((0b1111100000 << 30) | - (0b1111100000 << 20) | - (0b1111100000 << 10) | - (0b1111100000 << 0)) - clk_o = Signal() - self.specials += [ - Instance("OSERDESE2", - p_DATA_WIDTH=8, p_TRISTATE_WIDTH=1, - p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="BUF", - p_SERDES_MODE="MASTER", - - o_OQ=clk_o, - i_OCE=1, - i_RST=ResetSignal("serwb_serdes"), - i_CLK=ClockSignal("serwb_serdes_20x"), i_CLKDIV=ClockSignal("serwb_serdes_5x"), - i_D1=self.tx_clk_gearbox.o[0], i_D2=self.tx_clk_gearbox.o[1], - i_D3=self.tx_clk_gearbox.o[2], i_D4=self.tx_clk_gearbox.o[3], - i_D5=self.tx_clk_gearbox.o[4], i_D6=self.tx_clk_gearbox.o[5], - i_D7=self.tx_clk_gearbox.o[6], i_D8=self.tx_clk_gearbox.o[7] - ), - Instance("OBUFDS", - i_I=clk_o, - o_O=pads.clk_p, - o_OB=pads.clk_n - ) - ] - - # tx datapath - # tx_data -> encoders -> gearbox -> serdes - self.submodules.tx_gearbox = Gearbox(40, "serwb_serdes", 8, "serwb_serdes_5x") - self.comb += [ - If(tx_comma, - self.encoder.k[0].eq(1), - self.encoder.d[0].eq(0xbc) - ).Else( - self.encoder.d[0].eq(self.tx_data[0:8]), - self.encoder.d[1].eq(self.tx_data[8:16]), - self.encoder.d[2].eq(self.tx_data[16:24]), - self.encoder.d[3].eq(self.tx_data[24:32]) - ) - ] - self.sync.serwb_serdes += \ - If(tx_idle, - self.tx_gearbox.i.eq(0) - ).Else( - self.tx_gearbox.i.eq(Cat(*[self.encoder.output[i] for i in range(4)])) - ) - - serdes_o = Signal() - self.specials += [ - Instance("OSERDESE2", - p_DATA_WIDTH=8, p_TRISTATE_WIDTH=1, - p_DATA_RATE_OQ="DDR", p_DATA_RATE_TQ="BUF", - p_SERDES_MODE="MASTER", - - 
o_OQ=serdes_o, - i_OCE=1, - i_RST=ResetSignal("serwb_serdes"), - i_CLK=ClockSignal("serwb_serdes_20x"), i_CLKDIV=ClockSignal("serwb_serdes_5x"), - i_D1=self.tx_gearbox.o[0], i_D2=self.tx_gearbox.o[1], - i_D3=self.tx_gearbox.o[2], i_D4=self.tx_gearbox.o[3], - i_D5=self.tx_gearbox.o[4], i_D6=self.tx_gearbox.o[5], - i_D7=self.tx_gearbox.o[6], i_D8=self.tx_gearbox.o[7] - ), - Instance("OBUFDS", - i_I=serdes_o, - o_O=pads.tx_p, - o_OB=pads.tx_n - ) - ] - - # rx clock - use_bufr = True - if mode == "slave": - clk_i = Signal() - clk_i_bufg = Signal() - self.specials += [ - Instance("IBUFDS", - i_I=pads.clk_p, - i_IB=pads.clk_n, - o_O=clk_i - ) - ] - if use_bufr: - clk_i_bufr = Signal() - self.specials += [ - Instance("BUFR", i_I=clk_i, o_O=clk_i_bufr), - Instance("BUFG", i_I=clk_i_bufr, o_O=clk_i_bufg) - ] - else: - self.specials += Instance("BUFG", i_I=clk_i, o_O=clk_i_bufg) - self.comb += pll.refclk.eq(clk_i_bufg) - - # rx datapath - # serdes -> gearbox -> bitslip -> decoders -> rx_data - self.submodules.rx_gearbox = Gearbox(8, "serwb_serdes_5x", 40, "serwb_serdes") - self.submodules.rx_bitslip = ClockDomainsRenamer("serwb_serdes")(BitSlip(40)) - - serdes_i_nodelay = Signal() - self.specials += [ - Instance("IBUFDS_DIFF_OUT", - i_I=pads.rx_p, - i_IB=pads.rx_n, - o_O=serdes_i_nodelay - ) - ] - - serdes_i_delayed = Signal() - serdes_q = Signal(8) - self.specials += [ - Instance("IDELAYE2", - p_DELAY_SRC="IDATAIN", p_SIGNAL_PATTERN="DATA", - p_CINVCTRL_SEL="FALSE", p_HIGH_PERFORMANCE_MODE="TRUE", - p_REFCLK_FREQUENCY=200.0, p_PIPE_SEL="FALSE", - p_IDELAY_TYPE="VARIABLE", p_IDELAY_VALUE=0, - - i_C=ClockSignal(), - i_LD=self.rx_delay_rst, - i_CE=self.rx_delay_ce, - i_LDPIPEEN=0, i_INC=self.rx_delay_inc, - - i_IDATAIN=serdes_i_nodelay, o_DATAOUT=serdes_i_delayed - ), - Instance("ISERDESE2", - p_DATA_WIDTH=8, p_DATA_RATE="DDR", - p_SERDES_MODE="MASTER", p_INTERFACE_TYPE="NETWORKING", - p_NUM_CE=1, p_IOBDELAY="IFD", - - i_DDLY=serdes_i_delayed, - i_CE1=1, - i_RST=ResetSignal("serwb_serdes"), - i_CLK=ClockSignal("serwb_serdes_20x"), i_CLKB=~ClockSignal("serwb_serdes_20x"), - i_CLKDIV=ClockSignal("serwb_serdes_5x"), - i_BITSLIP=0, - o_Q8=serdes_q[0], o_Q7=serdes_q[1], - o_Q6=serdes_q[2], o_Q5=serdes_q[3], - o_Q4=serdes_q[4], o_Q3=serdes_q[5], - o_Q2=serdes_q[6], o_Q1=serdes_q[7] - ) - ] - - self.comb += [ - self.rx_gearbox.i.eq(serdes_q), - self.rx_bitslip.value.eq(rx_bitslip_value), - self.rx_bitslip.i.eq(self.rx_gearbox.o), - self.decoders[0].input.eq(self.rx_bitslip.o[0:10]), - self.decoders[1].input.eq(self.rx_bitslip.o[10:20]), - self.decoders[2].input.eq(self.rx_bitslip.o[20:30]), - self.decoders[3].input.eq(self.rx_bitslip.o[30:40]), - self.rx_data.eq(Cat(*[self.decoders[i].d for i in range(4)])), - rx_idle.eq(self.rx_bitslip.o == 0), - rx_comma.eq(((self.decoders[0].d == 0xbc) & (self.decoders[0].k == 1)) & - ((self.decoders[1].d == 0x00) & (self.decoders[1].k == 0)) & - ((self.decoders[2].d == 0x00) & (self.decoders[2].k == 0)) & - ((self.decoders[3].d == 0x00) & (self.decoders[3].k == 0))) - - ] diff --git a/artiq/gateware/spi.py b/artiq/gateware/spi.py deleted file mode 100644 index 182a934f5..000000000 --- a/artiq/gateware/spi.py +++ /dev/null @@ -1,371 +0,0 @@ -from itertools import product - -from migen import * -from misoc.interconnect import wishbone -from misoc.cores.spi import SPIMachine - - -class SPIMaster(Module): - """SPI Master. 
- - Notes: - * M = 32 is the data width (width of the data register, - maximum write bits, maximum read bits) - * Every transfer consists of a write_length 0-M bit write followed - by a read_length 0-M bit read. - * cs_n is asserted at the beginning and deasserted at the end of the - transfer if there is no other transfer pending. - * cs_n handling is agnostic to whether it is one-hot or decoded - somewhere downstream. If it is decoded, "cs_n all deasserted" - should be handled accordingly (no slave selected). - If it is one-hot, asserting multiple slaves should only be attempted - if miso is either not connected between slaves, or open collector, - or correctly multiplexed externally. - * If config.cs_polarity == 0 (cs active low, the default), - "cs_n all deasserted" means "all cs_n bits high". - * cs is not mandatory in pads. Framing and chip selection can also - be handled independently through other means. - * If there is a miso wire in pads, the input and output can be done - with two signals (a.k.a. 4-wire SPI), else mosi must be used for - both output and input (a.k.a. 3-wire SPI) and config.half_duplex - must to be set when reading data is desired. - * For 4-wire SPI only the sum of read_length and write_length matters. - The behavior is the same no matter how the total transfer length is - divided between the two. For 3-wire SPI, the direction of mosi/miso - is switched from output to input after write_len cycles, at the - "shift_out" clk edge corresponding to bit write_length + 1 of the - transfer. - * The first bit output on mosi is always the MSB/LSB (depending on - config.lsb_first) of the data register, independent of - xfer.write_len. The last bit input from miso always ends up in - the LSB/MSB (respectively) of the data register, independent of - read_len. - * Data output on mosi in 4-wire SPI during the read cycles is what - is found in the data register at the time. - Data in the data register outside the least/most (depending - on config.lsb_first) significant read_length bits is what is - seen on miso during the write cycles. - * The SPI data register is double-buffered: Once a transfer has - started, new write data can be written, queuing a new transfer. - Transfers submitted this way are chained and executed without - deasserting cs. Once a transfer completes, the previous transfer's - read data is available in the data register. - * Writes to the config register take effect immediately. Writes to xfer - and data are synchronized to the start of a transfer. - * A wishbone data register write is ack-ed when the transfer has - been written to the intermediate buffer. It will be started when - there are no other transactions being executed, either starting - a new SPI transfer of chained to an in-flight transfer. - Writes take two cycles unless the write is to the data register - and another chained transfer is pending and the transfer being - executed is not complete. Reads always finish in two cycles. - - Transaction Sequence: - * If desired, write the config register to set up the core. - * If desired, write the xfer register to change lengths and cs_n. - * Write the data register (also for zero-length writes), - writing triggers the transfer and when the transfer is accepted to - the inermediate buffer, the write is ack-ed. - * If desired, read the data register corresponding to the last - completed transfer. - * If desired, change xfer register for the next transfer. - * If desired, write data queuing the next (possibly chained) transfer. 
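    A bus-level sketch of this sequence, written in the generator style used
    by the simulation helpers at the end of this file (SPI_* register and bit
    helpers; the concrete values are illustrative only):

        # configure: clears the default offline bit, CPOL=CPHA=0, MSB first
        yield from bus.write(SPI_CONFIG_ADDR,
                             SPI_DIV_WRITE(3) | SPI_DIV_READ(5))
        # 16-bit write followed by an 8-bit read on chip select 0
        yield from bus.write(SPI_XFER_ADDR, SPI_CS(0b01) |
                             SPI_WRITE_LENGTH(16) | SPI_READ_LENGTH(8))
        # writing the data register queues and starts the transfer
        # (data is left-aligned for an MSB-first 16-bit write)
        yield from bus.write(SPI_DATA_ADDR, 0xa5a5 << 16)
        # wait until neither active nor pending, then fetch the read data
        while (yield from bus.read(SPI_CONFIG_ADDR)) & (SPI_ACTIVE | SPI_PENDING):
            pass
        rdata = yield from bus.read(SPI_DATA_ADDR)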
- - Register address and bit map: - - config (address 2): - 1 offline: all pins high-z (reset=1) - 1 active: cs/transfer active (read-only) - 1 pending: transfer pending in intermediate buffer (read-only) - 1 cs_polarity: active level of chip select (reset=0) - 1 clk_polarity: idle level of clk (reset=0) - 1 clk_phase: first edge after cs assertion to sample data on (reset=0) - (clk_polarity, clk_phase) == (CPOL, CPHA) in Freescale language. - (0, 0): idle low, output on falling, input on rising - (0, 1): idle low, output on rising, input on falling - (1, 0): idle high, output on rising, input on falling - (1, 1): idle high, output on falling, input on rising - There is never a clk edge during a cs edge. - 1 lsb_first: LSB is the first bit on the wire (reset=0) - 1 half_duplex: 3-wire SPI, in/out on mosi (reset=0) - 8 undefined - 8 div_write: counter load value to divide this module's clock - to generate the SPI write clk (reset=0) - f_clk/f_spi_write == div_write + 2 - 8 div_read: ditto for the read clock - - xfer (address 1): - 16 cs: active high bit mask of chip selects to assert (reset=0) - 6 write_len: 0-M bits (reset=0) - 2 undefined - 6 read_len: 0-M bits (reset=0) - 2 undefined - - data (address 0): - M write/read data (reset=0) - """ - def __init__(self, pads, pads_n=None, bus=None): - if bus is None: - bus = wishbone.Interface(data_width=32) - self.bus = bus - - ### - - # Wishbone - config = Record([ - ("offline", 1), - ("active", 1), - ("pending", 1), - ("cs_polarity", 1), - ("clk_polarity", 1), - ("clk_phase", 1), - ("lsb_first", 1), - ("half_duplex", 1), - ("padding", 8), - ("div_write", 8), - ("div_read", 8), - ]) - config.offline.reset = 1 - assert len(config) <= len(bus.dat_w) - - xfer = Record([ - ("cs", 16), - ("write_length", 6), - ("padding0", 2), - ("read_length", 6), - ("padding1", 2), - ]) - assert len(xfer) <= len(bus.dat_w) - - self.submodules.spi = spi = SPIMachine( - data_width=len(bus.dat_w) + 1, - clock_width=len(config.div_read), - bits_width=len(xfer.read_length)) - - pending = Signal() - cs = Signal.like(xfer.cs) - data_read = Signal.like(spi.reg.data) - data_write = Signal.like(spi.reg.data) - - self.comb += [ - spi.start.eq(pending & (~spi.cs | spi.done)), - spi.clk_phase.eq(config.clk_phase), - spi.reg.lsb.eq(config.lsb_first), - spi.div_write.eq(config.div_write), - spi.div_read.eq(config.div_read), - ] - self.sync += [ - If(spi.done, - data_read.eq( - Mux(spi.reg.lsb, spi.reg.data[1:], spi.reg.data[:-1])), - ), - If(spi.start, - cs.eq(xfer.cs), - spi.bits.n_write.eq(xfer.write_length), - spi.bits.n_read.eq(xfer.read_length), - If(spi.reg.lsb, - spi.reg.data[:-1].eq(data_write), - ).Else( - spi.reg.data[1:].eq(data_write), - ), - pending.eq(0), - ), - # wb.ack a transaction if any of the following: - # a) reading, - # b) writing to non-data register - # c) writing to data register and no pending transfer - # d) writing to data register and pending and swapping buffers - bus.ack.eq(bus.cyc & bus.stb & - (~bus.we | (bus.adr != 0) | ~pending | spi.done)), - If(bus.cyc & bus.stb, - bus.dat_r.eq( - Array([data_read, xfer.raw_bits(), config.raw_bits() - ])[bus.adr]), - ), - If(bus.ack, - bus.ack.eq(0), - If(bus.we, - Array([data_write, xfer.raw_bits(), config.raw_bits() - ])[bus.adr].eq(bus.dat_w), - If(bus.adr == 0, # data register - pending.eq(1), - ), - ), - ), - config.active.eq(spi.cs), - config.pending.eq(pending), - ] - - # I/O - mosi_oe = Signal() - clk = Signal() - self.comb += [ - mosi_oe.eq( - ~config.offline & spi.cs & - (spi.oe | 
~config.half_duplex)), - clk.eq((spi.cg.clk & spi.cs) ^ config.clk_polarity) - ] - - if pads_n is None: - if hasattr(pads, "cs_n"): - cs_n_t = TSTriple(len(pads.cs_n)) - self.specials += cs_n_t.get_tristate(pads.cs_n) - self.comb += [ - cs_n_t.oe.eq(~config.offline), - cs_n_t.o.eq((cs & Replicate(spi.cs, len(cs))) ^ - Replicate(~config.cs_polarity, len(cs))), - ] - - clk_t = TSTriple() - self.specials += clk_t.get_tristate(pads.clk) - self.comb += [ - clk_t.oe.eq(~config.offline), - clk_t.o.eq(clk), - ] - - mosi_t = TSTriple() - self.specials += mosi_t.get_tristate(pads.mosi) - self.comb += [ - mosi_t.oe.eq(mosi_oe), - mosi_t.o.eq(spi.reg.o), - spi.reg.i.eq(Mux(config.half_duplex, mosi_t.i, - getattr(pads, "miso", mosi_t.i))), - ] - else: - if hasattr(pads, "cs_n"): - for i in range(len(pads.cs_n)): - self.specials += Instance("IOBUFDS", - i_I=(cs[i] & spi.cs) ^ ~config.cs_polarity, - i_T=config.offline, - io_IO=pads.cs_n[i], io_IOB=pads_n.cs_n[i]) - - self.specials += Instance("IOBUFDS", - i_I=clk, i_T=config.offline, - io_IO=pads.clk, io_IOB=pads_n.clk) - - mosi = Signal() - self.specials += Instance("IOBUFDS", - o_O=mosi, i_I=spi.reg.o, i_T=~mosi_oe, - io_IO=pads.mosi, io_IOB=pads_n.mosi) - if hasattr(pads, "miso"): - miso = Signal() - self.specials += Instance("IBUFDS", - o_O=miso, i_I=pads.miso, i_IB=pads_n.miso) - else: - miso = mosi - self.comb += spi.reg.i.eq(Mux(config.half_duplex, mosi, miso)) - - -SPI_DATA_ADDR, SPI_XFER_ADDR, SPI_CONFIG_ADDR = range(3) -( - SPI_OFFLINE, - SPI_ACTIVE, - SPI_PENDING, - SPI_CS_POLARITY, - SPI_CLK_POLARITY, - SPI_CLK_PHASE, - SPI_LSB_FIRST, - SPI_HALF_DUPLEX, -) = (1 << i for i in range(8)) - - -def SPI_DIV_WRITE(i): - return i << 16 - - -def SPI_DIV_READ(i): - return i << 24 - - -def SPI_CS(i): - return i << 0 - - -def SPI_WRITE_LENGTH(i): - return i << 16 - - -def SPI_READ_LENGTH(i): - return i << 24 - - -def _test_xfer(bus, cs, wlen, rlen, wdata): - yield from bus.write(SPI_XFER_ADDR, SPI_CS(cs) | - SPI_WRITE_LENGTH(wlen) | SPI_READ_LENGTH(rlen)) - yield from bus.write(SPI_DATA_ADDR, wdata) - yield - - -def _test_read(bus, sync=SPI_ACTIVE | SPI_PENDING): - while (yield from bus.read(SPI_CONFIG_ADDR)) & sync: - pass - return (yield from bus.read(SPI_DATA_ADDR)) - - -def _test_gen(bus): - yield from bus.write(SPI_CONFIG_ADDR, - 0*SPI_CLK_PHASE | 0*SPI_LSB_FIRST | - 1*SPI_HALF_DUPLEX | - SPI_DIV_WRITE(3) | SPI_DIV_READ(5)) - yield from _test_xfer(bus, 0b01, 4, 0, 0x90000000) - print(hex((yield from _test_read(bus)))) - yield from _test_xfer(bus, 0b10, 0, 4, 0x90000000) - print(hex((yield from _test_read(bus)))) - yield from _test_xfer(bus, 0b11, 4, 4, 0x81000000) - print(hex((yield from _test_read(bus)))) - yield from _test_xfer(bus, 0b01, 8, 32, 0x87654321) - yield from _test_xfer(bus, 0b01, 0, 32, 0x12345678) - print(hex((yield from _test_read(bus, SPI_PENDING)))) - print(hex((yield from _test_read(bus, SPI_ACTIVE)))) - return - for cpol, cpha, lsb, clk in product( - (0, 1), (0, 1), (0, 1), (0, 1)): - yield from bus.write(SPI_CONFIG_ADDR, - cpol*SPI_CLK_POLARITY | cpha*SPI_CLK_PHASE | - lsb*SPI_LSB_FIRST | SPI_DIV_WRITE(clk) | - SPI_DIV_READ(clk)) - for wlen, rlen, wdata in product((0, 8, 32), (0, 8, 32), - (0, 0xffffffff, 0xdeadbeef)): - rdata = (yield from _test_xfer(bus, 0b1, wlen, rlen, wdata, True)) - len = (wlen + rlen) % 32 - mask = (1 << len) - 1 - if lsb: - shift = (wlen + rlen) % 32 - else: - shift = 0 - a = (wdata >> wshift) & wmask - b = (rdata >> rshift) & rmask - if a != b: - print("ERROR", end=" ") - print(cpol, cpha, lsb, clk, 
wlen, rlen, - hex(wdata), hex(rdata), hex(a), hex(b)) - - -class _TestPads: - def __init__(self): - self.cs_n = Signal(2) - self.clk = Signal() - self.mosi = Signal() - self.miso = Signal() - - -class _TestTristate(Module): - def __init__(self, t): - oe = Signal() - self.comb += [ - t.target.eq(t.o), - oe.eq(t.oe), - t.i.eq(t.o), - ] - -if __name__ == "__main__": - from migen.fhdl.specials import Tristate - - pads = _TestPads() - dut = SPIMaster(pads) - dut.comb += pads.miso.eq(pads.mosi) - # from migen.fhdl.verilog import convert - # print(convert(dut)) - - Tristate.lower = _TestTristate - run_simulation(dut, _test_gen(dut.bus), vcd_name="spi_master.vcd") diff --git a/artiq/devices/korad_ka3005p/__init__.py b/artiq/gateware/suservo/__init__.py similarity index 100% rename from artiq/devices/korad_ka3005p/__init__.py rename to artiq/gateware/suservo/__init__.py diff --git a/artiq/gateware/suservo/adc_ser.py b/artiq/gateware/suservo/adc_ser.py new file mode 100644 index 000000000..9e2bf48b9 --- /dev/null +++ b/artiq/gateware/suservo/adc_ser.py @@ -0,0 +1,132 @@ +import logging +import string +from collections import namedtuple + +from migen import * +from migen.genlib import io + + +logger = logging.getLogger(__name__) + + +# all times in cycles +ADCParams = namedtuple("ADCParams", [ + "channels", # number of channels + "lanes", # number of SDO? data lanes + # lanes need to be named alphabetically and contiguous + # (e.g. [sdoa, sdob, sdoc, sdoc] or [sdoa, sdob]) + "width", # bits to transfer per channel + "t_cnvh", # CNVH duration (minimum) + "t_conv", # CONV duration (minimum) + "t_rtt", # upper estimate for clock round trip time from + # sck at the FPGA to clkout at the FPGA (cycles) + # this avoids having synchronizers and another counter + # to signal end-of transfer + # and it ensures fixed latency early in the pipeline +]) + + +class ADC(Module): + """Multi-lane, multi-channel, triggered, source-synchronous, serial + ADC interface. + + * Supports ADCs like the LTC2320-16. + * Hardcoded timings. 
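    * For example, a hypothetical parameter set for an LTC2320-16-style
      converter read over four data lanes (illustrative values, not
      necessarily those used elsewhere in ARTIQ)::

          ADCParams(channels=8, lanes=4, width=16,
                    t_cnvh=4, t_conv=57, t_rtt=4)

      With these numbers the READ phase lasts width*channels//lanes = 32
      cycles and each lane carries channels//lanes = 2 channels.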
+ """ + def __init__(self, pads, params): + self.params = p = params # ADCParams + self.data = [Signal((p.width, True), reset_less=True) + for i in range(p.channels)] # retrieved ADC data + self.start = Signal() # start conversion and reading + self.reading = Signal() # data is being read (outputs are invalid) + self.done = Signal() # data is valid and a new conversion can + # be started + + ### + + # collect sdo lines + sdo = [] + for i in string.ascii_lowercase[:p.lanes]: + sdo.append(getattr(pads, "sdo" + i)) + assert p.lanes == len(sdo) + + # set up counters for the four states CNVH, CONV, READ, RTT + t_read = p.width*p.channels//p.lanes # SDR + assert p.lanes*t_read == p.width*p.channels + assert all(_ > 0 for _ in (p.t_cnvh, p.t_conv, p.t_rtt)) + assert p.t_conv > 1 + count = Signal(max=max(p.t_cnvh, p.t_conv, t_read, p.t_rtt), + reset_less=True) + count_load = Signal.like(count) + count_done = Signal() + update = Signal() + + self.comb += count_done.eq(count == 0) + self.sync += [ + count.eq(count - 1), + If(count_done, + count.eq(count_load), + ) + ] + + self.submodules.fsm = fsm = FSM("IDLE") + fsm.act("IDLE", + self.done.eq(1), + If(self.start, + count_load.eq(p.t_cnvh - 1), + NextState("CNVH") + ) + ) + fsm.act("CNVH", + count_load.eq(p.t_conv - 1), + pads.cnv.eq(1), + If(count_done, + NextState("CONV") + ) + ) + fsm.act("CONV", + count_load.eq(t_read - 1), + If(count_done, + NextState("READ") + ) + ) + fsm.act("READ", + self.reading.eq(1), + count_load.eq(p.t_rtt - 1), + pads.sck_en.eq(1), + If(count_done, + NextState("RTT") + ) + ) + fsm.act("RTT", # account for sck->clkout round trip time + self.reading.eq(1), + If(count_done, + update.eq(1), + NextState("IDLE") + ) + ) + + try: + sck_en_ret = pads.sck_en_ret # simulation + except AttributeError: + sck_en_ret = 1 + + self.clock_domains.cd_ret = ClockDomain("ret", reset_less=True) + self.comb += self.cd_ret.clk.eq(pads.clkout) + + k = p.channels//p.lanes + assert t_read == k*p.width + for i, sdo in enumerate(sdo): + sdo_sr = Signal(2*t_read) + self.sync.ret += [ + If(sck_en_ret, + sdo_sr[1:].eq(sdo_sr), + sdo_sr[0].eq(sdo), + ) + ] + self.sync += [ + If(update, + Cat(reversed([self.data[i*k + j] for j in range(k)]) + ).eq(sdo_sr) + ) + ] diff --git a/artiq/gateware/suservo/dds_ser.py b/artiq/gateware/suservo/dds_ser.py new file mode 100644 index 000000000..8df30e9fe --- /dev/null +++ b/artiq/gateware/suservo/dds_ser.py @@ -0,0 +1,43 @@ +import logging + +from migen import * + +from . import spi + + +logger = logging.getLogger(__name__) + + +DDSParams = spi.SPIParams + + +class DDS(spi.SPISimple): + """Multi-DDS SPI interface. + + * Supports SPI DDS chips like the AD9910. + * Shifts data out to multiple DDS in parallel with a shared CLK and shared + CS_N line. + * Supports a single hardcoded command. + * Configuration and setup must be done over a different channel. + * Asserts IO_UPDATE for one clock cycle immediately after the SPI transfer. 
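    * As an illustration of the shifted word (field widths follow the
      ``Signal(32 + 16 + 16)`` profile register in the implementation; 0x0e
      is the "write single tone profile 0" command used below)::

          cmd = 0x0e
          word = (cmd << 64) | profile   # params.width == 8 + 64 == 72 bits

      ``Cat(profile, cmd)`` places the command in the most significant byte,
      so it leads the transfer if the SPI core shifts MSB first (an
      assumption of this sketch).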
+ """ + def __init__(self, pads, params): + super().__init__(pads, params) + + self.profile = [Signal(32 + 16 + 16, reset_less=True) + for i in range(params.channels)] + cmd = Signal(8, reset=0x0e) # write to single tone profile 0 + assert params.width == len(cmd) + len(self.profile[0]) + + self.sync += [ + If(self.start, + [d.eq(Cat(p, cmd)) + for d, p in zip(self.data, self.profile)] + ) + ] + + # this assumes that the cycle time (1/125 MHz = 8 ns) is >1 SYNC_CLK + # cycle (1/250 MHz = 4ns) + done_old = Signal() + self.sync += done_old.eq(self.done) + self.comb += pads.io_update.eq(self.done & ~done_old) diff --git a/artiq/gateware/suservo/iir.py b/artiq/gateware/suservo/iir.py new file mode 100644 index 000000000..0ebab3f13 --- /dev/null +++ b/artiq/gateware/suservo/iir.py @@ -0,0 +1,680 @@ +from collections import namedtuple +import logging + +from migen import * + + +logger = logging.getLogger(__name__) + + +# all these are number of bits! +IIRWidths = namedtuple("IIRWidths", [ + "state", # the signed x and y states of the IIR filter + # DSP A input, x state is one bit smaller + # due to AD pre-adder, y has full width (25) + "coeff", # signed IIR filter coefficients a1, b0, b1 (18) + "accu", # IIR accumulator width (48) + "adc", # signed ADC data (16) + "word", # "word" size to break up DDS profile data (16) + "asf", # unsigned amplitude scale factor for DDS (14) + "shift", # fixed point scaling coefficient for a1, b0, b1 (log2!) (11) + "channel", # channels (log2!) (3) + "profile", # profiles per channel (log2!) (5) + "dly", # the activation delay +]) + + +def signed(v, w): + """Convert an unsigned integer ``v`` to it's signed value assuming ``w`` + bits""" + assert 0 <= v < (1 << w) + if v & (1 << w - 1): + v -= 1 << w + return v + + +class DSP(Module): + """Thin abstraction of DSP functionality used here, commonly present, + and inferrable in FPGAs: multiplier with pre-adder and post-accumulator + and pipeline registers at every stage.""" + def __init__(self, w, signed_output=False): + self.state = Signal((w.state, True)) + # NOTE: + # If offset is non-zero, care must be taken to ensure that the + # offset-state difference does not overflow the width of the ad factor + # which is also w.state. 
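        # Editor's note (not part of the original patch): the signed() helper
        # above reinterprets an unsigned memory word as two's complement,
        # e.g. signed(0xffff, 16) == -1 and signed(0x7fff, 16) == 32767.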
+ self.offset = Signal((w.state, True)) + self.coeff = Signal((w.coeff, True)) + self.output = Signal((w.state, True)) + self.accu_clr = Signal() + self.offset_load = Signal() + self.clip = Signal() + + a = Signal((w.state, True), reset_less=True) + d = Signal((w.state, True), reset_less=True) + ad = Signal((w.state, True), reset_less=True) + b = Signal((w.coeff, True), reset_less=True) + m = Signal((w.accu, True), reset_less=True) + p = Signal((w.accu, True), reset_less=True) + + self.sync += [ + a.eq(self.state), + If(self.offset_load, + d.eq(self.offset) + ), + ad.eq(d + a), + b.eq(self.coeff), + m.eq(ad*b), + p.eq(p + m), + If(self.accu_clr, + # inject symmetric rouding constant + # p.eq(1 << (w.shift - 1)) + # but that won't infer P reg, so we just clear + # and round down + p.eq(0), + ) + ] + # Bit layout (LSB-MSB): w.shift | w.state - 1 | n_sign - 1 | 1 (sign) + n_sign = w.accu - w.state - w.shift + 1 + assert n_sign > 1 + + # clipping + if signed_output: + self.comb += [ + self.clip.eq(p[-n_sign:] != Replicate(p[-1], n_sign)), + self.output.eq(Mux(self.clip, + Cat(Replicate(~p[-1], w.state - 1), p[-1]), + p[w.shift:])) + ] + else: + self.comb += [ + self.clip.eq(p[-n_sign:] != 0), + self.output.eq(Mux(self.clip, + Replicate(~p[-1], w.state - 1), + p[w.shift:])) + ] + + +class IIR(Module): + """Pipelined IIR processor. + + This module implements a multi-channel IIR (infinite impulse response) + filter processor optimized for synthesis on FPGAs. + + The module is parametrized by passing a ``IIRWidths()`` object which + will be abbreviated W here. + + It reads 1 << W.channels input channels (typically from an ADC) + and on each iteration processes the data using a first-order IIR filter. + At the end of the cycle each the output of the filter together with + additional data (typically frequency tunning word and phase offset word + for a DDS) are presented at the 1 << W.channels outputs of the module. + + Profile memory + ============== + + Each channel can operate using any of its 1 << W.profile profiles. + The profile data consists of the input ADC channel index (SEL), a delay + (DLY) for delayed activation of the IIR updates, the three IIR + coefficients (A1, B0, B1), the input offset (OFFSET), and additional data + (FTW0, FTW1, and POW). Profile data is stored in a dual-port block RAM that + can be accessed externally. + + Memory Layout + ------------- + + The profile data is stored sequentially for each channel. + Each channel has 1 << W.profile profiles available. + Each profile stores 8 values, each up to W.coeff bits wide, arranged as: + [FTW1, B1, POW, CFG, OFFSET, A1, FTW0, B0] + The lower 8 bits of CFG hold the ADC input channel index SEL. + The subsequent 8 bits hold the IIR activation delay DLY. + The back memory is 2*W.coeff bits wide and each value pair + (even and odd address) + are stored in a single location with the odd address value occupying the + high bits. + + State memory + ============ + + The filter state consists of the previous ADC input values X1, + the current ADC input values X0 and the previous output values + of the IIR filter (Y1). The filter + state is stored in a dual-port block RAM that can be accessed + externally. + + Memory Layout + ------------- + + The state memory holds all Y1 values (IIR processor outputs) for all + profiles of all channels in the lower half (1 << W.profile + W.channel + addresses) and the pairs of old and new ADC input values X1, and X0, + in the upper half (1 << W.channel addresses). 
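    For illustration, the addressing used by the ``get_state``/``set_state``
    helpers further down works out to::

        y1 address = profile | (channel << W.profile)
        x0 address = (channel << 1) | (1 << (W.profile + W.channel))
        x1 address = (channel << 1) | 1 | (1 << (W.profile + W.channel))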
Each memory location is + W.state bits wide. + + Real-time control + ================= + + Signals are exposed for each channel: + + * The active profile, PROFILE + * Whether to perform IIR filter iterations, EN_IIR + * The RF switch state enabling output from the channel, EN_OUT + + Delayed IIR processing + ====================== + + The IIR filter iterations on a given channel are only performed all of the + following are true: + + * PROFILE, EN_IIR, EN_OUT have not been updated in the within the + last DLY cycles + * EN_IIR is asserted + * EN_OUT is asserted + + DSP design + ========== + + Typical design at the DSP level. This does not include the description of + the pipelining or the overall latency involved. + + IIRWidths(state=25, coeff=18, adc=16, + asf=14, word=16, accu=48, shift=11, + channel=3, profile=5, dly=8) + + X0 = ADC * 2^(25 - 1 - 16) + X1 = X0 delayed by one cycle + A0 = 2^11 + A0*Y0 = A1*Y1 + B0*(X0 + OFFSET) + B1*(X1 + OFFSET) + Y1 = Y0 delayed by one cycle + ASF = Y0 / 2^(25 - 14 - 1) + + ADC: input value from the ADC + ASF: output amplitude scale factor to DDS + OFFSET: setpoint + A0: fixed factor (normalization) + A1/B0/B1: coefficients (integers) + + B0 --/- A0: 2^11 + 18 | | + ADC -/-[<<]-/-(+)-/---(x)-(+)-/-[>>]-/-[_/^]-/---[>>]-/- ASF + 16 8 24 | 25 | | 48 11 37 25 | 10 15 + OFFSET --/- [z^-1] ^ [z^-1] + 24 | | | + -(x)-(+)-<-(x)-----<------ + | | + B1 --/- A1 --/- + 18 18 + + [<<]: left shift, multiply by 2^n + [>>]: right shift, divide by 2^n + (x): multiplication + (+), (-): addition, subtraction + [_/^]: clip + [z^-1]: register, delay by one processing cycle (~1.1 µs) + --/--: signal with a given bit width always includes a sign bit + -->--: flow is to the right and down unless otherwise indicated + """ + def __init__(self, w): + self.widths = w + for i, j in enumerate(w): + assert j > 0, (i, j, w) + assert w.word <= w.coeff # same memory + assert w.state + w.coeff + 3 <= w.accu + + # m_coeff of active profiles should only be accessed during + # ~processing + self.specials.m_coeff = Memory( + width=2*w.coeff, # Cat(pow/ftw/offset, cfg/a/b) + depth=4 << w.profile + w.channel) + # m_state[x] should only be read during ~(shifting | + # loading) + # m_state[y] of active profiles should only be read during + # ~processing + self.specials.m_state = Memory( + width=w.state, # y1,x0,x1 + depth=(1 << w.profile + w.channel) + (2 << w.channel)) + # ctrl should only be updated synchronously + self.ctrl = [Record([ + ("profile", w.profile), + ("en_out", 1), + ("en_iir", 1), + ("clip", 1), + ("stb", 1)]) + for i in range(1 << w.channel)] + # only update during ~loading + self.adc = [Signal((w.adc, True), reset_less=True) + for i in range(1 << w.channel)] + # Cat(ftw0, ftw1, pow, asf) + # only read during ~processing + self.dds = [Signal(4*w.word, reset_less=True) + for i in range(1 << w.channel)] + # perform one IIR iteration, start with loading, + # then processing, then shifting, end with done + self.start = Signal() + # adc inputs being loaded into RAM (becoming x0) + self.loading = Signal() + # processing state data (extracting ftw0/ftw1/pow, + # computing asf/y0, and storing as y1) + self.processing = Signal() + # shifting input state values around (x0 becomes x1) + self.shifting = Signal() + # iteration done, the next iteration can be started + self.done = Signal() + + ### + + # pivot arrays for muxing + profiles = Array([ch.profile for ch in self.ctrl]) + en_outs = Array([ch.en_out for ch in self.ctrl]) + en_iirs = Array([ch.en_iir for ch in self.ctrl]) + clips = 
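        # Editor's sketch (not part of the original patch): a software model
        # of one filter update as described in the "DSP design" section of
        # the docstring -- A0*Y0 = A1*Y1 + B0*(X0 + OFFSET) + B1*(X1 + OFFSET)
        # with A0 = 2**w.shift, rounding down like the cleared accumulator in
        # DSP, and clipping to the non-negative signed output range. x0, x1,
        # y1 and offset are taken here in full state units; pipeline timing
        # is not modelled. w is an IIRWidths tuple.
        def iir_step_model(w, x0, x1, y1, offset, a1, b0, b1):
            acc = a1*y1 + b0*(x0 + offset) + b1*(x1 + offset)
            y0 = acc >> w.shift
            return min(max(y0, 0), (1 << (w.state - 1)) - 1)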
Array([ch.clip for ch in self.ctrl]) + + # state counter + state = Signal(w.channel + 2) + # pipeline group activity flags (SR) + stage = Signal(3) + self.submodules.fsm = fsm = FSM("IDLE") + state_clr = Signal() + stage_en = Signal() + fsm.act("IDLE", + self.done.eq(1), + state_clr.eq(1), + If(self.start, + NextState("LOAD") + ) + ) + fsm.act("LOAD", + self.loading.eq(1), + If(state == (1 << w.channel) - 1, + state_clr.eq(1), + stage_en.eq(1), + NextState("PROCESS") + ) + ) + fsm.act("PROCESS", + self.processing.eq(1), + # this is technically wasting three cycles + # (one for setting stage, and phase=2,3 with stage[2]) + If(stage == 0, + state_clr.eq(1), + NextState("SHIFT") + ) + ) + fsm.act("SHIFT", + self.shifting.eq(1), + If(state == (2 << w.channel) - 1, + NextState("IDLE") + ) + ) + + self.sync += [ + state.eq(state + 1), + If(state_clr, + state.eq(0), + ), + If(stage_en, + stage[0].eq(1) + ) + ] + + # pipeline group channel pointer + # for each pipeline stage, this is the channel currently being + # processed + channel = [Signal(w.channel, reset_less=True) for i in range(3)] + # pipeline group profile pointer (SR) + # for each pipeline stage, this is the profile currently being + # processed + profile = [Signal(w.profile, reset_less=True) for i in range(2)] + # pipeline phase (lower two bits of state) + phase = Signal(2, reset_less=True) + + self.comb += Cat(phase, channel[0]).eq(state) + self.sync += [ + Case(phase, { + 0: [ + profile[0].eq(profiles[channel[0]]), + profile[1].eq(profile[0]) + ], + 3: [ + Cat(channel[1:]).eq(Cat(channel[:-1])), + stage[1:].eq(stage[:-1]), + If(channel[0] == (1 << w.channel) - 1, + stage[0].eq(0) + ) + ] + }) + ] + + m_coeff = self.m_coeff.get_port() + m_state = self.m_state.get_port(write_capable=True) # mode=READ_FIRST + self.specials += m_state, m_coeff + + dsp = DSP(w) + self.submodules += dsp + + offset_clr = Signal() + + self.comb += [ + m_coeff.adr.eq(Cat(phase, profile[0], + Mux(phase==0, channel[1], channel[0]))), + dsp.offset[-w.coeff - 1:].eq(Mux(offset_clr, 0, + Cat(m_coeff.dat_r[:w.coeff], m_coeff.dat_r[w.coeff - 1]) + )), + dsp.coeff.eq(m_coeff.dat_r[w.coeff:]), + dsp.state.eq(m_state.dat_r), + Case(phase, { + 0: dsp.accu_clr.eq(1), + 2: [ + offset_clr.eq(1), + dsp.offset_load.eq(1) + ], + 3: dsp.offset_load.eq(1) + }) + ] + + # selected adc and profile delay (combinatorial from dat_r) + # both share the same coeff word (sel in the lower 8 bits) + sel_profile = Signal(w.channel) + dly_profile = Signal(w.dly) + assert w.channel <= 8 + assert 8 + w.dly <= w.coeff + + # latched adc selection + sel = Signal(w.channel, reset_less=True) + # iir enable SR + en = Signal(2, reset_less=True) + + self.comb += [ + sel_profile.eq(m_coeff.dat_r[w.coeff:]), + dly_profile.eq(m_coeff.dat_r[w.coeff + 8:]), + If(self.shifting, + m_state.adr.eq(state | (1 << w.profile + w.channel)), + m_state.dat_w.eq(m_state.dat_r), + m_state.we.eq(state[0]) + ), + If(self.loading, + m_state.adr.eq((state << 1) | (1 << w.profile + w.channel)), + m_state.dat_w[-w.adc - 1:-1].eq(Array(self.adc)[state]), + m_state.dat_w[-1].eq(m_state.dat_w[-2]), + m_state.we.eq(1) + ), + If(self.processing, + m_state.adr.eq(Array([ + # write back new y + Cat(profile[1], channel[2]), + # read old y + Cat(profile[0], channel[0]), + # x0 (recent) + 0 | (sel_profile << 1) | (1 << w.profile + w.channel), + # x1 (old) + 1 | (sel << 1) | (1 << w.profile + w.channel), + ])[phase]), + m_state.dat_w.eq(dsp.output), + m_state.we.eq((phase == 0) & stage[2] & en[1]), + ) + ] + + # internal channel 
delay counters + dlys = Array([Signal(w.dly) + for i in range(1 << w.channel)]) + self._dlys = dlys # expose for debugging only + + for i in range(1 << w.channel): + self.sync += [ + # (profile != profile_old) | ~en_out + If(self.ctrl[i].stb, + dlys[i].eq(0), + ) + ] + + # latched channel delay + dly = Signal(w.dly, reset_less=True) + # latched channel en_out + en_out = Signal(reset_less=True) + # latched channel en_iir + en_iir = Signal(reset_less=True) + # muxing + ddss = Array(self.dds) + + self.sync += [ + Case(phase, { + 0: [ + dly.eq(dlys[channel[0]]), + en_out.eq(en_outs[channel[0]]), + en_iir.eq(en_iirs[channel[0]]), + If(stage[1], + ddss[channel[1]][:w.word].eq(m_coeff.dat_r) + ), + If(stage[2] & en[1] & dsp.clip, + clips[channel[2]].eq(1) + ) + ], + 1: [ + If(stage[1], + ddss[channel[1]][w.word:2*w.word].eq( + m_coeff.dat_r), + ), + If(stage[2], + ddss[channel[2]][3*w.word:].eq( + m_state.dat_r[w.state - w.asf - 1:w.state - 1]) + ) + ], + 2: [ + en[0].eq(0), + en[1].eq(en[0]), + sel.eq(sel_profile), + If(stage[0], + ddss[channel[0]][2*w.word:3*w.word].eq( + m_coeff.dat_r), + If(en_out, + If(dly != dly_profile, + dlys[channel[0]].eq(dly + 1) + ).Elif(en_iir, + en[0].eq(1) + ) + ) + ) + ], + 3: [ + ], + }), + ] + + def _coeff(self, channel, profile, coeff): + """Return ``high_word``, ``address`` and bit ``mask`` for the + storage of coefficient name ``coeff`` in profile ``profile`` + of channel ``channel``. + + ``high_word`` determines whether the coefficient is stored in the high + or low part of the memory location. + """ + w = self.widths + addr = "ftw1 b1 pow cfg offset a1 ftw0 b0".split().index(coeff) + coeff_addr = ((channel << w.profile + 2) | (profile << 2) | + (addr >> 1)) + mask = (1 << w.coeff) - 1 + return addr & 1, coeff_addr, mask + + def set_coeff(self, channel, profile, coeff, value): + """Set the coefficient value. + + Note that due to two coefficiddents sharing a single memory + location, only one coefficient update can be effected to a given memory + location per simulation clock cycle. 
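        For example, with W.profile = 5 (as in the example widths in the
        class docstring), the pair (``ftw1``, ``b1``) of channel 1, profile 2
        shares the memory word at address (1 << 7) | (2 << 2) == 136, with
        ``ftw1`` in the low and ``b1`` in the high W.coeff bits.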
+        """
+        w = self.widths
+        word, addr, mask = self._coeff(channel, profile, coeff)
+        val = yield self.m_coeff[addr]
+        if word:
+            val = (val & mask) | ((value & mask) << w.coeff)
+        else:
+            val = (value & mask) | (val & (mask << w.coeff))
+        yield self.m_coeff[addr].eq(val)
+
+    def get_coeff(self, channel, profile, coeff):
+        """Get a coefficient value."""
+        w = self.widths
+        word, addr, mask = self._coeff(channel, profile, coeff)
+        val = yield self.m_coeff[addr]
+        if word:
+            val >>= w.coeff
+        else:
+            val &= mask
+        if coeff in "offset a1 b0 b1".split():
+            val = signed(val, w.coeff)
+        return val
+
+    def set_state(self, channel, val, profile=None, coeff="y1"):
+        """Set a state value."""
+        w = self.widths
+        if coeff == "y1":
+            assert profile is not None
+            yield self.m_state[profile | (channel << w.profile)].eq(val)
+        elif coeff == "x0":
+            assert profile is None
+            yield self.m_state[(channel << 1) |
+                    (1 << w.profile + w.channel)].eq(val)
+        elif coeff == "x1":
+            assert profile is None
+            yield self.m_state[1 | (channel << 1) |
+                    (1 << w.profile + w.channel)].eq(val)
+        else:
+            raise ValueError("no such state", coeff)
+
+    def get_state(self, channel, profile=None, coeff="y1"):
+        """Get a state value."""
+        w = self.widths
+        if coeff == "y1":
+            val = yield self.m_state[profile | (channel << w.profile)]
+        elif coeff == "x0":
+            val = yield self.m_state[(channel << 1) |
+                    (1 << w.profile + w.channel)]
+        elif coeff == "x1":
+            val = yield self.m_state[1 | (channel << 1) |
+                    (1 << w.profile + w.channel)]
+        else:
+            raise ValueError("no such state", coeff)
+        return signed(val, w.state)
+
+    def fast_iter(self):
+        """Perform a single processing iteration."""
+        assert (yield self.done)
+        yield self.start.eq(1)
+        yield
+        yield self.start.eq(0)
+        yield
+        while not (yield self.done):
+            yield
+
+    def check_iter(self):
+        """Perform a single processing iteration while verifying
+        the behavior."""
+        w = self.widths
+
+        while not (yield self.done):
+            yield
+
+        yield self.start.eq(1)
+        yield
+        yield self.start.eq(0)
+        yield
+        assert not (yield self.done)
+        assert (yield self.loading)
+        while (yield self.loading):
+            yield
+
+        x0s = []
+        # check adc loading
+        for i in range(1 << w.channel):
+            v_adc = signed((yield self.adc[i]), w.adc)
+            x0 = yield from self.get_state(i, coeff="x0")
+            x0s.append(x0)
+            assert v_adc << (w.state - w.adc - 1) == x0, (hex(v_adc), hex(x0))
+            logger.debug("adc[%d] adc=%x x0=%x", i, v_adc, x0)
+
+        data = []
+        # predict output
+        for i in range(1 << w.channel):
+            j = yield self.ctrl[i].profile
+            en_iir = yield self.ctrl[i].en_iir
+            en_out = yield self.ctrl[i].en_out
+            dly_i = yield self._dlys[i]
+            logger.debug("ctrl[%d] profile=%d en_iir=%d en_out=%d dly=%d",
+                    i, j, en_iir, en_out, dly_i)
+
+            cfg = yield from self.get_coeff(i, j, "cfg")
+            k_j = cfg & ((1 << w.channel) - 1)
+            dly_j = (cfg >> 8) & 0xff
+            logger.debug("cfg[%d,%d] sel=%d dly=%d", i, j, k_j, dly_j)
+
+            en = en_iir & en_out & (dly_i >= dly_j)
+            logger.debug("en[%d,%d] %d", i, j, en)
+
+            offset = yield from self.get_coeff(i, j, "offset")
+            offset <<= w.state - w.coeff - 1
+            a1 = yield from self.get_coeff(i, j, "a1")
+            b0 = yield from self.get_coeff(i, j, "b0")
+            b1 = yield from self.get_coeff(i, j, "b1")
+            logger.debug("coeff[%d,%d] offset=%#x a1=%#x b0=%#x b1=%#x",
+                    i, j, offset, a1, b0, b1)
+
+            ftw0 = yield from self.get_coeff(i, j, "ftw0")
+            ftw1 = yield from self.get_coeff(i, j, "ftw1")
+            pow = yield from self.get_coeff(i, j, "pow")
+            logger.debug("dds[%d,%d] ftw0=%#x ftw1=%#x pow=%#x",
+                    i, j, ftw0, ftw1, pow)
+
+            y1 = yield 
from self.get_state(i, j, "y1") + x1 = yield from self.get_state(k_j, coeff="x1") + x0 = yield from self.get_state(k_j, coeff="x0") + logger.debug("state y1[%d,%d]=%#x x0[%d]=%#x x1[%d]=%#x", + i, j, y1, k_j, x0, k_j, x1) + + p = (0*(1 << w.shift - 1) + a1*(y1 + 0) + + b0*(x0 + offset) + b1*(x1 + offset)) + out = p >> w.shift + y0 = min(max(0, out), (1 << w.state - 1) - 1) + logger.debug("dsp[%d,%d] p=%#x out=%#x y0=%#x", + i, j, p, out, y0) + + if not en: + y0 = y1 + data.append((ftw0, ftw1, pow, y0, x1, x0)) + + # wait for output + assert (yield self.processing) + while (yield self.processing): + yield + + assert (yield self.shifting) + while (yield self.shifting): + yield + + # check x shifting + for i, x0 in enumerate(x0s): + x1 = yield from self.get_state(i, coeff="x1") + assert x1 == x0, (hex(x1), hex(x0)) + logger.debug("adc[%d] x0=%x x1=%x", i, x0, x1) + + # check new state + for i in range(1 << w.channel): + j = yield self.ctrl[i].profile + logger.debug("ch[%d] profile=%d", i, j) + y1 = yield from self.get_state(i, j, "y1") + ftw0, ftw1, pow, y0, x1, x0 = data[i] + assert y1 == y0, (hex(y1), hex(y0)) + + # check dds output + for i in range(1 << w.channel): + ftw0, ftw1, pow, y0, x1, x0 = data[i] + asf = y0 >> (w.state - w.asf - 1) + dds = (ftw0 | (ftw1 << w.word) | + (pow << 2*w.word) | (asf << 3*w.word)) + dds_state = yield self.dds[i] + logger.debug("ch[%d] dds_state=%#x dds=%#x", i, dds_state, dds) + assert dds_state == dds, [hex(_) for _ in + (dds_state, asf, pow, ftw1, ftw0)] + + assert (yield self.done) + return data diff --git a/artiq/gateware/suservo/pads.py b/artiq/gateware/suservo/pads.py new file mode 100644 index 000000000..0ab7d352f --- /dev/null +++ b/artiq/gateware/suservo/pads.py @@ -0,0 +1,83 @@ +from migen import * +from migen.genlib.io import DifferentialOutput, DifferentialInput, DDROutput + + +class SamplerPads(Module): + def __init__(self, platform, eem): + self.sck_en = Signal() + self.cnv = Signal() + self.clkout = Signal() + + spip = platform.request("{}_adc_spi_p".format(eem)) + spin = platform.request("{}_adc_spi_n".format(eem)) + cnv = platform.request("{}_cnv".format(eem)) + sdr = platform.request("{}_sdr".format(eem)) + dp = platform.request("{}_adc_data_p".format(eem)) + dn = platform.request("{}_adc_data_n".format(eem)) + + clkout_se = Signal() + clkout_inv = Signal() + sck = Signal() + + self.specials += [ + DifferentialOutput(self.cnv, cnv.p, cnv.n), + DifferentialOutput(1, sdr.p, sdr.n), + DDROutput(self.sck_en, 0, sck, ClockSignal("rio_phy")), + DifferentialOutput(sck, spip.clk, spin.clk), + DifferentialInput(dp.clkout, dn.clkout, clkout_se), + # FIXME (hardware): CLKOUT is inverted + # (Sampler v2.0, v2.1) out on rising, in on falling + Instance("BUFR", i_I=clkout_se, o_O=clkout_inv) + ] + self.comb += self.clkout.eq(~clkout_inv) + + # define clock here before the input delays below + self.clkout_p = dp.clkout # available for false paths + platform.add_platform_command( + "create_clock -name {clk} -period 8 [get_nets {clk}]", + clk=dp.clkout) + # platform.add_period_constraint(sampler_pads.clkout_p, 8.) 
+ for i in "abcd": + sdo = Signal() + setattr(self, "sdo{}".format(i), sdo) + if i != "a": + # FIXME (hardware): sdob, sdoc, sdod are inverted + # (Sampler v2.0, v2.1) + sdo, sdo_inv = Signal(), sdo + self.comb += sdo_inv.eq(~sdo) + sdop = getattr(dp, "sdo{}".format(i)) + sdon = getattr(dn, "sdo{}".format(i)) + self.specials += [ + DifferentialInput(sdop, sdon, sdo), + ] + # -0+1.5 hold (t_HSDO_SDR), -0.5+0.5 skew + platform.add_platform_command( + "set_input_delay -clock {clk} -max 2 [get_ports {port}]\n" + "set_input_delay -clock {clk} -min -0.5 [get_ports {port}]", + clk=dp.clkout, port=sdop) + + +class UrukulPads(Module): + def __init__(self, platform, *eems): + spip, spin = [[ + platform.request("{}_qspi_{}".format(eem, pol), 0) + for eem in eems] for pol in "pn"] + ioup = [platform.request("{}_io_update".format(eem), 0) + for eem in eems] + self.cs_n = Signal() + self.clk = Signal() + self.io_update = Signal() + self.specials += [( + DifferentialOutput(~self.cs_n, spip[i].cs, spin[i].cs), + DifferentialOutput(self.clk, spip[i].clk, spin[i].clk), + DifferentialOutput(self.io_update, ioup[i].p, ioup[i].n)) + for i in range(len(eems))] + for i in range(8): + mosi = Signal() + setattr(self, "mosi{}".format(i), mosi) + for i in range(4*len(eems)): + self.specials += [ + DifferentialOutput(getattr(self, "mosi{}".format(i)), + getattr(spip[i // 4], "mosi{}".format(i % 4)), + getattr(spin[i // 4], "mosi{}".format(i % 4))) + ] diff --git a/artiq/gateware/suservo/servo.py b/artiq/gateware/suservo/servo.py new file mode 100644 index 000000000..08b31a3bc --- /dev/null +++ b/artiq/gateware/suservo/servo.py @@ -0,0 +1,61 @@ +from migen import * + +from .adc_ser import ADC, ADCParams +from .iir import IIR, IIRWidths +from .dds_ser import DDS, DDSParams + + +class Servo(Module): + def __init__(self, adc_pads, dds_pads, adc_p, iir_p, dds_p): + self.submodules.adc = ADC(adc_pads, adc_p) + self.submodules.iir = IIR(iir_p) + self.submodules.dds = DDS(dds_pads, dds_p) + + # adc channels are reversed on Sampler + for i, j, k, l in zip(reversed(self.adc.data), self.iir.adc, + self.iir.dds, self.dds.profile): + self.comb += j.eq(i), l.eq(k) + + t_adc = (adc_p.t_cnvh + adc_p.t_conv + adc_p.t_rtt + + adc_p.channels*adc_p.width//adc_p.lanes) + 1 + t_iir = ((1 + 4 + 1) << iir_p.channel) + 1 + t_dds = (dds_p.width*2 + 1)*dds_p.clk + 1 + + t_cycle = max(t_adc, t_iir, t_dds) + assert t_iir + (2 << iir_p.channel) < t_cycle, "need shifting time" + + self.start = Signal() + t_restart = t_cycle - t_adc + 1 + assert t_restart > 1 + cnt = Signal(max=t_restart) + cnt_done = Signal() + active = Signal(3) + self.done = Signal() + self.sync += [ + If(self.dds.done, + active[2].eq(0) + ), + If(self.dds.start & self.dds.done, + active[2].eq(1), + active[1].eq(0) + ), + If(self.iir.start & self.iir.done, + active[1].eq(1), + active[0].eq(0) + ), + If(~cnt_done & self.adc.done, + cnt.eq(cnt - 1) + ), + If(self.adc.start & self.adc.done, + active[0].eq(1), + cnt.eq(t_restart - 1) + ) + ] + self.comb += [ + cnt_done.eq(cnt == 0), + self.adc.start.eq(self.start & cnt_done), + self.iir.start.eq(active[0] & self.adc.done), + self.dds.start.eq(active[1] & + (self.iir.shifting | self.iir.done)), + self.done.eq(self.dds.done), + ] diff --git a/artiq/gateware/suservo/spi.py b/artiq/gateware/suservo/spi.py new file mode 100644 index 000000000..94f28c9a1 --- /dev/null +++ b/artiq/gateware/suservo/spi.py @@ -0,0 +1,95 @@ +import logging +from collections import namedtuple + +from migen import * +from migen.genlib.fsm import FSM, NextState 
+from migen.genlib import io + + +logger = logging.getLogger(__name__) + + +# all times in cycles +SPIParams = namedtuple("SPIParams", [ + "channels", # number of MOSI? data lanes + "width", # transfer width + "clk", # CLK half cycle width (in cycles) +]) + + +class SPISimple(Module): + """Simple reduced SPI interface. + + * Multiple MOSI lines + * Supports differential CLK/CS_N/MOSI + * Fixed CLK timing + * SPI MODE 0 (CPHA=0, CPOL=0) + """ + def __init__(self, pads, params): + self.params = p = params + self.data = [Signal(p.width, reset_less=True) + for i in range(p.channels)] # data to be output, MSB first + self.start = Signal() # start transfer + self.done = Signal() # transfer complete, next transfer can be + # started + + ### + + assert p.clk >= 1 + + cnt = Signal(max=max(2, p.clk), reset_less=True) + cnt_done = Signal() + cnt_next = Signal() + self.comb += cnt_done.eq(cnt == 0) + self.sync += [ + If(cnt_done, + If(cnt_next, + cnt.eq(p.clk - 1) + ) + ).Else( + cnt.eq(cnt - 1) + ) + ] + + for i, d in enumerate(self.data): + self.comb += getattr(pads, "mosi{}".format(i)).eq(d[-1]) + + bits = Signal(max=p.width + 1, reset_less=True) + + self.submodules.fsm = fsm = CEInserter()(FSM("IDLE")) + + self.comb += fsm.ce.eq(cnt_done) + + fsm.act("IDLE", + self.done.eq(1), + pads.cs_n.eq(1), + If(self.start, + cnt_next.eq(1), + NextState("SETUP") + ) + ) + fsm.act("SETUP", + cnt_next.eq(1), + If(bits == 0, + NextState("IDLE") + ).Else( + NextState("HOLD") + ) + ) + fsm.act("HOLD", + cnt_next.eq(1), + pads.clk.eq(1), + NextState("SETUP") + ) + + self.sync += [ + If(fsm.ce, + If(fsm.before_leaving("HOLD"), + bits.eq(bits - 1), + [d[1:].eq(d) for d in self.data] + ), + If(fsm.ongoing("IDLE"), + bits.eq(p.width) + ) + ) + ] diff --git a/artiq/gateware/targets/kasli.py b/artiq/gateware/targets/kasli.py new file mode 100755 index 000000000..91c6fb9a0 --- /dev/null +++ b/artiq/gateware/targets/kasli.py @@ -0,0 +1,724 @@ +#!/usr/bin/env python3 + +import argparse + +from migen import * +from migen.genlib.resetsync import AsyncResetSynchronizer +from migen.genlib.cdc import MultiReg +from migen.genlib.io import DifferentialOutput + +from misoc.interconnect.csr import * +from misoc.cores import gpio +from misoc.cores.a7_gtp import * +from misoc.targets.kasli import ( + BaseSoC, MiniSoC, soc_kasli_args, soc_kasli_argdict) +from misoc.integration.builder import builder_args, builder_argdict + +from artiq.gateware.amp import AMPSoC +from artiq.gateware import rtio +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_7series, edge_counter +from artiq.gateware import eem +from artiq.gateware.drtio.transceiver import gtp_7series +from artiq.gateware.drtio.siphaser import SiPhaser7Series +from artiq.gateware.drtio.wrpll import WRPLL, DDMTDSamplerGTP +from artiq.gateware.drtio.rx_synchronizer import XilinxRXSynchronizer +from artiq.gateware.drtio import * +from artiq.build_soc import * + + +class _RTIOCRG(Module, AutoCSR): + def __init__(self, platform): + self.pll_reset = CSRStorage(reset=1) + self.pll_locked = CSRStatus() + self.clock_domains.cd_rtio = ClockDomain() + self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) + + if platform.hw_rev == "v2.0": + clk_synth = platform.request("cdr_clk_clean_fabric") + else: + clk_synth = platform.request("si5324_clkout_fabric") + clk_synth_se = Signal() + platform.add_period_constraint(clk_synth.p, 8.0) + self.specials += [ + Instance("IBUFGDS", + p_DIFF_TERM="TRUE", p_IBUF_LOW_PWR="FALSE", + i_I=clk_synth.p, i_IB=clk_synth.n, o_O=clk_synth_se), + ] + + 
pll_locked = Signal() + rtio_clk = Signal() + rtiox4_clk = Signal() + fb_clk = Signal() + self.specials += [ + Instance("PLLE2_ADV", + p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked, + p_BANDWIDTH="HIGH", + p_REF_JITTER1=0.001, + p_CLKIN1_PERIOD=8.0, p_CLKIN2_PERIOD=8.0, + i_CLKIN2=clk_synth_se, + # Warning: CLKINSEL=0 means CLKIN2 is selected + i_CLKINSEL=0, + + # VCO @ 1.5GHz when using 125MHz input + p_CLKFBOUT_MULT=12, p_DIVCLK_DIVIDE=1, + i_CLKFBIN=fb_clk, + i_RST=self.pll_reset.storage, + + o_CLKFBOUT=fb_clk, + + p_CLKOUT0_DIVIDE=3, p_CLKOUT0_PHASE=0.0, + o_CLKOUT0=rtiox4_clk, + + p_CLKOUT1_DIVIDE=12, p_CLKOUT1_PHASE=0.0, + o_CLKOUT1=rtio_clk), + Instance("BUFG", i_I=rtio_clk, o_O=self.cd_rtio.clk), + Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), + + AsyncResetSynchronizer(self.cd_rtio, ~pll_locked), + MultiReg(pll_locked, self.pll_locked.status) + ] + + +class SMAClkinForward(Module): + def __init__(self, platform): + sma_clkin = platform.request("sma_clkin") + sma_clkin_se = Signal() + sma_clkin_buffered = Signal() + cdr_clk_se = Signal() + cdr_clk = platform.request("cdr_clk") + self.specials += [ + Instance("IBUFDS", i_I=sma_clkin.p, i_IB=sma_clkin.n, o_O=sma_clkin_se), + Instance("BUFIO", i_I=sma_clkin_se, o_O=sma_clkin_buffered), + Instance("ODDR", i_C=sma_clkin_buffered, i_CE=1, i_D1=0, i_D2=1, o_Q=cdr_clk_se), + Instance("OBUFDS", i_I=cdr_clk_se, o_O=cdr_clk.p, o_OB=cdr_clk.n) + ] + + +def fix_serdes_timing_path(platform): + # ignore timing of path from OSERDESE2 through the pad to ISERDESE2 + platform.add_platform_command( + "set_false_path -quiet " + "-through [get_pins -filter {{REF_PIN_NAME == OQ || REF_PIN_NAME == TQ}} " + "-of [get_cells -filter {{REF_NAME == OSERDESE2}}]] " + "-to [get_pins -filter {{REF_PIN_NAME == D}} " + "-of [get_cells -filter {{REF_NAME == ISERDESE2}}]]" + ) + + +class StandaloneBase(MiniSoC, AMPSoC): + mem_map = { + "cri_con": 0x10000000, + "rtio": 0x20000000, + "rtio_dma": 0x30000000, + "mailbox": 0x70000000 + } + mem_map.update(MiniSoC.mem_map) + + def __init__(self, gateware_identifier_str=None, **kwargs): + MiniSoC.__init__(self, + cpu_type="or1k", + sdram_controller_type="minicon", + l2_size=128*1024, + integrated_sram_size=8192, + ethmac_nrxslots=4, + ethmac_ntxslots=4, + **kwargs) + AMPSoC.__init__(self) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + + if self.platform.hw_rev == "v2.0": + self.submodules.error_led = gpio.GPIOOut(Cat( + self.platform.request("error_led"))) + self.csr_devices.append("error_led") + self.submodules += SMAClkinForward(self.platform) + + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None + + def add_rtio(self, rtio_channels): + self.submodules.rtio_crg = _RTIOCRG(self.platform) + self.csr_devices.append("rtio_crg") + fix_serdes_timing_path(self.platform) + self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.csr_devices.append("rtio_core") + self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) + self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( + rtio.DMA(self.get_native_sdram_if())) + self.register_kernel_cpu_csrdevice("rtio") + self.register_kernel_cpu_csrdevice("rtio_dma") + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.rtio.cri, self.rtio_dma.cri], + 
[self.rtio_core.cri]) + self.register_kernel_cpu_csrdevice("cri_con") + + # Only add MonInj core if there is anything to monitor + if any([len(c.probes) for c in rtio_channels]): + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.platform.add_false_path_constraints( + self.crg.cd_sys.clk, + self.rtio_crg.cd_rtio.clk) + + self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_tsc, self.rtio_core.cri, + self.get_native_sdram_if()) + self.csr_devices.append("rtio_analyzer") + + +class Tester(StandaloneBase): + """ + Configuration for CI tests. Contains the maximum number of different EEMs. + """ + def __init__(self, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = "v2.0" + StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) + + self.config["SI5324_AS_SYNTHESIZER"] = None + # self.config["SI5324_EXT_REF"] = None + self.config["RTIO_FREQUENCY"] = "125.0" + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + self.rtio_channels = [] + eem.DIO.add_std(self, 5, + ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X, + edge_counter_cls=edge_counter.SimpleEdgeCounter) + eem.Urukul.add_std(self, 0, 1, ttl_serdes_7series.Output_8X, + ttl_simple.ClockGen) + eem.Sampler.add_std(self, 3, 2, ttl_serdes_7series.Output_8X) + eem.Zotino.add_std(self, 4, ttl_serdes_7series.Output_8X) + + if hw_rev in ("v1.0", "v1.1"): + for i in (1, 2): + sfp_ctl = self.platform.request("sfp_ctl", i) + phy = ttl_simple.Output(sfp_ctl.led) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + self.add_rtio(self.rtio_channels) + + +class SUServo(StandaloneBase): + """ + SUServo (Sampler-Urukul-Servo) extension variant configuration + """ + def __init__(self, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = "v2.0" + StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) + + self.config["SI5324_AS_SYNTHESIZER"] = None + # self.config["SI5324_EXT_REF"] = None + self.config["RTIO_FREQUENCY"] = "125.0" + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + self.rtio_channels = [] + # EEM0, EEM1: DIO + eem.DIO.add_std(self, 0, + ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) + eem.DIO.add_std(self, 1, + ttl_serdes_7series.Output_8X, ttl_serdes_7series.Output_8X) + + # EEM3/2: Sampler, EEM5/4: Urukul, EEM7/6: Urukul + eem.SUServo.add_std( + self, eems_sampler=(3, 2), + eems_urukul0=(5, 4), eems_urukul1=(7, 6)) + + for i in (1, 2): + sfp_ctl = self.platform.request("sfp_ctl", i) + phy = ttl_simple.Output(sfp_ctl.led) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels) + + pads = self.platform.lookup_request("sampler3_adc_data_p") + self.platform.add_false_path_constraints( + pads.clkout, self.rtio_crg.cd_rtio.clk) + self.platform.add_false_path_constraints( + pads.clkout, self.crg.cd_sys.clk) + + +class _RTIOClockMultiplier(Module, AutoCSR): + def __init__(self, rtio_clk_freq): + self.pll_reset = CSRStorage(reset=1) + self.pll_locked = CSRStatus() + self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) + + # See "Global 
Clock Network Deskew Using Two BUFGs" in ug472. + clkfbout = Signal() + clkfbin = Signal() + rtiox4_clk = Signal() + pll_locked = Signal() + self.specials += [ + Instance("MMCME2_BASE", + p_CLKIN1_PERIOD=1e9/rtio_clk_freq, + i_CLKIN1=ClockSignal("rtio"), + i_RST=self.pll_reset.storage, + o_LOCKED=pll_locked, + + p_CLKFBOUT_MULT_F=8.0, p_DIVCLK_DIVIDE=1, + + o_CLKFBOUT=clkfbout, i_CLKFBIN=clkfbin, + + p_CLKOUT0_DIVIDE_F=2.0, o_CLKOUT0=rtiox4_clk, + ), + Instance("BUFG", i_I=clkfbout, o_O=clkfbin), + Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), + + MultiReg(pll_locked, self.pll_locked.status) + ] + + +class MasterBase(MiniSoC, AMPSoC): + mem_map = { + "cri_con": 0x10000000, + "rtio": 0x20000000, + "rtio_dma": 0x30000000, + "drtioaux": 0x50000000, + "mailbox": 0x70000000 + } + mem_map.update(MiniSoC.mem_map) + + def __init__(self, rtio_clk_freq=125e6, enable_sata=False, gateware_identifier_str=None, **kwargs): + MiniSoC.__init__(self, + cpu_type="or1k", + sdram_controller_type="minicon", + l2_size=128*1024, + integrated_sram_size=8192, + ethmac_nrxslots=4, + ethmac_ntxslots=4, + **kwargs) + AMPSoC.__init__(self) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + + platform = self.platform + + if platform.hw_rev == "v2.0": + self.submodules += SMAClkinForward(platform) + + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None + self.config["SI5324_AS_SYNTHESIZER"] = None + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) + + drtio_data_pads = [] + if enable_sata: + drtio_data_pads.append(platform.request("sata")) + drtio_data_pads += [platform.request("sfp", i) for i in range(1, 3)] + if self.platform.hw_rev == "v2.0": + drtio_data_pads.append(platform.request("sfp", 3)) + + if self.platform.hw_rev in ("v1.0", "v1.1"): + sfp_ctls = [platform.request("sfp_ctl", i) for i in range(1, 3)] + self.comb += [sc.tx_disable.eq(0) for sc in sfp_ctls] + + self.submodules.drtio_transceiver = gtp_7series.GTP( + qpll_channel=self.drtio_qpll_channel, + data_pads=drtio_data_pads, + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) + self.csr_devices.append("drtio_transceiver") + self.sync += self.disable_cdr_clk_ibuf.eq( + ~self.drtio_transceiver.stable_clkin.storage) + + if enable_sata: + sfp_channels = self.drtio_transceiver.channels[1:] + else: + sfp_channels = self.drtio_transceiver.channels + if self.platform.hw_rev in ("v1.0", "v1.1"): + self.comb += [sfp_ctl.led.eq(channel.rx_ready) + for sfp_ctl, channel in zip(sfp_ctls, sfp_channels)] + if self.platform.hw_rev == "v2.0": + self.comb += [self.virtual_leds.get(i + 1).eq(channel.rx_ready) + for i, channel in enumerate(sfp_channels)] + + self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) + + drtio_csr_group = [] + drtioaux_csr_group = [] + drtioaux_memory_group = [] + self.drtio_cri = [] + for i in range(len(self.drtio_transceiver.channels)): + core_name = "drtio" + str(i) + coreaux_name = "drtioaux" + str(i) + memory_name = "drtioaux" + str(i) + "_mem" + drtio_csr_group.append(core_name) + drtioaux_csr_group.append(coreaux_name) + drtioaux_memory_group.append(memory_name) + + cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx" + str(i)}) + + core = cdr(DRTIOMaster(self.rtio_tsc, self.drtio_transceiver.channels[i])) + setattr(self.submodules, core_name, core) + self.drtio_cri.append(core.cri) + 
self.csr_devices.append(core_name) + + coreaux = cdr(DRTIOAuxController(core.link_layer)) + setattr(self.submodules, coreaux_name, coreaux) + self.csr_devices.append(coreaux_name) + + memory_address = self.mem_map["drtioaux"] + 0x800*i + self.add_wb_slave(memory_address, 0x800, + coreaux.bus) + self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.config["HAS_DRTIO"] = None + self.config["HAS_DRTIO_ROUTING"] = None + self.add_csr_group("drtio", drtio_csr_group) + self.add_csr_group("drtioaux", drtioaux_csr_group) + self.add_memory_group("drtioaux_mem", drtioaux_memory_group) + + rtio_clk_period = 1e9/rtio_clk_freq + gtp = self.drtio_transceiver.gtps[0] + platform.add_period_constraint(gtp.txoutclk, rtio_clk_period) + platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, + gtp.txoutclk, gtp.rxoutclk) + for gtp in self.drtio_transceiver.gtps[1:]: + platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, gtp.rxoutclk) + + self.submodules.rtio_crg = _RTIOClockMultiplier(rtio_clk_freq) + self.csr_devices.append("rtio_crg") + fix_serdes_timing_path(platform) + + def add_rtio(self, rtio_channels): + # Only add MonInj core if there is anything to monitor + if any([len(c.probes) for c in rtio_channels]): + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.csr_devices.append("rtio_core") + + self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) + self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( + rtio.DMA(self.get_native_sdram_if())) + self.register_kernel_cpu_csrdevice("rtio") + self.register_kernel_cpu_csrdevice("rtio_dma") + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.rtio.cri, self.rtio_dma.cri], + [self.rtio_core.cri] + self.drtio_cri, + enable_routing=True) + self.register_kernel_cpu_csrdevice("cri_con") + self.submodules.routing_table = rtio.RoutingTableAccess(self.cri_con) + self.csr_devices.append("routing_table") + + self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_tsc, self.cri_con.switch.slave, + self.get_native_sdram_if()) + self.csr_devices.append("rtio_analyzer") + + # Never running out of stupid features, GTs on A7 make you pack + # unrelated transceiver PLLs into one GTPE2_COMMON yourself. + def create_qpll(self): + # The GTP acts up if you send any glitch to its + # clock input, even while the PLL is held in reset. + self.disable_cdr_clk_ibuf = Signal(reset=1) + self.disable_cdr_clk_ibuf.attr.add("no_retiming") + if self.platform.hw_rev == "v2.0": + cdr_clk_clean = self.platform.request("cdr_clk_clean") + else: + cdr_clk_clean = self.platform.request("si5324_clkout") + cdr_clk_clean_buf = Signal() + self.specials += Instance("IBUFDS_GTE2", + i_CEB=self.disable_cdr_clk_ibuf, + i_I=cdr_clk_clean.p, i_IB=cdr_clk_clean.n, + o_O=cdr_clk_clean_buf) + # Note precisely the rules Xilinx made up: + # refclksel=0b001 GTREFCLK0 selected + # refclksel=0b010 GTREFCLK1 selected + # but if only one clock is used, then it must be 001. 
+ qpll_drtio_settings = QPLLSettings( + refclksel=0b001, + fbdiv=4, + fbdiv_45=5, + refclk_div=1) + qpll_eth_settings = QPLLSettings( + refclksel=0b010, + fbdiv=4, + fbdiv_45=5, + refclk_div=1) + qpll = QPLL(cdr_clk_clean_buf, qpll_drtio_settings, + self.crg.clk125_buf, qpll_eth_settings) + self.submodules += qpll + self.drtio_qpll_channel, self.ethphy_qpll_channel = qpll.channels + + +class SatelliteBase(BaseSoC): + mem_map = { + "drtioaux": 0x50000000, + } + mem_map.update(BaseSoC.mem_map) + + def __init__(self, rtio_clk_freq=125e6, enable_sata=False, *, with_wrpll=False, gateware_identifier_str=None, **kwargs): + BaseSoC.__init__(self, + cpu_type="or1k", + sdram_controller_type="minicon", + l2_size=128*1024, + **kwargs) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + + platform = self.platform + + disable_cdr_clk_ibuf = Signal(reset=1) + disable_cdr_clk_ibuf.attr.add("no_retiming") + if self.platform.hw_rev == "v2.0": + cdr_clk_clean = self.platform.request("cdr_clk_clean") + else: + cdr_clk_clean = self.platform.request("si5324_clkout") + cdr_clk_clean_buf = Signal() + self.specials += Instance("IBUFDS_GTE2", + i_CEB=disable_cdr_clk_ibuf, + i_I=cdr_clk_clean.p, i_IB=cdr_clk_clean.n, + o_O=cdr_clk_clean_buf) + qpll_drtio_settings = QPLLSettings( + refclksel=0b001, + fbdiv=4, + fbdiv_45=5, + refclk_div=1) + qpll = QPLL(cdr_clk_clean_buf, qpll_drtio_settings) + self.submodules += qpll + + drtio_data_pads = [] + if enable_sata: + drtio_data_pads.append(platform.request("sata")) + drtio_data_pads += [platform.request("sfp", i) for i in range(3)] + if self.platform.hw_rev == "v2.0": + drtio_data_pads.append(platform.request("sfp", 3)) + + if self.platform.hw_rev in ("v1.0", "v1.1"): + sfp_ctls = [platform.request("sfp_ctl", i) for i in range(3)] + self.comb += [sc.tx_disable.eq(0) for sc in sfp_ctls] + self.submodules.drtio_transceiver = gtp_7series.GTP( + qpll_channel=qpll.channels[0], + data_pads=drtio_data_pads, + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) + self.csr_devices.append("drtio_transceiver") + self.sync += disable_cdr_clk_ibuf.eq( + ~self.drtio_transceiver.stable_clkin.storage) + + if enable_sata: + sfp_channels = self.drtio_transceiver.channels[1:] + else: + sfp_channels = self.drtio_transceiver.channels + if self.platform.hw_rev in ("v1.0", "v1.1"): + self.comb += [sfp_ctl.led.eq(channel.rx_ready) + for sfp_ctl, channel in zip(sfp_ctls, sfp_channels)] + if self.platform.hw_rev == "v2.0": + self.comb += [self.virtual_leds.get(i).eq(channel.rx_ready) + for i, channel in enumerate(sfp_channels)] + + self.submodules.rtio_tsc = rtio.TSC("sync", glbl_fine_ts_width=3) + + drtioaux_csr_group = [] + drtioaux_memory_group = [] + drtiorep_csr_group = [] + self.drtio_cri = [] + for i in range(len(self.drtio_transceiver.channels)): + coreaux_name = "drtioaux" + str(i) + memory_name = "drtioaux" + str(i) + "_mem" + drtioaux_csr_group.append(coreaux_name) + drtioaux_memory_group.append(memory_name) + + cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx" + str(i)}) + + if i == 0: + self.submodules.rx_synchronizer = cdr(XilinxRXSynchronizer()) + core = cdr(DRTIOSatellite( + self.rtio_tsc, self.drtio_transceiver.channels[i], + self.rx_synchronizer)) + self.submodules.drtiosat = core + self.csr_devices.append("drtiosat") + else: + corerep_name = "drtiorep" + str(i-1) + drtiorep_csr_group.append(corerep_name) + + core = cdr(DRTIORepeater( + self.rtio_tsc, self.drtio_transceiver.channels[i])) + setattr(self.submodules, corerep_name, core) + 
self.drtio_cri.append(core.cri) + self.csr_devices.append(corerep_name) + + coreaux = cdr(DRTIOAuxController(core.link_layer)) + setattr(self.submodules, coreaux_name, coreaux) + self.csr_devices.append(coreaux_name) + + memory_address = self.mem_map["drtioaux"] + 0x800*i + self.add_wb_slave(memory_address, 0x800, + coreaux.bus) + self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.config["HAS_DRTIO"] = None + self.config["HAS_DRTIO_ROUTING"] = None + self.add_csr_group("drtioaux", drtioaux_csr_group) + self.add_memory_group("drtioaux_mem", drtioaux_memory_group) + self.add_csr_group("drtiorep", drtiorep_csr_group) + + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + + rtio_clk_period = 1e9/rtio_clk_freq + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) + if with_wrpll: + self.submodules.wrpll_sampler = DDMTDSamplerGTP( + self.drtio_transceiver, + platform.request("cdr_clk_clean_fabric")) + helper_clk_pads = platform.request("ddmtd_helper_clk") + self.submodules.wrpll = WRPLL( + helper_clk_pads=helper_clk_pads, + main_dcxo_i2c=platform.request("ddmtd_main_dcxo_i2c"), + helper_dxco_i2c=platform.request("ddmtd_helper_dcxo_i2c"), + ddmtd_inputs=self.wrpll_sampler) + self.csr_devices.append("wrpll") + # note: do not use self.wrpll.cd_helper.clk; otherwise, vivado craps out with: + # critical warning: create_clock attempting to set clock on an unknown port/pin + # command: "create_clock -period 7.920000 -waveform {0.000000 3.960000} -name + # helper_clk [get_xlnx_outside_genome_inst_pin 20 0] + platform.add_period_constraint(helper_clk_pads.p, rtio_clk_period*0.99) + platform.add_false_path_constraints(self.crg.cd_sys.clk, helper_clk_pads.p) + else: + self.submodules.siphaser = SiPhaser7Series( + si5324_clkin=platform.request("cdr_clk") if platform.hw_rev == "v2.0" + else platform.request("si5324_clkin"), + rx_synchronizer=self.rx_synchronizer, + ref_clk=self.crg.clk125_div2, ref_div2=True, + rtio_clk_freq=rtio_clk_freq) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, self.siphaser.mmcm_freerun_output) + self.csr_devices.append("siphaser") + self.config["HAS_SI5324"] = None + self.config["SI5324_SOFT_RESET"] = None + + gtp = self.drtio_transceiver.gtps[0] + platform.add_period_constraint(gtp.txoutclk, rtio_clk_period) + platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, + gtp.txoutclk, gtp.rxoutclk) + if with_wrpll: + platform.add_false_path_constraints( + helper_clk_pads.p, gtp.rxoutclk) + for gtp in self.drtio_transceiver.gtps[1:]: + platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, gtp.rxoutclk) + + self.submodules.rtio_crg = _RTIOClockMultiplier(rtio_clk_freq) + self.csr_devices.append("rtio_crg") + fix_serdes_timing_path(platform) + + def add_rtio(self, rtio_channels): + # Only add MonInj core if there is anything to monitor + if any([len(c.probes) for c in rtio_channels]): + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.submodules.local_io = SyncRTIO(self.rtio_tsc, rtio_channels) + self.comb += self.drtiosat.async_errors.eq(self.local_io.async_errors) + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.drtiosat.cri], + [self.local_io.cri] + self.drtio_cri, + mode="sync", enable_routing=True) + 
self.csr_devices.append("cri_con") + self.submodules.routing_table = rtio.RoutingTableAccess(self.cri_con) + self.csr_devices.append("routing_table") + + +class Master(MasterBase): + def __init__(self, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = "v2.0" + MasterBase.__init__(self, hw_rev=hw_rev, **kwargs) + + self.rtio_channels = [] + + phy = ttl_simple.Output(self.platform.request("user_led", 0)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + # matches Tester EEM numbers + eem.DIO.add_std(self, 5, + ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) + eem.Urukul.add_std(self, 0, 1, ttl_serdes_7series.Output_8X) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels) + + +class Satellite(SatelliteBase): + def __init__(self, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = "v2.0" + SatelliteBase.__init__(self, hw_rev=hw_rev, **kwargs) + + self.rtio_channels = [] + phy = ttl_simple.Output(self.platform.request("user_led", 0)) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + # matches Tester EEM numbers + eem.DIO.add_std(self, 5, + ttl_serdes_7series.InOut_8X, ttl_serdes_7series.Output_8X) + + self.add_rtio(self.rtio_channels) + + +VARIANTS = {cls.__name__.lower(): cls for cls in [Tester, SUServo, Master, Satellite]} + + +def main(): + parser = argparse.ArgumentParser( + description="ARTIQ device binary builder for Kasli systems") + builder_args(parser) + soc_kasli_args(parser) + parser.set_defaults(output_dir="artiq_kasli") + parser.add_argument("-V", "--variant", default="tester", + help="variant: {} (default: %(default)s)".format( + "/".join(sorted(VARIANTS.keys())))) + parser.add_argument("--with-wrpll", default=False, action="store_true") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") + args = parser.parse_args() + + argdict = dict() + if args.with_wrpll: + argdict["with_wrpll"] = True + argdict["gateware_identifier_str"] = args.gateware_identifier_str + + variant = args.variant.lower() + try: + cls = VARIANTS[variant] + except KeyError: + raise SystemExit("Invalid variant (-V/--variant)") + + soc = cls(**soc_kasli_argdict(args), **argdict) + build_artiq_soc(soc, builder_argdict(args)) + + +if __name__ == "__main__": + main() diff --git a/artiq/gateware/targets/kasli_generic.py b/artiq/gateware/targets/kasli_generic.py new file mode 100755 index 000000000..577b93dad --- /dev/null +++ b/artiq/gateware/targets/kasli_generic.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 + +import argparse +import json + +from misoc.integration.builder import builder_args, builder_argdict +from misoc.targets.kasli import soc_kasli_args, soc_kasli_argdict + +from artiq.gateware import rtio +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_7series, edge_counter +from artiq.gateware import eem +from artiq.gateware.targets.kasli import StandaloneBase, MasterBase, SatelliteBase +from artiq.build_soc import * + + +def peripheral_dio(module, peripheral): + ttl_classes = { + "input": ttl_serdes_7series.InOut_8X, + "output": ttl_serdes_7series.Output_8X + } + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + if peripheral.get("edge_counter", False): + edge_counter_cls = edge_counter.SimpleEdgeCounter + else: + edge_counter_cls = None + eem.DIO.add_std(module, peripheral["ports"][0], + 
ttl_classes[peripheral["bank_direction_low"]], + ttl_classes[peripheral["bank_direction_high"]], + edge_counter_cls=edge_counter_cls) + + +def peripheral_urukul(module, peripheral): + if len(peripheral["ports"]) == 1: + port, port_aux = peripheral["ports"][0], None + elif len(peripheral["ports"]) == 2: + port, port_aux = peripheral["ports"] + else: + raise ValueError("wrong number of ports") + if peripheral.get("synchronization", False): + sync_gen_cls = ttl_simple.ClockGen + else: + sync_gen_cls = None + eem.Urukul.add_std(module, port, port_aux, ttl_serdes_7series.Output_8X, + sync_gen_cls) + + +def peripheral_novogorny(module, peripheral): + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + eem.Novogorny.add_std(module, peripheral["ports"][0], ttl_serdes_7series.Output_8X) + + +def peripheral_sampler(module, peripheral): + if len(peripheral["ports"]) == 1: + port, port_aux = peripheral["ports"][0], None + elif len(peripheral["ports"]) == 2: + port, port_aux = peripheral["ports"] + else: + raise ValueError("wrong number of ports") + eem.Sampler.add_std(module, port, port_aux, ttl_serdes_7series.Output_8X) + + +def peripheral_suservo(module, peripheral): + if len(peripheral["sampler_ports"]) != 2: + raise ValueError("wrong number of Sampler ports") + urukul_ports = [] + if len(peripheral["urukul0_ports"]) != 2: + raise ValueError("wrong number of Urukul #0 ports") + urukul_ports.append(peripheral["urukul0_ports"]) + if "urukul1_ports" in peripheral: + if len(peripheral["urukul1_ports"]) != 2: + raise ValueError("wrong number of Urukul #1 ports") + urukul_ports.append(peripheral["urukul1_ports"]) + eem.SUServo.add_std(module, + peripheral["sampler_ports"], + urukul_ports) + + +def peripheral_zotino(module, peripheral): + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + eem.Zotino.add_std(module, peripheral["ports"][0], + ttl_serdes_7series.Output_8X) + + +def peripheral_grabber(module, peripheral): + if len(peripheral["ports"]) == 1: + port = peripheral["ports"][0] + port_aux = None + port_aux2 = None + elif len(peripheral["ports"]) == 2: + port, port_aux = peripheral["ports"] + port_aux2 = None + elif len(peripheral["ports"]) == 3: + port, port_aux, port_aux2 = peripheral["ports"] + else: + raise ValueError("wrong number of ports") + eem.Grabber.add_std(module, port, port_aux, port_aux2) + + +def peripheral_mirny(module, peripheral): + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + eem.Mirny.add_std(module, peripheral["ports"][0], + ttl_serdes_7series.Output_8X) + + +def peripheral_fastino(module, peripheral): + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + eem.Fastino.add_std(module, peripheral["ports"][0], + peripheral.get("log2_width", 0)) + + +def peripheral_phaser(module, peripheral): + if len(peripheral["ports"]) != 1: + raise ValueError("wrong number of ports") + eem.Phaser.add_std(module, peripheral["ports"][0]) + + +peripheral_processors = { + "dio": peripheral_dio, + "urukul": peripheral_urukul, + "novogorny": peripheral_novogorny, + "sampler": peripheral_sampler, + "suservo": peripheral_suservo, + "zotino": peripheral_zotino, + "grabber": peripheral_grabber, + "mirny": peripheral_mirny, + "fastino": peripheral_fastino, + "phaser": peripheral_phaser, +} + + +def add_peripherals(module, peripherals): + for peripheral in peripherals: + peripheral_processors[peripheral["type"]](module, peripheral) + + +class GenericStandalone(StandaloneBase): + def 
__init__(self, description, hw_rev=None,**kwargs): + if hw_rev is None: + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + StandaloneBase.__init__(self, hw_rev=hw_rev, **kwargs) + + self.config["SI5324_AS_SYNTHESIZER"] = None + self.config["RTIO_FREQUENCY"] = "{:.1f}".format(description.get("rtio_frequency", 125e6)/1e6) + if "ext_ref_frequency" in description: + self.config["SI5324_EXT_REF"] = None + self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( + description["ext_ref_frequency"]/1e6) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] + + self.rtio_channels = [] + add_peripherals(self, description["peripherals"]) + if hw_rev in ("v1.0", "v1.1"): + for i in (1, 2): + print("SFP LED at RTIO channel 0x{:06x}".format(len(self.rtio_channels))) + sfp_ctl = self.platform.request("sfp_ctl", i) + phy = ttl_simple.Output(sfp_ctl.led) + self.submodules += phy + self.rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels) + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.rtio_crg.cd_rtio.clk, getattr(self, grabber).deserializer.cd_cl.clk) + + +class GenericMaster(MasterBase): + def __init__(self, description, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + MasterBase.__init__(self, + hw_rev=hw_rev, + rtio_clk_freq=description.get("rtio_frequency", 125e6), + enable_sata=description.get("enable_sata_drtio", False), + **kwargs) + if "ext_ref_frequency" in description: + self.config["SI5324_EXT_REF"] = None + self.config["EXT_REF_FREQUENCY"] = "{:.1f}".format( + description["ext_ref_frequency"]/1e6) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] + + self.rtio_channels = [] + add_peripherals(self, description["peripherals"]) + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels) + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.drtio_transceiver.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) + + +class GenericSatellite(SatelliteBase): + def __init__(self, description, hw_rev=None, **kwargs): + if hw_rev is None: + hw_rev = description["hw_rev"] + self.class_name_override = description["variant"] + SatelliteBase.__init__(self, + hw_rev=hw_rev, + rtio_clk_freq=description.get("rtio_frequency", 125e6), + enable_sata=description.get("enable_sata_drtio", False), + **kwargs) + if hw_rev == "v1.0": + # EEM clock fan-out from Si5324, not MMCX + self.comb += self.platform.request("clk_sel").eq(1) + + has_grabber = any(peripheral["type"] == 
"grabber" for peripheral in description["peripherals"]) + if has_grabber: + self.grabber_csr_group = [] + + self.rtio_channels = [] + add_peripherals(self, description["peripherals"]) + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(self.rtio_channels) + self.rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(self.rtio_channels) + if has_grabber: + self.config["HAS_GRABBER"] = None + self.add_csr_group("grabber", self.grabber_csr_group) + for grabber in self.grabber_csr_group: + self.platform.add_false_path_constraints( + self.drtio_transceiver.gtps[0].txoutclk, getattr(self, grabber).deserializer.cd_cl.clk) + + +def main(): + parser = argparse.ArgumentParser( + description="ARTIQ device binary builder for generic Kasli systems") + builder_args(parser) + soc_kasli_args(parser) + parser.set_defaults(output_dir="artiq_kasli") + parser.add_argument("description", metavar="DESCRIPTION", + help="JSON system description file") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") + args = parser.parse_args() + + with open(args.description, "r") as f: + description = json.load(f) + + if description["target"] != "kasli": + raise ValueError("Description is for a different target") + + if description["base"] == "standalone": + cls = GenericStandalone + elif description["base"] == "master": + cls = GenericMaster + elif description["base"] == "satellite": + cls = GenericSatellite + else: + raise ValueError("Invalid base") + + soc = cls(description, gateware_identifier_str=args.gateware_identifier_str, **soc_kasli_argdict(args)) + args.variant = description["variant"] + build_artiq_soc(soc, builder_argdict(args)) + + +if __name__ == "__main__": + main() diff --git a/artiq/gateware/targets/kc705_dds.py b/artiq/gateware/targets/kc705.py similarity index 62% rename from artiq/gateware/targets/kc705_dds.py rename to artiq/gateware/targets/kc705.py index 642c0ca44..1481fd351 100755 --- a/artiq/gateware/targets/kc705_dds.py +++ b/artiq/gateware/targets/kc705.py @@ -10,37 +10,38 @@ from migen.build.xilinx.vivado import XilinxVivadoToolchain from migen.build.xilinx.ise import XilinxISEToolchain from misoc.interconnect.csr import * -from misoc.cores import gpio +from misoc.cores import gpio, timer from misoc.targets.kc705 import MiniSoC, soc_kc705_args, soc_kc705_argdict from misoc.integration.builder import builder_args, builder_argdict -from artiq.gateware.amp import AMPSoC, build_artiq_soc +from artiq.gateware.amp import AMPSoC from artiq.gateware import rtio, nist_clock, nist_qc2 -from artiq.gateware.rtio.phy import (ttl_simple, ttl_serdes_7series, - dds, spi, ad5360_monitor) -from artiq import __version__ as artiq_version +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_7series, dds, spi2 +from artiq.build_soc import * class _RTIOCRG(Module, AutoCSR): - def __init__(self, platform, rtio_internal_clk): + def __init__(self, platform, rtio_internal_clk, use_sma=True): self._clock_sel = CSRStorage() self._pll_reset = CSRStorage(reset=1) self._pll_locked = CSRStatus() self.clock_domains.cd_rtio = ClockDomain() self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) - # 10 MHz when using 125MHz input + # 100 MHz when using 125MHz input self.clock_domains.cd_ext_clkout = ClockDomain(reset_less=True) - ext_clkout = platform.request("user_sma_gpio_p_33") - self.sync.ext_clkout += ext_clkout.eq(~ext_clkout) - + platform.add_period_constraint(self.cd_ext_clkout.clk, 5.0) + if use_sma: + ext_clkout = 
platform.request("user_sma_gpio_p_33") + self.sync.ext_clkout += ext_clkout.eq(~ext_clkout) rtio_external_clk = Signal() - user_sma_clock = platform.request("user_sma_clock") - platform.add_period_constraint(user_sma_clock.p, 8.0) - self.specials += Instance("IBUFDS", - i_I=user_sma_clock.p, i_IB=user_sma_clock.n, - o_O=rtio_external_clk) + if use_sma: + user_sma_clock = platform.request("user_sma_clock") + platform.add_period_constraint(user_sma_clock.p, 8.0) + self.specials += Instance("IBUFDS", + i_I=user_sma_clock.p, i_IB=user_sma_clock.n, + o_O=rtio_external_clk) pll_locked = Signal() rtio_clk = Signal() @@ -66,7 +67,7 @@ class _RTIOCRG(Module, AutoCSR): p_CLKOUT0_DIVIDE=2, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=rtiox4_clk, - p_CLKOUT1_DIVIDE=50, p_CLKOUT1_PHASE=0.0, + p_CLKOUT1_DIVIDE=5, p_CLKOUT1_PHASE=0.0, o_CLKOUT1=ext_clkout_clk), Instance("BUFG", i_I=rtio_clk, o_O=self.cd_rtio.clk), Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), @@ -99,7 +100,7 @@ _ams101_dac = [ _sdcard_spi_33 = [ ("sdcard_spi_33", 0, - Subsignal("miso", Pins("AC20"), Misc("PULLUP")), + Subsignal("miso", Pins("AC20"), Misc("PULLUP=TRUE")), Subsignal("clk", Pins("AB23")), Subsignal("mosi", Pins("AB22")), Subsignal("cs_n", Pins("AC21")), @@ -107,35 +108,9 @@ _sdcard_spi_33 = [ ) ] -_zotino = [ - ("fmcdio_dirctl", 0, - Subsignal("clk", Pins("HPC:LA32_N")), - Subsignal("ser", Pins("HPC:LA33_P")), - Subsignal("latch", Pins("HPC:LA32_P")), - IOStandard("LVCMOS33") - ), - ("zotino_spi_p", 0, - Subsignal("clk", Pins("HPC:LA08_P")), - Subsignal("mosi", Pins("HPC:LA09_P")), - Subsignal("miso", Pins("HPC:LA10_P")), - Subsignal("cs_n", Pins("HPC:LA11_P")), - IOStandard("LVDS_25") - ), - ("zotino_spi_n", 0, - Subsignal("clk", Pins("HPC:LA08_N")), - Subsignal("mosi", Pins("HPC:LA09_N")), - Subsignal("miso", Pins("HPC:LA10_N")), - Subsignal("cs_n", Pins("HPC:LA11_N")), - IOStandard("LVDS_25") - ), - ("zotino_ldac", 0, - Subsignal("p", Pins("HPC:LA13_P")), - Subsignal("n", Pins("HPC:LA13_N")), - IOStandard("LVDS_25"), Misc("DIFF_TERM=TRUE") - ) -] -class _NIST_Ions(MiniSoC, AMPSoC): + +class _StandaloneBase(MiniSoC, AMPSoC): mem_map = { "cri_con": 0x10000000, "rtio": 0x20000000, @@ -144,16 +119,18 @@ class _NIST_Ions(MiniSoC, AMPSoC): } mem_map.update(MiniSoC.mem_map) - def __init__(self, cpu_type="or1k", **kwargs): + def __init__(self, gateware_identifier_str=None, **kwargs): MiniSoC.__init__(self, - cpu_type=cpu_type, + cpu_type="or1k", sdram_controller_type="minicon", l2_size=128*1024, - ident=artiq_version, + integrated_sram_size=8192, ethmac_nrxslots=4, ethmac_ntxslots=4, **kwargs) AMPSoC.__init__(self) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + if isinstance(self.platform.toolchain, XilinxVivadoToolchain): self.platform.toolchain.bitstream_commands.extend([ "set_property BITSTREAM.GENERAL.COMPRESS True [current_design]", @@ -161,6 +138,10 @@ class _NIST_Ions(MiniSoC, AMPSoC): if isinstance(self.platform.toolchain, XilinxISEToolchain): self.platform.toolchain.bitgen_opt += " -g compress" + self.submodules.timer1 = timer.Timer() + self.csr_devices.append("timer1") + self.interrupt_devices.append("timer1") + self.submodules.leds = gpio.GPIOOut(Cat( self.platform.request("user_led", 0), self.platform.request("user_led", 1))) @@ -169,7 +150,6 @@ class _NIST_Ions(MiniSoC, AMPSoC): self.platform.add_extension(_sma33_io) self.platform.add_extension(_ams101_dac) self.platform.add_extension(_sdcard_spi_33) - self.platform.add_extension(_zotino) i2c = self.platform.request("i2c") 
self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) @@ -181,9 +161,11 @@ class _NIST_Ions(MiniSoC, AMPSoC): def add_rtio(self, rtio_channels): self.submodules.rtio_crg = _RTIOCRG(self.platform, self.crg.cd_sys.clk) self.csr_devices.append("rtio_crg") - self.submodules.rtio_core = rtio.Core(rtio_channels) + self.config["HAS_RTIO_CLOCK_SWITCH"] = None + self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) self.csr_devices.append("rtio_core") - self.submodules.rtio = rtio.KernelInitiator() + self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( rtio.DMA(self.get_native_sdram_if())) self.register_kernel_cpu_csrdevice("rtio") @@ -195,23 +177,22 @@ class _NIST_Ions(MiniSoC, AMPSoC): self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) self.csr_devices.append("rtio_moninj") - self.rtio_crg.cd_rtio.clk.attr.add("keep") self.platform.add_period_constraint(self.rtio_crg.cd_rtio.clk, 8.) self.platform.add_false_path_constraints( self.crg.cd_sys.clk, self.rtio_crg.cd_rtio.clk) - self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_core.cri, + self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_tsc, self.rtio_core.cri, self.get_native_sdram_if()) self.csr_devices.append("rtio_analyzer") -class NIST_CLOCK(_NIST_Ions): +class NIST_CLOCK(_StandaloneBase): """ NIST clock hardware, with old backplane and 11 DDS channels """ - def __init__(self, cpu_type="or1k", **kwargs): - _NIST_Ions.__init__(self, cpu_type, **kwargs) + def __init__(self, **kwargs): + _StandaloneBase.__init__(self, **kwargs) platform = self.platform platform.add_extension(nist_clock.fmc_adapter_io) @@ -249,47 +230,25 @@ class NIST_CLOCK(_NIST_Ions): self.submodules += phy rtio_channels.append(rtio.Channel.from_phy(phy)) - phy = spi.SPIMaster(ams101_dac) + phy = spi2.SPIMaster(ams101_dac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=4, ififo_depth=4)) + phy, ififo_depth=4)) for i in range(3): - phy = spi.SPIMaster(self.platform.request("spi", i)) + phy = spi2.SPIMaster(self.platform.request("spi", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=128, ififo_depth=128)) - - phy = spi.SPIMaster(platform.request("sdcard_spi_33")) + phy, ififo_depth=128)) + + phy = spi2.SPIMaster(platform.request("sdcard_spi_33")) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=4, ififo_depth=4)) - - fmcdio_dirctl = self.platform.request("fmcdio_dirctl") - for s in fmcdio_dirctl.clk, fmcdio_dirctl.ser, fmcdio_dirctl.latch: - phy = ttl_simple.Output(s) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - sdac_phy = spi.SPIMaster(self.platform.request("zotino_spi_p"), - self.platform.request("zotino_spi_n")) - self.submodules += sdac_phy - rtio_channels.append(rtio.Channel.from_phy(sdac_phy, ififo_depth=4)) - - pads = platform.request("zotino_ldac") - ldac_phy = ttl_serdes_7series.Output_8X(pads.p, pads.n) - self.submodules += ldac_phy - rtio_channels.append(rtio.Channel.from_phy(ldac_phy)) - - dac_monitor = ad5360_monitor.AD5360Monitor(sdac_phy.rtlink, ldac_phy.rtlink) - self.submodules += dac_monitor - sdac_phy.probes.extend(dac_monitor.probes) + phy, ififo_depth=4)) phy = dds.AD9914(platform.request("dds"), 11, onehot=True) self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy, - ofifo_depth=512, - ififo_depth=4)) + 
rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) @@ -298,13 +257,13 @@ class NIST_CLOCK(_NIST_Ions): self.add_rtio(rtio_channels) -class NIST_QC2(_NIST_Ions): +class NIST_QC2(_StandaloneBase): """ NIST QC2 hardware, as used in Quantum I and Quantum II, with new backplane and 24 DDS channels. Two backplanes are used. """ - def __init__(self, cpu_type="or1k", **kwargs): - _NIST_Ions.__init__(self, cpu_type, **kwargs) + def __init__(self, **kwargs): + _StandaloneBase.__init__(self, **kwargs) platform = self.platform platform.add_extension(nist_qc2.fmc_adapter_io) @@ -344,24 +303,22 @@ class NIST_QC2(_NIST_Ions): # add clock generators after TTLs rtio_channels += clock_generators - phy = spi.SPIMaster(ams101_dac) + phy = spi2.SPIMaster(ams101_dac) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=4, ififo_depth=4)) + phy, ififo_depth=4)) for i in range(4): - phy = spi.SPIMaster(self.platform.request("spi", i)) + phy = spi2.SPIMaster(self.platform.request("spi", i)) self.submodules += phy rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=128, ififo_depth=128)) + phy, ififo_depth=128)) for backplane_offset in range(2): phy = dds.AD9914( platform.request("dds", backplane_offset), 12, onehot=True) self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy, - ofifo_depth=512, - ififo_depth=4)) + rtio_channels.append(rtio.Channel.from_phy(phy, ififo_depth=4)) self.config["HAS_RTIO_LOG"] = None self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) @@ -370,27 +327,106 @@ class NIST_QC2(_NIST_Ions): self.add_rtio(rtio_channels) +_sma_spi = [ + ("sma_spi", 0, + Subsignal("clk", Pins("Y23")), # user_sma_gpio_p + Subsignal("cs_n", Pins("Y24")), # user_sma_gpio_n + Subsignal("mosi", Pins("L25")), # user_sma_clk_p + Subsignal("miso", Pins("K25")), # user_sma_clk_n + IOStandard("LVCMOS25")), +] + + +class SMA_SPI(_StandaloneBase): + """ + SPI on 4 SMA for PDQ2 test/demo. 
+ """ + def __init__(self, **kwargs): + _StandaloneBase.__init__(self, **kwargs) + + platform = self.platform + self.platform.add_extension(_sma_spi) + + rtio_channels = [] + + phy = ttl_simple.Output(platform.request("user_led", 2)) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + ams101_dac = self.platform.request("ams101_dac", 0) + phy = ttl_simple.Output(ams101_dac.ldac) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + phy = spi2.SPIMaster(ams101_dac) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy( + phy, ififo_depth=4)) + + phy = spi2.SPIMaster(self.platform.request("sma_spi")) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy( + phy, ififo_depth=128)) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) + rtio_channels.append(rtio.LogChannel()) + + self.add_rtio(rtio_channels) + + def add_rtio(self, rtio_channels): + self.submodules.rtio_crg = _RTIOCRG(self.platform, self.crg.cd_sys.clk, + use_sma=False) + self.csr_devices.append("rtio_crg") + self.config["HAS_RTIO_CLOCK_SWITCH"] = None + self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.csr_devices.append("rtio_core") + self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) + self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( + rtio.DMA(self.get_native_sdram_if())) + self.register_kernel_cpu_csrdevice("rtio") + self.register_kernel_cpu_csrdevice("rtio_dma") + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.rtio.cri, self.rtio_dma.cri], + [self.rtio_core.cri]) + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.platform.add_period_constraint(self.rtio_crg.cd_rtio.clk, 8.) 
+ self.platform.add_false_path_constraints( + self.crg.cd_sys.clk, + self.rtio_crg.cd_rtio.clk) + + self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_tsc, self.rtio_core.cri, + self.get_native_sdram_if()) + self.csr_devices.append("rtio_analyzer") + + +VARIANTS = {cls.__name__.lower(): cls for cls in [NIST_CLOCK, NIST_QC2, SMA_SPI]} + + def main(): parser = argparse.ArgumentParser( - description="ARTIQ device binary builder / single-FPGA KC705-based " - "systems with AD9 DDS (NIST Ions hardware)") + description="KC705 gateware and firmware builder") builder_args(parser) soc_kc705_args(parser) - parser.add_argument("-H", "--hw-adapter", default="nist_clock", - help="hardware adapter type: " - "nist_clock/nist_qc2 " + parser.set_defaults(output_dir="artiq_kc705") + parser.add_argument("-V", "--variant", default="nist_clock", + help="variant: " + "nist_clock/nist_qc2/sma_spi " "(default: %(default)s)") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") args = parser.parse_args() - hw_adapter = args.hw_adapter.lower() - if hw_adapter == "nist_clock": - cls = NIST_CLOCK - elif hw_adapter == "nist_qc2": - cls = NIST_QC2 - else: - raise SystemExit("Invalid hardware adapter string (-H/--hw-adapter)") + variant = args.variant.lower() + try: + cls = VARIANTS[variant] + except KeyError: + raise SystemExit("Invalid variant (-V/--variant)") - soc = cls(**soc_kc705_argdict(args)) + soc = cls(gateware_identifier_str=args.gateware_identifier_str, **soc_kc705_argdict(args)) build_artiq_soc(soc, builder_argdict(args)) diff --git a/artiq/gateware/targets/kc705_sma_spi.py b/artiq/gateware/targets/kc705_sma_spi.py deleted file mode 100755 index cea74c574..000000000 --- a/artiq/gateware/targets/kc705_sma_spi.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/env python3 - -import argparse - -from migen import * - -from migen.build.generic_platform import * -from migen.genlib.resetsync import AsyncResetSynchronizer -from migen.genlib.cdc import MultiReg -from misoc.targets.kc705 import soc_kc705_args, soc_kc705_argdict -from misoc.integration.builder import builder_args, builder_argdict -from misoc.interconnect.csr import * - -from artiq.gateware.amp import build_artiq_soc -from artiq.gateware import rtio -from artiq.gateware.rtio.phy import ttl_simple, spi - - -from .kc705_dds import _NIST_Ions - - -class _RTIOCRG(Module, AutoCSR): - def __init__(self, platform, rtio_internal_clk): - self._clock_sel = CSRStorage() - self._pll_reset = CSRStorage(reset=1) - self._pll_locked = CSRStatus() - self.clock_domains.cd_rtio = ClockDomain() - self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) - - # 10 MHz when using 125MHz input - self.clock_domains.cd_ext_clkout = ClockDomain(reset_less=True) - - rtio_external_clk = Signal() - - pll_locked = Signal() - rtio_clk = Signal() - rtiox4_clk = Signal() - ext_clkout_clk = Signal() - self.specials += [ - Instance("PLLE2_ADV", - p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked, - - p_REF_JITTER1=0.01, - p_CLKIN1_PERIOD=8.0, p_CLKIN2_PERIOD=8.0, - i_CLKIN1=rtio_internal_clk, i_CLKIN2=rtio_external_clk, - # Warning: CLKINSEL=0 means CLKIN2 is selected - i_CLKINSEL=~self._clock_sel.storage, - - # VCO @ 1GHz when using 125MHz input - p_CLKFBOUT_MULT=8, p_DIVCLK_DIVIDE=1, - i_CLKFBIN=self.cd_rtio.clk, - i_RST=self._pll_reset.storage, - - o_CLKFBOUT=rtio_clk, - - p_CLKOUT0_DIVIDE=2, p_CLKOUT0_PHASE=0.0, - o_CLKOUT0=rtiox4_clk, - - p_CLKOUT1_DIVIDE=50, p_CLKOUT1_PHASE=0.0, - o_CLKOUT1=ext_clkout_clk), - Instance("BUFG", i_I=rtio_clk, 
o_O=self.cd_rtio.clk), - Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), - Instance("BUFG", i_I=ext_clkout_clk, o_O=self.cd_ext_clkout.clk), - - AsyncResetSynchronizer(self.cd_rtio, ~pll_locked), - MultiReg(pll_locked, self._pll_locked.status) - ] - - -_sma_spi = [ - ("sma_spi", 0, - Subsignal("clk", Pins("Y23")), # user_sma_gpio_p - Subsignal("cs_n", Pins("Y24")), # user_sma_gpio_n - Subsignal("mosi", Pins("L25")), # user_sma_clk_p - Subsignal("miso", Pins("K25")), # user_sma_clk_n - IOStandard("LVCMOS25")), -] - - -class SMA_SPI(_NIST_Ions): - """ - SPI on 4 SMA for PDQ2 test/demo. - """ - def __init__(self, cpu_type="or1k", **kwargs): - _NIST_Ions.__init__(self, cpu_type, **kwargs) - - platform = self.platform - self.platform.add_extension(_sma_spi) - - rtio_channels = [] - - phy = ttl_simple.Output(platform.request("user_led", 2)) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - ams101_dac = self.platform.request("ams101_dac", 0) - phy = ttl_simple.Output(ams101_dac.ldac) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - phy = spi.SPIMaster(ams101_dac) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=4, ififo_depth=4)) - - phy = spi.SPIMaster(self.platform.request("sma_spi")) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy( - phy, ofifo_depth=128, ififo_depth=128)) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) - rtio_channels.append(rtio.LogChannel()) - - self.add_rtio(rtio_channels) - - def add_rtio(self, rtio_channels): - self.submodules.rtio_crg = _RTIOCRG(self.platform, self.crg.cd_sys.clk) - self.csr_devices.append("rtio_crg") - self.submodules.rtio_core = rtio.Core(rtio_channels) - self.csr_devices.append("rtio_core") - self.submodules.rtio = rtio.KernelInitiator() - self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( - rtio.DMA(self.get_native_sdram_if())) - self.register_kernel_cpu_csrdevice("rtio") - self.register_kernel_cpu_csrdevice("rtio_dma") - self.submodules.cri_con = rtio.CRIInterconnectShared( - [self.rtio.cri, self.rtio_dma.cri], - [self.rtio_core.cri]) - self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) - self.csr_devices.append("rtio_moninj") - - self.rtio_crg.cd_rtio.clk.attr.add("keep") - self.platform.add_period_constraint(self.rtio_crg.cd_rtio.clk, 8.) 
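# --- editorial note: illustrative sketch, not part of this patch -------------
# This standalone kc705_sma_spi.py target is deleted because the SMA_SPI class
# now lives in the unified KC705 builder shown earlier and is selected through
# the new -V/--variant switch (e.g. "-V sma_spi"). A self-contained miniature
# of that variant lookup; the stub classes below merely stand in for the real
# target classes defined in the builder:
class NIST_CLOCK: pass
class NIST_QC2: pass
class SMA_SPI: pass
VARIANTS = {cls.__name__.lower(): cls for cls in [NIST_CLOCK, NIST_QC2, SMA_SPI]}
assert VARIANTS["sma_spi"] is SMA_SPI      # what "-V sma_spi" resolves to
# ------------------------------------------------------------------------------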
- self.platform.add_false_path_constraints( - self.crg.cd_sys.clk, - self.rtio_crg.cd_rtio.clk) - - self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_core.cri, - self.get_native_sdram_if()) - self.csr_devices.append("rtio_analyzer") - - -def main(): - parser = argparse.ArgumentParser( - description="ARTIQ device binary builder / " - "KC705 SMA SPI demo/test for PDQ2") - builder_args(parser) - soc_kc705_args(parser) - args = parser.parse_args() - - soc = SMA_SPI(**soc_kc705_argdict(args)) - build_artiq_soc(soc, builder_argdict(args)) - - -if __name__ == "__main__": - main() diff --git a/artiq/gateware/targets/metlino.py b/artiq/gateware/targets/metlino.py new file mode 100755 index 000000000..2139278ef --- /dev/null +++ b/artiq/gateware/targets/metlino.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python3 + +import argparse + +from migen import * + +from misoc.cores import gpio +from misoc.integration.builder import builder_args, builder_argdict +from misoc.interconnect.csr import * +from misoc.targets.metlino import * + +from artiq.gateware.amp import AMPSoC +from artiq.gateware import eem +from artiq.gateware import rtio +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_ultrascale +from artiq.gateware.drtio.transceiver import gth_ultrascale +from artiq.gateware.drtio import * +from artiq.build_soc import * + + +def workaround_us_lvds_tristate(platform): + # Those shoddy Kintex Ultrascale FPGAs take almost a microsecond to change the direction of a + # LVDS I/O buffer. The application has to cope with it and this cannot be handled at static + # timing analysis. Disable the latter for IOBUFDS. + # See: + # https://forums.xilinx.com/t5/Timing-Analysis/Delay-890-ns-in-OBUFTDS-in-Kintex-UltraScale/td-p/868364 + platform.add_platform_command( + "set_false_path -through [get_pins -filter {{REF_PIN_NAME == T}} -of [get_cells -filter {{REF_NAME == IOBUFDS}}]]") + + +class Master(MiniSoC, AMPSoC): + mem_map = { + "cri_con": 0x10000000, + "rtio": 0x11000000, + "rtio_dma": 0x12000000, + "drtioaux": 0x14000000, + "mailbox": 0x70000000 + } + mem_map.update(MiniSoC.mem_map) + + def __init__(self, gateware_identifier_str=None, **kwargs): + MiniSoC.__init__(self, + cpu_type="or1k", + sdram_controller_type="minicon", + l2_size=128*1024, + integrated_sram_size=8192, + ethmac_nrxslots=4, + ethmac_ntxslots=4, + csr_address_width=15, + **kwargs) + AMPSoC.__init__(self) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + + platform = self.platform + rtio_clk_freq = 150e6 + + self.comb += platform.request("input_clk_sel").eq(1) + self.comb += platform.request("filtered_clk_sel").eq(1) + self.submodules.si5324_rst_n = gpio.GPIOOut(platform.request("si5324").rst_n) + self.csr_devices.append("si5324_rst_n") + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + self.config["HAS_SI5324"] = None + self.config["SI5324_AS_SYNTHESIZER"] = None + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) + + self.submodules.drtio_transceiver = gth_ultrascale.GTH( + clock_pads=platform.request("cdr_clk_clean", 0), + data_pads=[platform.request("mch_fabric_d", i) for i in range(11)], + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) + self.csr_devices.append("drtio_transceiver") + + self.submodules.rtio_tsc = rtio.TSC("async", glbl_fine_ts_width=3) + + drtio_csr_group = [] + drtioaux_csr_group = [] + drtioaux_memory_group = [] + drtio_cri = [] + for i in 
range(len(self.drtio_transceiver.channels)): + core_name = "drtio" + str(i) + coreaux_name = "drtioaux" + str(i) + memory_name = "drtioaux" + str(i) + "_mem" + drtio_csr_group.append(core_name) + drtioaux_csr_group.append(coreaux_name) + drtioaux_memory_group.append(memory_name) + + cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx" + str(i)}) + + core = cdr(DRTIOMaster(self.rtio_tsc, self.drtio_transceiver.channels[i])) + setattr(self.submodules, core_name, core) + drtio_cri.append(core.cri) + self.csr_devices.append(core_name) + + coreaux = cdr(DRTIOAuxController(core.link_layer)) + setattr(self.submodules, coreaux_name, coreaux) + self.csr_devices.append(coreaux_name) + + memory_address = self.mem_map["drtioaux"] + 0x800*i + self.add_wb_slave(memory_address, 0x800, + coreaux.bus) + self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.config["HAS_DRTIO"] = None + self.config["HAS_DRTIO_ROUTING"] = None + self.add_csr_group("drtio", drtio_csr_group) + self.add_csr_group("drtioaux", drtioaux_csr_group) + self.add_memory_group("drtioaux_mem", drtioaux_memory_group) + + rtio_clk_period = 1e9/rtio_clk_freq + gth0 = self.drtio_transceiver.gths[0] + platform.add_period_constraint(gth0.txoutclk, rtio_clk_period/2) + platform.add_period_constraint(gth0.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, + gth0.txoutclk, gth0.rxoutclk) + for gth in self.drtio_transceiver.gths[1:]: + platform.add_period_constraint(gth.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, gth0.txoutclk, gth.rxoutclk) + + self.rtio_channels = rtio_channels = [] + for i in range(4): + phy = ttl_simple.Output(platform.request("user_led", i)) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + eem.DIO.add_std(self, 2, ttl_simple.Output, ttl_simple.Output, + iostandard="LVDS") + eem.Urukul.add_std(self, 0, 1, ttl_simple.Output, + iostandard="LVDS") + eem.Zotino.add_std(self, 3, ttl_simple.Output, + iostandard="LVDS") + workaround_us_lvds_tristate(platform) + + self.config["HAS_RTIO_LOG"] = None + self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) + rtio_channels.append(rtio.LogChannel()) + + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.submodules.rtio_core = rtio.Core(self.rtio_tsc, rtio_channels) + self.csr_devices.append("rtio_core") + + self.submodules.rtio = rtio.KernelInitiator(self.rtio_tsc) + self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( + rtio.DMA(self.get_native_sdram_if())) + self.register_kernel_cpu_csrdevice("rtio") + self.register_kernel_cpu_csrdevice("rtio_dma") + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.rtio.cri, self.rtio_dma.cri], + [self.rtio_core.cri] + drtio_cri, + enable_routing=True) + self.register_kernel_cpu_csrdevice("cri_con") + self.submodules.routing_table = rtio.RoutingTableAccess(self.cri_con) + self.csr_devices.append("routing_table") + + +def main(): + parser = argparse.ArgumentParser( + description="Metlino gateware and firmware builder") + builder_args(parser) + soc_sdram_args(parser) + parser.set_defaults(output_dir="artiq_metlino") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") + args = parser.parse_args() + args.variant = "master" + soc = Master(gateware_identifier_str=args.gateware_identifier_str, **soc_sdram_argdict(args)) + build_artiq_soc(soc, builder_argdict(args)) + + +if __name__ == "__main__": + 
main() diff --git a/artiq/gateware/targets/sayma_amc.py b/artiq/gateware/targets/sayma_amc.py new file mode 100755 index 000000000..4edfc72c7 --- /dev/null +++ b/artiq/gateware/targets/sayma_amc.py @@ -0,0 +1,456 @@ +#!/usr/bin/env python3 + +import argparse +import os +import warnings + +from migen import * + +from misoc.cores import gpio +from misoc.integration.builder import builder_args, builder_argdict +from misoc.interconnect.csr import * +from misoc.targets.sayma_amc import * + +from artiq.gateware.amp import AMPSoC +from artiq.gateware import eem +from artiq.gateware import rtio +from artiq.gateware import jesd204_tools +from artiq.gateware import fmcdio_vhdci_eem +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_ultrascale, sawg +from artiq.gateware.drtio.transceiver import gth_ultrascale +from artiq.gateware.drtio.siphaser import SiPhaser7Series +from artiq.gateware.drtio.wrpll import WRPLL, DDMTDSamplerExtFF +from artiq.gateware.drtio.rx_synchronizer import XilinxRXSynchronizer +from artiq.gateware.drtio import * +from artiq.build_soc import * + + +def workaround_us_lvds_tristate(platform): + # Those shoddy Kintex Ultrascale FPGAs take almost a microsecond to change the direction of a + # LVDS I/O buffer. The application has to cope with it and this cannot be handled at static + # timing analysis. Disable the latter for IOBUFDS. + # See: + # https://forums.xilinx.com/t5/Timing-Analysis/Delay-890-ns-in-OBUFTDS-in-Kintex-UltraScale/td-p/868364 + platform.add_platform_command( + "set_false_path -through [get_pins -filter {{REF_PIN_NAME == T}} -of [get_cells -filter {{REF_NAME == IOBUFDS}}]]") + + +class RTMUARTForward(Module): + def __init__(self, platform): + # forward RTM UART to second FTDI UART channel + serial_1 = platform.request("serial", 1) + serial_rtm = platform.request("serial_rtm") + self.comb += [ + serial_1.tx.eq(serial_rtm.rx), + serial_rtm.tx.eq(serial_1.rx) + ] + + +class SatelliteBase(MiniSoC): + mem_map = { + "drtioaux": 0x14000000, + } + mem_map.update(MiniSoC.mem_map) + + def __init__(self, rtio_clk_freq=125e6, identifier_suffix="", gateware_identifier_str=None, with_sfp=False, *, with_wrpll, **kwargs): + MiniSoC.__init__(self, + cpu_type="or1k", + sdram_controller_type="minicon", + l2_size=128*1024, + integrated_sram_size=8192, + ethmac_nrxslots=4, + ethmac_ntxslots=4, + **kwargs) + add_identifier(self, suffix=identifier_suffix, gateware_identifier_str=gateware_identifier_str) + self.rtio_clk_freq = rtio_clk_freq + + platform = self.platform + + if with_wrpll: + clock_recout_pads = platform.request("ddmtd_rec_clk") + else: + clock_recout_pads = None + if with_sfp: + # Use SFP0 to connect to master (Kasli) + self.comb += platform.request("sfp_tx_disable", 0).eq(0) + drtio_uplink = platform.request("sfp", 0) + else: + drtio_uplink = platform.request("fat_pipe", 0) + self.submodules.drtio_transceiver = gth_ultrascale.GTH( + clock_pads=platform.request("cdr_clk_clean"), + data_pads=[drtio_uplink, platform.request("rtm_amc_link")], + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq, + clock_recout_pads=clock_recout_pads) + self.csr_devices.append("drtio_transceiver") + + self.submodules.rtio_tsc = rtio.TSC("sync", glbl_fine_ts_width=3) + + drtioaux_csr_group = [] + drtioaux_memory_group = [] + drtiorep_csr_group = [] + self.drtio_cri = [] + for i in range(len(self.drtio_transceiver.channels)): + coreaux_name = "drtioaux" + str(i) + memory_name = "drtioaux" + str(i) + "_mem" + drtioaux_csr_group.append(coreaux_name) + 
drtioaux_memory_group.append(memory_name) + + cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx" + str(i)}) + + if i == 0: + self.submodules.rx_synchronizer = cdr(XilinxRXSynchronizer()) + core = cdr(DRTIOSatellite( + self.rtio_tsc, self.drtio_transceiver.channels[i], + self.rx_synchronizer)) + self.submodules.drtiosat = core + self.csr_devices.append("drtiosat") + else: + corerep_name = "drtiorep" + str(i-1) + drtiorep_csr_group.append(corerep_name) + + core = cdr(DRTIORepeater( + self.rtio_tsc, self.drtio_transceiver.channels[i])) + setattr(self.submodules, corerep_name, core) + self.drtio_cri.append(core.cri) + self.csr_devices.append(corerep_name) + + coreaux = cdr(DRTIOAuxController(core.link_layer)) + setattr(self.submodules, coreaux_name, coreaux) + self.csr_devices.append(coreaux_name) + + memory_address = self.mem_map["drtioaux"] + 0x800*i + self.add_wb_slave(memory_address, 0x800, + coreaux.bus) + self.add_memory_region(memory_name, memory_address | self.shadow_base, 0x800) + self.config["HAS_DRTIO"] = None + self.config["HAS_DRTIO_ROUTING"] = None + self.add_csr_group("drtioaux", drtioaux_csr_group) + self.add_memory_group("drtioaux_mem", drtioaux_memory_group) + self.add_csr_group("drtiorep", drtiorep_csr_group) + + rtio_clk_period = 1e9/rtio_clk_freq + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) + if with_wrpll: + self.comb += [ + platform.request("filtered_clk_sel").eq(0), + platform.request("ddmtd_main_dcxo_oe").eq(1), + platform.request("ddmtd_helper_dcxo_oe").eq(1) + ] + self.submodules.wrpll_sampler = DDMTDSamplerExtFF( + platform.request("ddmtd_inputs")) + self.submodules.wrpll = WRPLL( + helper_clk_pads=platform.request("ddmtd_helper_clk"), + main_dcxo_i2c=platform.request("ddmtd_main_dcxo_i2c"), + helper_dxco_i2c=platform.request("ddmtd_helper_dcxo_i2c"), + ddmtd_inputs=self.wrpll_sampler) + self.csr_devices.append("wrpll") + platform.add_period_constraint(self.wrpll.cd_helper.clk, rtio_clk_period*0.99) + platform.add_false_path_constraints(self.crg.cd_sys.clk, self.wrpll.cd_helper.clk) + else: + self.comb += platform.request("filtered_clk_sel").eq(1) + self.submodules.siphaser = SiPhaser7Series( + si5324_clkin=platform.request("si5324_clkin"), + rx_synchronizer=self.rx_synchronizer, + ultrascale=True, + rtio_clk_freq=rtio_clk_freq) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, self.siphaser.mmcm_freerun_output) + self.csr_devices.append("siphaser") + self.submodules.si5324_rst_n = gpio.GPIOOut(platform.request("si5324").rst_n) + self.csr_devices.append("si5324_rst_n") + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + self.config["HAS_SI5324"] = None + + gth = self.drtio_transceiver.gths[0] + platform.add_period_constraint(gth.txoutclk, rtio_clk_period/2) + platform.add_period_constraint(gth.rxoutclk, rtio_clk_period) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, + gth.txoutclk, gth.rxoutclk) + + def add_rtio(self, rtio_channels): + # Only add MonInj core if there is anything to monitor + if any([len(c.probes) for c in rtio_channels]): + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") + + self.submodules.local_io = SyncRTIO(self.rtio_tsc, rtio_channels) + self.comb += self.drtiosat.async_errors.eq(self.local_io.async_errors) + self.submodules.cri_con = rtio.CRIInterconnectShared( + [self.drtiosat.cri], + [self.local_io.cri] + self.drtio_cri, + mode="sync", 
enable_routing=True) + self.csr_devices.append("cri_con") + self.submodules.routing_table = rtio.RoutingTableAccess(self.cri_con) + self.csr_devices.append("routing_table") + + +# JESD204 DAC Channel Group +class JDCGSAWG(Module, AutoCSR): + def __init__(self, platform, sys_crg, jesd_crg, dac): + # Kintex Ultrascale GTH, speed grade -1C: + # CPLL linerate (D=1): 4.0 - 8.5 Gb/s + self.submodules.jesd = jesd204_tools.UltrascaleTX( + platform, sys_crg, jesd_crg, dac) + + self.submodules.sawgs = [sawg.Channel(width=16, parallelism=4) for i in range(4)] + + for conv, ch in zip(self.jesd.core.sink.flatten(), self.sawgs): + assert len(Cat(ch.o)) == len(conv) + self.sync.jesd += conv.eq(Cat(ch.o)) + + +class JDCGPattern(Module, AutoCSR): + def __init__(self, platform, sys_crg, jesd_crg, dac): + self.submodules.jesd = jesd204_tools.UltrascaleTX( + platform, sys_crg, jesd_crg, dac) + + self.sawgs = [] + + ramp = Signal(4) + self.sync.rtio += ramp.eq(ramp + 1) + + samples = [[Signal(16) for i in range(4)] for j in range(4)] + self.comb += [ + a.eq(Cat(b)) for a, b in zip( + self.jesd.core.sink.flatten(), samples) + ] + # ch0: 16-step ramp with big carry toggles + for i in range(4): + self.comb += [ + samples[0][i][-4:].eq(ramp), + samples[0][i][:-4].eq(0x7ff if i % 2 else 0x800) + ] + # ch1: 50 MHz + from math import pi, cos + data = [int(round(cos(i/12*2*pi)*((1 << 15) - 1))) + for i in range(12)] + k = Signal(2) + self.sync.rtio += If(k == 2, k.eq(0)).Else(k.eq(k + 1)) + self.comb += [ + Case(k, { + i: [samples[1][j].eq(data[i*4 + j]) for j in range(4)] + for i in range(3) + }) + ] + # ch2: ch0, ch3: ch1 + self.comb += [ + Cat(samples[2]).eq(Cat(samples[0])), + Cat(samples[3]).eq(Cat(samples[1])) + ] + + +class JDCGSyncDDS(Module, AutoCSR): + def __init__(self, platform, sys_crg, jesd_crg, dac): + self.submodules.jesd = jesd204_tools.UltrascaleTX( + platform, sys_crg, jesd_crg, dac) + self.coarse_ts = Signal(32) + + self.sawgs = [] + + ftw = round(2**len(self.coarse_ts)*9e6/600e6) + parallelism = 4 + + mul_1 = Signal.like(self.coarse_ts) + mul_2 = Signal.like(self.coarse_ts) + mul_3 = Signal.like(self.coarse_ts) + self.sync.rtio += [ + mul_1.eq(self.coarse_ts*ftw*parallelism), + mul_2.eq(mul_1), + mul_3.eq(mul_2) + ] + + phases = [Signal.like(self.coarse_ts) for i in range(parallelism)] + self.sync.rtio += [phases[i].eq(mul_3 + i*ftw) for i in range(parallelism)] + + resolution = 10 + steps = 2**resolution + from math import pi, cos + data = [(2**16 + round(cos(i/steps*2*pi)*((1 << 15) - 1))) & 0xffff + for i in range(steps)] + samples = [Signal(16) for i in range(4)] + for phase, sample in zip(phases, samples): + table = Memory(16, steps, init=data) + table_port = table.get_port(clock_domain="rtio") + self.specials += table, table_port + self.comb += [ + table_port.adr.eq(phase >> (len(self.coarse_ts) - resolution)), + sample.eq(table_port.dat_r) + ] + + self.sync.rtio += [sink.eq(Cat(samples)) + for sink in self.jesd.core.sink.flatten()] + + +class Satellite(SatelliteBase): + """ + DRTIO satellite with local DAC/SAWG channels, as well as TTL channels via FMC and VHDCI carrier. + """ + def __init__(self, jdcg_type, **kwargs): + SatelliteBase.__init__(self, 150e6, + identifier_suffix="." 
+ jdcg_type, + **kwargs) + + platform = self.platform + + self.submodules += RTMUARTForward(platform) + + # RTM bitstream upload + slave_fpga_cfg = self.platform.request("rtm_fpga_cfg") + self.submodules.slave_fpga_cfg = gpio.GPIOTristate([ + slave_fpga_cfg.cclk, + slave_fpga_cfg.din, + slave_fpga_cfg.done, + slave_fpga_cfg.init_b, + slave_fpga_cfg.program_b, + ]) + self.csr_devices.append("slave_fpga_cfg") + self.config["SLAVE_FPGA_GATEWARE"] = 0x200000 + + self.rtio_channels = rtio_channels = [] + for i in range(4): + phy = ttl_simple.Output(platform.request("user_led", i)) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + mcx_io = platform.request("mcx_io", 0) + phy = ttl_serdes_ultrascale.InOut(4, mcx_io.level) + self.comb += mcx_io.direction.eq(phy.oe) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + mcx_io = platform.request("mcx_io", 1) + phy = ttl_serdes_ultrascale.InOut(4, mcx_io.level) + self.comb += mcx_io.direction.eq(phy.oe) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.submodules.jesd_crg = jesd204_tools.UltrascaleCRG( + platform, use_rtio_clock=True) + cls = { + "sawg": JDCGSAWG, + "pattern": JDCGPattern, + "syncdds": JDCGSyncDDS + }[jdcg_type] + self.submodules.jdcg_0 = cls(platform, self.crg, self.jesd_crg, 0) + self.submodules.jdcg_1 = cls(platform, self.crg, self.jesd_crg, 1) + self.csr_devices.append("jesd_crg") + self.csr_devices.append("jdcg_0") + self.csr_devices.append("jdcg_1") + self.config["HAS_JDCG"] = None + self.add_csr_group("jdcg", ["jdcg_0", "jdcg_1"]) + self.config["RTIO_FIRST_SAWG_CHANNEL"] = len(rtio_channels) + rtio_channels.extend(rtio.Channel.from_phy(phy) + for sawg in self.jdcg_0.sawgs + + self.jdcg_1.sawgs + for phy in sawg.phys) + + # FMC-VHDCI-EEM DIOs x 2 (all OUTPUTs) + platform.add_connectors(fmcdio_vhdci_eem.connectors) + eem.DIO.add_std(self, 0, + ttl_simple.Output, ttl_simple.Output, iostandard="LVDS") + eem.DIO.add_std(self, 1, + ttl_simple.Output, ttl_simple.Output, iostandard="LVDS") + # FMC-DIO-32ch-LVDS-a Direction Control Pins (via shift register) as TTLs x 3 + platform.add_extension(fmcdio_vhdci_eem.io) + print("fmcdio_vhdci_eem.[CLK, SER, LATCH] starting at RTIO channel 0x{:06x}" + .format(len(rtio_channels))) + fmcdio_dirctl = platform.request("fmcdio_dirctl", 0) + fmcdio_dirctl_phys = [ + ttl_simple.Output(fmcdio_dirctl.clk), + ttl_simple.Output(fmcdio_dirctl.ser), + ttl_simple.Output(fmcdio_dirctl.latch) + ] + for phy in fmcdio_dirctl_phys: + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + workaround_us_lvds_tristate(platform) + + self.add_rtio(rtio_channels) + + self.submodules.sysref_sampler = jesd204_tools.SysrefSampler( + platform.request("amc_fpga_sysref", 0), self.rtio_tsc.coarse_ts) + self.csr_devices.append("sysref_sampler") + self.jdcg_0.jesd.core.register_jref(self.sysref_sampler.jref) + self.jdcg_1.jesd.core.register_jref(self.sysref_sampler.jref) + if jdcg_type == "syncdds": + self.comb += [ + self.jdcg_0.coarse_ts.eq(self.rtio_tsc.coarse_ts), + self.jdcg_1.coarse_ts.eq(self.rtio_tsc.coarse_ts), + ] + + +class SimpleSatellite(SatelliteBase): + def __init__(self, **kwargs): + SatelliteBase.__init__(self, **kwargs) + + platform = self.platform + + self.submodules += RTMUARTForward(platform) + + rtio_channels = [] + for i in range(4): + phy = ttl_simple.Output(platform.request("user_led", i)) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + mcx_io = 
platform.request("mcx_io", 0) + phy = ttl_serdes_ultrascale.InOut(4, mcx_io.level) + self.comb += mcx_io.direction.eq(phy.oe) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + mcx_io = platform.request("mcx_io", 1) + phy = ttl_serdes_ultrascale.InOut(4, mcx_io.level) + self.comb += mcx_io.direction.eq(phy.oe) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.add_rtio(rtio_channels) + + +def main(): + parser = argparse.ArgumentParser( + description="Sayma AMC gateware and firmware builder") + builder_args(parser) + soc_sayma_amc_args(parser) + parser.set_defaults(output_dir="artiq_sayma") + parser.add_argument("-V", "--variant", default="satellite", + help="variant: satellite/simplesatellite " + "(default: %(default)s)") + parser.add_argument("--sfp", default=False, action="store_true", + help="use SFP port for DRTIO instead of uTCA backplane") + parser.add_argument("--rtm-csr-csv", + default=os.path.join("artiq_sayma", "rtm_gateware", "rtm_csr.csv"), + help="CSV file listing remote CSRs on RTM (default: %(default)s)") + parser.add_argument("--jdcg-type", + default="sawg", + help="Change type of signal generator. This is used exclusively for " + "development and debugging.") + parser.add_argument("--with-wrpll", default=False, action="store_true") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") + args = parser.parse_args() + + variant = args.variant.lower() + if variant == "satellite": + soc = Satellite( + with_sfp=args.sfp, + jdcg_type=args.jdcg_type, + with_wrpll=args.with_wrpll, + gateware_identifier_str=args.gateware_identifier_str, + **soc_sayma_amc_argdict(args)) + elif variant == "simplesatellite": + soc = SimpleSatellite( + with_sfp=args.sfp, + with_wrpll=args.with_wrpll, + gateware_identifier_str=args.gateware_identifier_str, + **soc_sayma_amc_argdict(args)) + else: + raise SystemExit("Invalid variant (-V/--variant)") + + build_artiq_soc(soc, builder_argdict(args)) + + +if __name__ == "__main__": + main() diff --git a/artiq/gateware/targets/sayma_amc_drtio_master.py b/artiq/gateware/targets/sayma_amc_drtio_master.py deleted file mode 100755 index cd1bbc149..000000000 --- a/artiq/gateware/targets/sayma_amc_drtio_master.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 - -import argparse - -from migen import * -from migen.build.generic_platform import * - -from misoc.cores import spi as spi_csr -from misoc.integration.soc_sdram import soc_sdram_args, soc_sdram_argdict -from misoc.integration.builder import builder_args, builder_argdict -from misoc.targets.sayma_amc import MiniSoC - -from artiq.gateware.amp import AMPSoC, build_artiq_soc -from artiq.gateware import rtio -from artiq.gateware.rtio.phy import ttl_simple -from artiq.gateware.drtio.transceiver import gth_ultrascale -from artiq.gateware.drtio import DRTIOMaster -from artiq import __version__ as artiq_version - - -class Master(MiniSoC, AMPSoC): - mem_map = { - "cri_con": 0x10000000, - "rtio": 0x20000000, - "rtio_dma": 0x30000000, - "drtio_aux": 0x50000000, - "mailbox": 0x70000000 - } - mem_map.update(MiniSoC.mem_map) - - def __init__(self, **kwargs): - MiniSoC.__init__(self, - cpu_type="or1k", - sdram_controller_type="minicon", - l2_size=128*1024, - ident=artiq_version, - ethmac_nrxslots=4, - ethmac_ntxslots=4, - **kwargs) - AMPSoC.__init__(self) - - platform = self.platform - - # Si5324 used as a free-running oscillator, to avoid dependency on RTM. 
- self.submodules.si5324_rst_n = gpio.GPIOOut(platform.request("si5324").rst_n) - self.csr_devices.append("si5324_rst_n") - i2c = self.platform.request("i2c") - self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) - self.csr_devices.append("i2c") - self.config["I2C_BUS_COUNT"] = 1 - self.config["HAS_SI5324"] = None - self.config["SI5324_FREE_RUNNING"] = None - - self.submodules.transceiver = gth_ultrascale.GTH( - clock_pads=platform.request("si5324_clkout"), - tx_pads=[platform.request("sfp_tx")], - rx_pads=[platform.request("sfp_rx")], - sys_clk_freq=self.clk_freq) - - self.submodules.drtio0 = ClockDomainsRenamer({"rtio_rx": "rtio_rx0"})( - DRTIOMaster(self.transceiver.channels[0])) - self.csr_devices.append("drtio0") - self.add_wb_slave(self.mem_map["drtio_aux"], 0x800, - self.drtio0.aux_controller.bus) - self.add_memory_region("drtio0_aux", self.mem_map["drtio_aux"] | self.shadow_base, 0x800) - self.config["HAS_DRTIO"] = None - self.add_csr_group("drtio", ["drtio0"]) - self.add_memory_group("drtio_aux", ["drtio0_aux"]) - - rtio_clk_period = 1e9/self.transceiver.rtio_clk_freq - platform.add_period_constraint(self.transceiver.txoutclk, rtio_clk_period) - platform.add_period_constraint(self.transceiver.rxoutclk, rtio_clk_period) - platform.add_false_path_constraints( - self.crg.cd_sys.clk, - self.transceiver.txoutclk, self.transceiver.rxoutclk) - - rtio_channels = [] - for i in range(4): - phy = ttl_simple.Output(platform.request("user_led", i)) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 0) - self.comb += sma_io.direction.eq(1) - phy = ttl_simple.Output(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 1) - self.comb += sma_io.direction.eq(0) - phy = ttl_simple.InOut(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) - self.csr_devices.append("rtio_moninj") - - self.submodules.rtio_core = rtio.Core(rtio_channels, 3) - self.csr_devices.append("rtio_core") - - self.submodules.rtio = rtio.KernelInitiator() - self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( - rtio.DMA(self.get_native_sdram_if())) - self.register_kernel_cpu_csrdevice("rtio") - self.register_kernel_cpu_csrdevice("rtio_dma") - self.submodules.cri_con = rtio.CRIInterconnectShared( - [self.rtio.cri, self.rtio_dma.cri], - [self.rtio_core.cri, self.drtio0.cri]) - self.register_kernel_cpu_csrdevice("cri_con") - - -def main(): - parser = argparse.ArgumentParser( - description="ARTIQ device binary builder / Sayma DRTIO master") - builder_args(parser) - soc_sdram_args(parser) - args = parser.parse_args() - - soc = Master(**soc_sdram_argdict(args)) - build_artiq_soc(soc, builder_argdict(args)) - - -if __name__ == "__main__": - main() diff --git a/artiq/gateware/targets/sayma_amc_drtio_satellite.py b/artiq/gateware/targets/sayma_amc_drtio_satellite.py deleted file mode 100755 index aecfca2c1..000000000 --- a/artiq/gateware/targets/sayma_amc_drtio_satellite.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import os - -from migen import * -from migen.build.generic_platform import * -from misoc.cores import spi as spi_csr -from misoc.cores import gpio -from misoc.integration.soc_sdram import soc_sdram_args, soc_sdram_argdict -from misoc.integration.builder import builder_args, builder_argdict -from misoc.targets.sayma_amc import BaseSoC - -from 
artiq.gateware import rtio -from artiq.gateware.rtio.phy import ttl_simple -from artiq.gateware.drtio.transceiver import gth_ultrascale -from artiq.gateware.drtio import DRTIOSatellite -from artiq import __version__ as artiq_version -from artiq import __artiq_dir__ as artiq_dir - - -class Satellite(BaseSoC): - mem_map = { - "drtio_aux": 0x50000000, - } - mem_map.update(BaseSoC.mem_map) - - def __init__(self, **kwargs): - BaseSoC.__init__(self, - cpu_type="or1k", - sdram_controller_type="minicon", - l2_size=128*1024, - ident=artiq_version, - **kwargs) - - platform = self.platform - - rtio_channels = [] - for i in range(4): - phy = ttl_simple.Output(platform.request("user_led", i)) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 0) - self.comb += sma_io.direction.eq(1) - phy = ttl_simple.Output(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 1) - self.comb += sma_io.direction.eq(0) - phy = ttl_simple.InOut(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) - self.csr_devices.append("rtio_moninj") - - self.submodules.transceiver = gth_7series.GTH( - clock_pads=platform.request("si5324_clkout"), - tx_pads=platform.request("sfp_tx"), - rx_pads=platform.request("sfp_rx"), - sys_clk_freq=self.clk_freq) - rx0 = ClockDomainsRenamer({"rtio_rx": "rtio_rx0"}) - self.submodules.rx_synchronizer0 = rx0(gth_ultrascale.RXSynchronizer( - self.transceiver.rtio_clk_freq, initial_phase=180.0)) - self.submodules.drtio0 = rx0(DRTIOSatellite( - self.transceiver.channels[0], rtio_channels, self.rx_synchronizer0)) - self.csr_devices.append("rx_synchronizer0") - self.csr_devices.append("drtio0") - self.add_wb_slave(self.mem_map["drtio_aux"], 0x800, - self.drtio0.aux_controller.bus) - self.add_memory_region("drtio0_aux", self.mem_map["drtio_aux"] | self.shadow_base, 0x800) - self.config["HAS_DRTIO"] = None - self.add_csr_group("drtio", ["drtio0"]) - self.add_memory_group("drtio_aux", ["drtio0_aux"]) - - self.config["RTIO_FREQUENCY"] = str(self.transceiver.rtio_clk_freq/1e6) - si5324_clkin = platform.request("si5324_clkin") - self.specials += \ - Instance("OBUFDS", - i_I=ClockSignal("rtio_rx0"), - o_O=si5324_clkin.p, o_OB=si5324_clkin.n - ) - self.submodules.si5324_rst_n = gpio.GPIOOut(platform.request("si5324").rst_n) - self.csr_devices.append("si5324_rst_n") - i2c = self.platform.request("i2c") - self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) - self.csr_devices.append("i2c") - self.config["I2C_BUS_COUNT"] = 1 - self.config["HAS_SI5324"] = None - - rtio_clk_period = 1e9/self.transceiver.rtio_clk_freq - platform.add_period_constraint(self.transceiver.txoutclk, rtio_clk_period) - platform.add_period_constraint(self.transceiver.rxoutclk, rtio_clk_period) - platform.add_false_path_constraints( - self.crg.cd_sys.clk, - self.transceiver.txoutclk, self.transceiver.rxoutclk) - - -def main(): - parser = argparse.ArgumentParser( - description="ARTIQ device binary builder / Sayma DRTIO satellite") - builder_args(parser) - soc_sdram_args(parser) - args = parser.parse_args() - - soc = Satellite(**soc_sdram_argdict(args)) - builder = Builder(soc, **builder_argdict(args)) - builder.add_software_package("satman", os.path.join(artiq_dir, "firmware", "satman")) - builder.build() - - -if __name__ == "__main__": - main() diff --git a/artiq/gateware/targets/sayma_amc_standalone.py 
b/artiq/gateware/targets/sayma_amc_standalone.py deleted file mode 100755 index c9361cee5..000000000 --- a/artiq/gateware/targets/sayma_amc_standalone.py +++ /dev/null @@ -1,251 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import os -from collections import namedtuple - -from migen import * -from migen.genlib.resetsync import AsyncResetSynchronizer - -from misoc.integration.soc_sdram import soc_sdram_args, soc_sdram_argdict -from misoc.integration.builder import builder_args, builder_argdict -from misoc.interconnect import stream -from misoc.interconnect.csr import * -from misoc.targets.sayma_amc import MiniSoC - -from jesd204b.common import (JESD204BTransportSettings, - JESD204BPhysicalSettings, - JESD204BSettings) -from jesd204b.phy.gth import GTHChannelPLL as JESD204BGTHChannelPLL -from jesd204b.phy import JESD204BPhyTX -from jesd204b.core import JESD204BCoreTX -from jesd204b.core import JESD204BCoreTXControl - -from artiq.gateware.amp import AMPSoC, build_artiq_soc -from artiq.gateware import serwb -from artiq.gateware import remote_csr -from artiq.gateware import rtio -from artiq.gateware.rtio.phy import ttl_simple, sawg -from artiq import __version__ as artiq_version - - -PhyPads = namedtuple("PhyPads", "txp txn") -to_jesd = ClockDomainsRenamer("jesd") - - -class AD9154CRG(Module, AutoCSR): - linerate = int(6e9) - refclk_freq = int(150e6) - fabric_freq = int(125e6) - def __init__(self, platform): - self.jreset = CSRStorage(reset=1) - - self.refclk = Signal() - refclk2 = Signal() - self.clock_domains.cd_jesd = ClockDomain() - refclk_pads = platform.request("dac_refclk", 0) - - self.specials += [ - Instance("IBUFDS_GTE3", i_CEB=0, p_REFCLK_HROW_CK_SEL=0b00, - i_I=refclk_pads.p, i_IB=refclk_pads.n, - o_O=self.refclk, o_ODIV2=refclk2), - Instance("BUFG_GT", i_I=refclk2, o_O=self.cd_jesd.clk), - AsyncResetSynchronizer(self.cd_jesd, self.jreset.storage), - ] - self.cd_jesd.clk.attr.add("keep") - platform.add_period_constraint(self.cd_jesd.clk, 1e9/self.refclk_freq) - - -class AD9154JESD(Module, AutoCSR): - def __init__(self, platform, sys_crg, jesd_crg, dac): - ps = JESD204BPhysicalSettings(l=8, m=4, n=16, np=16) - ts = JESD204BTransportSettings(f=2, s=2, k=16, cs=0) - settings = JESD204BSettings(ps, ts, did=0x5a, bid=0x5) - - jesd_pads = platform.request("dac_jesd", dac) - phys = [] - for i in range(len(jesd_pads.txp)): - cpll = JESD204BGTHChannelPLL( - jesd_crg.refclk, jesd_crg.refclk_freq, jesd_crg.linerate) - self.submodules += cpll - #print(cpll) - phy = JESD204BPhyTX( - cpll, PhyPads(jesd_pads.txp[i], jesd_pads.txn[i]), - jesd_crg.fabric_freq, transceiver="gth") - phy.transmitter.cd_tx.clk.attr.add("keep") - platform.add_period_constraint(phy.transmitter.cd_tx.clk, - 40*1e9/jesd_crg.linerate) - platform.add_false_path_constraints( - sys_crg.cd_sys.clk, - jesd_crg.cd_jesd.clk, - phy.transmitter.cd_tx.clk) - phys.append(phy) - - self.submodules.core = core = to_jesd(JESD204BCoreTX( - phys, settings, converter_data_width=64)) - self.submodules.control = control = to_jesd(JESD204BCoreTXControl(core)) - core.register_jsync(platform.request("dac_sync", dac)) - - -class AD9154(Module, AutoCSR): - def __init__(self, platform, sys_crg, jesd_crg, dac): - self.submodules.jesd = AD9154JESD(platform, sys_crg, jesd_crg, dac) - - self.sawgs = [sawg.Channel(width=16, parallelism=4) for i in range(4)] - self.submodules += self.sawgs - - for conv, ch in zip(self.jesd.core.sink.flatten(), self.sawgs): - self.sync.jesd += conv.eq(Cat(ch.o)) - - -class Standalone(MiniSoC, AMPSoC): - mem_map = { - 
"cri_con": 0x10000000, - "rtio": 0x11000000, - "rtio_dma": 0x12000000, - "serwb": 0x13000000, - "mailbox": 0x70000000 - } - mem_map.update(MiniSoC.mem_map) - - def __init__(self, cpu_type="or1k", with_sawg=False, **kwargs): - MiniSoC.__init__(self, - cpu_type=cpu_type, - sdram_controller_type="minicon", - l2_size=128*1024, - ident=artiq_version, - ethmac_nrxslots=4, - ethmac_ntxslots=4, - **kwargs) - AMPSoC.__init__(self) - platform = self.platform - platform.toolchain.bitstream_commands.extend([ - "set_property BITSTREAM.GENERAL.COMPRESS True [current_design]", - "set_property CFGBVS VCCO [current_design]", - "set_property CONFIG_VOLTAGE 3.3 [current_design]", - ]) - - # forward RTM UART to second FTDI UART channel - serial_1 = platform.request("serial", 1) - serial_rtm = platform.request("serial_rtm") - self.comb += [ - serial_1.tx.eq(serial_rtm.rx), - serial_rtm.tx.eq(serial_1.rx) - ] - - # AMC/RTM serwb - serwb_pll = serwb.phy.SERWBPLL(125e6, 1.25e9, vco_div=2) - self.comb += serwb_pll.refclk.eq(self.crg.cd_sys.clk) - self.submodules += serwb_pll - - serwb_pads = platform.request("amc_rtm_serwb") - serwb_phy_amc = serwb.phy.SERWBPHY(platform.device, serwb_pll, serwb_pads, mode="master") - self.submodules.serwb_phy_amc = serwb_phy_amc - self.csr_devices.append("serwb_phy_amc") - - serwb_phy_amc.serdes.cd_serwb_serdes.clk.attr.add("keep") - serwb_phy_amc.serdes.cd_serwb_serdes_20x.clk.attr.add("keep") - serwb_phy_amc.serdes.cd_serwb_serdes_5x.clk.attr.add("keep") - platform.add_period_constraint(serwb_phy_amc.serdes.cd_serwb_serdes.clk, 40*1e9/serwb_pll.linerate), - platform.add_period_constraint(serwb_phy_amc.serdes.cd_serwb_serdes_20x.clk, 2*1e9/serwb_pll.linerate), - platform.add_period_constraint(serwb_phy_amc.serdes.cd_serwb_serdes_5x.clk, 8*1e9/serwb_pll.linerate) - platform.add_false_path_constraints( - self.crg.cd_sys.clk, - serwb_phy_amc.serdes.cd_serwb_serdes.clk, - serwb_phy_amc.serdes.cd_serwb_serdes_5x.clk) - - serwb_core = serwb.core.SERWBCore(serwb_phy_amc, int(self.clk_freq), mode="slave") - self.submodules += serwb_core - self.add_wb_slave(self.mem_map["serwb"], 8192, serwb_core.etherbone.wishbone.bus) - - # RTIO - rtio_channels = [] - for i in range(4): - phy = ttl_simple.Output(platform.request("user_led", i)) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 0) - self.comb += sma_io.direction.eq(1) - phy = ttl_simple.Output(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - sma_io = platform.request("sma_io", 1) - self.comb += sma_io.direction.eq(0) - phy = ttl_simple.InOut(sma_io.level) - self.submodules += phy - rtio_channels.append(rtio.Channel.from_phy(phy)) - - if with_sawg: - self.submodules.ad9154_crg = AD9154CRG(platform) - self.submodules.ad9154_0 = AD9154(platform, self.crg, self.ad9154_crg, 0) - self.submodules.ad9154_1 = AD9154(platform, self.crg, self.ad9154_crg, 1) - self.csr_devices.append("ad9154_crg") - self.csr_devices.append("ad9154_0") - self.csr_devices.append("ad9154_1") - self.config["HAS_AD9154"] = None - self.add_csr_group("ad9154", ["ad9154_0", "ad9154_1"]) - self.config["RTIO_FIRST_SAWG_CHANNEL"] = len(rtio_channels) - rtio_channels.extend(rtio.Channel.from_phy(phy) - for sawg in self.ad9154_0.sawgs + - self.ad9154_1.sawgs - for phy in sawg.phys) - - self.config["HAS_RTIO_LOG"] = None - self.config["RTIO_LOG_CHANNEL"] = len(rtio_channels) - rtio_channels.append(rtio.LogChannel()) - - self.clock_domains.cd_rtio = ClockDomain() - self.comb 
+= [ - self.cd_rtio.clk.eq(ClockSignal()), - self.cd_rtio.rst.eq(ResetSignal()) - ] - self.submodules.rtio_core = rtio.Core(rtio_channels) - self.csr_devices.append("rtio_core") - self.submodules.rtio = rtio.KernelInitiator() - self.submodules.rtio_dma = ClockDomainsRenamer("sys_kernel")( - rtio.DMA(self.get_native_sdram_if())) - self.register_kernel_cpu_csrdevice("rtio") - self.register_kernel_cpu_csrdevice("rtio_dma") - self.submodules.cri_con = rtio.CRIInterconnectShared( - [self.rtio.cri, self.rtio_dma.cri], - [self.rtio_core.cri]) - self.register_kernel_cpu_csrdevice("cri_con") - self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) - self.csr_devices.append("rtio_moninj") - - self.submodules.rtio_analyzer = rtio.Analyzer(self.rtio_core.cri, - self.get_native_sdram_if()) - self.csr_devices.append("rtio_analyzer") - - -def main(): - parser = argparse.ArgumentParser( - description="ARTIQ device binary builder / Sayma AMC stand-alone") - builder_args(parser) - soc_sdram_args(parser) - parser.add_argument("--rtm-csr-csv", - default=os.path.join("artiq_sayma_rtm", "sayma_rtm_csr.csv"), - help="CSV file listing remote CSRs on RTM (default: %(default)s)") - parser.add_argument("--with-sawg", - default=False, action="store_true", - help="add JESD204B and SAWG channels (default: %(default)s)") - args = parser.parse_args() - - soc = Standalone(with_sawg=args.with_sawg, **soc_sdram_argdict(args)) - - remote_csr_regions = remote_csr.get_remote_csr_regions( - soc.mem_map["serwb"] | soc.shadow_base, - args.rtm_csr_csv) - for name, origin, busword, csrs in remote_csr_regions: - soc.add_csr_region(name, origin, busword, csrs) - # Configuration for RTM peripherals. Keep in sync with sayma_rtm.py! - soc.config["HAS_HMC830_7043"] = None - soc.config["CONVERTER_SPI_HMC830_CS"] = 0 - soc.config["CONVERTER_SPI_HMC7043_CS"] = 1 - soc.config["CONVERTER_SPI_FIRST_AD9154_CS"] = 2 - - build_artiq_soc(soc, builder_argdict(args)) - - -if __name__ == "__main__": - main() diff --git a/artiq/gateware/targets/sayma_rtm.py b/artiq/gateware/targets/sayma_rtm.py index df9206dc5..294a17823 100755 --- a/artiq/gateware/targets/sayma_rtm.py +++ b/artiq/gateware/targets/sayma_rtm.py @@ -1,194 +1,319 @@ #!/usr/bin/env python3 +import argparse import os +import subprocess +import struct from migen import * -from migen.genlib.resetsync import AsyncResetSynchronizer -from migen.build.platforms.sinara import sayma_rtm +from migen.genlib.cdc import MultiReg -from misoc.interconnect import wishbone, stream from misoc.interconnect.csr import * -from misoc.cores import spi from misoc.cores import gpio -from misoc.integration.wb_slaves import WishboneSlaveManager -from misoc.integration.cpu_interface import get_csr_csv +from misoc.cores import spi2 +from misoc.cores.a7_gtp import * +from misoc.targets.sayma_rtm import BaseSoC, soc_sayma_rtm_args, soc_sayma_rtm_argdict +from misoc.integration.builder import Builder, builder_args, builder_argdict -from artiq.gateware import serwb +from artiq.gateware import rtio +from artiq.gateware import jesd204_tools +from artiq.gateware.rtio.phy import ttl_simple, ttl_serdes_7series +from artiq.gateware.drtio.transceiver import gtp_7series +from artiq.gateware.drtio.siphaser import SiPhaser7Series +from artiq.gateware.drtio.wrpll import WRPLL, DDMTDSamplerGTP +from artiq.gateware.drtio.rx_synchronizer import XilinxRXSynchronizer +from artiq.gateware.drtio import * +from artiq.build_soc import add_identifier +from artiq import __artiq_dir__ as artiq_dir -class CRG(Module): - def __init__(self, 
platform): - self.clock_domains.cd_sys = ClockDomain() - self.clock_domains.cd_clk200 = ClockDomain() +def fix_serdes_timing_path(platform): + # ignore timing of path from OSERDESE2 through the pad to ISERDESE2 + platform.add_platform_command( + "set_false_path -quiet " + "-through [get_pins -filter {{REF_PIN_NAME == OQ || REF_PIN_NAME == TQ}} " + "-of [get_cells -filter {{REF_NAME == OSERDESE2}}]] " + "-to [get_pins -filter {{REF_PIN_NAME == D}} " + "-of [get_cells -filter {{REF_NAME == ISERDESE2}}]]" + ) - clk50 = platform.request("clk50") - self.reset = Signal() +class _RTIOClockMultiplier(Module, AutoCSR): + def __init__(self, rtio_clk_freq): + self.pll_reset = CSRStorage(reset=1) + self.pll_locked = CSRStatus() + self.clock_domains.cd_rtiox4 = ClockDomain(reset_less=True) + + # See "Global Clock Network Deskew Using Two BUFGs" in ug472. + clkfbout = Signal() + clkfbin = Signal() + rtiox4_clk = Signal() pll_locked = Signal() - pll_fb = Signal() - pll_sys = Signal() - pll_clk200 = Signal() self.specials += [ - Instance("PLLE2_BASE", - p_STARTUP_WAIT="FALSE", o_LOCKED=pll_locked, + Instance("MMCME2_BASE", + p_CLKIN1_PERIOD=1e9/rtio_clk_freq, + i_CLKIN1=ClockSignal("rtio"), + i_RST=self.pll_reset.storage, + o_LOCKED=pll_locked, - # VCO @ 1GHz - p_REF_JITTER1=0.01, p_CLKIN1_PERIOD=20.0, - p_CLKFBOUT_MULT=20, p_DIVCLK_DIVIDE=1, - i_CLKIN1=clk50, i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb, + p_CLKFBOUT_MULT_F=8.0, p_DIVCLK_DIVIDE=1, - # 125MHz - p_CLKOUT0_DIVIDE=8, p_CLKOUT0_PHASE=0.0, o_CLKOUT0=pll_sys, + o_CLKFBOUT=clkfbout, i_CLKFBIN=clkfbin, - # 200MHz - p_CLKOUT3_DIVIDE=5, p_CLKOUT3_PHASE=0.0, o_CLKOUT3=pll_clk200 + p_CLKOUT0_DIVIDE_F=2.0, o_CLKOUT0=rtiox4_clk, ), - Instance("BUFG", i_I=pll_sys, o_O=self.cd_sys.clk), - Instance("BUFG", i_I=pll_clk200, o_O=self.cd_clk200.clk), - AsyncResetSynchronizer(self.cd_sys, ~pll_locked | self.reset), - AsyncResetSynchronizer(self.cd_clk200, ~pll_locked | self.reset) + Instance("BUFG", i_I=clkfbout, o_O=clkfbin), + Instance("BUFG", i_I=rtiox4_clk, o_O=self.cd_rtiox4.clk), + + MultiReg(pll_locked, self.pll_locked.status) ] - reset_counter = Signal(4, reset=15) - ic_reset = Signal(reset=1) - self.sync.clk200 += \ - If(reset_counter != 0, - reset_counter.eq(reset_counter - 1) - ).Else( - ic_reset.eq(0) - ) - self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset) +class _SatelliteBase(BaseSoC): + mem_map = { + "drtioaux": 0x50000000, + } + mem_map.update(BaseSoC.mem_map) -class RTMIdentifier(Module, AutoCSR): - def __init__(self): - self.identifier = CSRStatus(32) - self.comb += self.identifier.status.eq(0x5352544d) # "SRTM" + def __init__(self, rtio_clk_freq, *, with_wrpll, gateware_identifier_str, **kwargs): + BaseSoC.__init__(self, + cpu_type="or1k", + **kwargs) + add_identifier(self, gateware_identifier_str=gateware_identifier_str) + self.rtio_clk_freq = rtio_clk_freq + platform = self.platform -CSR_RANGE_SIZE = 0x800 + disable_cdrclkc_ibuf = Signal(reset=1) + disable_cdrclkc_ibuf.attr.add("no_retiming") + cdrclkc_clkout = platform.request("cdr_clk_clean") + cdrclkc_clkout_buf = Signal() + self.specials += Instance("IBUFDS_GTE2", + i_CEB=disable_cdrclkc_ibuf, + i_I=cdrclkc_clkout.p, i_IB=cdrclkc_clkout.n, + o_O=cdrclkc_clkout_buf) + qpll_drtio_settings = QPLLSettings( + refclksel=0b001, + fbdiv=4, + fbdiv_45=5, + refclk_div=1) + qpll = QPLL(cdrclkc_clkout_buf, qpll_drtio_settings) + self.submodules += qpll + self.submodules.drtio_transceiver = gtp_7series.GTP( + qpll_channel=qpll.channels[0], + 
data_pads=[platform.request("rtm_amc_link", 0)], + sys_clk_freq=self.clk_freq, + rtio_clk_freq=rtio_clk_freq) + self.csr_devices.append("drtio_transceiver") + self.sync += disable_cdrclkc_ibuf.eq( + ~self.drtio_transceiver.stable_clkin.storage) -class SaymaRTM(Module): - def __init__(self, platform): - csr_devices = [] + self.submodules.rtio_tsc = rtio.TSC("sync", glbl_fine_ts_width=3) - self.submodules.crg = CRG(platform) - self.crg.cd_sys.clk.attr.add("keep") - clk_freq = 125e6 - platform.add_period_constraint(self.crg.cd_sys.clk, 8.0) - platform.add_period_constraint(self.crg.cd_clk200.clk, 5.0) + cdr = ClockDomainsRenamer({"rtio_rx": "rtio_rx0"}) + + self.submodules.rx_synchronizer = cdr(XilinxRXSynchronizer()) + core = cdr(DRTIOSatellite( + self.rtio_tsc, self.drtio_transceiver.channels[0], + self.rx_synchronizer)) + self.submodules.drtiosat = core + self.csr_devices.append("drtiosat") + + coreaux = cdr(DRTIOAuxController(core.link_layer)) + self.submodules.drtioaux0 = coreaux + self.csr_devices.append("drtioaux0") + + memory_address = self.mem_map["drtioaux"] + self.add_wb_slave(memory_address, 0x800, + coreaux.bus) + self.add_memory_region("drtioaux0_mem", memory_address | self.shadow_base, 0x800) + + self.config["HAS_DRTIO"] = None + self.add_csr_group("drtioaux", ["drtioaux0"]) + self.add_memory_group("drtioaux_mem", ["drtioaux0_mem"]) + + gtp = self.drtio_transceiver.gtps[0] + rtio_clk_period = 1e9/rtio_clk_freq + self.config["RTIO_FREQUENCY"] = str(rtio_clk_freq/1e6) + if with_wrpll: + self.comb += [ + platform.request("filtered_clk_sel").eq(0), + platform.request("ddmtd_main_dcxo_oe").eq(1), + platform.request("ddmtd_helper_dcxo_oe").eq(1) + ] + self.submodules.wrpll_sampler = DDMTDSamplerGTP( + self.drtio_transceiver, + platform.request("cdr_clk_clean_fabric")) + self.submodules.wrpll = WRPLL( + helper_clk_pads=platform.request("ddmtd_helper_clk"), + main_dcxo_i2c=platform.request("ddmtd_main_dcxo_i2c"), + helper_dxco_i2c=platform.request("ddmtd_helper_dcxo_i2c"), + ddmtd_inputs=self.wrpll_sampler) + self.csr_devices.append("wrpll") + platform.add_period_constraint(self.wrpll.cd_helper.clk, rtio_clk_period*0.99) + platform.add_false_path_constraints(self.crg.cd_sys.clk, self.wrpll.cd_helper.clk) + platform.add_false_path_constraints(self.wrpll.cd_helper.clk, gtp.rxoutclk) + else: + self.comb += platform.request("filtered_clk_sel").eq(1) + self.submodules.siphaser = SiPhaser7Series( + si5324_clkin=platform.request("si5324_clkin"), + rx_synchronizer=self.rx_synchronizer, + ref_clk=self.crg.cd_sys.clk, ref_div2=True, + rtio_clk_freq=rtio_clk_freq) + platform.add_false_path_constraints( + self.crg.cd_sys.clk, self.siphaser.mmcm_freerun_output) + self.csr_devices.append("siphaser") + self.submodules.si5324_rst_n = gpio.GPIOOut(platform.request("si5324").rst_n) + self.csr_devices.append("si5324_rst_n") + i2c = self.platform.request("i2c") + self.submodules.i2c = gpio.GPIOTristate([i2c.scl, i2c.sda]) + self.csr_devices.append("i2c") + self.config["I2C_BUS_COUNT"] = 1 + self.config["HAS_SI5324"] = None + + platform.add_period_constraint(gtp.txoutclk, rtio_clk_period) + platform.add_period_constraint(gtp.rxoutclk, rtio_clk_period) platform.add_false_path_constraints( self.crg.cd_sys.clk, - self.crg.cd_clk200.clk) + gtp.txoutclk, gtp.rxoutclk) - self.submodules.rtm_identifier = RTMIdentifier() - csr_devices.append("rtm_identifier") + self.submodules.rtio_crg = _RTIOClockMultiplier(rtio_clk_freq) + self.csr_devices.append("rtio_crg") + fix_serdes_timing_path(platform) - # clock mux: 
100MHz ext SMA clock to HMC830 input - self.submodules.clock_mux = gpio.GPIOOut(Cat( - platform.request("clk_src_ext_sel"), - platform.request("ref_clk_src_sel"), - platform.request("dac_clk_src_sel"))) - csr_devices.append("clock_mux") + def add_rtio(self, rtio_channels): + self.submodules.rtio_moninj = rtio.MonInj(rtio_channels) + self.csr_devices.append("rtio_moninj") - # Allaki: enable RF output, GPIO access to attenuator - self.comb += [ - platform.request("allaki0_rfsw0").eq(1), - platform.request("allaki0_rfsw1").eq(1), - platform.request("allaki1_rfsw0").eq(1), - platform.request("allaki1_rfsw1").eq(1), - platform.request("allaki2_rfsw0").eq(1), - platform.request("allaki2_rfsw1").eq(1), - platform.request("allaki3_rfsw0").eq(1), - platform.request("allaki3_rfsw1").eq(1), - ] - allaki_atts = [ - platform.request("allaki0_att0"), - platform.request("allaki0_att1"), - platform.request("allaki1_att0"), - platform.request("allaki1_att1"), - platform.request("allaki2_att0"), - platform.request("allaki2_att1"), - platform.request("allaki3_att0"), - platform.request("allaki3_att1"), - ] - allaki_att_gpio = [] - for allaki_att in allaki_atts: - allaki_att_gpio += [ - allaki_att.le, - allaki_att.sin, - allaki_att.clk, - allaki_att.rst_n, - ] - self.submodules.allaki_atts = gpio.GPIOOut(Cat(*allaki_att_gpio)) - csr_devices.append("allaki_atts") + self.submodules.local_io = SyncRTIO(self.rtio_tsc, rtio_channels) + self.comb += self.drtiosat.async_errors.eq(self.local_io.async_errors) + self.comb += self.drtiosat.cri.connect(self.local_io.cri) + + +class Satellite(_SatelliteBase): + def __init__(self, **kwargs): + _SatelliteBase.__init__(self, **kwargs) + + platform = self.platform + + rtio_channels = [] + for bm in range(2): + print("BaseMod{} RF switches starting at RTIO channel 0x{:06x}" + .format(bm, len(rtio_channels))) + for i in range(4): + phy = ttl_serdes_7series.Output_8X(platform.request("basemod{}_rfsw".format(bm), i), + invert=True) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + print("BaseMod{} attenuator starting at RTIO channel 0x{:06x}" + .format(bm, len(rtio_channels))) + basemod_att = platform.request("basemod{}_att".format(bm)) + for name in "rst_n clk le".split(): + signal = getattr(basemod_att, name) + for i in range(len(signal)): + phy = ttl_simple.Output(signal[i]) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + phy = ttl_simple.Output(basemod_att.mosi[0]) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + for i in range(3): + self.comb += basemod_att.mosi[i+1].eq(basemod_att.miso[i]) + phy = ttl_simple.InOut(basemod_att.miso[3]) + self.submodules += phy + rtio_channels.append(rtio.Channel.from_phy(phy)) + + self.add_rtio(rtio_channels) + + self.comb += platform.request("clk_src_ext_sel").eq(0) # HMC clock chip and DAC control self.comb += [ - platform.request("ad9154_rst_n").eq(1), - platform.request("ad9154_txen", 0).eq(0b11), - platform.request("ad9154_txen", 1).eq(0b11) + platform.request("ad9154_rst_n", 0).eq(1), + platform.request("ad9154_rst_n", 1).eq(1) ] - - self.submodules.converter_spi = spi.SPIMaster([ + self.submodules.converter_spi = spi2.SPIMaster(spi2.SPIInterface( platform.request("hmc_spi"), platform.request("ad9154_spi", 0), - platform.request("ad9154_spi", 1)]) - csr_devices.append("converter_spi") - self.comb += platform.request("hmc7043_reset").eq(0) + platform.request("ad9154_spi", 1))) + self.csr_devices.append("converter_spi") + 
self.submodules.hmc7043_reset = gpio.GPIOOut( + platform.request("hmc7043_reset"), reset_out=1) + self.csr_devices.append("hmc7043_reset") + self.submodules.hmc7043_gpo = gpio.GPIOIn( + platform.request("hmc7043_gpo")) + self.csr_devices.append("hmc7043_gpo") + self.config["HAS_HMC830_7043"] = None + self.config["HAS_AD9154"] = None + self.config["AD9154_COUNT"] = 2 + self.config["CONVERTER_SPI_HMC830_CS"] = 0 + self.config["CONVERTER_SPI_HMC7043_CS"] = 1 + self.config["CONVERTER_SPI_FIRST_AD9154_CS"] = 2 + self.config["HMC830_REF"] = str(int(self.rtio_clk_freq/1e6)) - # AMC/RTM serwb - serwb_pll = serwb.phy.SERWBPLL(125e6, 1.25e9, vco_div=1) - self.submodules += serwb_pll + # HMC workarounds + self.comb += platform.request("hmc830_pwr_en").eq(1) + self.submodules.hmc7043_out_en = gpio.GPIOOut( + platform.request("hmc7043_out_en")) + self.csr_devices.append("hmc7043_out_en") - serwb_pads = platform.request("amc_rtm_serwb") - serwb_phy_rtm = serwb.phy.SERWBPHY(platform.device, serwb_pll, serwb_pads, mode="slave") - self.submodules.serwb_phy_rtm = serwb_phy_rtm - self.comb += self.crg.reset.eq(serwb_phy_rtm.init.reset) - csr_devices.append("serwb_phy_rtm") - - serwb_phy_rtm.serdes.cd_serwb_serdes.clk.attr.add("keep") - serwb_phy_rtm.serdes.cd_serwb_serdes_20x.clk.attr.add("keep") - serwb_phy_rtm.serdes.cd_serwb_serdes_5x.clk.attr.add("keep") - platform.add_period_constraint(serwb_phy_rtm.serdes.cd_serwb_serdes.clk, 40*1e9/serwb_pll.linerate), - platform.add_period_constraint(serwb_phy_rtm.serdes.cd_serwb_serdes_20x.clk, 2*1e9/serwb_pll.linerate), - platform.add_period_constraint(serwb_phy_rtm.serdes.cd_serwb_serdes_5x.clk, 8*1e9/serwb_pll.linerate) + # DDMTD + sysref_pads = platform.request("rtm_fpga_sysref", 0) + self.submodules.sysref_ddmtd = jesd204_tools.DDMTD(sysref_pads, self.rtio_clk_freq) + self.csr_devices.append("sysref_ddmtd") platform.add_false_path_constraints( - self.crg.cd_sys.clk, - serwb_phy_rtm.serdes.cd_serwb_serdes.clk, - serwb_phy_rtm.serdes.cd_serwb_serdes_5x.clk) + self.sysref_ddmtd.cd_helper.clk, self.drtio_transceiver.gtps[0].txoutclk) + platform.add_false_path_constraints( + self.sysref_ddmtd.cd_helper.clk, self.crg.cd_sys.clk) - serwb_core = serwb.core.SERWBCore(serwb_phy_rtm, int(clk_freq), mode="master") - self.submodules += serwb_core - # process CSR devices and connect them to serwb - self.csr_regions = [] - wb_slaves = WishboneSlaveManager(0x10000000) - for i, name in enumerate(csr_devices): - origin = i*CSR_RANGE_SIZE - module = getattr(self, name) - csrs = module.get_csrs() +class SatmanSoCBuilder(Builder): + def __init__(self, *args, **kwargs): + Builder.__init__(self, *args, **kwargs) + firmware_dir = os.path.join(artiq_dir, "firmware") + self.software_packages = [] + self.add_software_package("satman", os.path.join(firmware_dir, "satman")) - bank = wishbone.CSRBank(csrs) - self.submodules += bank + def initialize_memory(self): + satman = os.path.join(self.output_dir, "software", "satman", + "satman.bin") + with open(satman, "rb") as boot_file: + boot_data = [] + unpack_endian = ">I" + while True: + w = boot_file.read(4) + if not w: + break + boot_data.append(struct.unpack(unpack_endian, w)[0]) - wb_slaves.add(origin, CSR_RANGE_SIZE, bank.bus) - self.csr_regions.append((name, origin, 32, csrs)) - - self.submodules += wishbone.Decoder(serwb_core.etherbone.wishbone.bus, - wb_slaves.get_interconnect_slaves(), - register=True) + self.soc.main_ram.mem.init = boot_data def main(): - build_dir = "artiq_sayma_rtm" - platform = sayma_rtm.Platform() - top = 
SaymaRTM(platform) - os.makedirs(build_dir, exist_ok=True) - with open(os.path.join(build_dir, "sayma_rtm_csr.csv"), "w") as f: - f.write(get_csr_csv(top.csr_regions)) - platform.build(top, build_dir=build_dir) + parser = argparse.ArgumentParser( + description="Sayma RTM gateware and firmware builder") + builder_args(parser) + soc_sayma_rtm_args(parser) + parser.add_argument("--rtio-clk-freq", + default=150, type=int, help="RTIO clock frequency in MHz") + parser.add_argument("--with-wrpll", default=False, action="store_true") + parser.add_argument("--gateware-identifier-str", default=None, + help="Override ROM identifier") + parser.set_defaults(output_dir=os.path.join("artiq_sayma", "rtm")) + args = parser.parse_args() + + soc = Satellite( + rtio_clk_freq=1e6*args.rtio_clk_freq, + with_wrpll=args.with_wrpll, + gateware_identifier_str=args.gateware_identifier_str, + **soc_sayma_rtm_argdict(args)) + builder = SatmanSoCBuilder(soc, **builder_argdict(args)) + try: + builder.build() + except subprocess.CalledProcessError as e: + raise SystemExit("Command {} failed".format(" ".join(e.cmd))) if __name__ == "__main__": diff --git a/artiq/gateware/test/drtio/packet_interface.py b/artiq/gateware/test/drtio/packet_interface.py new file mode 100644 index 000000000..c6d2979a2 --- /dev/null +++ b/artiq/gateware/test/drtio/packet_interface.py @@ -0,0 +1,72 @@ +from migen import * + +from artiq.gateware.drtio.rt_serializer import * + + +class PacketInterface: + def __init__(self, direction, ws): + if direction == "m2s": + self.plm = get_m2s_layouts(ws) + elif direction == "s2m": + self.plm = get_s2m_layouts(ws) + else: + raise ValueError + self.frame = Signal() + self.data = Signal(ws) + + def send(self, ty, **kwargs): + idx = 8 + value = self.plm.types[ty] + for field_name, field_size in self.plm.layouts[ty][1:]: + try: + fvalue = kwargs[field_name] + del kwargs[field_name] + except KeyError: + fvalue = 0 + value = value | (fvalue << idx) + idx += field_size + if kwargs: + raise ValueError + + ws = len(self.data) + yield self.frame.eq(1) + for i in range(idx//ws): + yield self.data.eq(value) + value >>= ws + yield + yield self.frame.eq(0) + yield + + @passive + def receive(self, callback): + previous_frame = 0 + frame_words = [] + while True: + frame = yield self.frame + if frame: + frame_words.append((yield self.data)) + if previous_frame and not frame: + packet_type = self.plm.type_names[frame_words[0] & 0xff] + packet_nwords = layout_len(self.plm.layouts[packet_type]) \ + //len(self.data) + packet, trailer = frame_words[:packet_nwords], \ + frame_words[packet_nwords:] + + n = 0 + packet_int = 0 + for w in packet: + packet_int |= (w << n) + n += len(self.data) + + field_dict = dict() + idx = 0 + for field_name, field_size in self.plm.layouts[packet_type]: + v = (packet_int >> idx) & (2**field_size - 1) + field_dict[field_name] = v + idx += field_size + + callback(packet_type, field_dict, trailer) + + frame_words = [] + previous_frame = frame + yield diff --git a/artiq/gateware/test/drtio/test_aux_controller.py b/artiq/gateware/test/drtio/test_aux_controller.py index f07f5eb6d..64e2e15d7 100644 --- a/artiq/gateware/test/drtio/test_aux_controller.py +++ b/artiq/gateware/test/drtio/test_aux_controller.py @@ -36,7 +36,7 @@ class TB(Module): def __init__(self, nwords): self.submodules.link_layer = Loopback(nwords) self.submodules.aux_controller = ClockDomainsRenamer( - {"rtio": "sys", "rtio_rx": "sys"})(AuxController(self.link_layer)) + {"rtio": "sys", "rtio_rx": "sys"})(DRTIOAuxController(self.link_layer)) 
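The new `packet_interface.py` helper above drives and decodes serialized DRTIO packets in simulation: `send()` places the 8-bit packet type in the low byte, ORs each layout field in LSB-first order at increasing bit offsets, then shifts the packed value out one link word per cycle. A standalone sketch of that packing scheme follows (toy layout and made-up field values, no ARTIQ imports; the real helper takes its layouts from `get_m2s_layouts`/`get_s2m_layouts` and relies on them being word-aligned):

```python
# Pure-Python sketch of the packing used by PacketInterface.send().
# The layout and values below are illustrative, not a real DRTIO packet.

def pack_packet(type_code, layout, ws, **fields):
    """layout: list of (field_name, field_size) pairs, excluding the type byte."""
    idx = 8                      # the 8-bit type code occupies bits 0..7
    value = type_code
    for name, size in layout:
        value |= fields.get(name, 0) << idx
        idx += size
    nwords = -(-idx // ws)       # ceil; the real layouts are already word-aligned
    # emit the LSB word first, one word per link cycle
    return [(value >> (i * ws)) & ((1 << ws) - 1) for i in range(nwords)]


if __name__ == "__main__":
    # hypothetical layout: 16-bit channel, 32-bit data, sent over 16-bit words
    words = pack_packet(0x2a, [("channel", 16), ("data", 32)], 16,
                        channel=0x0123, data=0xdeadbeef)
    print([hex(w) for w in words])  # ['0x232a', '0xef01', '0xadbe', '0xde']
```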
class TestAuxController(unittest.TestCase): diff --git a/artiq/gateware/test/drtio/test_cdc.py b/artiq/gateware/test/drtio/test_cdc.py new file mode 100644 index 000000000..5598fd68b --- /dev/null +++ b/artiq/gateware/test/drtio/test_cdc.py @@ -0,0 +1,94 @@ +import unittest +import random + +from migen import * + +from artiq.gateware.drtio.cdc import CrossDomainRequest, CrossDomainNotification + + +class TestCDC(unittest.TestCase): + def test_cross_domain_request(self): + prng = random.Random(1) + for sys_freq in 3, 6, 11: + for srv_freq in 3, 6, 11: + req_stb = Signal() + req_ack = Signal() + req_data = Signal(8) + srv_stb = Signal() + srv_ack = Signal() + srv_data = Signal(8) + test_seq = [93, 92, 19, 39, 91, 30, 12, 91, 38, 42] + received_seq = [] + + def requester(): + for data in test_seq: + yield req_data.eq(data) + yield req_stb.eq(1) + yield + while not (yield req_ack): + yield + yield req_stb.eq(0) + for j in range(prng.randrange(0, 10)): + yield + + def server(): + for i in range(len(test_seq)): + while not (yield srv_stb): + yield + received_seq.append((yield srv_data)) + for j in range(prng.randrange(0, 10)): + yield + yield srv_ack.eq(1) + yield + yield srv_ack.eq(0) + yield + + dut = CrossDomainRequest("srv", + req_stb, req_ack, req_data, + srv_stb, srv_ack, srv_data) + run_simulation(dut, + {"sys": requester(), "srv": server()}, + {"sys": sys_freq, "srv": srv_freq}) + self.assertEqual(test_seq, received_seq) + + def test_cross_domain_notification(self): + prng = random.Random(1) + + emi_stb = Signal() + emi_data = Signal(8) + rec_stb = Signal() + rec_ack = Signal() + rec_data = Signal(8) + + test_seq = [23, 12, 8, 3, 28] + received_seq = [] + + def emitter(): + for data in test_seq: + yield emi_stb.eq(1) + yield emi_data.eq(data) + yield + yield emi_stb.eq(0) + yield + for j in range(prng.randrange(0, 3)): + yield + + def receiver(): + for i in range(len(test_seq)): + while not (yield rec_stb): + yield + received_seq.append((yield rec_data)) + yield rec_ack.eq(1) + yield + yield rec_ack.eq(0) + yield + for j in range(prng.randrange(0, 3)): + yield + + dut = CrossDomainNotification("emi", "sys", + emi_stb, emi_data, + rec_stb, rec_ack, rec_data) + run_simulation(dut, + {"emi": emitter(), "sys": receiver()}, + {"emi": 13, "sys": 3}) + self.assertEqual(test_seq, received_seq) diff --git a/artiq/gateware/test/drtio/test_full_stack.py b/artiq/gateware/test/drtio/test_full_stack.py index 59e5b2897..02535b7e6 100644 --- a/artiq/gateware/test/drtio/test_full_stack.py +++ b/artiq/gateware/test/drtio/test_full_stack.py @@ -52,191 +52,205 @@ class DUT(Module): self.ttl1 = Signal() self.transceivers = DummyTransceiverPair(nwords) - self.submodules.master = DRTIOMaster(self.transceivers.alice) - self.submodules.master_ki = rtio.KernelInitiator(self.master.cri) + self.submodules.tsc_master = rtio.TSC("async") + self.submodules.master = DRTIOMaster(self.tsc_master, + self.transceivers.alice) + self.submodules.master_ki = rtio.KernelInitiator(self.tsc_master, + self.master.cri) rx_synchronizer = DummyRXSynchronizer() self.submodules.phy0 = ttl_simple.Output(self.ttl0) self.submodules.phy1 = ttl_simple.Output(self.ttl1) self.submodules.phy2 = SimpleIOPHY(512, 32) # test wide output data rtio_channels = [ - rtio.Channel.from_phy(self.phy0, ofifo_depth=4), - rtio.Channel.from_phy(self.phy1, ofifo_depth=4), - rtio.Channel.from_phy(self.phy2, ofifo_depth=4), + rtio.Channel.from_phy(self.phy0), + rtio.Channel.from_phy(self.phy1), + rtio.Channel.from_phy(self.phy2), ] + 
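The `test_cdc.py` cases above rely on Migen's ability to run each generator in its own clock domain with its own period, so the requester ("sys") and server ("srv") really do step at different rates while the stb/ack handshake is exercised. A minimal sketch of that simulation pattern (the counter module and the periods are of my choosing, not part of the ARTIQ gateware):

```python
# Minimal example of running a simulation with two clock domains at
# different rates, as the CDC tests do.

import unittest

from migen import *


class _TwoCounters(Module):
    def __init__(self):
        self.count_sys = Signal(8)
        self.count_fast = Signal(8)
        self.sync += self.count_sys.eq(self.count_sys + 1)          # "sys" domain
        self.sync.fast += self.count_fast.eq(self.count_fast + 1)   # "fast" domain


class TestTwoDomains(unittest.TestCase):
    def test_rates(self):
        dut = _TwoCounters()

        def check():
            for _ in range(50):
                yield
            # the domain with the shorter clock period must have counted further
            self.assertGreater((yield dut.count_fast), (yield dut.count_sys))

        # one clock period per domain, like the {"sys": ..., "srv": ...} dicts above
        run_simulation(dut, {"sys": check()}, {"sys": 10, "fast": 3})
```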
self.submodules.tsc_satellite = rtio.TSC("sync") self.submodules.satellite = DRTIOSatellite( - self.transceivers.bob, rtio_channels, rx_synchronizer) - + self.tsc_satellite, self.transceivers.bob, rx_synchronizer) + self.satellite.reset.storage.reset = 0 + self.satellite.reset.storage_full.reset = 0 + self.satellite.reset_phy.storage.reset = 0 + self.satellite.reset_phy.storage_full.reset = 0 + self.submodules.satellite_rtio = SyncRTIO( + self.tsc_satellite, rtio_channels, lane_count=4, fifo_depth=8) + self.comb += [ + self.satellite.cri.connect(self.satellite_rtio.cri), + self.satellite.async_errors.eq(self.satellite_rtio.async_errors), + ] + + +class OutputsTestbench: + def __init__(self): + self.dut = DUT(2) + self.now = 0 + + def init(self): + yield from self.dut.master.rt_controller.csrs.underflow_margin.write(100) + while not (yield from self.dut.master.link_layer.rx_up.read()): + yield + yield from self.get_buffer_space() + + def get_buffer_space(self): + csrs = self.dut.master.rt_controller.csrs + yield from csrs.o_get_buffer_space.write(1) + yield + while (yield from csrs.o_wait.read()): + yield + r = (yield from csrs.o_dbg_buffer_space.read()) + return r + + def delay(self, dt): + self.now += dt + + def sync(self): + t = self.now + 15 + while (yield self.dut.tsc_master.full_ts_cri) < t: + yield + + def write(self, channel, data): + kcsrs = self.dut.master_ki + yield from kcsrs.target.write(channel << 8) + yield from kcsrs.now_hi.write(self.now >> 32) + yield from kcsrs.now_lo.write(self.now & 0xffffffff) + yield from kcsrs.o_data.write(data) + yield + status = 1 + wlen = 0 + while status: + status = yield from kcsrs.o_status.read() + if status & 0x2: + raise RTIOUnderflow + if status & 0x4: + raise RTIODestinationUnreachable + yield + wlen += 1 + return wlen + + @passive + def check_ttls(self, ttl_changes): + cycle = 0 + old_ttls = [0, 0] + while True: + ttls = [(yield self.dut.ttl0), (yield self.dut.ttl1)] + for n, (old_ttl, ttl) in enumerate(zip(old_ttls, ttls)): + if ttl != old_ttl: + ttl_changes.append((cycle, n)) + old_ttls = ttls + yield + cycle += 1 + class TestFullStack(unittest.TestCase): clocks = {"sys": 8, "rtio": 5, "rtio_rx": 5, - "rio": 5, "rio_phy": 5, - "sys_with_rst": 8, "rtio_with_rst": 5} - - def test_outputs(self): - dut = DUT(2) - kcsrs = dut.master_ki - csrs = dut.master.rt_controller.csrs - mgr = dut.master.rt_manager - saterr = dut.satellite.rt_errors + "rio": 5, "rio_phy": 5} + def test_pulses(self): + tb = OutputsTestbench() ttl_changes = [] correct_ttl_changes = [ - # from test_pulses - (203, 0), (208, 0), - (208, 1), - (214, 1), - - # from test_fifo_space - (414, 0), - (454, 0), - (494, 0), - (534, 0), - (574, 0), - (614, 0) + (213, 0), + (213, 1), + (219, 1), ] - now = 0 - def delay(dt): - nonlocal now - now += dt + def test(): + yield from tb.init() + tb.delay(200) + yield from tb.write(0, 1) + tb.delay(5) + yield from tb.write(0, 0) + yield from tb.write(1, 1) + tb.delay(6) + yield from tb.write(1, 0) + yield from tb.sync() - def get_fifo_space(channel): - yield from csrs.chan_sel_override_en.write(1) - yield from csrs.chan_sel_override.write(channel) - yield from csrs.o_get_fifo_space.write(1) - yield - while (yield from csrs.o_wait.read()): - yield - r = (yield from csrs.o_dbg_fifo_space.read()) - yield from csrs.chan_sel_override_en.write(0) - return r + run_simulation(tb.dut, + {"sys": test(), "rtio": tb.check_ttls(ttl_changes)}, self.clocks) + self.assertEqual(ttl_changes, correct_ttl_changes) - def write(channel, data): - yield from 
kcsrs.chan_sel.write(channel) - yield from kcsrs.timestamp.write(now) - yield from kcsrs.o_data.write(data) - yield from kcsrs.o_we.write(1) - yield - status = 1 - wlen = 0 - while status: - status = yield from kcsrs.o_status.read() - if status & 2: - raise RTIOUnderflow - if status & 4: - raise RTIOSequenceError - yield - wlen += 1 - return wlen + def test_underflow(self): + tb = OutputsTestbench() - def test_init(): - yield from get_fifo_space(0) - yield from get_fifo_space(1) - - def test_underflow(): + def test(): + yield from tb.init() with self.assertRaises(RTIOUnderflow): - yield from write(0, 0) + yield from tb.write(0, 0) - def test_pulses(): - delay(200*8) - yield from write(0, 1) - delay(5*8) - yield from write(0, 1) - yield from write(0, 0) # replace - yield from write(1, 1) - delay(6*8) - yield from write(1, 0) + run_simulation(tb.dut, {"sys": test()}, self.clocks) - def test_sequence_error(): - delay(-200*8) - with self.assertRaises(RTIOSequenceError): - yield from write(0, 1) - delay(200*8) + def test_large_data(self): + tb = OutputsTestbench() - def test_large_data(): + def test(): + yield from tb.init() correct_large_data = random.Random(0).randrange(2**512-1) - self.assertNotEqual((yield dut.phy2.received_data), correct_large_data) - delay(10*8) - yield from write(2, correct_large_data) - for i in range(45): - yield - self.assertEqual((yield dut.phy2.received_data), correct_large_data) + self.assertNotEqual((yield tb.dut.phy2.received_data), correct_large_data) + tb.delay(200) + yield from tb.write(2, correct_large_data) + yield from tb.sync() + self.assertEqual((yield tb.dut.phy2.received_data), correct_large_data) - def test_fifo_space(): - delay(200*8) + run_simulation(tb.dut, {"sys": test()}, self.clocks) + + def test_buffer_space(self): + tb = OutputsTestbench() + ttl_changes = [] + correct_ttl_changes = [(258 + 40*i, 0) for i in range(10)] + + def test(): + yield from tb.init() + tb.delay(250) max_wlen = 0 - for _ in range(3): - wlen = yield from write(0, 1) + for i in range(10): + wlen = yield from tb.write(0, (i + 1) % 2) max_wlen = max(max_wlen, wlen) - delay(40*8) - wlen = yield from write(0, 0) - max_wlen = max(max_wlen, wlen) - delay(40*8) - # check that some writes caused FIFO space requests + tb.delay(40) + # check that some writes caused buffer space requests self.assertGreater(max_wlen, 5) + yield from tb.sync() - def test_tsc_error(): + run_simulation(tb.dut, + {"sys": test(), "rtio": tb.check_ttls(ttl_changes)}, self.clocks) + self.assertEqual(ttl_changes, correct_ttl_changes) + + def test_write_underflow(self): + tb = OutputsTestbench() + + def test(): + saterr = tb.dut.satellite.rt_errors + csrs = tb.dut.master.rt_controller.csrs + yield from tb.init() errors = yield from saterr.protocol_error.read() self.assertEqual(errors, 0) - yield from csrs.tsc_correction.write(100000000) - yield from csrs.set_time.write(1) - for i in range(15): - yield - delay(10000*8) - yield from write(0, 1) + yield from csrs.underflow_margin.write(0) + tb.delay(100) + yield from tb.write(42, 1) for i in range(12): yield errors = yield from saterr.protocol_error.read() - self.assertEqual(errors, 4) # write underflow + underflow_channel = yield from saterr.underflow_channel.read() + underflow_timestamp_event = yield from saterr.underflow_timestamp_event.read() + self.assertEqual(errors, 8) # write underflow + self.assertEqual(underflow_channel, 42) + self.assertEqual(underflow_timestamp_event, 100) yield from saterr.protocol_error.write(errors) yield errors = yield from 
saterr.protocol_error.read() self.assertEqual(errors, 0) - def wait_ttl_events(): - while len(ttl_changes) < len(correct_ttl_changes): - yield - - def test(): - while not (yield from dut.master.link_layer.link_status.read()): - yield - - yield from test_init() - yield from test_underflow() - yield from test_pulses() - yield from test_sequence_error() - yield from test_fifo_space() - yield from test_large_data() - yield from test_tsc_error() - yield from wait_ttl_events() - - @passive - def check_ttls(): - cycle = 0 - old_ttls = [0, 0] - while True: - ttls = [(yield dut.ttl0), (yield dut.ttl1)] - for n, (old_ttl, ttl) in enumerate(zip(old_ttls, ttls)): - if ttl != old_ttl: - ttl_changes.append((cycle, n)) - old_ttls = ttls - yield - cycle += 1 - - run_simulation(dut, - {"sys": test(), "rtio": check_ttls()}, self.clocks) - self.assertEqual(ttl_changes, correct_ttl_changes) + run_simulation(tb.dut, {"sys": test()}, self.clocks) def test_inputs(self): dut = DUT(2) kcsrs = dut.master_ki def get_input(timeout): - yield from kcsrs.chan_sel.write(2) - yield from kcsrs.timestamp.write(10) - yield from kcsrs.i_request.write(1) + yield from kcsrs.target.write(2 << 8) + yield from kcsrs.i_timeout.write(10) yield status = yield from kcsrs.i_status.read() while status & 0x4: @@ -246,12 +260,13 @@ class TestFullStack(unittest.TestCase): return "timeout" if status & 0x2: return "overflow" + if status & 0x8: + return "destination unreachable" return ((yield from kcsrs.i_data.read()), (yield from kcsrs.i_timestamp.read())) def test(): - # wait for link layer ready - for i in range(5): + while not (yield from dut.master.link_layer.rx_up.read()): yield i1 = yield from get_input(10) @@ -269,30 +284,29 @@ class TestFullStack(unittest.TestCase): yield dut.phy2.rtlink.i.stb.eq(0) run_simulation(dut, - {"sys": test(), "rtio": generate_input()}, self.clocks, vcd_name="foo.vcd") + {"sys": test(), "rtio": generate_input()}, self.clocks) def test_echo(self): dut = DUT(2) - csrs = dut.master.rt_controller.csrs - mgr = dut.master.rt_manager + packet = dut.master.rt_packet def test(): - while not (yield from dut.master.link_layer.link_status.read()): + while not (yield from dut.master.link_layer.rx_up.read()): yield - yield from mgr.update_packet_cnt.write(1) - yield - self.assertEqual((yield from mgr.packet_cnt_tx.read()), 0) - self.assertEqual((yield from mgr.packet_cnt_rx.read()), 0) + self.assertEqual((yield dut.master.rt_packet.packet_cnt_tx), 0) + self.assertEqual((yield dut.master.rt_packet.packet_cnt_rx), 0) - yield from mgr.request_echo.write(1) + yield dut.master.rt_packet.echo_stb.eq(1) + yield + while not (yield dut.master.rt_packet.echo_ack): + yield + yield dut.master.rt_packet.echo_stb.eq(0) for i in range(15): yield - yield from mgr.update_packet_cnt.write(1) - yield - self.assertEqual((yield from mgr.packet_cnt_tx.read()), 1) - self.assertEqual((yield from mgr.packet_cnt_rx.read()), 1) + self.assertEqual((yield dut.master.rt_packet.packet_cnt_tx), 1) + self.assertEqual((yield dut.master.rt_packet.packet_cnt_rx), 1) run_simulation(dut, test(), self.clocks) diff --git a/artiq/gateware/test/drtio/test_rt_packet.py b/artiq/gateware/test/drtio/test_rt_packet.py deleted file mode 100644 index 05d80aa4d..000000000 --- a/artiq/gateware/test/drtio/test_rt_packet.py +++ /dev/null @@ -1,213 +0,0 @@ -import unittest -from types import SimpleNamespace -import random - -from migen import * - -from artiq.gateware.drtio.rt_serializer import * -from artiq.gateware.drtio.rt_packet_satellite import RTPacketSatellite -from 
artiq.gateware.drtio.rt_packet_master import (_CrossDomainRequest, - _CrossDomainNotification) - - -class PacketInterface: - def __init__(self, direction, ws): - if direction == "m2s": - self.plm = get_m2s_layouts(ws) - elif direction == "s2m": - self.plm = get_s2m_layouts(ws) - else: - raise ValueError - self.frame = Signal() - self.data = Signal(ws) - - def send(self, ty, **kwargs): - idx = 8 - value = self.plm.types[ty] - for field_name, field_size in self.plm.layouts[ty][1:]: - try: - fvalue = kwargs[field_name] - del kwargs[field_name] - except KeyError: - fvalue = 0 - value = value | (fvalue << idx) - idx += field_size - if kwargs: - raise ValueError - - ws = len(self.data) - yield self.frame.eq(1) - for i in range(idx//ws): - yield self.data.eq(value) - value >>= ws - yield - yield self.frame.eq(0) - yield - - @passive - def receive(self, callback): - previous_frame = 0 - frame_words = [] - while True: - frame = yield self.frame - if frame: - frame_words.append((yield self.data)) - if previous_frame and not frame: - packet_type = self.plm.type_names[frame_words[0] & 0xff] - packet_nwords = layout_len(self.plm.layouts[packet_type]) \ - //len(self.data) - packet, trailer = frame_words[:packet_nwords], \ - frame_words[packet_nwords:] - - n = 0 - packet_int = 0 - for w in packet: - packet_int |= (w << n) - n += len(self.data) - - field_dict = dict() - idx = 0 - for field_name, field_size in self.plm.layouts[packet_type]: - v = (packet_int >> idx) & (2**field_size - 1) - field_dict[field_name] = v - idx += field_size - - callback(packet_type, field_dict, trailer) - - frame_words = [] - previous_frame = frame - yield - - -class TestSatellite(unittest.TestCase): - def create_dut(self, nwords): - pt = PacketInterface("m2s", nwords*8) - pr = PacketInterface("s2m", nwords*8) - dut = RTPacketSatellite(SimpleNamespace( - rx_rt_frame=pt.frame, rx_rt_data=pt.data, - tx_rt_frame=pr.frame, tx_rt_data=pr.data)) - return pt, pr, dut - - def test_echo(self): - for nwords in range(1, 8): - pt, pr, dut = self.create_dut(nwords) - completed = False - def send(): - yield from pt.send("echo_request") - while not completed: - yield - def receive(packet_type, field_dict, trailer): - nonlocal completed - self.assertEqual(packet_type, "echo_reply") - self.assertEqual(trailer, []) - completed = True - run_simulation(dut, [send(), pr.receive(receive)]) - - def test_set_time(self): - for nwords in range(1, 8): - pt, _, dut = self.create_dut(nwords) - tx_times = [0x12345678aabbccdd, 0x0102030405060708, - 0xaabbccddeeff1122] - def send(): - for t in tx_times: - yield from pt.send("set_time", timestamp=t) - # flush - for i in range(10): - yield - rx_times = [] - @passive - def receive(): - while True: - if (yield dut.tsc_load): - rx_times.append((yield dut.tsc_load_value)) - yield - run_simulation(dut, [send(), receive()]) - self.assertEqual(tx_times, rx_times) - - -class TestCDC(unittest.TestCase): - def test_cross_domain_request(self): - prng = random.Random(1) - for sys_freq in 3, 6, 11: - for srv_freq in 3, 6, 11: - req_stb = Signal() - req_ack = Signal() - req_data = Signal(8) - srv_stb = Signal() - srv_ack = Signal() - srv_data = Signal(8) - test_seq = [93, 92, 19, 39, 91, 30, 12, 91, 38, 42] - received_seq = [] - - def requester(): - for data in test_seq: - yield req_data.eq(data) - yield req_stb.eq(1) - yield - while not (yield req_ack): - yield - yield req_stb.eq(0) - for j in range(prng.randrange(0, 10)): - yield - - def server(): - for i in range(len(test_seq)): - while not (yield srv_stb): - yield - 
received_seq.append((yield srv_data)) - for j in range(prng.randrange(0, 10)): - yield - yield srv_ack.eq(1) - yield - yield srv_ack.eq(0) - yield - - dut = _CrossDomainRequest("srv", - req_stb, req_ack, req_data, - srv_stb, srv_ack, srv_data) - run_simulation(dut, - {"sys": requester(), "srv": server()}, - {"sys": sys_freq, "srv": srv_freq}) - self.assertEqual(test_seq, received_seq) - - def test_cross_domain_notification(self): - prng = random.Random(1) - - emi_stb = Signal() - emi_data = Signal(8) - rec_stb = Signal() - rec_ack = Signal() - rec_data = Signal(8) - - test_seq = [23, 12, 8, 3, 28] - received_seq = [] - - def emitter(): - for data in test_seq: - yield emi_stb.eq(1) - yield emi_data.eq(data) - yield - yield emi_stb.eq(0) - yield - for j in range(prng.randrange(0, 3)): - yield - - def receiver(): - for i in range(len(test_seq)): - while not (yield rec_stb): - yield - received_seq.append((yield rec_data)) - yield rec_ack.eq(1) - yield - yield rec_ack.eq(0) - yield - for j in range(prng.randrange(0, 3)): - yield - - dut = _CrossDomainNotification("emi", - emi_stb, emi_data, - rec_stb, rec_ack, rec_data) - run_simulation(dut, - {"emi": emitter(), "sys": receiver()}, - {"emi": 13, "sys": 3}) - self.assertEqual(test_seq, received_seq) diff --git a/artiq/gateware/test/drtio/test_rt_packet_repeater.py b/artiq/gateware/test/drtio/test_rt_packet_repeater.py new file mode 100644 index 000000000..9c73af625 --- /dev/null +++ b/artiq/gateware/test/drtio/test_rt_packet_repeater.py @@ -0,0 +1,192 @@ +import unittest +from types import SimpleNamespace + +from migen import * + +from artiq.gateware.rtio import cri +from artiq.gateware.test.drtio.packet_interface import PacketInterface +from artiq.gateware.drtio.rt_packet_repeater import RTPacketRepeater + + +def create_dut(nwords): + pt = PacketInterface("s2m", nwords*8) + pr = PacketInterface("m2s", nwords*8) + ts = Signal(64) + dut = ClockDomainsRenamer({"rtio": "sys", "rtio_rx": "sys"})( + RTPacketRepeater( + SimpleNamespace(coarse_ts=ts), + SimpleNamespace( + rx_rt_frame=pt.frame, rx_rt_data=pt.data, + tx_rt_frame=pr.frame, tx_rt_data=pr.data))) + return pt, pr, ts, dut + + +class TestRepeater(unittest.TestCase): + def test_set_time(self): + nwords = 2 + pt, pr, ts, dut = create_dut(nwords) + + def send(): + yield + yield ts.eq(0x12345678) + yield dut.set_time_stb.eq(1) + while not (yield dut.set_time_ack): + yield + yield dut.set_time_stb.eq(0) + yield + for _ in range(30): + yield + + received = False + def receive(packet_type, field_dict, trailer): + nonlocal received + self.assertEqual(packet_type, "set_time") + self.assertEqual(trailer, []) + self.assertEqual(field_dict["timestamp"], 0x12345678) + self.assertEqual(received, False) + received = True + + run_simulation(dut, [send(), pr.receive(receive)]) + self.assertEqual(received, True) + + def test_output(self): + test_writes = [ + (1, 10, 21, 0x42), + (2, 11, 34, 0x2342), + (3, 12, 83, 0x2345566633), + (4, 13, 25, 0x98da14959a19498ae1), + (5, 14, 75, 0x3998a1883ae14f828ae24958ea2479) + ] + + for nwords in range(1, 8): + pt, pr, ts, dut = create_dut(nwords) + + def send(): + yield + for channel, timestamp, address, data in test_writes: + yield dut.cri.chan_sel.eq(channel) + yield dut.cri.o_timestamp.eq(timestamp) + yield dut.cri.o_address.eq(address) + yield dut.cri.o_data.eq(data) + yield dut.cri.cmd.eq(cri.commands["write"]) + yield + yield dut.cri.cmd.eq(cri.commands["nop"]) + yield + for i in range(30): + yield + for i in range(50): + yield + + short_data_len = 
pr.plm.field_length("write", "short_data") + + received = [] + def receive(packet_type, field_dict, trailer): + self.assertEqual(packet_type, "write") + self.assertEqual(len(trailer), field_dict["extra_data_cnt"]) + data = field_dict["short_data"] + for n, te in enumerate(trailer): + data |= te << (n*nwords*8 + short_data_len) + received.append((field_dict["chan_sel"], field_dict["timestamp"], + field_dict["address"], data)) + + run_simulation(dut, [send(), pr.receive(receive)]) + self.assertEqual(test_writes, received) + + def test_buffer_space(self): + for nwords in range(1, 8): + pt, pr, ts, dut = create_dut(nwords) + + def send_requests(): + for i in range(10): + yield + yield dut.cri.chan_sel.eq(i << 16) + yield dut.cri.cmd.eq(cri.commands["get_buffer_space"]) + yield + yield dut.cri.cmd.eq(cri.commands["nop"]) + yield + while not (yield dut.cri.o_buffer_space_valid): + yield + buffer_space = yield dut.cri.o_buffer_space + self.assertEqual(buffer_space, 2*i) + + current_request = None + + @passive + def send_replies(): + nonlocal current_request + while True: + while current_request is None: + yield + yield from pt.send("buffer_space_reply", space=2*current_request) + current_request = None + + def receive(packet_type, field_dict, trailer): + nonlocal current_request + self.assertEqual(packet_type, "buffer_space_request") + self.assertEqual(trailer, []) + self.assertEqual(current_request, None) + current_request = field_dict["destination"] + + run_simulation(dut, [send_requests(), send_replies(), pr.receive(receive)]) + + def test_input(self): + for nwords in range(1, 8): + pt, pr, ts, dut = create_dut(nwords) + + def read(chan_sel, timeout): + yield dut.cri.chan_sel.eq(chan_sel) + yield dut.cri.i_timeout.eq(timeout) + yield dut.cri.cmd.eq(cri.commands["read"]) + yield + yield dut.cri.cmd.eq(cri.commands["nop"]) + yield + status = yield dut.cri.i_status + while status & 4: + yield + status = yield dut.cri.i_status + if status & 0x1: + return "timeout" + if status & 0x2: + return "overflow" + if status & 0x8: + return "destination unreachable" + return ((yield dut.cri.i_data), + (yield dut.cri.i_timestamp)) + + def send_requests(): + for timeout in range(20, 200000, 100000): + for chan_sel in range(3): + data, timestamp = yield from read(chan_sel, timeout) + self.assertEqual(data, chan_sel*2) + self.assertEqual(timestamp, timeout//2) + + i2 = yield from read(10, 400000) + self.assertEqual(i2, "timeout") + i3 = yield from read(11, 400000) + self.assertEqual(i3, "overflow") + + current_request = None + + @passive + def send_replies(): + nonlocal current_request + while True: + while current_request is None: + yield + chan_sel, timeout = current_request + if chan_sel == 10: + yield from pt.send("read_reply_noevent", overflow=0) + elif chan_sel == 11: + yield from pt.send("read_reply_noevent", overflow=1) + else: + yield from pt.send("read_reply", data=chan_sel*2, timestamp=timeout//2) + current_request = None + + def receive(packet_type, field_dict, trailer): + nonlocal current_request + self.assertEqual(packet_type, "read_request") + self.assertEqual(trailer, []) + self.assertEqual(current_request, None) + current_request = (field_dict["chan_sel"], field_dict["timeout"]) + + run_simulation(dut, [send_requests(), send_replies(), pr.receive(receive)]) diff --git a/artiq/gateware/test/drtio/test_rt_packet_satellite.py b/artiq/gateware/test/drtio/test_rt_packet_satellite.py new file mode 100644 index 000000000..1d934b8df --- /dev/null +++ 
b/artiq/gateware/test/drtio/test_rt_packet_satellite.py @@ -0,0 +1,54 @@ +import unittest +from types import SimpleNamespace + +from migen import * + +from artiq.gateware.test.drtio.packet_interface import PacketInterface +from artiq.gateware.drtio.rt_packet_satellite import RTPacketSatellite + + +def create_dut(nwords): + pt = PacketInterface("m2s", nwords*8) + pr = PacketInterface("s2m", nwords*8) + dut = RTPacketSatellite(SimpleNamespace( + rx_rt_frame=pt.frame, rx_rt_data=pt.data, + tx_rt_frame=pr.frame, tx_rt_data=pr.data)) + return pt, pr, dut + + +class TestSatellite(unittest.TestCase): + def test_echo(self): + for nwords in range(1, 8): + pt, pr, dut = create_dut(nwords) + completed = False + def send(): + yield from pt.send("echo_request") + while not completed: + yield + def receive(packet_type, field_dict, trailer): + nonlocal completed + self.assertEqual(packet_type, "echo_reply") + self.assertEqual(trailer, []) + completed = True + run_simulation(dut, [send(), pr.receive(receive)]) + + def test_set_time(self): + for nwords in range(1, 8): + pt, _, dut = create_dut(nwords) + tx_times = [0x12345678aabbccdd, 0x0102030405060708, + 0xaabbccddeeff1122] + def send(): + for t in tx_times: + yield from pt.send("set_time", timestamp=t) + # flush + for i in range(10): + yield + rx_times = [] + @passive + def receive(): + while True: + if (yield dut.tsc_load): + rx_times.append((yield dut.tsc_load_value)) + yield + run_simulation(dut, [send(), receive()]) + self.assertEqual(tx_times, rx_times) diff --git a/artiq/gateware/test/drtio/test_switching.py b/artiq/gateware/test/drtio/test_switching.py new file mode 100644 index 000000000..dddfb9bd2 --- /dev/null +++ b/artiq/gateware/test/drtio/test_switching.py @@ -0,0 +1,247 @@ +import unittest +from types import SimpleNamespace +import random + +from migen import * + +from artiq.gateware.drtio import * +from artiq.gateware.drtio import rt_serializer, rt_packet_repeater +from artiq.gateware import rtio +from artiq.gateware.rtio import cri +from artiq.coredevice.exceptions import * +from artiq.gateware.test.drtio.packet_interface import PacketInterface + + +class DummyTransceiverPair: + def __init__(self, nwords): + a2b_k = [Signal() for _ in range(nwords)] + a2b_d = [Signal(8) for _ in range(nwords)] + b2a_k = [Signal() for _ in range(nwords)] + b2a_d = [Signal(8) for _ in range(nwords)] + + self.alice = SimpleNamespace( + encoder=SimpleNamespace(k=a2b_k, d=a2b_d), + decoders=[SimpleNamespace(k=k, d=d) for k, d in zip(b2a_k, b2a_d)], + rx_ready=1 + ) + self.bob = SimpleNamespace( + encoder=SimpleNamespace(k=b2a_k, d=b2a_d), + decoders=[SimpleNamespace(k=k, d=d) for k, d in zip(a2b_k, a2b_d)], + rx_ready=1 + ) + + +class DummyRXSynchronizer: + def resync(self, signal): + return signal + + +class DUT(Module): + def __init__(self, nwords): + self.transceivers = DummyTransceiverPair(nwords) + + self.submodules.tsc_master = rtio.TSC("async") + self.submodules.master = DRTIOMaster(self.tsc_master, + self.transceivers.alice) + + rx_synchronizer = DummyRXSynchronizer() + self.submodules.tsc_satellite = rtio.TSC("sync") + self.submodules.satellite = DRTIOSatellite( + self.tsc_satellite, self.transceivers.bob, rx_synchronizer) + self.satellite.reset.storage.reset = 0 + self.satellite.reset.storage_full.reset = 0 + self.satellite.reset_phy.storage.reset = 0 + self.satellite.reset_phy.storage_full.reset = 0 + + self.pt = PacketInterface("s2m", nwords*8) + self.pr = PacketInterface("m2s", nwords*8) + rep_if = SimpleNamespace( + rx_rt_frame=self.pt.frame, 
rx_rt_data=self.pt.data, + tx_rt_frame=self.pr.frame, tx_rt_data=self.pr.data) + self.submodules.repeater = rt_packet_repeater.RTPacketRepeater( + self.tsc_satellite, rep_if) + self.comb += self.satellite.cri.connect(self.repeater.cri) + + +class Testbench: + def __init__(self): + self.dut = DUT(2) + self.now = 0 + + def init(self, with_buffer_space=True): + yield from self.dut.master.rt_controller.csrs.underflow_margin.write(100) + while not (yield from self.dut.master.link_layer.rx_up.read()): + yield + if with_buffer_space: + yield from self.get_buffer_space() + + def get_buffer_space(self): + csrs = self.dut.master.rt_controller.csrs + yield from csrs.o_get_buffer_space.write(1) + yield + while (yield from csrs.o_wait.read()): + yield + r = (yield from csrs.o_dbg_buffer_space.read()) + return r + + def delay(self, dt): + self.now += dt + + def write(self, channel, data): + mcri = self.dut.master.cri + yield mcri.chan_sel.eq(channel) + yield mcri.o_timestamp.eq(self.now) + yield mcri.o_data.eq(data) + yield + yield mcri.cmd.eq(cri.commands["write"]) + yield + yield mcri.cmd.eq(cri.commands["nop"]) + yield + status = yield mcri.o_status + while status & 0x1: + yield + status = yield mcri.o_status + if status & 0x2: + return "underflow" + if status & 0x4: + return "destination unreachable" + + def read(self, channel, timeout): + mcri = self.dut.master.cri + yield mcri.chan_sel.eq(channel) + yield mcri.i_timeout.eq(timeout) + yield + yield mcri.cmd.eq(cri.commands["read"]) + yield + yield mcri.cmd.eq(cri.commands["nop"]) + yield + status = yield mcri.i_status + while status & 0x4: + yield + status = yield mcri.i_status + if status & 0x1: + return "timeout" + if status & 0x2: + return "overflow" + if status & 0x8: + return "destination unreachable" + return ((yield mcri.i_timestamp), + (yield mcri.i_data)) + + +class TestSwitching(unittest.TestCase): + clocks = {"sys": 8, "rtio": 5, "rtio_rx": 5, + "rio": 5, "rio_phy": 5} + + def test_outputs(self): + tb = Testbench() + + def test(): + yield from tb.init() + tb.delay(200) + yield from tb.write(1, 20) + for _ in range(40): + yield + + current_request = None + + def get_request(): + nonlocal current_request + while current_request is None: + yield + r = current_request + current_request = None + return r + + def expect_buffer_space_request(destination, space): + packet_type, field_dict, trailer = yield from get_request() + self.assertEqual(packet_type, "buffer_space_request") + self.assertEqual(trailer, []) + self.assertEqual(field_dict["destination"], destination) + yield from tb.dut.pt.send("buffer_space_reply", space=space) + + def expect_write(timestamp, channel, data): + packet_type, field_dict, trailer = yield from get_request() + self.assertEqual(packet_type, "write") + self.assertEqual(trailer, []) + self.assertEqual(field_dict["timestamp"], timestamp) + self.assertEqual(field_dict["chan_sel"], channel) + self.assertEqual(field_dict["short_data"], data) + + @passive + def send_replies(): + yield from expect_buffer_space_request(0, 1) + yield from expect_write(200, 1, 20) + yield from expect_buffer_space_request(0, 1) + + unexpected = yield from get_request() + self.fail("unexpected packet: {}".format(unexpected)) + + def receive(packet_type, field_dict, trailer): + nonlocal current_request + self.assertEqual(current_request, None) + current_request = (packet_type, field_dict, trailer) + + run_simulation(tb.dut, + {"sys": test(), "rtio": tb.dut.pr.receive(receive), "rtio_rx": send_replies()}, self.clocks) + + + def test_inputs(self): + 
tb = Testbench() + + def test(): + yield from tb.init(with_buffer_space=False) + reply = yield from tb.read(19, 145) + self.assertEqual(reply, (333, 23)) + reply = yield from tb.read(20, 146) + self.assertEqual(reply, (334, 24)) + reply = yield from tb.read(10, 34) + self.assertEqual(reply, "timeout") + reply = yield from tb.read(1, 20) + self.assertEqual(reply, "overflow") + reply = yield from tb.read(21, 147) + self.assertEqual(reply, (335, 25)) + for _ in range(40): + yield + + current_request = None + + def get_request(): + nonlocal current_request + while current_request is None: + yield + r = current_request + current_request = None + return r + + def expect_read(chan_sel, timeout, reply): + packet_type, field_dict, trailer = yield from get_request() + self.assertEqual(packet_type, "read_request") + self.assertEqual(trailer, []) + self.assertEqual(field_dict["chan_sel"], chan_sel) + self.assertEqual(field_dict["timeout"], timeout) + if reply == "timeout": + yield from tb.dut.pt.send("read_reply_noevent", overflow=0) + elif reply == "overflow": + yield from tb.dut.pt.send("read_reply_noevent", overflow=1) + else: + timestamp, data = reply + yield from tb.dut.pt.send("read_reply", timestamp=timestamp, data=data) + + @passive + def send_replies(): + yield from expect_read(19, 145, (333, 23)) + yield from expect_read(20, 146, (334, 24)) + yield from expect_read(10, 34, "timeout") + yield from expect_read(1, 20, "overflow") + yield from expect_read(21, 147, (335, 25)) + unexpected = yield from get_request() + self.fail("unexpected packet: {}".format(unexpected)) + + def receive(packet_type, field_dict, trailer): + nonlocal current_request + self.assertEqual(current_request, None) + current_request = (packet_type, field_dict, trailer) + + run_simulation(tb.dut, + {"sys": test(), "rtio": tb.dut.pr.receive(receive), "rtio_rx": send_replies()}, self.clocks) diff --git a/artiq/gateware/test/dsp/test_sawg.py b/artiq/gateware/test/dsp/test_sawg.py index a5eea31bd..fd392607d 100644 --- a/artiq/gateware/test/dsp/test_sawg.py +++ b/artiq/gateware/test/dsp/test_sawg.py @@ -4,7 +4,7 @@ from migen import * from migen.fhdl.verilog import convert from artiq.gateware.dsp import sawg -from .tools import xfer +from artiq.gateware.test.dsp.tools import xfer def _test_gen_dds(dut, o): diff --git a/artiq/gateware/test/dsp/test_sawg_fe.py b/artiq/gateware/test/dsp/test_sawg_fe.py index a6595e03c..1f7cdc08c 100644 --- a/artiq/gateware/test/dsp/test_sawg_fe.py +++ b/artiq/gateware/test/dsp/test_sawg_fe.py @@ -14,8 +14,10 @@ class RTIOManager: def __init__(self): self.outputs = [] - def rtio_output(self, now, channel, addr, data): - self.outputs.append((now, channel, addr, data)) + def rtio_output(self, target, data): + channel = target >> 8 + addr = target & 0xff + self.outputs.append((now_mu(), channel, addr, data)) def rtio_output_wide(self, *args, **kwargs): self.rtio_output(*args, **kwargs) @@ -42,7 +44,7 @@ class SAWGTest(unittest.TestCase): self.rtio_manager.patch(spline) self.rtio_manager.patch(sawg) self.core = sim_devices.Core({}) - self.core.coarse_ref_period = 6.66666 + self.core.coarse_ref_period = 20/3 self.core.ref_multiplier = 1 self.t = self.core.coarse_ref_period self.channel = mg.ClockDomainsRenamer({"rio_phy": "sys"})( diff --git a/artiq/gateware/test/rtio/test_dma.py b/artiq/gateware/test/rtio/test_dma.py index 759fe60b0..84bc4a3ff 100644 --- a/artiq/gateware/test/rtio/test_dma.py +++ b/artiq/gateware/test/rtio/test_dma.py @@ -1,10 +1,14 @@ import unittest import random +import itertools from 
migen import * from misoc.interconnect import wishbone +from artiq.coredevice.exceptions import RTIOUnderflow, RTIODestinationUnreachable +from artiq.gateware import rtio from artiq.gateware.rtio import dma, cri +from artiq.gateware.rtio.phy import ttl_simple def encode_n(n, min_length, max_length): @@ -22,7 +26,7 @@ def encode_record(channel, timestamp, address, data): r = [] r += encode_n(channel, 3, 3) r += encode_n(timestamp, 8, 8) - r += encode_n(address, 2, 2) + r += encode_n(address, 1, 1) r += encode_n(data, 1, 64) return encode_n(len(r)+1, 1, 1) + r @@ -47,9 +51,22 @@ def encode_sequence(writes, ws): return pack(sequence, ws) +def do_dma(dut, address): + yield from dut.dma.base_address.write(address) + yield from dut.enable.write(1) + yield + while ((yield from dut.enable.read())): + yield + error = yield from dut.cri_master.error.read() + if error & 1: + raise RTIOUnderflow + if error & 2: + raise RTIODestinationUnreachable + + test_writes1 = [ (0x01, 0x23, 0x12, 0x33), - (0x901, 0x902, 0x911, 0xeeeeeeeeeeeeeefffffffffffffffffffffffffffffff28888177772736646717738388488), + (0x901, 0x902, 0x11, 0xeeeeeeeeeeeeeefffffffffffffffffffffffffffffff28888177772736646717738388488), (0x81, 0x288, 0x88, 0x8888) ] @@ -83,21 +100,45 @@ class TB(Module): self.submodules.dut = dma.DMA(bus) +test_writes_full_stack = [ + (0, 32, 0, 1), + (1, 40, 0, 1), + (0, 48, 0, 0), + (1, 50, 0, 0), +] + + +class FullStackTB(Module): + def __init__(self, ws): + self.ttl0 = Signal() + self.ttl1 = Signal() + + self.submodules.phy0 = ttl_simple.Output(self.ttl0) + self.submodules.phy1 = ttl_simple.Output(self.ttl1) + + rtio_channels = [ + rtio.Channel.from_phy(self.phy0), + rtio.Channel.from_phy(self.phy1) + ] + + sequence = encode_sequence(test_writes_full_stack, ws) + + bus = wishbone.Interface(ws*8) + self.submodules.memory = wishbone.SRAM( + 256, init=sequence, bus=bus) + self.submodules.dut = dma.DMA(bus) + self.submodules.tsc = rtio.TSC("async") + self.submodules.rtio = rtio.Core(self.tsc, rtio_channels) + self.comb += self.dut.cri.connect(self.rtio.cri) + + class TestDMA(unittest.TestCase): def test_dma_noerror(self): - ws = 64 - tb = TB(ws) - - def do_dma(address): - yield from tb.dut.dma.base_address.write(address) - yield from tb.dut.enable.write(1) - yield - while ((yield from tb.dut.enable.read())): - yield + tb = TB(64) def do_writes(): - yield from do_dma(0) - yield from do_dma(512) + yield from do_dma(tb.dut, 0) + yield from do_dma(tb.dut, 512) received = [] @passive @@ -109,7 +150,7 @@ class TestDMA(unittest.TestCase): pass elif cmd == cri.commands["write"]: channel = yield dut_cri.chan_sel - timestamp = yield dut_cri.timestamp + timestamp = yield dut_cri.o_timestamp address = yield dut_cri.o_address data = yield dut_cri.o_data received.append((channel, timestamp, address, data)) @@ -124,3 +165,30 @@ class TestDMA(unittest.TestCase): run_simulation(tb, [do_writes(), rtio_sim()]) self.assertEqual(received, test_writes1 + test_writes2) + + def test_full_stack(self): + tb = FullStackTB(64) + + ttl_changes = [] + @passive + def monitor(): + old_ttl_states = [0, 0] + for time in itertools.count(): + ttl_states = [ + (yield tb.ttl0), + (yield tb.ttl1) + ] + for i, (old, new) in enumerate(zip(old_ttl_states, ttl_states)): + if new != old: + ttl_changes.append((time, i)) + old_ttl_states = ttl_states + yield + + run_simulation(tb, {"sys": [ + do_dma(tb.dut, 0), monitor(), + (None for _ in range(70)), + ]}, {"sys": 8, "rsys": 8, "rtio": 8, "rio": 8, "rio_phy": 8}) + + correct_changes = [(timestamp + 11, 
channel) + for channel, timestamp, _, _ in test_writes_full_stack] + self.assertEqual(ttl_changes, correct_changes) diff --git a/artiq/gateware/test/rtio/test_edge_counter.py b/artiq/gateware/test/rtio/test_edge_counter.py new file mode 100644 index 000000000..89a388f2b --- /dev/null +++ b/artiq/gateware/test/rtio/test_edge_counter.py @@ -0,0 +1,134 @@ +import unittest + +from migen import * +from artiq.gateware.rtio.phy.edge_counter import * + +CONFIG_COUNT_RISING = 0b0001 +CONFIG_COUNT_FALLING = 0b0010 +CONFIG_SEND_COUNT_EVENT = 0b0100 +CONFIG_RESET_TO_ZERO = 0b1000 + + +class TimeoutError(Exception): + pass + + +class Testbench: + def __init__(self, counter_width=32): + self.input = Signal() + self.dut = SimpleEdgeCounter(self.input, counter_width=counter_width) + + self.fragment = self.dut.get_fragment() + cd = ClockDomain("rio") + self.fragment.clock_domains.append(cd) + self.rio_rst = cd.rst + + def write_config(self, config): + bus = self.dut.rtlink.o + yield bus.data.eq(config) + yield bus.stb.eq(1) + yield + yield bus.stb.eq(0) + yield + + def read_event(self, timeout): + bus = self.dut.rtlink.i + for _ in range(timeout): + if (yield bus.stb): + break + yield + else: + raise TimeoutError + return (yield bus.data) + + def fetch_count(self, zero=False): + c = CONFIG_SEND_COUNT_EVENT + if zero: + c |= CONFIG_RESET_TO_ZERO + yield from self.write_config(c) + return (yield from self.read_event(1)) + + def toggle_input(self): + yield self.input.eq(1) + yield + yield self.input.eq(0) + yield + + def reset_rio(self): + yield self.rio_rst.eq(1) + yield + yield self.rio_rst.eq(0) + yield + + def run(self, gen): + run_simulation(self.fragment, gen, + clocks={n: 5 for n in ["sys", "rio", "rio_phy"]}) + + +class TestEdgeCounter(unittest.TestCase): + def test_init(self): + tb = Testbench() + + def gen(): + # No counts initially... + self.assertEqual((yield from tb.fetch_count()), 0) + + # ...nor any sensitivity. + yield from tb.toggle_input() + self.assertEqual((yield from tb.fetch_count()), 0) + + tb.run(gen()) + + def test_sensitivity(self): + tb = Testbench() + + def gen(sensitivity_config, expected_rising, expected_falling): + yield from tb.write_config(sensitivity_config) + yield tb.input.eq(1) + yield + self.assertEqual((yield from tb.fetch_count(zero=True)), + expected_rising) + + yield from tb.write_config(sensitivity_config) + yield tb.input.eq(0) + yield + self.assertEqual((yield from tb.fetch_count()), expected_falling) + + yield + with self.assertRaises(TimeoutError): + # Make sure there are no more suprious events. + yield from tb.read_event(10) + + tb.run(gen(CONFIG_COUNT_RISING, 1, 0)) + tb.run(gen(CONFIG_COUNT_FALLING, 0, 1)) + tb.run(gen(CONFIG_COUNT_RISING | CONFIG_COUNT_FALLING, 1, 1)) + + def test_reset(self): + tb = Testbench() + + def gen(): + # Generate one count. + yield from tb.write_config(CONFIG_COUNT_RISING) + yield from tb.toggle_input() + self.assertEqual((yield from tb.fetch_count()), 1) + + # Make sure it is gone after an RTIO reset, and the counter isn't + # sensitive anymore. 
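For reference, the semantics that `test_edge_counter.py` checks can be summarized by a small software model: the config word selects rising/falling sensitivity, CONFIG_SEND_COUNT_EVENT emits the current count (before an optional CONFIG_RESET_TO_ZERO takes effect), and the counter saturates at 2**counter_width - 1 instead of wrapping. A behavioral sketch, not the gateware implementation:

```python
# Software model (not the gateware) of the counter semantics exercised above.

CONFIG_COUNT_RISING = 0b0001
CONFIG_COUNT_FALLING = 0b0010
CONFIG_SEND_COUNT_EVENT = 0b0100
CONFIG_RESET_TO_ZERO = 0b1000


class EdgeCounterModel:
    def __init__(self, width=32):
        self.max = (1 << width) - 1
        self.count = 0
        self.config = 0
        self.last_input = 0

    def write_config(self, config):
        events = []
        if config & CONFIG_SEND_COUNT_EVENT:
            events.append(self.count)      # read out before zeroing
        if config & CONFIG_RESET_TO_ZERO:
            self.count = 0
        self.config = config
        return events

    def step(self, level):
        rising = level and not self.last_input
        falling = self.last_input and not level
        if ((rising and self.config & CONFIG_COUNT_RISING) or
                (falling and self.config & CONFIG_COUNT_FALLING)):
            self.count = min(self.count + 1, self.max)   # saturate, don't wrap
        self.last_input = level


if __name__ == "__main__":
    m = EdgeCounterModel(width=3)
    m.write_config(CONFIG_COUNT_RISING)
    for _ in range(10):          # more rising edges than a 3-bit counter can hold
        m.step(1)
        m.step(0)
    print(m.write_config(CONFIG_SEND_COUNT_EVENT))  # [7], i.e. 2**3 - 1
```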
+ yield from tb.write_config(CONFIG_COUNT_RISING) + yield from tb.reset_rio() + yield from tb.toggle_input() + self.assertEqual((yield from tb.fetch_count()), 0) + + tb.run(gen()) + + def test_saturation(self): + for width in range(3, 5): + tb = Testbench(counter_width=width) + + def gen(): + yield from tb.write_config(CONFIG_COUNT_RISING) + for _ in range(2**width + 1): + yield from tb.toggle_input() + self.assertEqual((yield from tb.fetch_count()), 2**width - 1) + + tb.run(gen()) diff --git a/artiq/gateware/test/rtio/test_fastlink.py b/artiq/gateware/test/rtio/test_fastlink.py new file mode 100644 index 000000000..df8840951 --- /dev/null +++ b/artiq/gateware/test/rtio/test_fastlink.py @@ -0,0 +1,84 @@ +import unittest + +from migen import * +from artiq.gateware.rtio.phy.fastlink import * + + + +class TestPhaser(unittest.TestCase): + def setUp(self): + self.dut = SerDes(n_data=8, t_clk=8, d_clk=0b00001111, + n_frame=10, n_crc=6, poly=0x2f) + + def test_init(self): + pass + + def record_frame(self, frame): + clk = 0 + marker = 0 + stb = 0 + while True: + if stb == 2: + frame.append((yield self.dut.data)) + clk = (clk << 2) & 0xff + clk |= (yield self.dut.data[0]) + if clk == 0x0f: + if marker == 0x01: + stb += 1 + if stb >= 3: + break + # 10/2 + 1 marker bits + marker = (marker << 1) & 0x3f + marker |= (yield self.dut.data[1]) & 1 + yield + + def test_frame(self): + frame = [] + self.dut.comb += self.dut.payload.eq((1 << len(self.dut.payload)) - 1) + run_simulation(self.dut, self.record_frame(frame), + clocks={n: 2 for n in ["sys", "rio", "rio_phy"]}) + self.assertEqual(len(frame), 8*10//2) + self.assertEqual([d[0] for d in frame], [0, 0, 3, 3] * 10) + self.assertEqual([d[1] & 1 for d in frame[4*4 - 1:10*4 - 1:4]], + [0, 0, 0, 0, 0, 1]) + + +class TestFastino(unittest.TestCase): + def setUp(self): + self.dut = SerDes( + n_data=8, t_clk=7, d_clk=0b1100011, + n_frame=14, n_crc=12, poly=0x80f) + + def test_init(self): + pass + + def record_frame(self, frame): + clk = 0 + marker = 0 + stb = 0 + while True: + if stb == 2: + frame.append((yield self.dut.data)) + clk = (clk << 2) & 0xff + clk |= (yield self.dut.data[0]) + if clk in (0b11100011, 0b11000111): + if marker == 0x01: + stb += 1 + if stb >= 3: + break + # 14/2 + 1 marker bits + marker = (marker << 1) & 0xff + if clk & 0b100: + marker |= (yield self.dut.data[1]) >> 1 + else: + marker |= (yield self.dut.data[1]) & 1 + yield + + def test_frame(self): + frame = [] + self.dut.comb += self.dut.payload.eq((1 << len(self.dut.payload)) - 1) + run_simulation(self.dut, self.record_frame(frame), + clocks={n: 2 for n in ["sys", "rio", "rio_phy"]}) + self.assertEqual(len(frame), 7*14//2) + self.assertEqual([d[0] for d in frame], [3, 0, 1, 3, 2, 0, 3] * 7) + self.assertEqual(frame[-1], [3, 3, 1, 1, 1, 2, 1, 0]) # crc12 diff --git a/artiq/gateware/test/rtio/test_input_collector.py b/artiq/gateware/test/rtio/test_input_collector.py new file mode 100644 index 000000000..bf68a17e4 --- /dev/null +++ b/artiq/gateware/test/rtio/test_input_collector.py @@ -0,0 +1,89 @@ +import unittest + +from migen import * + +from artiq.gateware import rtio +from artiq.gateware.rtio import rtlink +from artiq.gateware.rtio import cri +from artiq.gateware.rtio.input_collector import * + + +class OscInput(Module): + def __init__(self): + self.rtlink = rtlink.Interface( + rtlink.OInterface(1), + rtlink.IInterface(1)) + self.overrides = [] + self.probes = [] + + # # # + + counter = Signal(2) + trigger = Signal() + self.sync += [ + Cat(counter, trigger).eq(counter + 1), + 
self.rtlink.i.stb.eq(0), + If(trigger, + self.rtlink.i.stb.eq(1), + self.rtlink.i.data.eq(~self.rtlink.i.data) + ) + ] + + +class DUT(Module): + def __init__(self): + self.submodules.phy0 = OscInput() + self.submodules.phy1 = OscInput() + rtio_channels = [ + rtio.Channel.from_phy(self.phy0, ififo_depth=4), + rtio.Channel.from_phy(self.phy1, ififo_depth=4) + ] + self.submodules.tsc = ClockDomainsRenamer({"rtio": "sys"})(rtio.TSC("sync")) + self.submodules.input_collector = InputCollector(self.tsc, rtio_channels, "sync") + + @property + def cri(self): + return self.input_collector.cri + + +def simulate(wait_cycles, ts_timeouts): + result = [] + dut = DUT() + def gen(): + for _ in range(wait_cycles): + yield + + for ts_timeout in ts_timeouts: + yield dut.cri.i_timeout.eq(ts_timeout) + yield dut.cri.cmd.eq(cri.commands["read"]) + yield + yield dut.cri.cmd.eq(cri.commands["nop"]) + yield + while (yield dut.cri.i_status) & 4: + yield + status = yield dut.cri.i_status + if status & 2: + result.append("overflow") + elif status & 1: + result.append("timeout") + else: + i_timestamp = yield dut.cri.i_timestamp + i_data = yield dut.cri.i_data + result.append((i_timestamp, i_data)) + + run_simulation(dut, gen()) + return result + + +class TestInput(unittest.TestCase): + def test_get_data(self): + result = simulate(0, [256]*8) + self.assertEqual(result, [(n*4+1, n % 2) for n in range(1, 9)]) + + def test_timeout(self): + result = simulate(0, [3, 16]) + self.assertEqual(result, ["timeout", (5, 1)]) + + def test_overflow(self): + result = simulate(32, [256]) + self.assertEqual(result, ["overflow"]) diff --git a/artiq/gateware/test/rtio/test_sed_lane_distributor.py b/artiq/gateware/test/rtio/test_sed_lane_distributor.py new file mode 100644 index 000000000..6c9135c86 --- /dev/null +++ b/artiq/gateware/test/rtio/test_sed_lane_distributor.py @@ -0,0 +1,163 @@ +import unittest + +from migen import * + +from artiq.gateware.rtio import cri +from artiq.gateware.rtio.sed import lane_distributor + + +LANE_COUNT = 8 + + +def simulate(input_events, compensation=None, wait=True): + layout = [("channel", 8), ("timestamp", 32)] + if compensation is None: + compensation = [0]*256 + dut = lane_distributor.LaneDistributor(LANE_COUNT, 8, layout, compensation, 3) + + output = [] + access_results = [] + + def gen(): + for channel, timestamp in input_events: + yield dut.cri.chan_sel.eq(channel) + yield dut.cri.o_timestamp.eq(timestamp) + yield + + yield dut.cri.cmd.eq(cri.commands["write"]) + yield + yield dut.cri.cmd.eq(cri.commands["nop"]) + + access_time = 0 + yield + while (yield dut.cri.o_status) & 0x01: + yield + access_time += 1 + + status = (yield dut.cri.o_status) + access_status = "ok" + if status & 0x02: + access_status = "underflow" + if (yield dut.sequence_error): + access_status = "sequence_error" + + access_results.append((access_status, access_time)) + + @passive + def monitor_lane(n, lio, wait_time): + yield lio.writable.eq(1) + while True: + while not (yield lio.we): + yield + seqn = (yield lio.seqn) + channel = (yield lio.payload.channel) + timestamp = (yield lio.payload.timestamp) + output.append((n, seqn, channel, timestamp)) + + yield lio.writable.eq(0) + for i in range(wait_time): + yield + yield lio.writable.eq(1) + yield + + generators = [gen()] + for n, lio in enumerate(dut.output): + lio.writable.reset = 1 + wait_time = 0 + if wait: + if n == 6: + wait_time = 1 + elif n == 7: + wait_time = 4 + generators.append(monitor_lane(n, lio, wait_time)) + run_simulation(dut, generators) + + return output, 
access_results + + +class TestLaneDistributor(unittest.TestCase): + def test_regular(self): + # N sequential events, all on lane 0 + N = 16 + output, access_results = simulate([(42+n, (n+1)*8) for n in range(N)], wait=False) + self.assertEqual(output, [(0, n, 42+n, (n+1)*8) for n in range(N)]) + self.assertEqual(access_results, [("ok", 0)]*N) + + def test_wait_time(self): + # LANE_COUNT simultaneous events should be distributed and written to + # the lanes when the latter are writable + output, access_results = simulate([(42+n, 8) for n in range(LANE_COUNT)]) + self.assertEqual(output, [(n, n, 42+n, 8) for n in range(LANE_COUNT)]) + expected_access_results = [("ok", 0)]*LANE_COUNT + expected_access_results[6] = ("ok", 1) + expected_access_results[7] = ("ok", 4) + self.assertEqual(access_results, expected_access_results) + + def test_lane_switch(self): + # N events separated by one fine timestamp distributed onto lanes + # LANE_COUNT == 1 << fine_ts_width + N = 32 + output, access_results = simulate([(42+n, n+8) for n in range(N)], wait=False) + self.assertEqual(output, [((n-n//8) % LANE_COUNT, n, 42+n, n+8) for n in range(N)]) + self.assertEqual([ar[0] for ar in access_results], ["ok"]*N) + + def test_sequence_error(self): + # LANE_COUNT + 1 simultaneous events, the last one being discarded due + # to sequence error, followed by a valid event + input_events = [(42+n, 8) for n in range(LANE_COUNT+1)] + input_events.append((42+LANE_COUNT+1, 16)) + output, access_results = simulate(input_events) + self.assertEqual(len(output), len(input_events)-1) # event with sequence error must get discarded + self.assertEqual(output[-1], (0, LANE_COUNT, 42+LANE_COUNT+1, 16)) + self.assertEqual([ar[0] for ar in access_results[:LANE_COUNT]], ["ok"]*LANE_COUNT) + self.assertEqual(access_results[LANE_COUNT][0], "sequence_error") + self.assertEqual(access_results[LANE_COUNT + 1][0], "ok") + + def test_underflow(self): + # N sequential events except the penultimate which underflows + N = 16 + input_events = [(42+n, (n+1)*8) for n in range(N)] + input_events[-2] = (0, 0) # timestamp < 8 underflows + output, access_results = simulate(input_events) + self.assertEqual(len(output), len(input_events)-1) # event with underflow must get discarded + self.assertEqual([ar[0] for ar in access_results[:N-2]], ["ok"]*(N-2)) + self.assertEqual(access_results[N-2][0], "underflow") + self.assertEqual(output[N-2], (0, N-2, 42+N-1, N*8)) + self.assertEqual(access_results[N-1][0], "ok") + + def test_spread(self): + # 6 simultaneous events to reach lane 6 and 7 which are not writable + # for 1 and 4 cycles respectively causing a forced lane switch + input_events = [(42+n, 8) for n in range(7)] + input_events.append((100, 16)) + input_events.append((100, 32)) + output, access_results = simulate(input_events) + self.assertEqual([o[0] for o in output], [x % LANE_COUNT for x in range(9)]) + self.assertEqual([ar[0] for ar in access_results], ["ok"]*9) + + def test_regular_lc(self): + N = 16 + output, access_results = simulate([(n, 8) for n in range(N)], + compensation=range(N), wait=False) + self.assertEqual(output, [(0, n, n, (n+1)*8) for n in range(N)]) + self.assertEqual(access_results, [("ok", 0)]*N) + + def test_lane_switch_lc(self): + N = 32 + compensation = [n//2 for n in range(N)] + output, access_results = simulate([(n, 8) for n in range(N)], + compensation=compensation, wait=False) + self.assertEqual(output, [((n-n//2) % LANE_COUNT, n, n, 8*(1+n//2)) for n in range(N)]) + self.assertEqual([ar[0] for ar in access_results], 
["ok"]*N) + + def test_underflow_lc(self): + N = 16 + compensation = [0]*N + input_events = [(n, (n+1)*8) for n in range(N)] + compensation[N-2] = -input_events[N-2][1]//8 + output, access_results = simulate(input_events, compensation=compensation) + self.assertEqual(len(output), len(input_events)-1) # event with underflow must get discarded + self.assertEqual([ar[0] for ar in access_results[:N-2]], ["ok"]*(N-2)) + self.assertEqual(access_results[N-2][0], "underflow") + self.assertEqual(output[N-2], (0, N-2, N-1, N*8)) + self.assertEqual(access_results[N-1][0], "ok") diff --git a/artiq/gateware/test/rtio/test_sed_output_driver.py b/artiq/gateware/test/rtio/test_sed_output_driver.py new file mode 100644 index 000000000..4da3bd463 --- /dev/null +++ b/artiq/gateware/test/rtio/test_sed_output_driver.py @@ -0,0 +1,125 @@ +import unittest + +from migen import * + +from artiq.gateware import rtio +from artiq.gateware.rtio.sed import output_network, output_driver +from artiq.gateware.rtio.phy import ttl_simple +from artiq.gateware.rtio import rtlink + + +LANE_COUNT = 8 + + +class BusyPHY(Module): + def __init__(self): + self.rtlink = rtlink.Interface(rtlink.OInterface(1)) + self.comb += self.rtlink.o.busy.eq(1) + + +class DUT(Module): + def __init__(self): + self.ttl0 = Signal() + self.ttl1 = Signal() + self.ttl2 = Signal() + + self.submodules.phy0 = ttl_simple.Output(self.ttl0) + self.submodules.phy1 = ttl_simple.Output(self.ttl1) + self.submodules.phy2 = ttl_simple.Output(self.ttl2) + self.phy2.rtlink.o.enable_replace = False + self.submodules.phy3 = BusyPHY() + + rtio_channels = [ + rtio.Channel.from_phy(self.phy0), + rtio.Channel.from_phy(self.phy1), + rtio.Channel.from_phy(self.phy2), + rtio.Channel.from_phy(self.phy3), + ] + + self.submodules.output_driver = output_driver.OutputDriver( + rtio_channels, 0, LANE_COUNT, 4*LANE_COUNT) + + +def simulate(input_events): + dut = DUT() + + def gen(): + for n, input_event in enumerate(input_events): + yield dut.output_driver.input[n].valid.eq(1) + yield dut.output_driver.input[n].seqn.eq(n) + for k, v in input_event.items(): + yield getattr(dut.output_driver.input[n].payload, k).eq(v) + yield + for n in range(len(input_events)): + yield dut.output_driver.input[n].valid.eq(0) + for i in range(output_network.latency(LANE_COUNT) + 2): + yield + for i in range(3): + yield + + output = "" + + @passive + def monitor(): + nonlocal output + + ttls = [dut.ttl0, dut.ttl1, dut.ttl2] + prev_ttl_values = [0, 0, 0] + while True: + ttl_values = [] + for ttl in ttls: + ttl_values.append((yield ttl)) + for n, (old, new) in enumerate(zip(prev_ttl_values, ttl_values)): + if old != new: + output += "TTL{} {}->{}\n".format(n, old, new) + prev_ttl_values = ttl_values + + if (yield dut.output_driver.collision): + output += "collision ch{}\n".format((yield dut.output_driver.collision_channel)) + if (yield dut.output_driver.busy): + output += "busy ch{}\n".format((yield dut.output_driver.busy_channel)) + + yield + + run_simulation(dut, {"sys": [gen(), monitor()]}, + {"sys": 5, "rio": 5, "rio_phy": 5}) + return output + + +class TestOutputNetwork(unittest.TestCase): + def test_one_ttl(self): + self.assertEqual( + simulate([{"data": 1}]), + "TTL0 0->1\n") + + def test_simultaneous_ttl(self): + self.assertEqual( + simulate([{"channel": 0, "data": 1}, + {"channel": 1, "data": 1}, + {"channel": 2, "data": 1}]), + "TTL0 0->1\n" + "TTL1 0->1\n" + "TTL2 0->1\n") + + def test_replace(self): + self.assertEqual( + simulate([{"data": 0}, + {"data": 1}, + {"data": 0}]), + "") + 
self.assertEqual( + simulate([{"data": 1}, + {"data": 0}, + {"data": 1}]), + "TTL0 0->1\n") + + def test_collision(self): + self.assertEqual( + simulate([{"channel": 2}, + {"channel": 2}]), + "collision ch2\n") + + def test_busy(self): + self.assertEqual( + simulate([{"channel": 3}]), + "busy ch3\n") diff --git a/artiq/gateware/test/rtio/test_sed_output_network.py b/artiq/gateware/test/rtio/test_sed_output_network.py new file mode 100644 index 000000000..b9c10526e --- /dev/null +++ b/artiq/gateware/test/rtio/test_sed_output_network.py @@ -0,0 +1,61 @@ +import unittest + +from migen import * + +from artiq.gateware.rtio.sed import output_network + + +LANE_COUNT = 8 + + +def simulate(input_events): + layout_payload = [ + ("channel", 8), + ("fine_ts", 3), + ("address", 16), + ("data", 512), + ] + dut = output_network.OutputNetwork(LANE_COUNT, LANE_COUNT*4, layout_payload) + output = [] + def gen(): + for n, input_event in enumerate(input_events): + yield dut.input[n].valid.eq(1) + yield dut.input[n].seqn.eq(n) + for k, v in input_event.items(): + yield getattr(dut.input[n].payload, k).eq(v) + yield + for n in range(len(input_events)): + yield dut.input[n].valid.eq(0) + for i in range(output_network.latency(LANE_COUNT)): + yield + for x in range(LANE_COUNT): + if (yield dut.output[x].valid): + d = { + "replace_occured": (yield dut.output[x].replace_occured), + "channel": (yield dut.output[x].payload.channel), + "fine_ts": (yield dut.output[x].payload.fine_ts), + "address": (yield dut.output[x].payload.address), + "data": (yield dut.output[x].payload.data), + } + output.append(d) + run_simulation(dut, gen()) + return output + + +class TestOutputNetwork(unittest.TestCase): + def test_replace(self): + for n_events in range(2, LANE_COUNT+1): + with self.subTest(n_events=n_events): + input = [{"channel": 1, "address": i} for i in range(n_events)] + output = simulate(input) + expect = [{'replace_occured': 1, 'channel': 1, 'fine_ts': 0, 'address': n_events-1, 'data': 0}] + self.assertEqual(output, expect) + + def test_no_replace(self): + for n_events in range(1, LANE_COUNT+1): + with self.subTest(n_events=n_events): + input = [{"channel": i, "address": i} for i in range(n_events)] + output = simulate(input) + expect = [{'replace_occured': 0, 'channel': i, 'fine_ts': 0, 'address': i, 'data': 0} + for i in range(n_events)] + self.assertEqual(output, expect) diff --git a/artiq/gateware/test/rtio/test_sed_top.py b/artiq/gateware/test/rtio/test_sed_top.py new file mode 100644 index 000000000..5efc257a0 --- /dev/null +++ b/artiq/gateware/test/rtio/test_sed_top.py @@ -0,0 +1,115 @@ +import unittest +import itertools + +from migen import * + +from artiq.gateware import rtio +from artiq.gateware.rtio import cri +from artiq.gateware.rtio.sed.core import * +from artiq.gateware.rtio.phy import ttl_simple + + +class DUT(Module): + def __init__(self, **kwargs): + self.ttl0 = Signal() + self.ttl1 = Signal() + + self.submodules.phy0 = ttl_simple.Output(self.ttl0) + self.submodules.phy1 = ttl_simple.Output(self.ttl1) + + rtio_channels = [ + rtio.Channel.from_phy(self.phy0), + rtio.Channel.from_phy(self.phy1) + ] + + self.submodules.sed = SED(rtio_channels, 0, "sync", **kwargs) + self.sync += [ + self.sed.coarse_timestamp.eq(self.sed.coarse_timestamp + 1), + self.sed.minimum_coarse_timestamp.eq(self.sed.coarse_timestamp + 16) + ] + + +def simulate(input_events, **kwargs): + dut = DUT(**kwargs) + + ttl_changes = [] + access_results = [] + + def gen(): + yield dut.sed.cri.chan_sel.eq(0) + for timestamp, data in 
input_events: + yield dut.sed.cri.o_timestamp.eq(timestamp) + yield dut.sed.cri.o_data.eq(data) + yield + + yield dut.sed.cri.cmd.eq(cri.commands["write"]) + yield + yield dut.sed.cri.cmd.eq(cri.commands["nop"]) + + access_time = 0 + yield + while (yield dut.sed.cri.o_status) & 0x01: + yield + access_time += 1 + + status = (yield dut.sed.cri.o_status) + access_status = "ok" + if status & 0x02: + access_status = "underflow" + if (yield dut.sed.sequence_error): + access_status = "sequence_error" + + access_results.append((access_status, access_time)) + + @passive + def monitor(): + old_ttl_state = 0 + for time in itertools.count(): + ttl_state = yield dut.ttl0 + if ttl_state != old_ttl_state: + ttl_changes.append(time) + old_ttl_state = ttl_state + yield + + run_simulation(dut, {"sys": [ + gen(), monitor(), + (None for _ in range(max(ts for ts, _ in input_events) + 15)) + ]}, {"sys": 5, "rio": 5, "rio_phy": 5}) + + return ttl_changes, access_results + + +class TestSED(unittest.TestCase): + def test_sed(self): + input_events = [(18, 1), (20, 0), (25, 1), (30, 0)] + latency = 11 + ttl_changes, access_results = simulate(input_events) + self.assertEqual(ttl_changes, [e[0] + latency for e in input_events]) + self.assertEqual(access_results, [("ok", 0)]*len(input_events)) + + def test_replace(self): + input_events = [] + now = 19 + for i in range(5): + now += 10 + input_events += [(now, 1)] + now += 10 + input_events += [(now, 1), (now, 0)] + + ttl_changes, access_results = simulate(input_events) + self.assertEqual(access_results, [("ok", 0)]*len(input_events)) + self.assertEqual(ttl_changes, list(range(40, 40+5*20, 10))) + + def test_replace_rollover(self): + input_events = [] + now = 24 + for i in range(40): + now += 10 + input_events += [(now, 1)] + now += 10 + input_events += [(now, 1), (now, 0)] + + ttl_changes, access_results = simulate(input_events, + lane_count=2, fifo_depth=2, enable_spread=False) + self.assertEqual([r[0] for r in access_results], ["ok"]*len(input_events)) + self.assertEqual(ttl_changes, list(range(40, 40+40*20, 10))) diff --git a/artiq/gateware/test/rtio/test_ttl_serdes.py b/artiq/gateware/test/rtio/test_ttl_serdes.py new file mode 100644 index 000000000..7b4976cac --- /dev/null +++ b/artiq/gateware/test/rtio/test_ttl_serdes.py @@ -0,0 +1,125 @@ +import unittest + +from migen import * + +from artiq.gateware.rtio.phy.ttl_serdes_generic import * + + +class _FakeSerdes: + def __init__(self): + self.o = Signal(8) + self.i = Signal(8) + self.oe = Signal() + + +class _TB(Module): + def __init__(self): + self.serdes = _FakeSerdes() + self.submodules.dut = ClockDomainsRenamer({"rio_phy": "sys", "rio": "sys"})( + InOut(self.serdes)) + + +class TestTTLSerdes(unittest.TestCase): + def test_input(self): + tb = _TB() + + def gen(): + yield tb.dut.rtlink.o.address.eq(2) + yield tb.dut.rtlink.o.data.eq(0b11) + yield tb.dut.rtlink.o.stb.eq(1) # set sensitivity to rising + falling + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + + self.assertEqual((yield tb.serdes.oe), 0) + self.assertEqual((yield tb.dut.rtlink.i.stb), 0) + + yield tb.serdes.i.eq(0b11111110) # rising edge at fine_ts = 1 + yield + yield tb.serdes.i.eq(0b11111111) + yield + self.assertEqual((yield tb.dut.rtlink.i.stb), 1) + self.assertEqual((yield tb.dut.rtlink.i.fine_ts), 1) + + yield tb.serdes.i.eq(0b01111111) # falling edge at fine_ts = 7 + yield + yield tb.serdes.i.eq(0b00000000) + yield + self.assertEqual((yield tb.dut.rtlink.i.stb), 1) + self.assertEqual((yield tb.dut.rtlink.i.fine_ts), 7) + + yield 
tb.serdes.i.eq(0b11000000) # rising edge at fine_ts = 6 + yield + yield tb.serdes.i.eq(0b11111111) + yield + self.assertEqual((yield tb.dut.rtlink.i.stb), 1) + self.assertEqual((yield tb.dut.rtlink.i.fine_ts), 6) + + yield tb.dut.rtlink.o.address.eq(2) + yield tb.dut.rtlink.o.data.eq(0b01) + yield tb.dut.rtlink.o.stb.eq(1) # set sensitivity to rising only + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + + yield tb.serdes.i.eq(0b00001111) # falling edge at fine_ts = 4 + yield + yield tb.serdes.i.eq(0b00000000) + yield + # no strobe, sensitivity is rising edge + self.assertEqual((yield tb.dut.rtlink.i.stb), 0) + + yield tb.serdes.i.eq(0b11110000) # rising edge at fine_ts = 4 + yield + yield tb.serdes.i.eq(0b11111111) + yield + self.assertEqual((yield tb.dut.rtlink.i.stb), 1) + self.assertEqual((yield tb.dut.rtlink.i.fine_ts), 4) + + run_simulation(tb, gen()) + + def test_output(self): + tb = _TB() + + def gen(): + yield tb.dut.rtlink.o.address.eq(1) + yield tb.dut.rtlink.o.data.eq(1) + yield tb.dut.rtlink.o.stb.eq(1) # set Output Enable to 1 + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + yield + self.assertEqual((yield tb.serdes.oe), 1) + + yield tb.dut.rtlink.o.address.eq(0) + yield tb.dut.rtlink.o.data.eq(1) + yield tb.dut.rtlink.o.fine_ts.eq(3) + yield tb.dut.rtlink.o.stb.eq(1) # rising edge at fine_ts = 3 + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + self.assertEqual((yield tb.serdes.o), 0b11111000) + + yield + self.assertEqual((yield tb.serdes.o), 0b11111111) # stays at 1 + + yield tb.dut.rtlink.o.data.eq(0) + yield tb.dut.rtlink.o.fine_ts.eq(0) + yield tb.dut.rtlink.o.stb.eq(1) # falling edge at fine_ts = 0 + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + self.assertEqual((yield tb.serdes.o), 0b00000000) + + yield + self.assertEqual((yield tb.serdes.o), 0b00000000) + + yield tb.dut.rtlink.o.data.eq(1) + yield tb.dut.rtlink.o.fine_ts.eq(7) + yield tb.dut.rtlink.o.stb.eq(1) # rising edge at fine_ts = 7 + yield + yield tb.dut.rtlink.o.stb.eq(0) + yield + self.assertEqual((yield tb.serdes.o), 0b10000000) + + run_simulation(tb, gen()) diff --git a/artiq/gateware/test/serwb/__init__.py b/artiq/gateware/test/serwb/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/artiq/gateware/test/serwb/test_etherbone.py b/artiq/gateware/test/serwb/test_etherbone.py deleted file mode 100644 index 963769a1f..000000000 --- a/artiq/gateware/test/serwb/test_etherbone.py +++ /dev/null @@ -1,70 +0,0 @@ -import unittest -import random - -from migen import * - -from misoc.interconnect.wishbone import SRAM -from misoc.interconnect.stream import Converter - -from artiq.gateware.serwb import packet -from artiq.gateware.serwb import etherbone - - -class DUT(Module): - def __init__(self): - # wishbone slave - slave_depacketizer = packet.Depacketizer(int(100e6)) - slave_packetizer = packet.Packetizer() - self.submodules += slave_depacketizer, slave_packetizer - slave_etherbone = etherbone.Etherbone(mode="slave") - self.submodules += slave_etherbone - self.comb += [ - slave_depacketizer.source.connect(slave_etherbone.sink), - slave_etherbone.source.connect(slave_packetizer.sink) - ] - - # wishbone master - master_depacketizer = packet.Depacketizer(int(100e6)) - master_packetizer = packet.Packetizer() - self.submodules += master_depacketizer, master_packetizer - master_etherbone = etherbone.Etherbone(mode="master") - master_sram = SRAM(64, bus=master_etherbone.wishbone.bus) - self.submodules += master_etherbone, master_sram - self.comb += [ - 
master_depacketizer.source.connect(master_etherbone.sink), - master_etherbone.source.connect(master_packetizer.sink) - ] - - # connect core directly with converters in the loop - s2m_downconverter = Converter(32, 16) - s2m_upconverter = Converter(16, 32) - self.submodules += s2m_downconverter, s2m_upconverter - m2s_downconverter = Converter(32, 16) - m2s_upconverter = Converter(16, 32) - self.submodules += m2s_upconverter, m2s_downconverter - self.comb += [ - slave_packetizer.source.connect(s2m_downconverter.sink), - s2m_downconverter.source.connect(s2m_upconverter.sink), - s2m_upconverter.source.connect(master_depacketizer.sink), - - master_packetizer.source.connect(m2s_downconverter.sink), - m2s_downconverter.source.connect(m2s_upconverter.sink), - m2s_upconverter.source.connect(slave_depacketizer.sink) - ] - - # expose wishbone slave - self.wishbone = slave_etherbone.wishbone.bus - - -class TestEtherbone(unittest.TestCase): - def test_write_read_sram(self): - dut = DUT() - prng = random.Random(1) - def generator(dut): - datas = [prng.randrange(0, 2**32-1) for i in range(16)] - for i in range(16): - yield from dut.wishbone.write(i, datas[i]) - for i in range(16): - data = (yield from dut.wishbone.read(i)) - self.assertEqual(data, datas[i]) - run_simulation(dut, generator(dut)) diff --git a/artiq/gateware/test/serwb/test_serwb_phy_init.py b/artiq/gateware/test/serwb/test_serwb_phy_init.py deleted file mode 100644 index ea807f97d..000000000 --- a/artiq/gateware/test/serwb/test_serwb_phy_init.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env python3 -import unittest - -from migen import * - -from artiq.gateware.serwb import packet -from artiq.gateware.serwb import etherbone -from artiq.gateware.serwb.phy import _SerdesMasterInit, _SerdesSlaveInit - - -class SerdesModel(Module): - def __init__(self, taps, mode="slave"): - self.tx_idle = Signal() - self.tx_comma = Signal() - self.rx_idle = Signal() - self.rx_comma = Signal() - - self.rx_bitslip_value = Signal(6) - self.rx_delay_rst = Signal() - self.rx_delay_inc = Signal() - self.rx_delay_ce = Signal() - - self.valid_bitslip = Signal(6) - self.valid_delays = Signal(taps) - - # # # - - delay = Signal(max=taps) - bitslip = Signal(6) - - valid_delays = Array(Signal() for i in range(taps)) - for i in range(taps): - self.comb += valid_delays[taps-1-i].eq(self.valid_delays[i]) - - self.sync += [ - bitslip.eq(self.rx_bitslip_value), - If(self.rx_delay_rst, - delay.eq(0) - ).Elif(self.rx_delay_inc & self.rx_delay_ce, - delay.eq(delay + 1) - ) - ] - - if mode == "master": - self.submodules.fsm = fsm = ResetInserter()(FSM(reset_state="IDLE")) - self.comb += self.fsm.reset.eq(self.tx_idle) - fsm.act("IDLE", - If(self.tx_comma, - NextState("SEND_COMMA") - ), - self.rx_idle.eq(1) - ) - fsm.act("SEND_COMMA", - If(valid_delays[delay] & - (bitslip == self.valid_bitslip), - self.rx_comma.eq(1) - ), - If(~self.tx_comma, - NextState("READY") - ) - ) - fsm.act("READY") - elif mode == "slave": - self.submodules.fsm = fsm = FSM(reset_state="IDLE") - fsm.act("IDLE", - self.rx_idle.eq(1), - NextState("SEND_COMMA") - ) - fsm.act("SEND_COMMA", - If(valid_delays[delay] & - (bitslip == self.valid_bitslip), - self.rx_comma.eq(1) - ), - If(~self.tx_idle, - NextState("READY") - ) - ) - fsm.act("READY") - - -class DUTMaster(Module): - def __init__(self, taps=32): - self.submodules.serdes = SerdesModel(taps, mode="master") - self.submodules.init = _SerdesMasterInit(self.serdes, taps, timeout=1) - - -class DUTSlave(Module): - def __init__(self, taps=32): - 
self.submodules.serdes = SerdesModel(taps, mode="slave") - self.submodules.init = _SerdesSlaveInit(self.serdes, taps, timeout=1) - - -def generator(test, dut, valid_bitslip, valid_delays, check_success): - yield dut.serdes.valid_bitslip.eq(valid_bitslip) - yield dut.serdes.valid_delays.eq(valid_delays) - while not ((yield dut.init.ready) or - (yield dut.init.error)): - yield - if check_success: - ready = (yield dut.init.ready) - error = (yield dut.init.error) - delay_min = (yield dut.init.delay_min) - delay_max = (yield dut.init.delay_max) - delay = (yield dut.init.delay) - bitslip = (yield dut.init.bitslip) - test.assertEqual(ready, 1) - test.assertEqual(error, 0) - test.assertEqual(delay_min, 4) - test.assertEqual(delay_max, 9) - test.assertEqual(delay, 6) - test.assertEqual(bitslip, valid_bitslip) - else: - ready = (yield dut.init.ready) - error = (yield dut.init.error) - test.assertEqual(ready, 0) - test.assertEqual(error, 1) - - -class TestPHYInit(unittest.TestCase): - def test_master_init_success(self): - dut = DUTMaster() - valid_bitslip = 2 - valid_delays = 0b10001111100000111110000011111000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, True)) - - def test_master_init_failure(self): - # partial window at the beginning - dut = DUTMaster() - valid_bitslip = 2 - valid_delays = 0b11000000000000000000000000000000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) - # partial window at the end - dut = DUTMaster() - valid_bitslip = 2 - valid_delays = 0b00000000000000000000000000000011 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) - # too small window - dut = DUTMaster() - valid_bitslip = 2 - valid_delays = 0b00000000000000010000000000000000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) - - def test_slave_init_success(self): - dut = DUTSlave() - valid_bitslip = 2 - valid_delays = 0b10001111100000111110000011111000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, True)) - - def test_slave_init_failure(self): - # partial window at the beginning - dut = DUTSlave() - valid_bitslip = 2 - valid_delays = 0b11000000000000000000000000000000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) - # partial window at the end - dut = DUTSlave() - valid_bitslip = 2 - valid_delays = 0b00000000000000000000000000000011 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) - # too small window - dut = DUTSlave() - valid_bitslip = 2 - valid_delays = 0b00000000000000010000000000000000 - run_simulation(dut, generator(self, dut, valid_bitslip, valid_delays, False)) diff --git a/artiq/devices/lda/__init__.py b/artiq/gateware/test/suservo/__init__.py similarity index 100% rename from artiq/devices/lda/__init__.py rename to artiq/gateware/test/suservo/__init__.py diff --git a/artiq/gateware/test/suservo/test_adc.py b/artiq/gateware/test/suservo/test_adc.py new file mode 100644 index 000000000..3b31b0708 --- /dev/null +++ b/artiq/gateware/test/suservo/test_adc.py @@ -0,0 +1,110 @@ +import logging +import string +import unittest + +from migen import * +from migen.genlib import io + +from artiq.gateware.suservo.adc_ser import ADC, ADCParams + + + +class TB(Module): + def __init__(self, params): + self.params = p = params + + self.sck = Signal() + self.clkout = Signal(reset_less=True) + self.cnv = Signal() + + self.sck_en = Signal() + self.sck_en_ret = Signal() + + adc_sck_en = Signal() + cd_adc = ClockDomain("adc", 
reset_less=True) + self.clock_domains += cd_adc + + self.sdo = [] + self.data = [Signal((p.width, True), reset_less=True) + for i in range(p.channels)] + + srs = [] + for i in range(p.lanes): + name = "sdo" + string.ascii_lowercase[i] + sdo = Signal(name=name, reset_less=True) + self.sdo.append(sdo) + setattr(self, name, sdo) + sr = Signal(p.width*p.channels//p.lanes, reset_less=True) + srs.append(sr) + self.sync.adc += [ + sdo.eq(self._dly(sr[-1], 0)), + If(adc_sck_en, + sr[1:].eq(sr) + ) + ] + cnv_old = Signal(reset_less=True) + self.sync.async += [ + cnv_old.eq(self.cnv), + If(Cat(cnv_old, self.cnv) == 0b10, + sr.eq(Cat(reversed(self.data[2*i:2*i + 2]))), + ) + ] + + adc_clk_rec = Signal() + self.comb += [ + adc_sck_en.eq(self._dly(self.sck_en, 0)), + self.sck_en_ret.eq(self._dly(adc_sck_en)), + + adc_clk_rec.eq(self._dly(self.sck, 0)), + self.clkout.eq(self._dly(adc_clk_rec)), + ] + + def _dly(self, sig, n=0): + n += self.params.t_rtt*4//2 # t_{sys,adc,ret}/t_async half rtt + dly = Signal(n, reset_less=True) + self.sync.async += dly.eq(Cat(sig, dly)) + return dly[-1] + + +def main(): + params = ADCParams(width=8, channels=4, lanes=2, + t_cnvh=3, t_conv=5, t_rtt=4) + tb = TB(params) + adc = ADC(tb, params) + tb.submodules += adc + + def run(tb): + dut = adc + for i, ch in enumerate(tb.data): + yield ch.eq(i) + assert (yield dut.done) + yield dut.start.eq(1) + yield + yield dut.start.eq(0) + yield + assert not (yield dut.done) + while not (yield dut.done): + yield + x = (yield from [(yield d) for d in dut.data]) + for i, ch in enumerate(x): + assert ch == i, (hex(ch), hex(i)) + + run_simulation(tb, [run(tb)], + vcd_name="adc.vcd", + clocks={ + "sys": (8, 0), + "adc": (8, 0), + "ret": (8, 0), + "async": (2, 0), + }, + ) + + +class ADCTest(unittest.TestCase): + def test_run(self): + main() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + main() diff --git a/artiq/gateware/test/suservo/test_dds.py b/artiq/gateware/test/suservo/test_dds.py new file mode 100644 index 000000000..e8c221f51 --- /dev/null +++ b/artiq/gateware/test/suservo/test_dds.py @@ -0,0 +1,93 @@ +import logging +import unittest + +from migen import * + +from artiq.gateware.suservo.dds_ser import DDSParams, DDS + + +class TB(Module): + def __init__(self, p): + self.cs_n = Signal() + self.clk = Signal() + self.mosi = [Signal() for i in range(p.channels)] + for i, m in enumerate(self.mosi): + setattr(self, "mosi{}".format(i), m) + self.miso = Signal() + self.io_update = Signal() + + clk0 = Signal() + self.sync += clk0.eq(self.clk) + sample = Signal() + self.comb += sample.eq(Cat(self.clk, clk0) == 0b01) + + self.ddss = [] + for i in range(p.channels): + dds = Record([("ftw", 32), ("pow", 16), ("asf", 16), ("cmd", 8)]) + sr = Signal(len(dds)) + self.sync += [ + If(~self.cs_n & sample, + sr.eq(Cat(self.mosi[i], sr)) + ), + If(self.io_update, + dds.raw_bits().eq(sr) + ) + ] + self.ddss.append(dds) + + @passive + def log(self, data): + i = 0 + while True: + i += 1 + if (yield self.io_update): + yield + dat = [] + for dds in self.ddss: + v = yield from [(yield getattr(dds, k)) + for k in "cmd ftw pow asf".split()] + dat.append(v) + data.append((i, dat)) + else: + yield + + +def main(): + p = DDSParams(channels=4, width=8 + 32 + 16 + 16, clk=1) + tb = TB(p) + dds = DDS(tb, p) + tb.submodules += dds + + def run(tb): + dut = dds + for i, ch in enumerate(dut.profile): + yield ch.eq((((0 + << 16 | i | 0x20) + << 16 | i | 0x30) + << 32 | i | 0x40)) + # assert (yield dut.done) + yield dut.start.eq(1) + yield + 
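The nested shift expression a few lines up packs the 64-bit profile word driven into each DDS channel. Spelled out per field, with the layout inferred from the "cmd ftw pow asf" readback check at the end of this file (the 8-bit command byte, 0x0e here, is produced by the DDS module itself rather than taken from the profile word), an equivalent and purely illustrative formulation is:

asf = 0x20 | i    # top 16 bits
pow_ = 0x30 | i   # next 16 bits
ftw = 0x40 | i    # low 32 bits
profile_word = (asf << 48) | (pow_ << 32) | ftw  # same value as the expression above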
yield dut.start.eq(0) + yield + yield + assert not (yield dut.done) + while not (yield dut.done): + yield + yield + + data = [] + run_simulation(tb, [tb.log(data), run(tb)], vcd_name="dds.vcd") + + assert data[-1][1] == [[0xe, 0x40 | i, 0x30 | i, 0x20 | i] for i in + range(4)] + + +class DDSTest(unittest.TestCase): + def test_run(self): + main() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + main() diff --git a/artiq/gateware/test/suservo/test_iir.py b/artiq/gateware/test/suservo/test_iir.py new file mode 100644 index 000000000..919e7a6bf --- /dev/null +++ b/artiq/gateware/test/suservo/test_iir.py @@ -0,0 +1,56 @@ +import logging +import unittest + +from migen import * +from artiq.gateware.suservo import iir + + +def main(): + w_kasli = iir.IIRWidths(state=25, coeff=18, adc=16, + asf=14, word=16, accu=48, shift=11, + channel=3, profile=5, dly=8) + w = iir.IIRWidths(state=17, coeff=16, adc=16, + asf=14, word=16, accu=48, shift=11, + channel=2, profile=1, dly=8) + + def run(dut): + for i, ch in enumerate(dut.adc): + yield ch.eq(i) + for i, ch in enumerate(dut.ctrl): + yield ch.en_iir.eq(1) + yield ch.en_out.eq(1) + yield ch.profile.eq(i) + for i in range(1 << w.channel): + yield from dut.set_state(i, i << 8, coeff="x1") + yield from dut.set_state(i, i << 8, coeff="x0") + for j in range(1 << w.profile): + yield from dut.set_state(i, + (j << 1) | (i << 8), profile=j, coeff="y1") + for k, l in enumerate("pow offset ftw0 ftw1".split()): + yield from dut.set_coeff(i, profile=j, coeff=l, + value=(i << 12) | (j << 8) | (k << 4)) + yield + for i in range(1 << w.channel): + for j in range(1 << w.profile): + for k, l in enumerate("cfg a1 b0 b1".split()): + yield from dut.set_coeff(i, profile=j, coeff=l, + value=(i << 12) | (j << 8) | (k << 4)) + yield from dut.set_coeff(i, profile=j, coeff="cfg", + value=(i << 0) | (j << 8)) # sel, dly + yield + for i in range(10): + yield from dut.check_iter() + yield + + dut = iir.IIR(w) + run_simulation(dut, [run(dut)], vcd_name="iir.vcd") + + +class IIRTest(unittest.TestCase): + def test_run(self): + main() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + main() diff --git a/artiq/gateware/test/suservo/test_servo.py b/artiq/gateware/test/suservo/test_servo.py new file mode 100644 index 000000000..c28557d89 --- /dev/null +++ b/artiq/gateware/test/suservo/test_servo.py @@ -0,0 +1,104 @@ +import logging +import unittest + +from migen import * +from migen.genlib import io + +from artiq.gateware.test.suservo import test_adc, test_dds +from artiq.gateware.suservo import servo + + +class ServoSim(servo.Servo): + def __init__(self): + adc_p = servo.ADCParams(width=16, channels=8, lanes=4, + t_cnvh=4, t_conv=57 - 4, t_rtt=4 + 4) + iir_p = servo.IIRWidths(state=25, coeff=18, adc=16, asf=14, word=16, + accu=48, shift=11, channel=3, profile=5, dly=8) + dds_p = servo.DDSParams(width=8 + 32 + 16 + 16, + channels=adc_p.channels, clk=1) + + self.submodules.adc_tb = test_adc.TB(adc_p) + self.submodules.dds_tb = test_dds.TB(dds_p) + + servo.Servo.__init__(self, self.adc_tb, self.dds_tb, + adc_p, iir_p, dds_p) + + def test(self): + assert (yield self.done) + + adc = 1 + x0 = 0x0141 + yield self.adc_tb.data[-adc-1].eq(x0) + channel = 3 + yield self.iir.adc[channel].eq(adc) + yield self.iir.ctrl[channel].en_iir.eq(1) + yield self.iir.ctrl[channel].en_out.eq(1) + profile = 5 + yield self.iir.ctrl[channel].profile.eq(profile) + x1 = 0x0743 + yield from self.iir.set_state(adc, x1, coeff="x1") + y1 = 0x1145 + yield from 
self.iir.set_state(channel, y1, + profile=profile, coeff="y1") + coeff = dict(pow=0x1333, offset=0x1531, ftw0=0x1727, ftw1=0x1929, + a1=0x0135, b0=0x0337, b1=0x0539, cfg=adc | (0 << 3)) + for ks in "pow offset ftw0 ftw1", "a1 b0 b1 cfg": + for k in ks.split(): + yield from self.iir.set_coeff(channel, value=coeff[k], + profile=profile, coeff=k) + yield + + yield self.start.eq(1) + yield + yield self.start.eq(0) + while not (yield self.dds_tb.io_update): + yield + yield # io_update + + w = self.iir.widths + + x0 = x0 << (w.state - w.adc - 1) + _ = yield from self.iir.get_state(adc, coeff="x1") + assert _ == x0, (hex(_), hex(x0)) + + offset = coeff["offset"] << (w.state - w.coeff - 1) + a1, b0, b1 = coeff["a1"], coeff["b0"], coeff["b1"] + out = ( + 0*(1 << w.shift - 1) + # rounding + a1*(y1 + 0) + b0*(x0 + offset) + b1*(x1 + offset) + ) >> w.shift + y1 = min(max(0, out), (1 << w.state - 1) - 1) + + _ = yield from self.iir.get_state(channel, profile, coeff="y1") + assert _ == y1, (hex(_), hex(y1)) + + _ = yield self.dds_tb.ddss[channel].ftw + ftw = (coeff["ftw1"] << 16) | coeff["ftw0"] + assert _ == ftw, (hex(_), hex(ftw)) + + _ = yield self.dds_tb.ddss[channel].pow + assert _ == coeff["pow"], (hex(_), hex(coeff["pow"])) + + _ = yield self.dds_tb.ddss[channel].asf + asf = y1 >> (w.state - w.asf - 1) + assert _ == asf, (hex(_), hex(asf)) + + +def main(): + servo = ServoSim() + run_simulation(servo, servo.test(), vcd_name="servo.vcd", + clocks={ + "sys": (8, 0), + "adc": (8, 0), + "ret": (8, 0), + "async": (2, 0), + }) + + +class ServoTest(unittest.TestCase): + def test_run(self): + main() + + +if __name__ == "__main__": + main() diff --git a/artiq/devices/novatech409b/__init__.py b/artiq/gateware/test/wrpll/__init__.py similarity index 100% rename from artiq/devices/novatech409b/__init__.py rename to artiq/gateware/test/wrpll/__init__.py diff --git a/artiq/gateware/test/wrpll/test_dsp.py b/artiq/gateware/test/wrpll/test_dsp.py new file mode 100644 index 000000000..033e69853 --- /dev/null +++ b/artiq/gateware/test/wrpll/test_dsp.py @@ -0,0 +1,158 @@ +import unittest + +import numpy as np + +from migen import * + +from artiq.gateware.drtio.wrpll.ddmtd import Collector +from artiq.gateware.drtio.wrpll import thls, filters + + +class HelperChainTB(Module): + def __init__(self, N): + self.tag_ref = Signal(N) + self.input_stb = Signal() + self.adpll = Signal((24, True)) + self.out_stb = Signal() + + ### + + self.submodules.collector = Collector(N) + self.submodules.loop_filter = thls.make(filters.helper, data_width=48) + + self.comb += [ + self.collector.tag_ref.eq(self.tag_ref), + self.collector.ref_stb.eq(self.input_stb), + self.collector.main_stb.eq(self.input_stb), + self.loop_filter.input.eq(self.collector.out_helper << 22), + self.loop_filter.input_stb.eq(self.collector.out_stb), + self.adpll.eq(self.loop_filter.output), + self.out_stb.eq(self.loop_filter.output_stb), + ] + + +class TestDSP(unittest.TestCase): + def test_main_collector(self): + N = 2 + collector = Collector(N=N) + # check collector phase unwrapping + tags = [(0, 0, 0), + (0, 1, 1), + (2, 1, -1), + (3, 1, -2), + (0, 1, -3), + (1, 1, -4), + (2, 1, -5), + (3, 1, -6), + (3, 3, -4), + (0, 0, -4), + (0, 1, -3), + (0, 2, -2), + (0, 3, -1), + (0, 0, 0)] + for i in range(10): + tags.append((i % (2**N), (i+1) % (2**N), 1)) + + def generator(): + for tag_ref, tag_main, out in tags: + yield collector.tag_ref.eq(tag_ref) + yield collector.tag_main.eq(tag_main) + yield collector.main_stb.eq(1) + yield collector.ref_stb.eq(1) + + yield + + 
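Each entry of the tags list above is (tag_ref, tag_main, expected out_main): the expected output is the phase difference tag_main - tag_ref, unwrapped across the 2**N counter wrap so that it stays continuous. As a purely illustrative sanity check of that property (not part of the testbench), consecutive expected outputs agree with the wrapped tag differences modulo 2**N:

N = 2
for (r0, m0, o0), (r1, m1, o1) in zip(tags, tags[1:]):
    assert (o1 - o0) % (2 ** N) == ((m1 - r1) - (m0 - r0)) % (2 ** N)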
yield collector.main_stb.eq(0) + yield collector.ref_stb.eq(0) + + while not (yield collector.out_stb): + yield + + out_main = yield collector.out_main + self.assertEqual(out_main, out) + + run_simulation(collector, generator()) + + def test_helper_collector(self): + N = 3 + collector = Collector(N=N) + # check collector phase unwrapping + tags = [((2**N - 1 - tag) % (2**N), -1) for tag in range(20)] + tags += [((tags[-1][0] + 1 + tag) % (2**N), 1) for tag in range(20)] + tags += [((tags[-1][0] - 2 - 2*tag) % (2**N), -2) for tag in range(20)] + + def generator(): + for tag_ref, out in tags: + yield collector.tag_ref.eq(tag_ref) + yield collector.main_stb.eq(1) + yield collector.ref_stb.eq(1) + + yield + + yield collector.main_stb.eq(0) + yield collector.ref_stb.eq(0) + + while not (yield collector.out_stb): + yield + + out_helper = yield collector.out_helper + self.assertEqual(out_helper, out) + + run_simulation(collector, generator()) + + # test helper collector + filter against output from MATLAB model + def test_helper_chain(self): + pll = HelperChainTB(15) + + initial_helper_out = -8000 + ref_tags = np.array([ + 24778, 16789, 8801, 814, 25596, 17612, 9628, 1646, + 26433, 18453, 10474, 2496, 27287, 19311, 11337, 3364, 28160, + 20190, 12221, 4253, 29054, 21088, 13124, 5161, 29966, 22005, + 14045, 6087, 30897, 22940, 14985, 7031, 31847, 23895, 15944, + 7995, 47, 24869, 16923, 8978, 1035, 25861, 17920, 9981, + 2042, 26873, 18937, 11002, 3069, 27904, 19973, 12042, 4113, + 28953, 21026, 13100, 5175, 30020, 22098, 14177, 6257, 31106, + 23189, 15273, 7358, 32212, 24300, 16388, 8478, 569, 25429, + 17522, 9617, 1712, 26577, 18675, 10774, 2875, 27745, 19848, + 11951, 4056, 28930, 21038, 13147, 5256, 30135, 22247, 14361, + 6475, 31359, 23476, 15595, 7714, 32603, 24725, 16847, 8971, + 1096 + ]) + adpll_sim = np.array([ + 8, 24, 41, 57, 74, 91, 107, 124, 140, 157, 173, + 190, 206, 223, 239, 256, 273, 289, 306, 322, 339, 355, + 372, 388, 405, 421, 438, 454, 471, 487, 504, 520, 537, + 553, 570, 586, 603, 619, 636, 652, 668, 685, 701, 718, + 734, 751, 767, 784, 800, 817, 833, 850, 866, 882, 899, + 915, 932, 948, 965, 981, 998, 1014, 1030, 1047, 1063, 1080, + 1096, 1112, 1129, 1145, 1162, 1178, 1194, 1211, 1227, 1244, 1260, + 1276, 1293, 1309, 1326, 1342, 1358, 1375, 1391, 1407, 1424, 1440, + 1457, 1473, 1489, 1506, 1522, 1538, 1555, 1571, 1587, 1604, 1620, + 1636]) + + def sim(): + yield pll.collector.out_helper.eq(initial_helper_out) + for ref_tag, adpll_matlab in zip(ref_tags, adpll_sim): + # feed collector + yield pll.tag_ref.eq(int(ref_tag)) + yield pll.input_stb.eq(1) + + yield + + yield pll.input_stb.eq(0) + + while not (yield pll.collector.out_stb): + yield + + tag_diff = yield pll.collector.out_helper + + while not (yield pll.loop_filter.output_stb): + yield + + adpll_migen = yield pll.adpll + self.assertEqual(adpll_migen, adpll_matlab) + + yield + + run_simulation(pll, [sim()]) diff --git a/artiq/gateware/test/wrpll/test_thls.py b/artiq/gateware/test/wrpll/test_thls.py new file mode 100644 index 000000000..c1013de30 --- /dev/null +++ b/artiq/gateware/test/wrpll/test_thls.py @@ -0,0 +1,55 @@ +import unittest + +from migen import * + +from artiq.gateware.drtio.wrpll import thls + + +a = 0 + +def simple_test(x): + global a + a = a + (x*4 >> 1) + return a + + +class TestTHLS(unittest.TestCase): + def test_thls(self): + global a + + proc = thls.Processor() + a = 0 + cp = thls.compile(proc, simple_test) + print("Program:") + cp.pretty_print() + cp.dimension_processor() + print("Encoded program:", 
cp.encode()) + proc_impl = proc.implement(cp.encode(), cp.data) + + def send_values(values): + for value in values: + yield proc_impl.input.eq(value) + yield proc_impl.input_stb.eq(1) + yield + yield proc_impl.input.eq(0) + yield proc_impl.input_stb.eq(0) + yield + while (yield proc_impl.busy): + yield + @passive + def receive_values(callback): + while True: + while not (yield proc_impl.output_stb): + yield + callback((yield proc_impl.output)) + yield + + send_list = [42, 40, 10, 10] + receive_list = [] + + run_simulation(proc_impl, [send_values(send_list), receive_values(receive_list.append)]) + print("Execution:", send_list, "->", receive_list) + + a = 0 + expected_list = [simple_test(x) for x in send_list] + self.assertEqual(receive_list, expected_list) diff --git a/artiq/gui/applets.py b/artiq/gui/applets.py index d440120fa..432fb458d 100644 --- a/artiq/gui/applets.py +++ b/artiq/gui/applets.py @@ -10,9 +10,10 @@ from itertools import count from PyQt5 import QtCore, QtGui, QtWidgets -from artiq.protocols.pipe_ipc import AsyncioParentComm -from artiq.protocols.logging import LogParser -from artiq.protocols import pyon +from sipyco.pipe_ipc import AsyncioParentComm +from sipyco.logging_tools import LogParser +from sipyco import pyon + from artiq.gui.tools import QDockWidgetCloseDetect, LayoutWidget @@ -321,7 +322,6 @@ class AppletsDock(QtWidgets.QDockWidget): self.main_window = main_window self.datasets_sub = datasets_sub - self.dock_to_item = dict() self.applet_uids = set() self.table = QtWidgets.QTreeWidget() @@ -414,12 +414,12 @@ class AppletsDock(QtWidgets.QDockWidget): finally: self.table.itemChanged.connect(self.item_changed) - def create(self, uid, name, spec): - dock = _AppletDock(self.datasets_sub, uid, name, spec) + def create(self, item, name, spec): + dock = _AppletDock(self.datasets_sub, item.applet_uid, name, spec) self.main_window.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock) dock.setFloating(True) asyncio.ensure_future(dock.start()) - dock.sigClosed.connect(partial(self.on_dock_closed, dock)) + dock.sigClosed.connect(partial(self.on_dock_closed, item, dock)) return dock def item_changed(self, item, column): @@ -437,15 +437,15 @@ class AppletsDock(QtWidgets.QDockWidget): if item.applet_dock is None: name = item.text(0) spec = self.get_spec(item) - dock = self.create(item.applet_uid, name, spec) + dock = self.create(item, name, spec) item.applet_dock = dock if item.applet_geometry is not None: dock.restoreGeometry(item.applet_geometry) # geometry is now handled by main window state item.applet_geometry = None - self.dock_to_item[dock] = item else: dock = item.applet_dock + item.applet_dock = None if dock is not None: # This calls self.on_dock_closed dock.close() @@ -455,12 +455,9 @@ class AppletsDock(QtWidgets.QDockWidget): else: raise ValueError - def on_dock_closed(self, dock): - item = self.dock_to_item[dock] - item.applet_dock = None + def on_dock_closed(self, item, dock): item.applet_geometry = dock.saveGeometry() asyncio.ensure_future(dock.terminate()) - del self.dock_to_item[dock] item.setCheckState(0, QtCore.Qt.Unchecked) def get_untitled(self): diff --git a/artiq/gui/entries.py b/artiq/gui/entries.py index 2469edfbf..7e5e80916 100644 --- a/artiq/gui/entries.py +++ b/artiq/gui/entries.py @@ -275,6 +275,78 @@ class _RangeScan(LayoutWidget): randomize.setChecked(state["randomize"]) +class _CenterScan(LayoutWidget): + def __init__(self, procdesc, state): + LayoutWidget.__init__(self) + + scale = procdesc["scale"] + + def apply_properties(widget): + 
widget.setDecimals(procdesc["ndecimals"]) + if procdesc["global_min"] is not None: + widget.setMinimum(procdesc["global_min"]/scale) + else: + widget.setMinimum(float("-inf")) + if procdesc["global_max"] is not None: + widget.setMaximum(procdesc["global_max"]/scale) + else: + widget.setMaximum(float("inf")) + if procdesc["global_step"] is not None: + widget.setSingleStep(procdesc["global_step"]/scale) + if procdesc["unit"]: + widget.setSuffix(" " + procdesc["unit"]) + + center = ScientificSpinBox() + disable_scroll_wheel(center) + apply_properties(center) + center.setPrecision() + center.setRelativeStep() + center.setValue(state["center"]/scale) + self.addWidget(center, 0, 1) + self.addWidget(QtWidgets.QLabel("Center:"), 0, 0) + + span = ScientificSpinBox() + disable_scroll_wheel(span) + apply_properties(span) + span.setPrecision() + span.setRelativeStep() + span.setMinimum(0) + span.setValue(state["span"]/scale) + self.addWidget(span, 1, 1) + self.addWidget(QtWidgets.QLabel("Span:"), 1, 0) + + step = ScientificSpinBox() + disable_scroll_wheel(step) + apply_properties(step) + step.setPrecision() + step.setRelativeStep() + step.setMinimum(0) + step.setValue(state["step"]/scale) + self.addWidget(step, 2, 1) + self.addWidget(QtWidgets.QLabel("Step:"), 2, 0) + + randomize = QtWidgets.QCheckBox("Randomize") + self.addWidget(randomize, 3, 1) + randomize.setChecked(state["randomize"]) + + def update_center(value): + state["center"] = value*scale + + def update_span(value): + state["span"] = value*scale + + def update_step(value): + state["step"] = value*scale + + def update_randomize(value): + state["randomize"] = value + + center.valueChanged.connect(update_center) + span.valueChanged.connect(update_span) + step.valueChanged.connect(update_step) + randomize.stateChanged.connect(update_randomize) + + class _ExplicitScan(LayoutWidget): def __init__(self, state): LayoutWidget.__init__(self) @@ -307,6 +379,7 @@ class ScanEntry(LayoutWidget): self.widgets = OrderedDict() self.widgets["NoScan"] = _NoScan(procdesc, state["NoScan"]) self.widgets["RangeScan"] = _RangeScan(procdesc, state["RangeScan"]) + self.widgets["CenterScan"] = _CenterScan(procdesc, state["CenterScan"]) self.widgets["ExplicitScan"] = _ExplicitScan(state["ExplicitScan"]) for widget in self.widgets.values(): self.stack.addWidget(widget) @@ -314,6 +387,7 @@ class ScanEntry(LayoutWidget): self.radiobuttons = OrderedDict() self.radiobuttons["NoScan"] = QtWidgets.QRadioButton("No scan") self.radiobuttons["RangeScan"] = QtWidgets.QRadioButton("Range") + self.radiobuttons["CenterScan"] = QtWidgets.QRadioButton("Center") self.radiobuttons["ExplicitScan"] = QtWidgets.QRadioButton("Explicit") scan_type = QtWidgets.QButtonGroup() for n, b in enumerate(self.radiobuttons.values()): @@ -343,6 +417,8 @@ class ScanEntry(LayoutWidget): "NoScan": {"value": 0.0, "repetitions": 1}, "RangeScan": {"start": 0.0, "stop": 100.0*scale, "npoints": 10, "randomize": False}, + "CenterScan": {"center": 0.*scale, "span": 100.*scale, + "step": 10.*scale, "randomize": False}, "ExplicitScan": {"sequence": []} } if "default" in procdesc: @@ -361,6 +437,9 @@ class ScanEntry(LayoutWidget): state[ty]["npoints"] = default["npoints"] state[ty]["randomize"] = default["randomize"] state[ty]["seed"] = default["seed"] + elif ty == "CenterScan": + for key in "center span step randomize seed".split(): + state[ty][key] = default[key] elif ty == "ExplicitScan": state[ty]["sequence"] = default["sequence"] else: diff --git a/artiq/gui/fuzzy_select.py b/artiq/gui/fuzzy_select.py new 
file mode 100644 index 000000000..50e95d5ea --- /dev/null +++ b/artiq/gui/fuzzy_select.py @@ -0,0 +1,294 @@ +import re + +from functools import partial +from typing import List, Tuple +from PyQt5 import QtCore, QtWidgets + +from artiq.gui.tools import LayoutWidget + + +class FuzzySelectWidget(LayoutWidget): + """Widget to select from a list of pre-defined choices by typing in a + substring match (cf. Ctrl+P "Quick Open"/"Goto anything" functions in + editors/IDEs). + """ + + #: Raised when the selection process is aborted by the user (Esc, loss of + #: focus, etc.). + aborted = QtCore.pyqtSignal() + + #: Raised when an entry has been selected, giving the label of the user + #: choice and any additional QEvent.modifiers() (e.g. Ctrl key pressed). + finished = QtCore.pyqtSignal(str, int) + + def __init__(self, + choices: List[Tuple[str, int]] = [], + entry_count_limit: int = 10, + *args): + """ + :param choices: The choices the user can select from, given as tuples + of labels to display and an additional weight added to the + fuzzy-matching score. + :param entry_count_limit: Maximum number of entries to show. + """ + super().__init__(*args) + self.entry_count_limit = entry_count_limit + assert entry_count_limit >= 2, ("Need to allow at least two entries " + + "to show the ' not shown' hint") + + self.line_edit = QtWidgets.QLineEdit(self) + self.layout.addWidget(self.line_edit) + + line_edit_focus_filter = _FocusEventFilter(self.line_edit) + line_edit_focus_filter.focus_gained.connect(self._activate) + line_edit_focus_filter.focus_lost.connect(self._line_edit_focus_lost) + self.line_edit.installEventFilter(line_edit_focus_filter) + self.line_edit.textChanged.connect(self._update_menu) + + escape_filter = _EscapeKeyFilter(self) + escape_filter.escape_pressed.connect(self.abort) + self.line_edit.installEventFilter(escape_filter) + + self.menu = None + + self.update_when_text_changed = True + self.menu_typing_filter = None + self.line_edit_up_down_filter = None + self.abort_when_menu_hidden = False + self.abort_when_line_edit_unfocussed = True + + self.set_choices(choices) + + def set_choices(self, choices: List[Tuple[str, int]]) -> None: + """Update the list of choices available to the user.""" + # Keep sorted in the right order for when the query is empty. + self.choices = sorted(choices, key=lambda a: (a[1], a[0])) + if self.menu: + self._update_menu() + + def _activate(self): + self.update_when_text_changed = True + if not self.menu: + # Show menu after initial layout is complete. + QtCore.QTimer.singleShot(0, self._update_menu) + + def _ensure_menu(self): + if self.menu: + return + + self.menu = QtWidgets.QMenu(self) + + # Display menu with search results beneath line edit. + menu_pos = self.line_edit.mapToGlobal(self.line_edit.pos()) + menu_pos.setY(menu_pos.y() + self.line_edit.height()) + + self.menu.popup(menu_pos) + self.menu.aboutToHide.connect(self._menu_hidden) + + def _menu_hidden(self): + if self.abort_when_menu_hidden: + self.abort_when_menu_hidden = False + self.abort() + + def _line_edit_focus_lost(self): + if self.abort_when_line_edit_unfocussed: + self.abort() + + def _update_menu(self): + if not self.update_when_text_changed: + return + + filtered_choices = self._filter_choices() + + if not filtered_choices: + # No matches, don't display menu at all. 
+ if self.menu: + self.abort_when_menu_hidden = False + self.menu.close() + self.menu = None + self.abort_when_line_edit_unfocussed = True + self.line_edit.setFocus() + return + + # Truncate the list, leaving room for the " not shown" entry. + num_omitted = 0 + if len(filtered_choices) > self.entry_count_limit: + num_omitted = len(filtered_choices) - (self.entry_count_limit - 1) + filtered_choices = filtered_choices[:self.entry_count_limit - 1] + + # We are going to end up with a menu shown and the line edit losing + # focus. + self.abort_when_line_edit_unfocussed = False + + if self.menu: + # Hide menu temporarily to avoid re-layouting on every added item. + self.abort_when_menu_hidden = False + self.menu.hide() + self.menu.clear() + + self._ensure_menu() + + first_action = None + last_action = None + for choice in filtered_choices: + action = QtWidgets.QAction(choice, self.menu) + action.triggered.connect(partial(self._finish, action, choice)) + action.modifiers = 0 + self.menu.addAction(action) + if not first_action: + first_action = action + last_action = action + + if num_omitted > 0: + action = QtWidgets.QAction("<{} not shown>".format(num_omitted), + self.menu) + action.setEnabled(False) + self.menu.addAction(action) + + if self.menu_typing_filter: + self.menu.removeEventFilter(self.menu_typing_filter) + self.menu_typing_filter = _NonUpDownKeyFilter(self.menu, + self.line_edit) + self.menu.installEventFilter(self.menu_typing_filter) + + if self.line_edit_up_down_filter: + self.line_edit.removeEventFilter(self.line_edit_up_down_filter) + self.line_edit_up_down_filter = _UpDownKeyFilter( + self.line_edit, self.menu, first_action, last_action) + self.line_edit.installEventFilter(self.line_edit_up_down_filter) + + self.abort_when_menu_hidden = True + self.menu.show() + if first_action: + self.menu.setActiveAction(first_action) + self.menu.setFocus() + else: + self.line_edit.setFocus() + + def _filter_choices(self): + """Return a filtered and ranked list of choices based on the current + user input. + + For a choice not to be filtered out, it needs to contain the entered + characters in order. Entries are further sorted by the length of the + match (i.e. preferring matches where the entered string occurrs + without interruptions), then the position of the match, and finally + lexicographically. + """ + query = self.line_edit.text() + if not query: + return [label for label, _ in self.choices] + + # Find all "substring" matches of the given query in the labels, + # allowing any number of characters between each query character. + # Sort first by length of match (short matches preferred), to which the + # set weight is also applied, then by location (early in the label + # preferred), and at last alphabetically. + + # TODO: More SublimeText-like heuristics taking capital letters and + # punctuation into account. Also, requiring the matches to be in order + # seems to be a bit annoying in practice. + + # `re` seems to be the fastest way of doing this in CPython, even with + # all the (non-greedy) wildcards. + suggestions = [] + pattern_str = ".*?".join(map(re.escape, query)) + pattern = re.compile(pattern_str, flags=re.IGNORECASE) + for label, weight in self.choices: + matches = [] + # Manually loop over shortest matches at each position; + # re.finditer() only returns non-overlapping matches. 
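The ranking described in the docstring can be hard to picture from the regex alone. As a small illustration with hypothetical labels (the widget additionally subtracts the per-choice weight and tries every start position, keeping the shortest span per label):

import re
query = "scn"
pattern = re.compile(".*?".join(map(re.escape, query)), flags=re.IGNORECASE)
pattern.search("scan_channel").span()  # (0, 4): short match, ranked first
pattern.search("set_config_n").span()  # (0, 7): longer match, ranked later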
+ pos = 0 + while True: + r = pattern.search(label, pos=pos) + if not r: + break + start, stop = r.span() + matches.append((stop - start - weight, start, label)) + pos = start + 1 + if matches: + suggestions.append(min(matches)) + return [x for _, _, x in sorted(suggestions)] + + def _close(self): + if self.menu: + self.menu.close() + self.menu = None + self.update_when_text_changed = False + self.line_edit.clear() + + def abort(self): + self._close() + self.aborted.emit() + + def _finish(self, action, name): + self._close() + self.finished.emit(name, action.modifiers) + + +class _FocusEventFilter(QtCore.QObject): + """Emits signals when focus is gained/lost.""" + focus_gained = QtCore.pyqtSignal() + focus_lost = QtCore.pyqtSignal() + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.FocusIn: + self.focus_gained.emit() + elif event.type() == QtCore.QEvent.FocusOut: + self.focus_lost.emit() + return False + + +class _EscapeKeyFilter(QtCore.QObject): + """Emits a signal if the Escape key is pressed.""" + escape_pressed = QtCore.pyqtSignal() + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.KeyPress: + if event.key() == QtCore.Qt.Key_Escape: + self.escape_pressed.emit() + return False + + +class _UpDownKeyFilter(QtCore.QObject): + """Handles focussing the menu when pressing up/down in the line edit.""" + def __init__(self, parent, menu, first_item, last_item): + super().__init__(parent) + self.menu = menu + self.first_item = first_item + self.last_item = last_item + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.KeyPress: + if event.key() == QtCore.Qt.Key_Down: + self.menu.setActiveAction(self.first_item) + self.menu.setFocus() + return True + + if event.key() == QtCore.Qt.Key_Up: + self.menu.setActiveAction(self.last_item) + self.menu.setFocus() + return True + return False + + +class _NonUpDownKeyFilter(QtCore.QObject): + """Forwards input while the menu is focussed to the line edit.""" + def __init__(self, parent, target): + super().__init__(parent) + self.target = target + + def eventFilter(self, obj, event): + if event.type() == QtCore.QEvent.KeyPress: + k = event.key() + if k in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter): + action = obj.activeAction() + if action is not None: + action.modifiers = event.modifiers() + return False + if (k != QtCore.Qt.Key_Down and k != QtCore.Qt.Key_Up + and k != QtCore.Qt.Key_Enter + and k != QtCore.Qt.Key_Return): + QtWidgets.QApplication.sendEvent(self.target, event) + return True + return False diff --git a/artiq/gui/log.py b/artiq/gui/log.py index a6d68cb3d..9bf9d7efa 100644 --- a/artiq/gui/log.py +++ b/artiq/gui/log.py @@ -6,7 +6,7 @@ from functools import partial from PyQt5 import QtCore, QtGui, QtWidgets -from artiq.protocols.logging import SourceFilter +from sipyco.logging_tools import SourceFilter from artiq.gui.tools import (LayoutWidget, log_level_to_name, QDockWidgetCloseDetect) @@ -161,8 +161,13 @@ class _Model(QtCore.QAbstractItemModel): return v[3][item.row+1] elif role == QtCore.Qt.ToolTipRole: v = self.entries[msgnum] + if item.parent is self: + lineno = 0 + else: + lineno = item.row + 1 return (log_level_to_name(v[0]) + ", " + - time.strftime("%m/%d %H:%M:%S", time.localtime(v[2]))) + time.strftime("%m/%d %H:%M:%S", time.localtime(v[2])) + + "\n" + v[3][lineno]) class LogDock(QDockWidgetCloseDetect): diff --git a/artiq/gui/logo_ver.svg b/artiq/gui/logo_ver.svg index e24387b27..6a6ece31b 100644 --- a/artiq/gui/logo_ver.svg +++ b/artiq/gui/logo_ver.svg @@ -18,11 
+18,11 @@ enable-background="new 0 0 800 800" xml:space="preserve" id="svg2" - inkscape:version="0.92.2 5c3e80d, 2017-08-06" + inkscape:version="0.92.4 (5da689c313, 2019-01-14)" sodipodi:docname="logo_ver.svg">image/svg+xml \ No newline at end of file + aria-label="6" + style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:96px;line-height:25px;font-family:'Droid Sans Thai';-inkscape-font-specification:'Droid Sans Thai, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;text-align:start;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:1;stroke:none" + id="text843"> + \ No newline at end of file diff --git a/artiq/gui/models.py b/artiq/gui/models.py index 9b9551593..4d6f19c22 100644 --- a/artiq/gui/models.py +++ b/artiq/gui/models.py @@ -1,6 +1,6 @@ from PyQt5 import QtCore -from artiq.protocols.sync_struct import Subscriber, process_mod +from sipyco.sync_struct import Subscriber, process_mod class ModelManager: diff --git a/artiq/gui/state.py b/artiq/gui/state.py index fc19f7919..3a3f21be8 100644 --- a/artiq/gui/state.py +++ b/artiq/gui/state.py @@ -2,8 +2,8 @@ import asyncio from collections import OrderedDict import logging -from artiq.tools import TaskObject -from artiq.protocols import pyon +from sipyco.asyncio_tools import TaskObject +from sipyco import pyon logger = logging.getLogger(__name__) diff --git a/artiq/language/core.py b/artiq/language/core.py index 47c1746f3..5560398dd 100644 --- a/artiq/language/core.py +++ b/artiq/language/core.py @@ -8,7 +8,7 @@ import numpy __all__ = ["kernel", "portable", "rpc", "syscall", "host_only", - "set_time_manager", "set_watchdog_factory", + "kernel_from_string", "set_time_manager", "set_watchdog_factory", "TerminationRequested"] # global namespace for kernels @@ -28,19 +28,20 @@ def kernel(arg=None, flags={}): This decorator marks an object's method for execution on the core device. - When a decorated method is called from the Python interpreter, the ``core`` + When a decorated method is called from the Python interpreter, the :attr:`core` attribute of the object is retrieved and used as core device driver. The core device driver will typically compile, transfer and run the method (kernel) on the device. When kernels call another method: - - if the method is a kernel for the same core device, is it compiled + + - if the method is a kernel for the same core device, it is compiled and sent in the same binary. Calls between kernels happen entirely on the device. - if the method is a regular Python method (not a kernel), it generates a remote procedure call (RPC) for execution on the host. - The decorator takes an optional parameter that defaults to ``core`` and + The decorator takes an optional parameter that defaults to :attr`core` and specifies the name of the attribute to use as core device driver. This decorator must be present in the global namespace of all modules using @@ -117,7 +118,7 @@ def syscall(arg=None, flags={}): def inner_decorator(function): function.artiq_embedded = \ _ARTIQEmbeddedInfo(core_name=None, portable=False, function=None, - syscall=function.__name__, forbidden=False, + syscall=arg, forbidden=False, flags=set(flags)) return function return inner_decorator @@ -139,6 +140,59 @@ def host_only(function): return function +def kernel_from_string(parameters, body_code, decorator=kernel): + """Build a kernel function from the supplied source code in string form, + similar to ``exec()``/``eval()``. 
+ + Operating on pieces of source code as strings is a very brittle form of + metaprogramming; kernels generated like this are hard to debug, and + inconvenient to write. Nevertheless, this can sometimes be useful to work + around restrictions in ARTIQ Python. In that instance, care should be taken + to keep string-generated code to a minimum and cleanly separate it from + surrounding code. + + The resulting function declaration is also evaluated using ``exec()`` for + use from host Python code. To encourage a modicum of code hygiene, no + global symbols are available by default; any objects accessed by the + function body must be passed in explicitly as parameters. + + :param parameters: A list of parameter names the generated functions + accepts. Each entry can either be a string or a tuple of two strings; + if the latter, the second element specifies the type annotation. + :param body_code: The code for the function body, in string form. + ``return`` statements can be used to return values, as usual. + :param decorator: One of ``kernel`` or ``portable`` (optionally with + parameters) to specify how the function will be executed. + + :return: The function generated from the arguments. + """ + + # Build complete function declaration. + decl = "def kernel_from_string_fn(" + for p in parameters: + type_annotation = "" + if isinstance(p, tuple): + name, typ = p + type_annotation = ": " + typ + else: + name = p + decl += name + type_annotation + "," + decl += "):\n" + decl += "\n".join(" " + line for line in body_code.split("\n")) + + # Evaluate to get host-side function declaration. + context = {} + try: + exec(decl, context) + except SyntaxError: + raise SyntaxError("Error parsing kernel function: '{}'".format(decl)) + fn = decorator(context["kernel_from_string_fn"]) + + # Save source code for the compiler to pick up later. + fn.artiq_embedded = fn.artiq_embedded._replace(function=decl) + return fn + + class _DummyTimeManager: def _not_implemented(self, *args, **kwargs): raise NotImplementedError( @@ -198,7 +252,12 @@ def delay_mu(duration): def now_mu(): - """Retrieves the current RTIO time, in machine units.""" + """Retrieve the current RTIO timeline cursor, in machine units. + + Note the conceptual difference between this and the current value of the + hardware RTIO counter; see e.g. + :meth:`artiq.coredevice.core.Core.get_rtio_counter_mu` for the latter. + """ return _time_manager.get_time_mu() diff --git a/artiq/language/environment.py b/artiq/language/environment.py index ccfdcdb72..f7aab5502 100644 --- a/artiq/language/environment.py +++ b/artiq/language/environment.py @@ -1,7 +1,9 @@ +import warnings from collections import OrderedDict from inspect import isclass -from artiq.protocols import pyon +from sipyco import pyon + from artiq.language import units from artiq.language.core import rpc @@ -30,7 +32,7 @@ class _SimpleArgProcessor: if isinstance(default, list): raise NotImplementedError if default is not NoDefault: - self.default_value = default + self.default_value = self.process(default) def default(self): if not hasattr(self, "default_value"): @@ -67,7 +69,10 @@ class PYONValue(_SimpleArgProcessor): class BooleanValue(_SimpleArgProcessor): """A boolean argument.""" - pass + def process(self, x): + if type(x) != bool: + raise ValueError("Invalid BooleanValue value") + return x class EnumerationValue(_SimpleArgProcessor): @@ -78,22 +83,26 @@ class EnumerationValue(_SimpleArgProcessor): argument. 
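For reference, a minimal usage sketch of the new ``kernel_from_string`` helper added above. The ``ttl`` parameter and the ``pulse()`` call are illustrative assumptions only, not part of this change::

    from artiq.language.core import kernel_from_string

    # Generate a kernel taking the experiment, a TTL device and a duration.
    # No globals are available to the generated body, so every object it
    # uses must be passed in as a parameter.
    body = "ttl.pulse(duration)"
    fn = kernel_from_string(["self", "ttl", "duration"], body)

    # Later, e.g. from another kernel of the same experiment (hypothetical):
    #     fn(self, self.ttl0, 1e-6)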
""" def __init__(self, choices, default=NoDefault): - _SimpleArgProcessor.__init__(self, default) - assert default is NoDefault or default in choices self.choices = choices + super().__init__(default) + + def process(self, x): + if x not in self.choices: + raise ValueError("Invalid EnumerationValue value") + return x def describe(self): d = _SimpleArgProcessor.describe(self) d["choices"] = self.choices return d + class NumberValue(_SimpleArgProcessor): """An argument that can take a numerical value. - If ndecimals = 0, scale = 1 and step is integer, then it returns - an integer value. Otherwise, it returns a floating point value. - The simplest way to represent an integer argument is - ``NumberValue(step=1, ndecimals=0)``. + If ``type=="auto"``, the result will be a ``float`` unless + ndecimals = 0, scale = 1 and step is an integer. Setting ``type`` to + ``int`` will also result in an error unless these conditions are met. When ``scale`` is not specified, and the unit is a common one (i.e. defined in ``artiq.language.units``), then the scale is obtained from @@ -116,9 +125,13 @@ class NumberValue(_SimpleArgProcessor): :param min: The minimum value of the argument. :param max: The maximum value of the argument. :param ndecimals: The number of decimals a UI should use. + :param type: Type of this number. Accepts ``"float"``, ``"int"`` or + ``"auto"``. Defaults to ``"auto"``. """ + valid_types = ["auto", "float", "int"] + def __init__(self, default=NoDefault, unit="", scale=None, - step=None, min=None, max=None, ndecimals=2): + step=None, min=None, max=None, ndecimals=2, type="auto"): if scale is None: if unit == "": scale = 1.0 @@ -130,27 +143,39 @@ class NumberValue(_SimpleArgProcessor): "the scale manually".format(unit)) if step is None: step = scale/10.0 - if default is not NoDefault: - self.default_value = default self.unit = unit self.scale = scale self.step = step self.min = min self.max = max self.ndecimals = ndecimals + self.type = type - def _is_int(self): + if self.type not in NumberValue.valid_types: + raise TypeError("type must be 'float', 'int' or 'auto'") + + if self.type == "int" and not self._is_int_compatible(): + raise ValueError(("Value marked as integer but settings are " + "not compatible. Please set ndecimals = 0, " + "scale = 1 and step to an integer")) + + super().__init__(default) + + def _is_int_compatible(self): + ''' + Are the settings other than `type` compatible with this being + an integer? + ''' return (self.ndecimals == 0 and int(self.step) == self.step and self.scale == 1) - def default(self): - if not hasattr(self, "default_value"): - raise DefaultMissing - if self._is_int(): - return int(self.default_value) - else: - return float(self.default_value) + def _is_int(self): + ''' + Will this argument return an integer? 
+ ''' + return (self.type == "int" + or (self.type == "auto" and self._is_int_compatible())) def process(self, x): if self._is_int(): @@ -168,6 +193,7 @@ class NumberValue(_SimpleArgProcessor): d["min"] = self.min d["max"] = self.max d["ndecimals"] = self.ndecimals + d["type"] = self.type return d @@ -206,10 +232,12 @@ class HasEnvironment: self.__device_mgr = managers_or_parent[0] self.__dataset_mgr = managers_or_parent[1] self.__argument_mgr = managers_or_parent[2] + self.__scheduler_defaults = managers_or_parent[3] else: self.__device_mgr = managers_or_parent.__device_mgr self.__dataset_mgr = managers_or_parent.__dataset_mgr self.__argument_mgr = managers_or_parent.__argument_mgr + self.__scheduler_defaults = {} managers_or_parent.register_child(self) self.__in_build = True @@ -219,6 +247,23 @@ class HasEnvironment: def register_child(self, child): self.children.append(child) + def call_child_method(self, method, *args, **kwargs): + """Calls the named method for each child, if it exists for that child, + in the order of registration. + + :param method: Name of the method to call + :type method: str + :param args: Tuple of positional arguments to pass to all children + :param kwargs: Dict of keyword arguments to pass to all children + """ + for child in self.children: + try: + child_method = getattr(child, method) + except AttributeError: + pass + else: + child_method(*args, **kwargs) + def build(self): """Should be implemented by the user to request arguments. @@ -230,7 +275,7 @@ class HasEnvironment: instead: when the repository is scanned to build the list of available experiments and when the dataset browser ``artiq_browser`` is used to open or run the analysis stage of an experiment. Do not - rely on being able to operate on devices or arguments in ``build()``. + rely on being able to operate on devices or arguments in :meth:`build`. Datasets are read-only in this method. @@ -277,7 +322,7 @@ class HasEnvironment: def setattr_device(self, key): """Sets a device driver as attribute. The names of the device driver - and of the attribute are the same. + and of the attribute are the same. The key is added to the instance's kernel invariants.""" setattr(self, key, self.get_device(key)) @@ -286,7 +331,7 @@ class HasEnvironment: @rpc(flags={"async"}) def set_dataset(self, key, value, - broadcast=False, persist=False, save=True): + broadcast=False, persist=False, archive=True, save=None): """Sets the contents and handling modes of a dataset. Datasets must be scalars (``bool``, ``int``, ``float`` or NumPy scalar) @@ -296,10 +341,15 @@ class HasEnvironment: dispatches it. :param persist: the master should store the data on-disk. Implies broadcast. - :param save: the data is saved into the local storage of the current + :param archive: the data is saved into the local storage of the current run (archived as a HDF5 file). + :param save: deprecated. """ - self.__dataset_mgr.set(key, value, broadcast, persist, save) + if save is not None: + warnings.warn("set_dataset save parameter is deprecated, " + "use archive instead", FutureWarning) + archive = save + self.__dataset_mgr.set(key, value, broadcast, persist, archive) @rpc(flags={"async"}) def mutate_dataset(self, key, index, value): @@ -315,6 +365,18 @@ class HasEnvironment: as ``slice(*sub_tuple)`` (multi-dimensional slicing).""" self.__dataset_mgr.mutate(key, index, value) + @rpc(flags={"async"}) + def append_to_dataset(self, key, value): + """Append a value to a dataset. + + The target dataset must be a list (i.e. 
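To illustrate the ``save`` to ``archive`` rename above, a sketch of the dataset handling modes (dataset names and values are made up)::

    import numpy as np
    from artiq.experiment import *

    class Spectrum(EnvExperiment):
        def run(self):
            # live data: broadcast to clients but keep out of the HDF5 results
            self.set_dataset("spectrum", np.zeros(100),
                             broadcast=True, archive=False)
            # final result: archived into the run's results file (the default)
            self.set_dataset("fit_center", 12.5e6)
            # old spelling: still accepted, but now emits a FutureWarning
            self.set_dataset("fit_center", 12.5e6, save=True)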
support ``append()``), and must + have previously been set from this experiment. + + The broadcast/persist/archive mode of the given key remains unchanged + from when the dataset was last set. Appended values are transmitted + efficiently as incremental modifications in broadcast mode.""" + self.__dataset_mgr.append_to(key, value) + def get_dataset(self, key, default=NoDefault, archive=True): """Returns the contents of a dataset. @@ -328,8 +390,10 @@ class HasEnvironment: By default, datasets obtained by this method are archived into the output HDF5 file of the experiment. If an archived dataset is requested more than one time (and therefore its value has potentially changed) or is - modified, a warning is emitted. Archival can be turned off by setting - the ``archive`` argument to ``False``. + modified, a warning is emitted. + + :param archive: Set to ``False`` to prevent archival together with the run's results. + Default is ``True`` """ try: return self.__dataset_mgr.get(key, archive) @@ -344,6 +408,21 @@ class HasEnvironment: dataset and of the attribute are the same.""" setattr(self, key, self.get_dataset(key, default, archive)) + def set_default_scheduling(self, priority=None, pipeline_name=None, flush=None): + """Sets the default scheduling options. + + This function should only be called from ``build``.""" + if not self.__in_build: + raise TypeError("set_default_scheduling() should only " + "be called from build()") + + if priority is not None: + self.__scheduler_defaults["priority"] = int(priority) + if pipeline_name is not None: + self.__scheduler_defaults["pipeline_name"] = pipeline_name + if flush is not None: + self.__scheduler_defaults["flush"] = flush + class Experiment: """Base class for top-level experiments. @@ -355,7 +434,7 @@ class Experiment: """Entry point for pre-computing data necessary for running the experiment. - Doing such computations outside of ``run`` enables more efficient + Doing such computations outside of :meth:`run` enables more efficient scheduling of multiple experiments that need to access the shared hardware during part of their execution. @@ -371,8 +450,8 @@ class Experiment: This method may interact with the hardware. - The experiment may call the scheduler's ``pause`` method while in - ``run``. + The experiment may call the scheduler's :meth:`pause` method while in + :meth:`run`. """ raise NotImplementedError @@ -382,7 +461,7 @@ class Experiment: This method may be overloaded by the user to implement the analysis phase of the experiment, for example fitting curves. - Splitting this phase from ``run`` enables tweaking the analysis + Splitting this phase from :meth:`run` enables tweaking the analysis algorithm on pre-existing data, and CPU-bound analyses to be run overlapped with the next experiment in a pipelined manner. @@ -392,16 +471,15 @@ class Experiment: class EnvExperiment(Experiment, HasEnvironment): - """Base class for top-level experiments that use the ``HasEnvironment`` - environment manager. + """Base class for top-level experiments that use the + :class:`~artiq.language.environment.HasEnvironment` environment manager. 
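A sketch of the new ``set_default_scheduling`` hook; the pipeline name, priority and class are arbitrary examples::

    from artiq.experiment import *

    class NightlyCalibration(EnvExperiment):
        def build(self):
            # only legal from build(); a TypeError is raised elsewhere
            self.set_default_scheduling(priority=10,
                                        pipeline_name="calibrations",
                                        flush=False)

        def run(self):
            pass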
- Most experiment should derive from this class.""" + Most experiments should derive from this class.""" def prepare(self): - """The default prepare method calls prepare for all children, in the - order of instantiation, if the child has a prepare method.""" - for child in self.children: - if hasattr(child, "prepare"): - child.prepare() + """This default prepare method calls :meth:`~artiq.language.environment.Experiment.prepare` + for all children, in the order of registration, if the child has a + :meth:`~artiq.language.environment.Experiment.prepare` method.""" + self.call_child_method("prepare") def is_experiment(o): diff --git a/artiq/language/scan.py b/artiq/language/scan.py index 8c38ce63c..4dd4cdd1b 100644 --- a/artiq/language/scan.py +++ b/artiq/language/scan.py @@ -27,7 +27,7 @@ from artiq.language import units __all__ = ["ScanObject", - "NoScan", "RangeScan", "ExplicitScan", + "NoScan", "RangeScan", "CenterScan", "ExplicitScan", "Scannable", "MultiScanManager"] @@ -42,12 +42,10 @@ class NoScan(ScanObject): self.value = value self.repetitions = repetitions - @portable def _gen(self): for i in range(self.repetitions): yield self.value - @portable def __iter__(self): return self._gen() @@ -81,7 +79,6 @@ class RangeScan(ScanObject): rng = random.Random(seed) random.shuffle(self.sequence, rng.random) - @portable def __iter__(self): return iter(self.sequence) @@ -96,12 +93,48 @@ class RangeScan(ScanObject): "seed": self.seed} +class CenterScan(ScanObject): + """A scan object that yields evenly spaced values within a span around a + center. If ``step`` is finite, then ``center`` is always included. + Values outside ``span`` around center are never included. + If ``randomize`` is True the points are randomly ordered.""" + def __init__(self, center, span, step, randomize=False, seed=None): + self.center = center + self.span = span + self.step = step + self.randomize = randomize + self.seed = seed + + if step == 0.: + self.sequence = [] + else: + n = 1 + int(span/(2.*step)) + self.sequence = [center + sign*i*step + for i in range(n) for sign in [-1, 1]][1:] + + if randomize: + rng = random.Random(seed) + random.shuffle(self.sequence, rng.random) + + def __iter__(self): + return iter(self.sequence) + + def __len__(self): + return len(self.sequence) + + def describe(self): + return {"ty": "CenterScan", + "center": self.center, "step": self.step, + "span": self.span, + "randomize": self.randomize, + "seed": self.seed} + + class ExplicitScan(ScanObject): """A scan object that yields values from an explicitly defined sequence.""" def __init__(self, sequence): self.sequence = sequence - @portable def __iter__(self): return iter(self.sequence) @@ -115,6 +148,7 @@ class ExplicitScan(ScanObject): _ty_to_scan = { "NoScan": NoScan, "RangeScan": RangeScan, + "CenterScan": CenterScan, "ExplicitScan": ExplicitScan } diff --git a/artiq/language/types.py b/artiq/language/types.py index c767dd663..4a186788a 100644 --- a/artiq/language/types.py +++ b/artiq/language/types.py @@ -8,7 +8,7 @@ from artiq.compiler import types, builtins __all__ = ["TNone", "TTuple", "TBool", "TInt32", "TInt64", "TFloat", "TStr", "TBytes", "TByteArray", - "TList", "TRange32", "TRange64", + "TList", "TArray", "TRange32", "TRange64", "TVar"] TNone = builtins.TNone() @@ -21,6 +21,7 @@ TBytes = builtins.TBytes() TByteArray = builtins.TByteArray() TTuple = types.TTuple TList = builtins.TList +TArray = builtins.TArray TRange32 = builtins.TRange(builtins.TInt(types.TValue(32))) TRange64 = builtins.TRange(builtins.TInt(types.TValue(64))) 
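A quick sketch of the new ``CenterScan`` object; the frequencies are arbitrary::

    from artiq.language.scan import CenterScan

    # 1 MHz span around 100 MHz in 100 kHz steps; the center is included.
    scan = CenterScan(100e6, 1e6, 100e3)
    assert len(scan) == 11
    assert min(scan) == 99.5e6 and max(scan) == 100.5e6

    # Randomized ordering with a fixed seed:
    shuffled = CenterScan(100e6, 1e6, 100e3, randomize=True, seed=1234)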
TVar = types.TVar diff --git a/artiq/language/units.py b/artiq/language/units.py index d57fd860b..13f6f1535 100644 --- a/artiq/language/units.py +++ b/artiq/language/units.py @@ -15,7 +15,7 @@ def _register_unit(unit, prefixes): _register_unit("s", "pnum_") -_register_unit("Hz", "_kMG") +_register_unit("Hz", "m_kMG") _register_unit("dB", "_") _register_unit("V", "um_k") _register_unit("A", "um_") diff --git a/artiq/master/databases.py b/artiq/master/databases.py index e71862d44..977cfae44 100644 --- a/artiq/master/databases.py +++ b/artiq/master/databases.py @@ -1,13 +1,14 @@ import asyncio +import tokenize -from artiq.protocols.sync_struct import Notifier, process_mod -from artiq.protocols import pyon -from artiq.tools import TaskObject +from sipyco.sync_struct import Notifier, process_mod, update_from_dict +from sipyco import pyon +from sipyco.asyncio_tools import TaskObject def device_db_from_file(filename): glbs = dict() - with open(filename, "r") as f: + with tokenize.open(filename) as f: exec(f.read(), glbs) return glbs["device_db"] @@ -18,20 +19,18 @@ class DeviceDB: self.data = Notifier(device_db_from_file(self.backing_file)) def scan(self): - new_data = device_db_from_file(self.backing_file) - - for k in list(self.data.read.keys()): - if k not in new_data: - del self.data[k] - for k in new_data.keys(): - if k not in self.data.read or self.data.read[k] != new_data[k]: - self.data[k] = new_data[k] + update_from_dict(self.data, + device_db_from_file(self.backing_file)) def get_device_db(self): - return self.data.read + return self.data.raw_view - def get(self, key): - return self.data.read[key] + def get(self, key, resolve_alias=False): + desc = self.data.raw_view[key] + if resolve_alias: + while isinstance(desc, str): + desc = self.data.raw_view[desc] + return desc class DatasetDB(TaskObject): @@ -46,7 +45,7 @@ class DatasetDB(TaskObject): self.data = Notifier({k: (True, v) for k, v in file_data.items()}) def save(self): - data = {k: v[1] for k, v in self.data.read.items() if v[0]} + data = {k: v[1] for k, v in self.data.raw_view.items() if v[0]} pyon.store_file(self.persist_file, data) async def _do(self): @@ -58,7 +57,7 @@ class DatasetDB(TaskObject): self.save() def get(self, key): - return self.data.read[key][1] + return self.data.raw_view[key][1] def update(self, mod): process_mod(self.data, mod) @@ -66,8 +65,8 @@ class DatasetDB(TaskObject): # convenience functions (update() can be used instead) def set(self, key, value, persist=None): if persist is None: - if key in self.data.read: - persist = self.data.read[key][0] + if key in self.data.raw_view: + persist = self.data.raw_view[key][0] else: persist = False self.data[key] = (persist, value) diff --git a/artiq/master/experiments.py b/artiq/master/experiments.py index fc2787476..b9a46d7ba 100644 --- a/artiq/master/experiments.py +++ b/artiq/master/experiments.py @@ -5,7 +5,8 @@ import shutil import time import logging -from artiq.protocols.sync_struct import Notifier +from sipyco.sync_struct import Notifier, update_from_dict + from artiq.master.worker import (Worker, WorkerInternalException, log_worker_exception) from artiq.tools import get_windows_drives, exc_to_warning @@ -46,7 +47,8 @@ class _RepoScanner: entry = { "file": filename, "class_name": class_name, - "arginfo": arginfo + "arginfo": arginfo, + "scheduler_defaults": class_desc["scheduler_defaults"] } entry_dict[name] = entry @@ -81,15 +83,6 @@ class _RepoScanner: return r -def _sync_explist(target, source): - for k in list(target.read.keys()): - if k not in source: - 
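For the ``resolve_alias`` addition above, a sketch of an aliased ``device_db.py`` entry; the concrete TTL descriptor is a typical example and not part of this change::

    device_db = {
        "ttl0": {"type": "local", "module": "artiq.coredevice.ttl",
                 "class": "TTLOut", "arguments": {"channel": 0}},
        "led": "ttl0",   # alias entry: a plain string
    }

    # DeviceDB.get("led") returns the string "ttl0", while
    # DeviceDB.get("led", resolve_alias=True) follows the alias chain and
    # returns the ttl0 descriptor dictionary.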
del target[k] - for k in source.keys(): - if k not in target.read or target.read[k] != source[k]: - target[k] = source[k] - - class ExperimentDB: def __init__(self, repo_backend, worker_handlers): self.repo_backend = repo_backend @@ -124,8 +117,7 @@ class ExperimentDB: t1 = time.monotonic() new_explist = await _RepoScanner(self.worker_handlers).scan(wd) logger.info("repository scan took %d seconds", time.monotonic()-t1) - - _sync_explist(self.explist, new_explist) + update_from_dict(self.explist, new_explist) finally: self._scanning = False self.status["scanning"] = False diff --git a/artiq/master/log.py b/artiq/master/log.py index 958076505..945e44d86 100644 --- a/artiq/master/log.py +++ b/artiq/master/log.py @@ -1,8 +1,7 @@ import logging import logging.handlers -from artiq.protocols.sync_struct import Notifier -from artiq.protocols.logging import SourceFilter +from sipyco.logging_tools import SourceFilter class LogForwarder(logging.Handler): diff --git a/artiq/master/rid_counter.py b/artiq/master/rid_counter.py new file mode 100644 index 000000000..b4bbbd033 --- /dev/null +++ b/artiq/master/rid_counter.py @@ -0,0 +1,84 @@ +import logging +import os +import tempfile +import re + +logger = logging.getLogger(__name__) + + +class RIDCounter: + """Monotonically incrementing counter for RIDs (experiment run ids). + + A cache is used, but if necessary, the last used rid will be determined + from the given results directory. + """ + + def __init__(self, cache_filename="last_rid.pyon", results_dir="results"): + self.cache_filename = cache_filename + self.results_dir = results_dir + self._next_rid = self._last_rid() + 1 + logger.debug("Next RID is %d", self._next_rid) + + def get(self): + rid = self._next_rid + self._next_rid += 1 + self._update_cache(rid) + return rid + + def _last_rid(self): + try: + rid = self._last_rid_from_cache() + except FileNotFoundError: + logger.debug("Last RID cache not found, scanning results") + rid = self._last_rid_from_results() + self._update_cache(rid) + return rid + else: + logger.debug("Using last RID from cache") + return rid + + def _update_cache(self, rid): + contents = str(rid) + "\n" + directory = os.path.abspath(os.path.dirname(self.cache_filename)) + with tempfile.NamedTemporaryFile("w", dir=directory, delete=False + ) as f: + f.write(contents) + tmpname = f.name + os.replace(tmpname, self.cache_filename) + + def _last_rid_from_cache(self): + with open(self.cache_filename, "r") as f: + return int(f.read()) + + def _last_rid_from_results(self): + r = -1 + try: + day_folders = os.listdir(self.results_dir) + except: + return r + day_folders = filter( + lambda x: re.fullmatch("\\d\\d\\d\\d-\\d\\d-\\d\\d", x), + day_folders) + for df in day_folders: + day_path = os.path.join(self.results_dir, df) + try: + hm_folders = os.listdir(day_path) + except: + continue + hm_folders = filter(lambda x: re.fullmatch("\\d\\d(-\\d\\d)?", x), + hm_folders) + for hmf in hm_folders: + hm_path = os.path.join(day_path, hmf) + try: + h5files = os.listdir(hm_path) + except: + continue + for x in h5files: + m = re.fullmatch( + "(\\d\\d\\d\\d\\d\\d\\d\\d\\d)-.*\\.h5", x) + if m is None: + continue + rid = int(m.group(1)) + if rid > r: + r = rid + return r diff --git a/artiq/master/scheduler.py b/artiq/master/scheduler.py index 941df76e4..d978fa2f9 100644 --- a/artiq/master/scheduler.py +++ b/artiq/master/scheduler.py @@ -3,9 +3,11 @@ import logging from enum import Enum from time import time +from sipyco.sync_struct import Notifier +from sipyco.asyncio_tools import TaskObject, 
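Usage of the relocated ``RIDCounter`` is unchanged; a minimal sketch with the default paths::

    from artiq.master.rid_counter import RIDCounter

    counter = RIDCounter(cache_filename="last_rid.pyon",
                         results_dir="results")
    rid = counter.get()   # 0 on a fresh setup, then 1, 2, ...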
Condition + from artiq.master.worker import Worker, log_worker_exception -from artiq.tools import asyncio_wait_or_cancel, TaskObject, Condition -from artiq.protocols.sync_struct import Notifier +from artiq.tools import asyncio_wait_or_cancel logger = logging.getLogger(__name__) @@ -85,17 +87,12 @@ class Run: self._notifier[self.rid]["status"] = self._status.name self._state_changed.notify() - # The run with the largest priority_key is to be scheduled first - def priority_key(self, now=None): - if self.due_date is None: - due_date_k = 0 - else: - due_date_k = -self.due_date - if now is not None and self.due_date is not None: - runnable = int(now > self.due_date) - else: - runnable = 1 - return (runnable, self.priority, due_date_k, -self.rid) + def priority_key(self): + """Return a comparable value that defines a run priority order. + + Applies only to runs the due date of which has already elapsed. + """ + return (self.priority, -(self.due_date or 0), -self.rid) async def close(self): # called through pool @@ -113,7 +110,6 @@ class Run: run = _mk_worker_method("run") resume = _mk_worker_method("resume") analyze = _mk_worker_method("analyze") - write_results = _mk_worker_method("write_results") class RunPool: @@ -160,36 +156,37 @@ class PrepareStage(TaskObject): self.delete_cb = delete_cb def _get_run(self): - """If a run should get prepared now, return it. - Otherwise, return a float representing the time before the next timed - run becomes due, or None if there is no such run.""" + """If a run should get prepared now, return it. Otherwise, return a + float giving the time until the next check, or None if no time-based + check is required. + + The latter can be the case if there are no due-date runs, or none + of them are going to become next-in-line before further pool state + changes (which will also cause a re-evaluation). 
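To make the revised ordering concrete, a standalone sketch of how ``priority_key`` ranks runs whose due date has elapsed (plain tuples stand in for ``Run`` objects)::

    def priority_key(priority, due_date, rid):
        return (priority, -(due_date or 0), -rid)

    runs = [
        # (priority, due_date, rid)
        (0, None,   3),
        (1, None,   4),
        (1, 1000.0, 5),
    ]
    best = max(runs, key=lambda r: priority_key(*r))
    # -> (1, None, 4): higher priority first, then runs without a due date,
    #    then earlier due dates, then lower RID.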
+ """ + pending_runs = list( + filter(lambda r: r.status == RunStatus.pending, + self.pool.runs.values())) + now = time() - pending_runs = filter(lambda r: r.status == RunStatus.pending, - self.pool.runs.values()) - try: - candidate = max(pending_runs, key=lambda r: r.priority_key(now)) - except ValueError: - # pending_runs is an empty sequence - return None + def is_runnable(r): + return (r.due_date or 0) < now - prepared_runs = filter(lambda r: r.status == RunStatus.prepare_done, - self.pool.runs.values()) - try: - top_prepared_run = max(prepared_runs, - key=lambda r: r.priority_key()) - except ValueError: - # there are no existing prepared runs - go ahead with - pass - else: - # prepare (as well) only if it has higher priority than - # the highest priority prepared run - if top_prepared_run.priority_key() >= candidate.priority_key(): - return None + prepared_max = max((r.priority_key() for r in self.pool.runs.values() + if r.status == RunStatus.prepare_done), + default=None) + def takes_precedence(r): + return prepared_max is None or r.priority_key() > prepared_max - if candidate.due_date is None or candidate.due_date < now: + candidate = max(filter(is_runnable, pending_runs), + key=lambda r: r.priority_key(), + default=None) + if candidate is not None and takes_precedence(candidate): return candidate - else: - return candidate.due_date - now + + return min((r.due_date - now for r in pending_runs + if (not is_runnable(r) and takes_precedence(r))), + default=None) async def _do(self): while True: @@ -214,7 +211,7 @@ class PrepareStage(TaskObject): if run.worker.closed.is_set(): break if run.worker.closed.is_set(): - continue + continue run.status = RunStatus.preparing try: await run.build() @@ -307,13 +304,8 @@ class AnalyzeStage(TaskObject): try: await run.analyze() except: - logger.error("got worker exception in analyze stage of RID %d." - " Results will still be saved.", run.rid) - log_worker_exception() - try: - await run.write_results() - except: - logger.error("failed to write results of RID %d.", run.rid) + logger.error("got worker exception in analyze stage of RID %d.", + run.rid) log_worker_exception() self.delete_cb(run.rid) @@ -338,11 +330,20 @@ class Pipeline: class Deleter(TaskObject): + """Provides a synchronous interface for instigating deletion of runs. + + :meth:`RunPool.delete` is an async function (it needs to close the worker + connection, etc.), so we maintain a queue of RIDs to delete on a background task. + """ def __init__(self, pipelines): self._pipelines = pipelines self._queue = asyncio.Queue() def delete(self, rid): + """Delete the run with the given RID. + + Multiple calls for the same RID are silently ignored. + """ logger.debug("delete request for RID %d", rid) for pipeline in self._pipelines.values(): if rid in pipeline.pool.runs: @@ -354,6 +355,8 @@ class Deleter(TaskObject): await self._queue.join() async def _delete(self, rid): + # By looking up the run by RID, we implicitly make sure to delete each run only + # once. for pipeline in self._pipelines.values(): if rid in pipeline.pool.runs: logger.debug("deleting RID %d...", rid) @@ -442,8 +445,10 @@ class Scheduler: def get_status(self): """Returns a dictionary containing information about the runs currently - tracked by the scheduler.""" - return self.notifier.read + tracked by the scheduler. 
+ + Must not be modified.""" + return self.notifier.raw_view def check_pause(self, rid): """Returns ``True`` if there is a condition that could make ``pause`` diff --git a/artiq/master/worker.py b/artiq/master/worker.py index 69b6d9a28..36d5a202f 100644 --- a/artiq/master/worker.py +++ b/artiq/master/worker.py @@ -5,9 +5,10 @@ import logging import subprocess import time -from artiq.protocols import pipe_ipc, pyon -from artiq.protocols.logging import LogParser -from artiq.protocols.packed_exceptions import current_exc_packed +from sipyco import pipe_ipc, pyon +from sipyco.logging_tools import LogParser +from sipyco.packed_exceptions import current_exc_packed + from artiq.tools import asyncio_wait_or_cancel @@ -165,12 +166,15 @@ class Worker: ifs, timeout=self.send_timeout, return_when=asyncio.FIRST_COMPLETED) if all(f.cancelled() for f in fs): - raise WorkerTimeout("Timeout sending data to worker") + raise WorkerTimeout( + "Timeout sending data to worker (RID {})".format(self.rid)) for f in fs: if not f.cancelled() and f.done(): f.result() # raise any exceptions if cancellable and self.closed.is_set(): - raise WorkerError("Data transmission to worker cancelled") + raise WorkerError( + "Data transmission to worker cancelled (RID {})".format( + self.rid)) async def _recv(self, timeout): assert self.io_lock.locked() @@ -178,16 +182,22 @@ class Worker: [self.ipc.readline(), self.closed.wait()], timeout=timeout, return_when=asyncio.FIRST_COMPLETED) if all(f.cancelled() for f in fs): - raise WorkerTimeout("Timeout receiving data from worker") + raise WorkerTimeout( + "Timeout receiving data from worker (RID {})".format(self.rid)) if self.closed.is_set(): - raise WorkerError("Data transmission to worker cancelled") + raise WorkerError( + "Receiving data from worker cancelled (RID {})".format( + self.rid)) line = fs[0].result() if not line: - raise WorkerError("Worker ended while attempting to receive data") + raise WorkerError( + "Worker ended while attempting to receive data (RID {})". + format(self.rid)) try: obj = pyon.decode(line.decode()) except: - raise WorkerError("Worker sent invalid PYON data") + raise WorkerError("Worker sent invalid PYON data (RID {})".format( + self.rid)) return obj async def _handle_worker_requests(self): @@ -283,10 +293,6 @@ class Worker: async def analyze(self): await self._worker_action({"action": "analyze"}) - async def write_results(self, timeout=15.0): - await self._worker_action({"action": "write_results"}, - timeout) - async def examine(self, rid, file, timeout=20.0): self.rid = rid self.filename = os.path.basename(file) @@ -294,8 +300,8 @@ class Worker: await self._create_process(logging.WARNING) r = dict() - def register(class_name, name, arginfo): - r[class_name] = {"name": name, "arginfo": arginfo} + def register(class_name, name, arginfo, scheduler_defaults): + r[class_name] = {"name": name, "arginfo": arginfo, "scheduler_defaults": scheduler_defaults} self.register_experiment = register await self._worker_action({"action": "examine", "file": file}, timeout) diff --git a/artiq/master/worker_db.py b/artiq/master/worker_db.py index 1a4ef61e6..172846145 100644 --- a/artiq/master/worker_db.py +++ b/artiq/master/worker_db.py @@ -1,90 +1,20 @@ +"""Client-side interfaces to the master databases (devices, datasets). + +These artefacts are intended for out-of-process use (i.e. from workers or the +standalone command line tools). 
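For context on ``check_pause``, the usual cooperation pattern between an experiment and the scheduler looks roughly like this; a sketch based on the standard ARTIQ idiom, with the device names and measurement step as placeholders::

    from artiq.experiment import *

    class PausableLoop(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("scheduler")

        @kernel
        def loop(self):
            while not self.scheduler.check_pause():
                self.core.break_realtime()
                # ... one measurement iteration ...

        def run(self):
            while True:
                self.loop()
                try:
                    self.scheduler.pause()
                except TerminationRequested:
                    break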
+""" + from operator import setitem -from collections import OrderedDict import importlib import logging -import os -import tempfile -import re -from artiq.protocols.sync_struct import Notifier -from artiq.protocols.pc_rpc import AutoTarget, Client, BestEffortClient +from sipyco.sync_struct import Notifier +from sipyco.pc_rpc import AutoTarget, Client, BestEffortClient logger = logging.getLogger(__name__) -class RIDCounter: - def __init__(self, cache_filename="last_rid.pyon", results_dir="results"): - self.cache_filename = cache_filename - self.results_dir = results_dir - self._next_rid = self._last_rid() + 1 - logger.debug("Next RID is %d", self._next_rid) - - def get(self): - rid = self._next_rid - self._next_rid += 1 - self._update_cache(rid) - return rid - - def _last_rid(self): - try: - rid = self._last_rid_from_cache() - except FileNotFoundError: - logger.debug("Last RID cache not found, scanning results") - rid = self._last_rid_from_results() - self._update_cache(rid) - return rid - else: - logger.debug("Using last RID from cache") - return rid - - def _update_cache(self, rid): - contents = str(rid) + "\n" - directory = os.path.abspath(os.path.dirname(self.cache_filename)) - with tempfile.NamedTemporaryFile("w", dir=directory, delete=False - ) as f: - f.write(contents) - tmpname = f.name - os.replace(tmpname, self.cache_filename) - - def _last_rid_from_cache(self): - with open(self.cache_filename, "r") as f: - return int(f.read()) - - def _last_rid_from_results(self): - r = -1 - try: - day_folders = os.listdir(self.results_dir) - except: - return r - day_folders = filter( - lambda x: re.fullmatch("\\d\\d\\d\\d-\\d\\d-\\d\\d", x), - day_folders) - for df in day_folders: - day_path = os.path.join(self.results_dir, df) - try: - hm_folders = os.listdir(day_path) - except: - continue - hm_folders = filter(lambda x: re.fullmatch("\\d\\d(-\\d\\d)?", x), - hm_folders) - for hmf in hm_folders: - hm_path = os.path.join(day_path, hmf) - try: - h5files = os.listdir(hm_path) - except: - continue - for x in h5files: - m = re.fullmatch( - "(\\d\\d\\d\\d\\d\\d\\d\\d\\d)-.*\\.h5", x) - if m is None: - continue - rid = int(m.group(1)) - if rid > r: - r = rid - return r - - class DummyDevice: pass @@ -129,44 +59,43 @@ class DeviceManager: def __init__(self, ddb, virtual_devices=dict()): self.ddb = ddb self.virtual_devices = virtual_devices - self.active_devices = OrderedDict() + self.active_devices = [] def get_device_db(self): """Returns the full contents of the device database.""" return self.ddb.get_device_db() def get_desc(self, name): - desc = self.ddb.get(name) - while isinstance(desc, str): - # alias - desc = self.ddb.get(desc) - return desc + return self.ddb.get(name, resolve_alias=True) def get(self, name): """Get the device driver or controller client corresponding to a device database entry.""" if name in self.virtual_devices: return self.virtual_devices[name] - if name in self.active_devices: - return self.active_devices[name] - else: - try: - desc = self.get_desc(name) - except Exception as e: - raise DeviceError("Failed to get description of device '{}'" - .format(name)) from e - try: - dev = _create_device(desc, self) - except Exception as e: - raise DeviceError("Failed to create device '{}'" - .format(name)) from e - self.active_devices[name] = dev - return dev + + try: + desc = self.get_desc(name) + except Exception as e: + raise DeviceError("Failed to get description of device '{}'" + .format(name)) from e + + for existing_desc, existing_dev in self.active_devices: + if desc == 
existing_desc: + return existing_dev + + try: + dev = _create_device(desc, self) + except Exception as e: + raise DeviceError("Failed to create device '{}'" + .format(name)) from e + self.active_devices.append((desc, dev)) + return dev def close_devices(self): """Closes all active devices, in the opposite order as they were requested.""" - for dev in reversed(list(self.active_devices.values())): + for _desc, dev in reversed(self.active_devices): try: if isinstance(dev, (Client, BestEffortClient)): dev.close_rpc() @@ -179,14 +108,14 @@ class DeviceManager: class DatasetManager: def __init__(self, ddb): - self.broadcast = Notifier(dict()) + self._broadcaster = Notifier(dict()) self.local = dict() self.archive = dict() self.ddb = ddb - self.broadcast.publish = ddb.update + self._broadcaster.publish = ddb.update - def set(self, key, value, broadcast=False, persist=False, save=True): + def set(self, key, value, broadcast=False, persist=False, archive=True): if key in self.archive: logger.warning("Modifying dataset '%s' which is in archive, " "archive will remain untouched", @@ -194,26 +123,29 @@ class DatasetManager: if persist: broadcast = True + if broadcast: - self.broadcast[key] = persist, value - elif key in self.broadcast.read: - del self.broadcast[key] - if save: + self._broadcaster[key] = persist, value + elif key in self._broadcaster.raw_view: + del self._broadcaster[key] + + if archive: self.local[key] = value elif key in self.local: del self.local[key] - def mutate(self, key, index, value): - target = None - if key in self.local: - target = self.local[key] - if key in self.broadcast.read: + def _get_mutation_target(self, key): + target = self.local.get(key, None) + if key in self._broadcaster.raw_view: if target is not None: - assert target is self.broadcast.read[key][1] - target = self.broadcast[key][1] + assert target is self._broadcaster.raw_view[key][1] + return self._broadcaster[key][1] if target is None: - raise KeyError("Cannot mutate non-existing dataset") + raise KeyError("Cannot mutate nonexistent dataset '{}'".format(key)) + return target + def mutate(self, key, index, value): + target = self._get_mutation_target(key) if isinstance(index, tuple): if isinstance(index[0], tuple): index = tuple(slice(*e) for e in index) @@ -221,22 +153,36 @@ class DatasetManager: index = slice(*index) setitem(target, index, value) + def append_to(self, key, value): + self._get_mutation_target(key).append(value) + def get(self, key, archive=False): if key in self.local: return self.local[key] - else: - data = self.ddb.get(key) - if archive: - if key in self.archive: - logger.warning("Dataset '%s' is already in archive, " - "overwriting", key, stack_info=True) - self.archive[key] = data - return data + + data = self.ddb.get(key) + if archive: + if key in self.archive: + logger.warning("Dataset '%s' is already in archive, " + "overwriting", key, stack_info=True) + self.archive[key] = data + return data def write_hdf5(self, f): datasets_group = f.create_group("datasets") for k, v in self.local.items(): - datasets_group[k] = v + _write(datasets_group, k, v) + archive_group = f.create_group("archive") for k, v in self.archive.items(): - archive_group[k] = v + _write(archive_group, k, v) + + +def _write(group, k, v): + # Add context to exception message when the user writes a dataset that is + # not representable in HDF5. 
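The experiment-facing wrappers ``mutate_dataset`` and ``append_to_dataset`` end up in the ``mutate``/``append_to`` methods above; a sketch of the accepted index forms (dataset names and values are made up)::

    import numpy as np
    from artiq.experiment import *

    class MutateDemo(EnvExperiment):
        def run(self):
            self.set_dataset("image", np.zeros((8, 8)), broadcast=True)
            # tuple index -> slice(*index): image[0:4] = 1.0
            self.mutate_dataset("image", (0, 4), 1.0)
            # tuple of tuples -> multi-dimensional slicing:
            # image[0:4, 2:6] = 2.0
            self.mutate_dataset("image", ((0, 4), (2, 6)), 2.0)

            # append_to_dataset needs a dataset supporting append()
            self.set_dataset("counts", [], broadcast=True)
            self.append_to_dataset("counts", 42)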
+ try: + group[k] = v + except TypeError as e: + raise TypeError("Error writing dataset '{}' of type '{}': {}".format( + k, type(v), e)) diff --git a/artiq/master/worker_impl.py b/artiq/master/worker_impl.py index 3d6153603..784e4297a 100644 --- a/artiq/master/worker_impl.py +++ b/artiq/master/worker_impl.py @@ -1,3 +1,11 @@ +"""Worker process implementation. + +This module contains the worker process main() function and the glue code +necessary to connect the global artefacts used from experiment code (scheduler, +device database, etc.) to their actual implementation in the parent master +process via IPC. +""" + import sys import time import os @@ -7,10 +15,12 @@ from collections import OrderedDict import h5py +from sipyco import pipe_ipc, pyon +from sipyco.packed_exceptions import raise_packed_exc +from sipyco.logging_tools import multiline_log_config + import artiq -from artiq.protocols import pipe_ipc, pyon -from artiq.protocols.packed_exceptions import raise_packed_exc -from artiq.tools import multiline_log_config, file_import +from artiq.tools import file_import from artiq.master.worker_db import DeviceManager, DatasetManager, DummyDevice from artiq.language.environment import (is_experiment, TraceArgumentManager, ProcessArgumentManager) @@ -152,23 +162,30 @@ class ExamineDatasetMgr: def examine(device_mgr, dataset_mgr, file): - module = file_import(file) - for class_name, exp_class in module.__dict__.items(): - if class_name[0] == "_": - continue - if is_experiment(exp_class): - if exp_class.__doc__ is None: - name = class_name - else: - name = exp_class.__doc__.splitlines()[0].strip() - if name[-1] == ".": - name = name[:-1] - argument_mgr = TraceArgumentManager() - exp_class((device_mgr, dataset_mgr, argument_mgr)) - arginfo = OrderedDict( - (k, (proc.describe(), group, tooltip)) - for k, (proc, group, tooltip) in argument_mgr.requested_args.items()) - register_experiment(class_name, name, arginfo) + previous_keys = set(sys.modules.keys()) + try: + module = file_import(file) + for class_name, exp_class in module.__dict__.items(): + if class_name[0] == "_": + continue + if is_experiment(exp_class): + if exp_class.__doc__ is None: + name = class_name + else: + name = exp_class.__doc__.strip().splitlines()[0].strip() + if name[-1] == ".": + name = name[:-1] + argument_mgr = TraceArgumentManager() + scheduler_defaults = {} + cls = exp_class((device_mgr, dataset_mgr, argument_mgr, scheduler_defaults)) + arginfo = OrderedDict( + (k, (proc.describe(), group, tooltip)) + for k, (proc, group, tooltip) in argument_mgr.requested_args.items()) + register_experiment(class_name, name, arginfo, scheduler_defaults) + finally: + new_keys = set(sys.modules.keys()) + for key in new_keys - previous_keys: + del sys.modules[key] def setup_diagnostics(experiment_file, repository_path): @@ -196,6 +213,11 @@ def setup_diagnostics(experiment_file, repository_path): artiq.coredevice.core._DiagnosticEngine.render_diagnostic = \ render_diagnostic + +def put_completed(): + put_object({"action": "completed"}) + + def put_exception_report(): _, exc, _ = sys.exc_info() # When we get CompileError, a more suitable diagnostic has already @@ -230,6 +252,16 @@ def main(): exp_inst = None repository_path = None + def write_results(): + filename = "{:09}-{}.h5".format(rid, exp.__name__) + with h5py.File(filename, "w") as f: + dataset_mgr.write_hdf5(f) + f["artiq_version"] = artiq_version + f["rid"] = rid + f["start_time"] = start_time + f["run_time"] = run_time + f["expid"] = pyon.encode(expid) + device_mgr = 
DeviceManager(ParentDeviceDB, virtual_devices={"scheduler": Scheduler(), "ccb": CCB()}) @@ -263,37 +295,30 @@ def main(): os.makedirs(dirname, exist_ok=True) os.chdir(dirname) argument_mgr = ProcessArgumentManager(expid["arguments"]) - exp_inst = exp((device_mgr, dataset_mgr, argument_mgr)) - put_object({"action": "completed"}) + exp_inst = exp((device_mgr, dataset_mgr, argument_mgr, {})) + put_completed() elif action == "prepare": exp_inst.prepare() - put_object({"action": "completed"}) + put_completed() elif action == "run": run_time = time.time() - exp_inst.run() - put_object({"action": "completed"}) + try: + exp_inst.run() + except: + # Only write results in run() on failure; on success wait + # for end of analyze stage. + write_results() + raise + put_completed() elif action == "analyze": try: exp_inst.analyze() - except: - # make analyze failure non-fatal, as we may still want to - # write results afterwards - put_exception_report() - else: - put_object({"action": "completed"}) - elif action == "write_results": - filename = "{:09}-{}.h5".format(rid, exp.__name__) - with h5py.File(filename, "w") as f: - dataset_mgr.write_hdf5(f) - f["artiq_version"] = artiq_version - f["rid"] = rid - f["start_time"] = start_time - f["run_time"] = run_time - f["expid"] = pyon.encode(expid) - put_object({"action": "completed"}) + put_completed() + finally: + write_results() elif action == "examine": examine(ExamineDeviceMgr, ExamineDatasetMgr, obj["file"]) - put_object({"action": "completed"}) + put_completed() elif action == "terminate": break except: diff --git a/artiq/monkey_patches.py b/artiq/monkey_patches.py deleted file mode 100644 index 715453931..000000000 --- a/artiq/monkey_patches.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys - - -__all__ = [] - - -if sys.version_info[:3] >= (3, 5, 2): - import asyncio - - # See https://github.com/m-labs/artiq/issues/506 - def _ipaddr_info(host, port, family, type, proto): - return None - asyncio.base_events._ipaddr_info = _ipaddr_info diff --git a/artiq/protocols/__init__.py b/artiq/protocols/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/artiq/protocols/asyncio_server.py b/artiq/protocols/asyncio_server.py deleted file mode 100644 index e7e300eed..000000000 --- a/artiq/protocols/asyncio_server.py +++ /dev/null @@ -1,50 +0,0 @@ -import asyncio -from copy import copy - - -class AsyncioServer: - """Generic TCP server based on asyncio. - - Users of this class must derive from it and define the - ``_handle_connection_cr`` method and coroutine. - """ - def __init__(self): - self._client_tasks = set() - - async def start(self, host, port): - """Starts the server. - - The user must call ``stop`` to free resources properly after this - method completes successfully. - - This method is a `coroutine`. - - :param host: Bind address of the server (see ``asyncio.start_server`` - from the Python standard library). - :param port: TCP port to bind to. 
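With ``write_results`` now living in the worker's ``main()``, the layout of a results file stays as before; a sketch of reading one back (the filename is hypothetical)::

    import h5py

    with h5py.File("000000123-MyExperiment.h5", "r") as f:
        rid = f["rid"][()]
        start_time = f["start_time"][()]
        datasets = {k: f["datasets"][k][()] for k in f["datasets"]}
        archived = {k: f["archive"][k][()] for k in f["archive"]}
        # "artiq_version" and "expid" are stored as well; "expid" is
        # PYON-encoded and can be decoded with sipyco.pyon.decode().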
- """ - self.server = await asyncio.start_server(self._handle_connection, - host, port, - limit=4*1024*1024) - - async def stop(self): - """Stops the server.""" - wait_for = copy(self._client_tasks) - for task in self._client_tasks: - task.cancel() - for task in wait_for: - try: - await asyncio.wait_for(task, None) - except asyncio.CancelledError: - pass - self.server.close() - await self.server.wait_closed() - del self.server - - def _client_done(self, task): - self._client_tasks.remove(task) - - def _handle_connection(self, reader, writer): - task = asyncio.ensure_future(self._handle_connection_cr(reader, writer)) - self._client_tasks.add(task) - task.add_done_callback(self._client_done) diff --git a/artiq/protocols/broadcast.py b/artiq/protocols/broadcast.py deleted file mode 100644 index 354a1b560..000000000 --- a/artiq/protocols/broadcast.py +++ /dev/null @@ -1,109 +0,0 @@ -import asyncio - -from artiq.monkey_patches import * -from artiq.protocols import pyon -from artiq.protocols.asyncio_server import AsyncioServer - - -_init_string = b"ARTIQ broadcast\n" - - -class Receiver: - def __init__(self, name, notify_cb, disconnect_cb=None): - self.name = name - if not isinstance(notify_cb, list): - notify_cb = [notify_cb] - self.notify_cbs = notify_cb - self.disconnect_cb = disconnect_cb - - async def connect(self, host, port): - self.reader, self.writer = \ - await asyncio.open_connection(host, port, limit=100*1024*1024) - try: - self.writer.write(_init_string) - self.writer.write((self.name + "\n").encode()) - self.receive_task = asyncio.ensure_future(self._receive_cr()) - except: - self.writer.close() - del self.reader - del self.writer - raise - - async def close(self): - self.disconnect_cb = None - try: - self.receive_task.cancel() - try: - await asyncio.wait_for(self.receive_task, None) - except asyncio.CancelledError: - pass - finally: - self.writer.close() - del self.reader - del self.writer - - async def _receive_cr(self): - try: - target = None - while True: - line = await self.reader.readline() - if not line: - return - obj = pyon.decode(line.decode()) - - for notify_cb in self.notify_cbs: - notify_cb(obj) - finally: - if self.disconnect_cb is not None: - self.disconnect_cb() - - -class Broadcaster(AsyncioServer): - def __init__(self, queue_limit=1024): - AsyncioServer.__init__(self) - self._queue_limit = queue_limit - self._recipients = dict() - - async def _handle_connection_cr(self, reader, writer): - try: - line = await reader.readline() - if line != _init_string: - return - - line = await reader.readline() - if not line: - return - name = line.decode()[:-1] - - queue = asyncio.Queue(self._queue_limit) - if name in self._recipients: - self._recipients[name].add(queue) - else: - self._recipients[name] = {queue} - try: - while True: - line = await queue.get() - writer.write(line) - # raise exception on connection error - await writer.drain() - finally: - self._recipients[name].remove(queue) - if not self._recipients[name]: - del self._recipients[name] - except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError): - # receivers disconnecting are a normal occurence - pass - finally: - writer.close() - - def broadcast(self, name, obj): - if name in self._recipients: - line = pyon.encode(obj) + "\n" - line = line.encode() - for recipient in self._recipients[name]: - try: - recipient.put_nowait(line) - except asyncio.QueueFull: - # do not log: log messages may be sent back to us - # as broadcasts, and cause infinite recursion. 
- pass diff --git a/artiq/protocols/fire_and_forget.py b/artiq/protocols/fire_and_forget.py deleted file mode 100644 index 61b2a4aa2..000000000 --- a/artiq/protocols/fire_and_forget.py +++ /dev/null @@ -1,51 +0,0 @@ -import threading -import logging -import inspect - - -logger = logging.getLogger(__name__) - - -class FFProxy: - """Proxies a target object and runs its methods in the background. - - All method calls to this object are forwarded to the target and executed - in a background thread. Method calls return immediately. Exceptions from - the target method are turned into warnings. At most one method from the - target object may be executed in the background; if a new call is - submitted while the previous one is still executing, a warning is printed - and the new call is dropped. - - This feature is typically used to wrap slow and non-critical RPCs in - experiments. - """ - def __init__(self, target): - self.target = target - - valid_methods = inspect.getmembers(target, inspect.ismethod) - self._valid_methods = {m[0] for m in valid_methods} - self._thread = None - - def ff_join(self): - """Waits until any background method finishes its execution.""" - if self._thread is not None: - self._thread.join() - - def __getattr__(self, k): - if k not in self._valid_methods: - raise AttributeError - def run_in_thread(*args, **kwargs): - if self._thread is not None and self._thread.is_alive(): - logger.warning("skipping fire-and-forget call to %r.%s as " - "previous call did not complete", - self.target, k) - return - def thread_body(): - try: - getattr(self.target, k)(*args, **kwargs) - except: - logger.warning("fire-and-forget call to %r.%s raised an " - "exception:", self.target, k, exc_info=True) - self._thread = threading.Thread(target=thread_body) - self._thread.start() - return run_in_thread diff --git a/artiq/protocols/logging.py b/artiq/protocols/logging.py deleted file mode 100644 index 5c282549a..000000000 --- a/artiq/protocols/logging.py +++ /dev/null @@ -1,185 +0,0 @@ -import asyncio -import logging -import re - -from artiq.monkey_patches import * -from artiq.protocols.asyncio_server import AsyncioServer -from artiq.tools import TaskObject, MultilineFormatter - - -logging.TRACE = 5 -logging.addLevelName(logging.TRACE, 'TRACE') - - -logger = logging.getLogger(__name__) -_fwd_logger = logging.getLogger("fwd") - - -def log_with_name(name, *args, **kwargs): - _fwd_logger.name = name - _fwd_logger.log(*args, **kwargs) - - -_name_to_level = { - "CRITICAL": logging.CRITICAL, - "ERROR": logging.ERROR, - "WARN": logging.WARNING, - "WARNING": logging.WARNING, - "INFO": logging.INFO, - "DEBUG": logging.DEBUG, - "TRACE": logging.TRACE, -} - - -def parse_log_message(msg): - lr = "|".join(_name_to_level.keys()) - m = re.fullmatch('('+lr+')(<\d+>)?:([^:]*):(.*)', msg) - if m is None: - return 0, logging.INFO, "print", msg - level = _name_to_level[m.group(1)] - if m.group(2): - multiline = int(m.group(2)[1:-1]) - 1 - else: - multiline = 0 - name = m.group(3) - message = m.group(4) - return multiline, level, name, message - - -class LogParser: - def __init__(self, source_cb): - self.source_cb = source_cb - self.multiline_count = 0 - self.multiline_level = None - self.multiline_name = None - self.multiline_message = None - - def line_input(self, msg): - if self.multiline_count: - self.multiline_message += "\n" + msg - self.multiline_count -= 1 - if not self.multiline_count: - log_with_name( - self.multiline_name, - self.multiline_level, - self.multiline_message, - extra={"source": 
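``artiq.protocols.fire_and_forget`` is removed here along with the rest of the protocols package (relocated to sipyco); a usage sketch of the proxy, assuming the same interface is available under ``sipyco.fire_and_forget``::

    import time
    from sipyco.fire_and_forget import FFProxy   # assumed new import path

    class Annunciator:
        def beep(self):
            time.sleep(2)   # stands in for a slow, non-critical call

    ff = FFProxy(Annunciator())
    ff.beep()        # returns immediately; runs in a background thread
    ff.ff_join()     # wait for the background call to finish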
self.source_cb()}) - self.multiline_level = None - self.multiline_name = None - self.multiline_message = None - else: - multiline, level, name, message = parse_log_message(msg) - if multiline: - self.multiline_count = multiline - self.multiline_level = level - self.multiline_name = name - self.multiline_message = message - else: - log_with_name(name, level, message, - extra={"source": self.source_cb()}) - - async def stream_task(self, stream): - while True: - try: - entry = (await stream.readline()) - if not entry: - break - self.line_input(entry.decode().rstrip("\r\n")) - except: - logger.debug("exception in log forwarding", exc_info=True) - break - logger.debug("stopped log forwarding of stream %s of %s", - stream, self.source_cb()) - - -_init_string = b"ARTIQ logging\n" - - -class Server(AsyncioServer): - """Remote logging TCP server. - - Log entries are in the format: - source:levelno:name:message - continuation... - ...continuation - """ - async def _handle_connection_cr(self, reader, writer): - try: - line = await reader.readline() - if line != _init_string: - return - - source = None - parser = LogParser(lambda: source) - - while True: - line = await reader.readline() - if not line: - break - try: - line = line.decode() - except: - return - line = line[:-1] - if parser.multiline_count: - parser.line_input(line) - else: - linesplit = line.split(":", maxsplit=1) - if len(linesplit) != 2: - logger.warning("received improperly formatted message, " - "dropping connection") - return - source, remainder = linesplit - parser.line_input(remainder) - except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError): - # May happens on Windows when client disconnects - pass - finally: - writer.close() - - -class SourceFilter: - def __init__(self, local_level, local_source): - self.local_level = local_level - self.local_source = local_source - - def filter(self, record): - if not hasattr(record, "source"): - record.source = self.local_source - if record.source == self.local_source: - return record.levelno >= self.local_level - else: - # log messages that are forwarded from a source have already - # been filtered, and may have a level below the local level. 
- return True - - -class LogForwarder(logging.Handler, TaskObject): - def __init__(self, host, port, reconnect_timer=5.0, queue_size=1000, - **kwargs): - logging.Handler.__init__(self, **kwargs) - self.host = host - self.port = port - self.setFormatter(MultilineFormatter()) - self._queue = asyncio.Queue(queue_size) - self.reconnect_timer = reconnect_timer - - def emit(self, record): - self._queue.put_nowait(record.source + ":" + self.format(record)) - - async def _do(self): - while True: - try: - reader, writer = await asyncio.open_connection(self.host, - self.port) - writer.write(_init_string) - while True: - message = await self._queue.get() + "\n" - writer.write(message.encode()) - await writer.drain() - except asyncio.CancelledError: - return - except: - await asyncio.sleep(self.reconnect_timer) - finally: - writer.close() diff --git a/artiq/protocols/packed_exceptions.py b/artiq/protocols/packed_exceptions.py deleted file mode 100644 index 2c453bf80..000000000 --- a/artiq/protocols/packed_exceptions.py +++ /dev/null @@ -1,42 +0,0 @@ -import inspect -import builtins -import traceback -import sys - - -__all__ = ["GenericRemoteException", "current_exc_packed", "raise_packed_exc"] - - -class GenericRemoteException(Exception): - pass - - -builtin_exceptions = {v: k for k, v in builtins.__dict__.items() - if inspect.isclass(v) and issubclass(v, BaseException)} - - -def current_exc_packed(): - exc_class, exc, exc_tb = sys.exc_info() - if exc_class in builtin_exceptions: - return { - "class": builtin_exceptions[exc_class], - "message": str(exc), - "traceback": traceback.format_tb(exc_tb) - } - else: - message = traceback.format_exception_only(exc_class, exc)[0].rstrip() - return { - "class": "GenericRemoteException", - "message": message, - "traceback": traceback.format_tb(exc_tb) - } - - -def raise_packed_exc(pack): - if pack["class"] == "GenericRemoteException": - cls = GenericRemoteException - else: - cls = getattr(builtins, pack["class"]) - exc = cls(pack["message"]) - exc.parent_traceback = pack["traceback"] - raise exc diff --git a/artiq/protocols/pc_rpc.py b/artiq/protocols/pc_rpc.py deleted file mode 100644 index d99ca06fb..000000000 --- a/artiq/protocols/pc_rpc.py +++ /dev/null @@ -1,604 +0,0 @@ -""" -This module provides a remote procedure call (RPC) mechanism over sockets -between conventional computers (PCs) running Python. It strives to be -transparent and uses ``artiq.protocols.pyon`` internally so that e.g. Numpy -arrays can be easily used. - -Note that the server operates on copies of objects provided by the client, -and modifications to mutable types are not written back. For example, if the -client passes a list as a parameter of an RPC method, and that method -``append()s`` an element to the list, the element is not appended to the -client's list. -""" - -import socket -import asyncio -import threading -import time -import logging -import inspect -from operator import itemgetter - -from artiq.monkey_patches import * -from artiq.protocols import pyon -from artiq.protocols.asyncio_server import AsyncioServer as _AsyncioServer -from artiq.protocols.packed_exceptions import * - - -logger = logging.getLogger(__name__) - - -class AutoTarget: - """Use this as target value in clients for them to automatically connect - to the target exposed by the server. 
Servers must have only one target.""" - pass - - -class IncompatibleServer(Exception): - """Raised by the client when attempting to connect to a server that does - not have the expected target.""" - pass - - -_init_string = b"ARTIQ pc_rpc\n" - - -def _validate_target_name(target_name, target_names): - if target_name is AutoTarget: - if len(target_names) > 1: - raise ValueError("Server has multiple targets: " + - " ".join(sorted(target_names))) - else: - target_name = target_names[0] - elif target_name not in target_names: - raise IncompatibleServer( - "valid target name(s): " + " ".join(sorted(target_names))) - return target_name - - -class Client: - """This class proxies the methods available on the server so that they - can be used as if they were local methods. - - For example, if the server provides method ``foo``, and ``c`` is a local - ``Client`` object, then the method can be called as: :: - - result = c.foo(param1, param2) - - The parameters and the result are automatically transferred with the - server. - - Only methods are supported. Attributes must be accessed by providing and - using "get" and/or "set" methods on the server side. - - At object initialization, the connection to the remote server is - automatically attempted. The user must call ``close_rpc`` to - free resources properly after initialization completes successfully. - - :param host: Identifier of the server. The string can represent a - hostname or a IPv4 or IPv6 address (see - ``socket.create_connection`` in the Python standard library). - :param port: TCP port to use. - :param target_name: Target name to select. ``IncompatibleServer`` is - raised if the target does not exist. - Use ``AutoTarget`` for automatic selection if the server has only one - target. - Use ``None`` to skip selecting a target. The list of targets can then - be retrieved using ``get_rpc_id`` and then one can be selected later - using ``select_rpc_target``. - :param timeout: Socket operation timeout. Use ``None`` for blocking - (default), ``0`` for non-blocking, and a finite value to raise - ``socket.timeout`` if an operation does not complete within the - given time. See also ``socket.create_connection()`` and - ``socket.settimeout()`` in the Python standard library. A timeout - in the middle of a RPC can break subsequent RPCs (from the same - client). - """ - def __init__(self, host, port, target_name=AutoTarget, timeout=None): - self.__socket = socket.create_connection((host, port), timeout) - - try: - self.__socket.sendall(_init_string) - - server_identification = self.__recv() - self.__target_names = server_identification["targets"] - self.__description = server_identification["description"] - self.__selected_target = None - self.__valid_methods = set() - if target_name is not None: - self.select_rpc_target(target_name) - except: - self.__socket.close() - raise - - def select_rpc_target(self, target_name): - """Selects a RPC target by name. 
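
# A minimal usage sketch for the blocking Client documented above; the host,
# port and foo() method are illustrative, AutoTarget selects the server's only
# target.
from artiq.protocols.pc_rpc import AutoTarget, Client

c = Client("lab-gateway", 3251, AutoTarget)
try:
    result = c.foo(1, 2)     # proxied over the network, as in the docstring above
finally:
    c.close_rpc()            # always free the socket when done
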
This function should be called - exactly once if the object was created with ``target_name=None``.""" - target_name = _validate_target_name(target_name, self.__target_names) - self.__socket.sendall((target_name + "\n").encode()) - self.__selected_target = target_name - self.__valid_methods = self.__recv() - - def get_selected_target(self): - """Returns the selected target, or ``None`` if no target has been - selected yet.""" - return self.__selected_target - - def get_rpc_id(self): - """Returns a tuple (target_names, description) containing the - identification information of the server.""" - return (self.__target_names, self.__description) - - def get_local_host(self): - """Returns the address of the local end of the connection.""" - return self.__socket.getsockname()[0] - - def close_rpc(self): - """Closes the connection to the RPC server. - - No further method calls should be done after this method is called. - """ - self.__socket.close() - - def __send(self, obj): - line = pyon.encode(obj) + "\n" - self.__socket.sendall(line.encode()) - - def __recv(self): - buf = self.__socket.recv(4096).decode() - while "\n" not in buf: - more = self.__socket.recv(4096) - if not more: - break - buf += more.decode() - return pyon.decode(buf) - - def __do_action(self, action): - self.__send(action) - - obj = self.__recv() - if obj["status"] == "ok": - return obj["ret"] - elif obj["status"] == "failed": - raise_packed_exc(obj["exception"]) - else: - raise ValueError - - def __do_rpc(self, name, args, kwargs): - obj = {"action": "call", "name": name, "args": args, "kwargs": kwargs} - return self.__do_action(obj) - - def get_rpc_method_list(self): - obj = {"action": "get_rpc_method_list"} - return self.__do_action(obj) - - def __getattr__(self, name): - if name not in self.__valid_methods: - raise AttributeError - def proxy(*args, **kwargs): - return self.__do_rpc(name, args, kwargs) - return proxy - - -class AsyncioClient: - """This class is similar to :class:`artiq.protocols.pc_rpc.Client`, but - uses ``asyncio`` instead of blocking calls. - - All RPC methods are coroutines. - - Concurrent access from different asyncio tasks is supported; all calls - use a single lock. - """ - def __init__(self): - self.__lock = asyncio.Lock() - self.__reader = None - self.__writer = None - self.__target_names = None - self.__description = None - - async def connect_rpc(self, host, port, target_name): - """Connects to the server. This cannot be done in __init__ because - this method is a coroutine. See ``Client`` for a description of the - parameters.""" - self.__reader, self.__writer = \ - await asyncio.open_connection(host, port, limit=100*1024*1024) - try: - self.__writer.write(_init_string) - server_identification = await self.__recv() - self.__target_names = server_identification["targets"] - self.__description = server_identification["description"] - self.__selected_target = None - self.__valid_methods = set() - if target_name is not None: - await self.select_rpc_target(target_name) - except: - self.close_rpc() - raise - - async def select_rpc_target(self, target_name): - """Selects a RPC target by name. This function should be called - exactly once if the connection was created with ``target_name=None``. 
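
# A minimal sketch of the asyncio variant described above: connection happens
# in connect_rpc() rather than __init__, and every proxied method is a
# coroutine. Host, port, target name and foo() are illustrative.
import asyncio
from artiq.protocols.pc_rpc import AsyncioClient

async def main():
    c = AsyncioClient()
    await c.connect_rpc("lab-gateway", 3251, "example_target")
    try:
        result = await c.foo(1, 2)   # RPC methods must be awaited
    finally:
        c.close_rpc()

asyncio.get_event_loop().run_until_complete(main())
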
- """ - target_name = _validate_target_name(target_name, self.__target_names) - self.__writer.write((target_name + "\n").encode()) - self.__selected_target = target_name - self.__valid_methods = await self.__recv() - - def get_selected_target(self): - """Returns the selected target, or ``None`` if no target has been - selected yet.""" - return self.__selected_target - - def get_local_host(self): - """Returns the address of the local end of the connection.""" - return self.__writer.get_extra_info("socket").getsockname()[0] - - def get_rpc_id(self): - """Returns a tuple (target_names, description) containing the - identification information of the server.""" - return (self.__target_names, self.__description) - - def close_rpc(self): - """Closes the connection to the RPC server. - - No further method calls should be done after this method is called. - """ - self.__writer.close() - self.__reader = None - self.__writer = None - self.__target_names = None - self.__description = None - - def __send(self, obj): - line = pyon.encode(obj) + "\n" - self.__writer.write(line.encode()) - - async def __recv(self): - line = await self.__reader.readline() - return pyon.decode(line.decode()) - - async def __do_rpc(self, name, args, kwargs): - await self.__lock.acquire() - try: - obj = {"action": "call", "name": name, - "args": args, "kwargs": kwargs} - self.__send(obj) - - obj = await self.__recv() - if obj["status"] == "ok": - return obj["ret"] - elif obj["status"] == "failed": - raise_packed_exc(obj["exception"]) - else: - raise ValueError - finally: - self.__lock.release() - - def __getattr__(self, name): - if name not in self.__valid_methods: - raise AttributeError - async def proxy(*args, **kwargs): - res = await self.__do_rpc(name, args, kwargs) - return res - return proxy - - -class BestEffortClient: - """This class is similar to :class:`artiq.protocols.pc_rpc.Client`, but - network errors are suppressed and connections are retried in the - background. - - RPC calls that failed because of network errors return ``None``. Other RPC - calls are blocking and return the correct value. - - :param firstcon_timeout: Timeout to use during the first (blocking) - connection attempt at object initialization. - :param retry: Amount of time to wait between retries when reconnecting - in the background. 
- """ - def __init__(self, host, port, target_name, - firstcon_timeout=1.0, retry=5.0): - self.__host = host - self.__port = port - self.__target_name = target_name - self.__retry = retry - - self.__conretry_terminate = False - self.__socket = None - self.__valid_methods = set() - try: - self.__coninit(firstcon_timeout) - except: - logger.warning("first connection attempt to %s:%d[%s] failed, " - "retrying in the background", - self.__host, self.__port, self.__target_name, - exc_info=True) - self.__start_conretry() - else: - self.__conretry_thread = None - - def __coninit(self, timeout): - if timeout is None: - self.__socket = socket.create_connection( - (self.__host, self.__port)) - else: - self.__socket = socket.create_connection( - (self.__host, self.__port), timeout) - self.__socket.settimeout(None) - self.__socket.sendall(_init_string) - server_identification = self.__recv() - target_name = _validate_target_name(self.__target_name, - server_identification["targets"]) - self.__socket.sendall((target_name + "\n").encode()) - self.__valid_methods = self.__recv() - - def __start_conretry(self): - self.__conretry_thread = threading.Thread(target=self.__conretry) - self.__conretry_thread.start() - - def __conretry(self): - while True: - try: - self.__coninit(None) - except: - if self.__conretry_terminate: - break - time.sleep(self.__retry) - else: - break - if not self.__conretry_terminate: - logger.warning("connection to %s:%d[%s] established in " - "the background", - self.__host, self.__port, self.__target_name) - if self.__conretry_terminate and self.__socket is not None: - self.__socket.close() - # must be after __socket.close() to avoid race condition - self.__conretry_thread = None - - def close_rpc(self): - """Closes the connection to the RPC server. - - No further method calls should be done after this method is called. - """ - if self.__conretry_thread is None: - if self.__socket is not None: - self.__socket.close() - else: - # Let the thread complete I/O and then do the socket closing. - # Python fails to provide a way to cancel threads... 
- self.__conretry_terminate = True - - def __send(self, obj): - line = pyon.encode(obj) + "\n" - self.__socket.sendall(line.encode()) - - def __recv(self): - buf = self.__socket.recv(4096).decode() - while "\n" not in buf: - more = self.__socket.recv(4096) - if not more: - break - buf += more.decode() - return pyon.decode(buf) - - def __do_rpc(self, name, args, kwargs): - if self.__conretry_thread is not None: - return None - - obj = {"action": "call", "name": name, "args": args, "kwargs": kwargs} - try: - self.__send(obj) - obj = self.__recv() - except: - logger.warning("connection failed while attempting " - "RPC to %s:%d[%s], re-establishing connection " - "in the background", - self.__host, self.__port, self.__target_name) - self.__start_conretry() - return None - else: - if obj["status"] == "ok": - return obj["ret"] - elif obj["status"] == "failed": - raise_packed_exc(obj["exception"]) - else: - raise ValueError - - def __getattr__(self, name): - if name not in self.__valid_methods: - raise AttributeError - def proxy(*args, **kwargs): - return self.__do_rpc(name, args, kwargs) - return proxy - - def get_selected_target(self): - raise NotImplementedError - - def get_local_host(self): - raise NotImplementedError - - -def _format_arguments(arguments): - fmtargs = [] - for k, v in sorted(arguments.items(), key=itemgetter(0)): - fmtargs.append(k + "=" + repr(v)) - if fmtargs: - return ", ".join(fmtargs) - else: - return "" - - -class _PrettyPrintCall: - def __init__(self, obj): - self.obj = obj - - def __str__(self): - r = self.obj["name"] + "(" - args = ", ".join([repr(a) for a in self.obj["args"]]) - r += args - kwargs = _format_arguments(self.obj["kwargs"]) - if args and kwargs: - r += ", " - r += kwargs - r += ")" - return r - - -class Server(_AsyncioServer): - """This class creates a TCP server that handles requests coming from - ``Client`` objects. - - The server is designed using ``asyncio`` so that it can easily support - multiple connections without the locking issues that arise in - multi-threaded applications. Multiple connection support is useful even in - simple cases: it allows new connections to be be accepted even when the - previous client failed to properly shut down its connection. - - If a target method is a coroutine, it is awaited and its return value - is sent to the RPC client. If ``allow_parallel`` is true, multiple - target coroutines may be executed in parallel (one per RPC client), - otherwise a lock ensures that the calls from several clients are executed - sequentially. - - :param targets: A dictionary of objects providing the RPC methods to be - exposed to the client. Keys are names identifying each object. - Clients select one of these objects using its name upon connection. - :param description: An optional human-readable string giving more - information about the server. - :param builtin_terminate: If set, the server provides a built-in - ``terminate`` method that unblocks any tasks waiting on - ``wait_terminate``. This is useful to handle server termination - requests from clients. - :param allow_parallel: Allow concurrent asyncio calls to the target's - methods. 
- """ - def __init__(self, targets, description=None, builtin_terminate=False, - allow_parallel=False): - _AsyncioServer.__init__(self) - self.targets = targets - self.description = description - self.builtin_terminate = builtin_terminate - if builtin_terminate: - self._terminate_request = asyncio.Event() - if allow_parallel: - self._noparallel = None - else: - self._noparallel = asyncio.Lock() - - async def _process_action(self, target, obj): - if self._noparallel is not None: - await self._noparallel.acquire() - try: - if obj["action"] == "get_rpc_method_list": - members = inspect.getmembers(target, inspect.ismethod) - doc = { - "docstring": inspect.getdoc(target), - "methods": {} - } - for name, method in members: - if name.startswith("_"): - continue - method = getattr(target, name) - argspec = inspect.getfullargspec(method) - doc["methods"][name] = (dict(argspec._asdict()), - inspect.getdoc(method)) - if self.builtin_terminate: - doc["methods"]["terminate"] = ( - { - "args": ["self"], - "defaults": None, - "varargs": None, - "varkw": None, - "kwonlyargs": [], - "kwonlydefaults": [], - }, - "Terminate the server.") - return {"status": "ok", "ret": doc} - elif obj["action"] == "call": - logger.debug("calling %s", _PrettyPrintCall(obj)) - if (self.builtin_terminate and obj["name"] == - "terminate"): - self._terminate_request.set() - return {"status": "ok", "ret": None} - else: - method = getattr(target, obj["name"]) - ret = method(*obj["args"], **obj["kwargs"]) - if inspect.iscoroutine(ret): - ret = await ret - return {"status": "ok", "ret": ret} - else: - raise ValueError("Unknown action: {}" - .format(obj["action"])) - except asyncio.CancelledError: - raise - except: - return { - "status": "failed", - "exception": current_exc_packed() - } - finally: - if self._noparallel is not None: - self._noparallel.release() - - async def _handle_connection_cr(self, reader, writer): - try: - line = await reader.readline() - if line != _init_string: - return - - obj = { - "targets": sorted(self.targets.keys()), - "description": self.description - } - line = pyon.encode(obj) + "\n" - writer.write(line.encode()) - line = await reader.readline() - if not line: - return - target_name = line.decode()[:-1] - try: - target = self.targets[target_name] - except KeyError: - return - - if callable(target): - target = target() - - valid_methods = inspect.getmembers(target, inspect.ismethod) - valid_methods = {m[0] for m in valid_methods} - if self.builtin_terminate: - valid_methods.add("terminate") - writer.write((pyon.encode(valid_methods) + "\n").encode()) - - while True: - line = await reader.readline() - if not line: - break - reply = await self._process_action(target, pyon.decode(line.decode())) - writer.write((pyon.encode(reply) + "\n").encode()) - except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError): - # May happens on Windows when client disconnects - pass - finally: - writer.close() - - async def wait_terminate(self): - await self._terminate_request.wait() - - -def simple_server_loop(targets, host, port, description=None): - """Runs a server until an exception is raised (e.g. the user hits Ctrl-C) - or termination is requested by a client. - - See ``Server`` for a description of the parameters. 
- """ - loop = asyncio.get_event_loop() - try: - server = Server(targets, description, True) - loop.run_until_complete(server.start(host, port)) - try: - loop.run_until_complete(server.wait_terminate()) - finally: - loop.run_until_complete(server.stop()) - finally: - loop.close() diff --git a/artiq/protocols/pipe_ipc.py b/artiq/protocols/pipe_ipc.py deleted file mode 100644 index e81176c4e..000000000 --- a/artiq/protocols/pipe_ipc.py +++ /dev/null @@ -1,218 +0,0 @@ -import os -import asyncio -from asyncio.streams import FlowControlMixin - - -__all__ = ["AsyncioParentComm", "AsyncioChildComm", "ChildComm"] - - -class _BaseIO: - def write(self, data): - self.writer.write(data) - - async def drain(self): - await self.writer.drain() - - async def readline(self): - return await self.reader.readline() - - async def read(self, n): - return await self.reader.read(n) - - -if os.name != "nt": - async def _fds_to_asyncio(rfd, wfd, loop): - reader = asyncio.StreamReader(loop=loop, limit=100*1024*1024) - reader_protocol = asyncio.StreamReaderProtocol(reader, loop=loop) - rf = open(rfd, "rb", 0) - rt, _ = await loop.connect_read_pipe(lambda: reader_protocol, rf) - - wf = open(wfd, "wb", 0) - wt, _ = await loop.connect_write_pipe(FlowControlMixin, wf) - writer = asyncio.StreamWriter(wt, reader_protocol, None, loop) - - return rt, reader, writer - - - class AsyncioParentComm(_BaseIO): - def __init__(self): - self.c_rfd, self.p_wfd = os.pipe() - self.p_rfd, self.c_wfd = os.pipe() - - def get_address(self): - return "{},{}".format(self.c_rfd, self.c_wfd) - - async def _autoclose(self): - await self.process.wait() - self.reader_transport.close() - self.writer.close() - - async def create_subprocess(self, *args, **kwargs): - loop = asyncio.get_event_loop() - self.process = await asyncio.create_subprocess_exec( - *args, pass_fds={self.c_rfd, self.c_wfd}, **kwargs) - os.close(self.c_rfd) - os.close(self.c_wfd) - - self.reader_transport, self.reader, self.writer = \ - await _fds_to_asyncio(self.p_rfd, self.p_wfd, loop) - asyncio.ensure_future(self._autoclose()) - - - class AsyncioChildComm(_BaseIO): - def __init__(self, address): - self.address = address - - async def connect(self): - rfd, wfd = self.address.split(",", maxsplit=1) - self.reader_transport, self.reader, self.writer = \ - await _fds_to_asyncio(int(rfd), int(wfd), - asyncio.get_event_loop()) - - def close(self): - self.reader_transport.close() - self.writer.close() - - - class ChildComm: - def __init__(self, address): - rfd, wfd = address.split(",", maxsplit=1) - self.rf = open(int(rfd), "rb", 0) - self.wf = open(int(wfd), "wb", 0) - - def read(self, n): - return self.rf.read(n) - - def readline(self): - return self.rf.readline() - - def write(self, data): - return self.wf.write(data) - - def close(self): - self.rf.close() - self.wf.close() - - -else: # windows - import itertools - - - _pipe_count = itertools.count() - - - class AsyncioParentComm: - """Requires ProactorEventLoop""" - def __init__(self): - # We cannot use anonymous pipes on Windows, because we do not know - # in advance if the child process wants a handle open in overlapped - # mode or not. 
- self.address = "\\\\.\\pipe\\artiq-{}-{}".format(os.getpid(), - next(_pipe_count)) - self.ready = asyncio.Event() - self.write_buffer = b"" - - def get_address(self): - return self.address - - async def _autoclose(self): - await self.process.wait() - self.server[0].close() - del self.server - if self.ready.is_set(): - self.writer.close() - del self.reader - del self.writer - - async def create_subprocess(self, *args, **kwargs): - loop = asyncio.get_event_loop() - - def factory(): - reader = asyncio.StreamReader(loop=loop, limit=100*1024*1024) - protocol = asyncio.StreamReaderProtocol(reader, - self._child_connected, - loop=loop) - return protocol - self.server = await loop.start_serving_pipe( - factory, self.address) - - self.process = await asyncio.create_subprocess_exec( - *args, **kwargs) - asyncio.ensure_future(self._autoclose()) - - def _child_connected(self, reader, writer): - # HACK: We should shut down the pipe server here. - # However, self.server[0].close() is racy, and will cause an - # invalid handle error if loop.start_serving_pipe has not finished - # its work in the background. - # The bug manifests itself here frequently as the event loop is - # reopening the server as soon as a new client connects. - # There is still a race condition in the AsyncioParentComm - # creation/destruction, but it is unlikely to cause problems - # in most practical cases. - if self.ready.is_set(): - # A child already connected before. We should have shut down - # the server, but asyncio won't let us do that. - # Drop connections immediately instead. - writer.close() - return - self.reader = reader - self.writer = writer - if self.write_buffer: - self.writer.write(self.write_buffer) - self.write_buffer = b"" - self.ready.set() - - def write(self, data): - if self.ready.is_set(): - self.writer.write(data) - else: - self.write_buffer += data - - async def drain(self): - await self.ready.wait() - await self.writer.drain() - - async def readline(self): - await self.ready.wait() - return await self.reader.readline() - - async def read(self, n): - await self.ready.wait() - return await self.reader.read(n) - - - class AsyncioChildComm(_BaseIO): - """Requires ProactorEventLoop""" - def __init__(self, address): - self.address = address - - async def connect(self): - loop = asyncio.get_event_loop() - self.reader = asyncio.StreamReader(loop=loop, limit=100*1024*1024) - reader_protocol = asyncio.StreamReaderProtocol( - self.reader, loop=loop) - transport, _ = await loop.create_pipe_connection( - lambda: reader_protocol, self.address) - self.writer = asyncio.StreamWriter(transport, reader_protocol, - self.reader, loop) - - def close(self): - self.writer.close() - - - class ChildComm: - def __init__(self, address): - self.f = open(address, "a+b", 0) - - def read(self, n): - return self.f.read(n) - - def readline(self): - return self.f.readline() - - def write(self, data): - return self.f.write(data) - - def close(self): - self.f.close() diff --git a/artiq/protocols/pyon.py b/artiq/protocols/pyon.py deleted file mode 100644 index 092e46b52..000000000 --- a/artiq/protocols/pyon.py +++ /dev/null @@ -1,219 +0,0 @@ -""" -This module provide serialization and deserialization functions for Python -objects. Its main features are: - -* Human-readable format compatible with the Python syntax. -* Each object is serialized on a single line, with only ASCII characters. -* Supports all basic Python data structures: None, booleans, integers, - floats, complex numbers, strings, tuples, lists, dictionaries. 
-* Those data types are accurately reconstructed (unlike JSON where e.g. tuples - become lists, and dictionary keys are turned into strings). -* Supports Numpy arrays. - -The main rationale for this new custom serializer (instead of using JSON) is -that JSON does not support Numpy and more generally cannot be extended with -other data types while keeping a concise syntax. Here we can use the Python -function call syntax to express special data types. -""" - - -import base64 -from fractions import Fraction -from collections import OrderedDict -import os -import tempfile - -import numpy - - -_encode_map = { - type(None): "none", - bool: "bool", - int: "number", - float: "number", - complex: "number", - str: "str", - bytes: "bytes", - tuple: "tuple", - list: "list", - set: "set", - dict: "dict", - slice: "slice", - Fraction: "fraction", - OrderedDict: "ordereddict", - numpy.ndarray: "nparray" -} - -_numpy_scalar = { - "int8", "int16", "int32", "int64", - "uint8", "uint16", "uint32", "uint64", - "float16", "float32", "float64", - "complex64", "complex128", -} - - -for _t in _numpy_scalar: - _encode_map[getattr(numpy, _t)] = "npscalar" - - -_str_translation = { - ord("\""): "\\\"", - ord("\\"): "\\\\", - ord("\n"): "\\n", - ord("\r"): "\\r", -} - - -class _Encoder: - def __init__(self, pretty): - self.pretty = pretty - self.indent_level = 0 - - def indent(self): - return " "*self.indent_level - - def encode_none(self, x): - return "null" - - def encode_bool(self, x): - if x: - return "true" - else: - return "false" - - def encode_number(self, x): - return repr(x) - - def encode_str(self, x): - # Do not use repr() for JSON compatibility. - return "\"" + x.translate(_str_translation) + "\"" - - def encode_bytes(self, x): - return repr(x) - - def encode_tuple(self, x): - if len(x) == 1: - return "(" + self.encode(x[0]) + ", )" - else: - r = "(" - r += ", ".join([self.encode(item) for item in x]) - r += ")" - return r - - def encode_list(self, x): - r = "[" - r += ", ".join([self.encode(item) for item in x]) - r += "]" - return r - - def encode_set(self, x): - r = "{" - r += ", ".join([self.encode(item) for item in x]) - r += "}" - return r - - def encode_dict(self, x): - r = "{" - if not self.pretty or len(x) < 2: - r += ", ".join([self.encode(k) + ": " + self.encode(v) - for k, v in x.items()]) - else: - self.indent_level += 1 - r += "\n" - first = True - for k, v in x.items(): - if not first: - r += ",\n" - first = False - r += self.indent() + self.encode(k) + ": " + self.encode(v) - r += "\n" # no ',' - self.indent_level -= 1 - r += self.indent() - r += "}" - return r - - def encode_slice(self, x): - return repr(x) - - def encode_fraction(self, x): - return "Fraction({}, {})".format(self.encode(x.numerator), - self.encode(x.denominator)) - - def encode_ordereddict(self, x): - return "OrderedDict(" + self.encode(list(x.items())) + ")" - - def encode_nparray(self, x): - r = "nparray(" - r += self.encode(x.shape) + ", " - r += self.encode(x.dtype.str) + ", " - r += self.encode(base64.b64encode(x.data)) - r += ")" - return r - - def encode_npscalar(self, x): - r = "npscalar(" - r += self.encode(x.dtype.str) + ", " - r += self.encode(base64.b64encode(x.data)) - r += ")" - return r - - def encode(self, x): - ty = _encode_map.get(type(x), None) - if ty is None: - raise TypeError("`{!r}` ({}) is not PYON serializable" - .format(x, type(x))) - return getattr(self, "encode_" + ty)(x) - - -def encode(x, pretty=False): - """Serializes a Python object and returns the corresponding string in - Python 
syntax.""" - return _Encoder(pretty).encode(x) - - -def _nparray(shape, dtype, data): - a = numpy.frombuffer(base64.b64decode(data), dtype=dtype) - a = a.copy() - return a.reshape(shape) - - -def _npscalar(ty, data): - return numpy.frombuffer(base64.b64decode(data), dtype=ty)[0] - - -_eval_dict = { - "__builtins__": {}, - - "null": None, - "false": False, - "true": True, - "slice": slice, - - "Fraction": Fraction, - "OrderedDict": OrderedDict, - "nparray": _nparray, - "npscalar": _npscalar -} - - -def decode(s): - """Parses a string in the Python syntax, reconstructs the corresponding - object, and returns it.""" - return eval(s, _eval_dict, {}) - - -def store_file(filename, x): - """Encodes a Python object and writes it to the specified file.""" - contents = encode(x, True) - directory = os.path.abspath(os.path.dirname(filename)) - with tempfile.NamedTemporaryFile("w", dir=directory, delete=False) as f: - f.write(contents) - f.write("\n") - tmpname = f.name - os.replace(tmpname, filename) - - -def load_file(filename): - """Parses the specified file and returns the decoded Python object.""" - with open(filename, "r") as f: - return decode(f.read()) diff --git a/artiq/protocols/remote_exec.py b/artiq/protocols/remote_exec.py deleted file mode 100644 index 0b7f2419c..000000000 --- a/artiq/protocols/remote_exec.py +++ /dev/null @@ -1,116 +0,0 @@ -""" -This module provides facilities for experiment to execute code remotely on -controllers. - -The remotely executed code has direct access to the driver, so it can transfer -large amounts of data with it, and only exchange higher-level, processed data -with the experiment (and over the network). - -Controllers with support for remote execution contain an additional target -that gives RPC access to instances of ``RemoteExecServer``. One such instance -is created per client (experiment) connection and manages one Python namespace -in which the experiment can execute arbitrary code by calling the methods of -``RemoteExecServer``. - -The namespaces are initialized with the following global values: - - * ``controller_driver`` - the driver instance of the controller. - * ``controller_initial_namespace`` - a controller-wide dictionary copied - when initializing a new namespace. - * all values from ``controller_initial_namespace``. - -Access to a controller with support for remote execution is done through an -additional device database entry of this form: :: - - "$REXEC_DEVICE_NAME": { - "type": "controller_aux_target", - "controller": "$CONTROLLER_DEVICE_NAME", - "target_name": "$TARGET_NAME_FOR_REXEC" - } - -Specifying ``target_name`` is mandatory in all device database entries for all -controllers with remote execution support. - -""" - -from functools import partial -import inspect - -from artiq.protocols.pc_rpc import simple_server_loop - - -__all__ = ["RemoteExecServer", "simple_rexec_server_loop", "connect_global_rpc"] - - -class RemoteExecServer: - """RPC target created at each connection by controllers with remote - execution support. Manages one Python namespace and provides RPCs - for code execution. - """ - def __init__(self, initial_namespace): - self.namespace = dict(initial_namespace) - # The module actually has to exist, otherwise it breaks e.g. Numba - self.namespace["__name__"] = "artiq.protocols.remote_exec" - - def add_code(self, code): - """Executes the specified code in the namespace. 
- - :param code: a string containing valid Python code - """ - exec(code, self.namespace) - - def call(self, function, *args, **kwargs): - """Calls a function in the namespace, passing it positional and - keyword arguments, and returns its value. - - :param function: a string containing the name of the function to - execute. - """ - return self.namespace[function](*args, **kwargs) - - -def simple_rexec_server_loop(target_name, target, host, port, - description=None): - """Runs a server with remote execution support, until an exception is - raised (e.g. the user hits Ctrl-C) or termination is requested by a client. - """ - initial_namespace = {"controller_driver": target} - initial_namespace["controller_initial_namespace"] = initial_namespace - targets = { - target_name: target, - target_name + "_rexec": lambda: RemoteExecServer(initial_namespace) - } - simple_server_loop(targets, host, port, description) - - -def connect_global_rpc(controller_rexec, host=None, port=3251, - target="master_dataset_db", name="dataset_db"): - """Creates a global RPC client in a controller that is used across - all remote execution connections. With the default parameters, it connects - to the dataset database (i.e. gives direct dataset access to experiment - code remotely executing in controllers). - - If a global object with the same name already exists, the function does - nothing. - - :param controller_rexec: the RPC client connected to the controller's - remote execution interface. - :param host: the host name to connect the RPC client to. Default is the - local end of the remote execution interface (typically, the ARTIQ - master). - :param port: TCP port to connect the RPC client to. - :param target: name of the RPC target. - :param name: name of the object to insert into the global namespace. - """ - if host is None: - host = controller_rexec.get_local_host() - code = """ -if "{name}" not in controller_initial_namespace: - import atexit - from artiq.protocols.pc_rpc import Client - - {name} = Client("{host}", {port}, "{target}") - atexit.register({name}.close_rpc) - controller_initial_namespace["{name}"] = {name} -""".format(host=host, port=port, target=target, name=name) - controller_rexec.add_code(code) diff --git a/artiq/protocols/sync_struct.py b/artiq/protocols/sync_struct.py deleted file mode 100644 index 35b72ff75..000000000 --- a/artiq/protocols/sync_struct.py +++ /dev/null @@ -1,258 +0,0 @@ -"""This module helps synchronizing a mutable Python structure owned and -modified by one process (the *publisher*) with copies of it (the -*subscribers*) in different processes and possibly different machines. - -Synchronization is achieved by sending a full copy of the structure to each -subscriber upon connection (*initialization*), followed by dictionaries -describing each modification made to the structure (*mods*). - -Structures must be PYON serializable and contain only lists, dicts, and -immutable types. Lists and dicts can be nested arbitrarily. 
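
# A minimal illustration of the "mod" dictionaries mentioned above: this is the
# shape that Notifier.append() publishes further below and that subscribers
# feed to process_mod(); the datasets structure is illustrative.
from artiq.protocols import pyon
from artiq.protocols.sync_struct import process_mod

datasets = {"counts": [10, 12]}
mod = {"action": "append", "path": ["counts"], "x": 17}
process_mod(datasets, mod)        # datasets becomes {"counts": [10, 12, 17]}
line = pyon.encode(mod) + "\n"    # each mod travels as one pyon-encoded line
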
-""" - -import asyncio -from operator import getitem -from functools import partial - -from artiq.monkey_patches import * -from artiq.protocols import pyon -from artiq.protocols.asyncio_server import AsyncioServer - - -_init_string = b"ARTIQ sync_struct\n" - - -def process_mod(target, mod): - """Apply a *mod* to the target, mutating it.""" - for key in mod["path"]: - target = getitem(target, key) - action = mod["action"] - if action == "append": - target.append(mod["x"]) - elif action == "insert": - target.insert(mod["i"], mod["x"]) - elif action == "pop": - target.pop(mod["i"]) - elif action == "setitem": - target.__setitem__(mod["key"], mod["value"]) - elif action == "delitem": - target.__delitem__(mod["key"]) - else: - raise ValueError - - -class Subscriber: - """An asyncio-based client to connect to a ``Publisher``. - - :param notifier_name: Name of the notifier to subscribe to. - :param target_builder: A function called during initialization that takes - the object received from the publisher and returns the corresponding - local structure to use. Can be identity. - :param notify_cb: An optional function called every time a mod is received - from the publisher. The mod is passed as parameter. The function is - called after the mod has been processed. - A list of functions may also be used, and they will be called in turn. - :param disconnect_cb: An optional function called when disconnection happens - from external causes (i.e. not when ``close`` is called). - """ - def __init__(self, notifier_name, target_builder, notify_cb=None, disconnect_cb=None): - self.notifier_name = notifier_name - self.target_builder = target_builder - if notify_cb is None: - notify_cb = [] - if not isinstance(notify_cb, list): - notify_cb = [notify_cb] - self.notify_cbs = notify_cb - self.disconnect_cb = disconnect_cb - - async def connect(self, host, port, before_receive_cb=None): - self.reader, self.writer = \ - await asyncio.open_connection(host, port, limit=100*1024*1024) - try: - if before_receive_cb is not None: - before_receive_cb() - self.writer.write(_init_string) - self.writer.write((self.notifier_name + "\n").encode()) - self.receive_task = asyncio.ensure_future(self._receive_cr()) - except: - self.writer.close() - del self.reader - del self.writer - raise - - async def close(self): - self.disconnect_cb = None - try: - self.receive_task.cancel() - try: - await asyncio.wait_for(self.receive_task, None) - except asyncio.CancelledError: - pass - finally: - self.writer.close() - del self.reader - del self.writer - - async def _receive_cr(self): - try: - target = None - while True: - line = await self.reader.readline() - if not line: - return - mod = pyon.decode(line.decode()) - - if mod["action"] == "init": - target = self.target_builder(mod["struct"]) - else: - process_mod(target, mod) - - for notify_cb in self.notify_cbs: - notify_cb(mod) - finally: - if self.disconnect_cb is not None: - self.disconnect_cb() - - -class Notifier: - """Encapsulates a structure whose changes need to be published. - - All mutations to the structure must be made through the ``Notifier``. The - original structure must only be accessed for reads. - - In addition to the list methods below, the ``Notifier`` supports the index - syntax for modification and deletion of elements. 
Modification of nested - structures can be also done using the index syntax, for example: - - >>> n = Notifier([]) - >>> n.append([]) - >>> n[0].append(42) - >>> n.read - [[42]] - - This class does not perform any network I/O and is meant to be used with - e.g. the ``Publisher`` for this purpose. Only one publisher at most can be - associated with a ``Notifier``. - - :param backing_struct: Structure to encapsulate. For convenience, it - also becomes available as the ``read`` property of the ``Notifier``. - """ - def __init__(self, backing_struct, root=None, path=[]): - self.read = backing_struct - if root is None: - self.root = self - self.publish = None - else: - self.root = root - self._backing_struct = backing_struct - self._path = path - - # Backing struct modification methods. - # All modifications must go through them! - - def append(self, x): - """Append to a list.""" - self._backing_struct.append(x) - if self.root.publish is not None: - self.root.publish({"action": "append", - "path": self._path, - "x": x}) - - def insert(self, i, x): - """Insert an element into a list.""" - self._backing_struct.insert(i, x) - if self.root.publish is not None: - self.root.publish({"action": "insert", - "path": self._path, - "i": i, "x": x}) - - def pop(self, i=-1): - """Pop an element from a list. The returned element is not - encapsulated in a ``Notifier`` and its mutations are no longer - tracked.""" - r = self._backing_struct.pop(i) - if self.root.publish is not None: - self.root.publish({"action": "pop", - "path": self._path, - "i": i}) - return r - - def __setitem__(self, key, value): - self._backing_struct.__setitem__(key, value) - if self.root.publish is not None: - self.root.publish({"action": "setitem", - "path": self._path, - "key": key, - "value": value}) - - def __delitem__(self, key): - self._backing_struct.__delitem__(key) - if self.root.publish is not None: - self.root.publish({"action": "delitem", - "path": self._path, - "key": key}) - - def __getitem__(self, key): - item = getitem(self._backing_struct, key) - return Notifier(item, self.root, self._path + [key]) - - -class Publisher(AsyncioServer): - """A network server that publish changes to structures encapsulated in - ``Notifiers``. - - :param notifiers: A dictionary containing the notifiers to associate with - the ``Publisher``. The keys of the dictionary are the names of the - notifiers to be used with ``Subscriber``. 
- """ - def __init__(self, notifiers): - AsyncioServer.__init__(self) - self.notifiers = notifiers - self._recipients = {k: set() for k in notifiers.keys()} - self._notifier_names = {id(v): k for k, v in notifiers.items()} - - for notifier in notifiers.values(): - notifier.publish = partial(self.publish, notifier) - - async def _handle_connection_cr(self, reader, writer): - try: - line = await reader.readline() - if line != _init_string: - return - - line = await reader.readline() - if not line: - return - notifier_name = line.decode()[:-1] - - try: - notifier = self.notifiers[notifier_name] - except KeyError: - return - - obj = {"action": "init", "struct": notifier.read} - line = pyon.encode(obj) + "\n" - writer.write(line.encode()) - - queue = asyncio.Queue() - self._recipients[notifier_name].add(queue) - try: - while True: - line = await queue.get() - writer.write(line) - # raise exception on connection error - await writer.drain() - finally: - self._recipients[notifier_name].remove(queue) - except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError): - # subscribers disconnecting are a normal occurence - pass - finally: - writer.close() - - def publish(self, notifier, mod): - line = pyon.encode(mod) + "\n" - line = line.encode() - notifier_name = self._notifier_names[id(notifier)] - for recipient in self._recipients[notifier_name]: - recipient.put_nowait(line) diff --git a/artiq/remoting.py b/artiq/remoting.py new file mode 100644 index 000000000..41ca35928 --- /dev/null +++ b/artiq/remoting.py @@ -0,0 +1,167 @@ +import os +import sys +import logging +import tempfile +import shutil +import shlex +import subprocess +import hashlib +import random +import getpass + +__all__ = ["LocalClient", "SSHClient"] + +logger = logging.getLogger(__name__) + + +class Client: + def upload(self, filename, rewriter=None): + raise NotImplementedError + + def prepare_download(self, filename): + raise NotImplementedError + + def download(self): + raise NotImplementedError + + def run_command(self, cmd, **kws): + raise NotImplementedError + + +class LocalClient(Client): + def __init__(self): + self._tmp = os.path.join(tempfile.gettempdir(), "artiq") + + def upload(self, filename, rewriter=None): + logger.debug("Uploading {}".format(filename)) + if rewriter is None: + return filename + else: + os.makedirs(self._tmp, exist_ok=True) + with open(filename, 'rb') as local: + rewritten = rewriter(local.read()) + tmp_filename = os.path.join(self._tmp, hashlib.sha1(rewritten).hexdigest()) + with open(tmp_filename, 'wb') as tmp: + tmp.write(rewritten) + return tmp_filename + + def prepare_download(self, filename): + logger.debug("Downloading {}".format(filename)) + return filename + + def download(self): + pass + + def run_command(self, cmd, **kws): + logger.debug("Executing {}".format(cmd)) + subprocess.check_call([arg.format(tmp=self._tmp, **kws) for arg in cmd]) + + +class SSHClient(Client): + def __init__(self, host, jump_host=None): + if "@" in host: + self.username, self.host = host.split("@") + else: + self.host = host + self.username = None + self.jump_host = jump_host + self.ssh = None + self.sftp = None + self._tmpr = "/tmp/artiq-" + getpass.getuser() + self._tmpl = tempfile.TemporaryDirectory(prefix="artiq") + self._cached = [] + self._downloads = {} + + def get_ssh(self): + if self.ssh is None: + import paramiko + logging.getLogger("paramiko").setLevel(logging.WARNING) + + if self.jump_host: + proxy_cmd = "ssh -W {}:22 {}".format(self.host, self.jump_host) + logger.debug("Using proxy command 
'{}'".format(proxy_cmd)) + proxy = paramiko.proxy.ProxyCommand(proxy_cmd) + else: + proxy = None + + self.ssh = paramiko.SSHClient() + self.ssh.load_system_host_keys() + self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.ssh.connect(self.host, username=self.username, sock=proxy) + + logger.debug("Connecting to {}".format(self.host)) + return self.ssh + + def get_transport(self): + return self.get_ssh().get_transport() + + def get_sftp(self): + if self.sftp is None: + self.sftp = self.get_ssh().open_sftp() + try: + self._cached = self.sftp.listdir(self._tmpr) + except OSError: + self.sftp.mkdir(self._tmpr) + return self.sftp + + def upload(self, filename, rewriter=lambda x: x): + with open(filename, 'rb') as local: + rewritten = rewriter(local.read()) + digest = hashlib.sha1(rewritten).hexdigest() + remote_filename = "{}/{}".format(self._tmpr, digest) + + sftp = self.get_sftp() + if digest in self._cached: + logger.debug("Using cached {}".format(filename)) + else: + logger.debug("Uploading {}".format(filename)) + # Avoid a race condition by writing into a temporary file + # and atomically replacing + with sftp.open(remote_filename + ".~", "wb") as remote: + remote.write(rewritten) + try: + sftp.rename(remote_filename + ".~", remote_filename) + except IOError: + # Either it already exists (this is OK) or something else + # happened (this isn't) and we need to re-raise + sftp.stat(remote_filename) + + return remote_filename + + def prepare_download(self, filename): + tmpname = "".join([random.Random().choice("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + for _ in range(6)]) + remote_filename = "{}/{}_{}".format(self._tmpr, tmpname, filename) + + _sftp = self.get_sftp() + logger.debug("Downloading {}".format(filename)) + self._downloads[filename] = remote_filename + + return remote_filename + + def download(self): + sftp = self.get_sftp() + for filename, remote_filename in self._downloads.items(): + sftp.get(remote_filename, filename) + + self._downloads = {} + + def spawn_command(self, cmd, get_pty=False, **kws): + chan = self.get_transport().open_session() + chan.set_combine_stderr(True) + if get_pty: + chan.get_pty() + cmd = " ".join([shlex.quote(arg.format(tmp=self._tmpr, **kws)) for arg in cmd]) + logger.debug("Executing {}".format(cmd)) + chan.exec_command(cmd) + return chan + + def drain(self, chan): + while True: + char = chan.recv(1) + if char == b"": + break + sys.stderr.write(char.decode("utf-8", errors='replace')) + + def run_command(self, cmd, **kws): + self.drain(self.spawn_command(cmd, **kws)) diff --git a/artiq/sim/devices.py b/artiq/sim/devices.py index ff8e5eb57..41b7b59c5 100644 --- a/artiq/sim/devices.py +++ b/artiq/sim/devices.py @@ -49,13 +49,13 @@ class Input: delay(duration) @kernel - def count(self): + def count(self, up_to_timestamp_mu): result = self.prng.randrange(0, 100) time.manager.event(("count", self.name, result)) return result @kernel - def timestamp_mu(self): + def timestamp_mu(self, up_to_timestamp_mu): result = time.manager.get_time_mu() result += self.prng.randrange(100, 1000) time.manager.event(("timestamp_mu", self.name, result)) diff --git a/artiq/test/coredevice/test_ad9910.py b/artiq/test/coredevice/test_ad9910.py new file mode 100644 index 000000000..4af554784 --- /dev/null +++ b/artiq/test/coredevice/test_ad9910.py @@ -0,0 +1,410 @@ +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase +from artiq.coredevice.ad9910 import ( + _AD9910_REG_FTW, _AD9910_REG_PROFILE0, RAM_MODE_RAMPUP, + RAM_DEST_FTW) +from 
artiq.coredevice.urukul import ( + urukul_sta_smp_err, CFG_CLK_SEL0, CFG_CLK_SEL1) + + +class AD9910Exp(EnvExperiment): + def build(self, runner): + self.setattr_device("core") + self.dev = self.get_device("urukul_ad9910") + self.runner = runner + + def run(self): + getattr(self, self.runner)() + + @kernel + def instantiate(self): + pass + + @kernel + def init(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + + @kernel + def init_fail(self): + self.core.break_realtime() + self.dev.cpld.init() + cfg = self.dev.cpld.cfg_reg + cfg &= ~(1 << CFG_CLK_SEL1) + cfg |= 1 << CFG_CLK_SEL0 + self.dev.cpld.cfg_write(cfg) + # clk_sel=1, external SMA, should fail PLL lock + self.dev.init() + + @kernel + def set_get(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_att(20*dB) + f = 81.2345*MHz + self.dev.set(frequency=f, phase=.33, amplitude=.89) + self.set_dataset("ftw_set", self.dev.frequency_to_ftw(f)) + self.set_dataset("ftw_get", self.dev.read32(_AD9910_REG_FTW)) + + @kernel + def read_write64(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + lo = 0x12345678 + hi = 0x09abcdef + self.dev.write64(_AD9910_REG_PROFILE0, hi, lo) + self.dev.cpld.io_update.pulse_mu(8) + read = self.dev.read64(_AD9910_REG_PROFILE0) + self.set_dataset("write", (int64(hi) << 32) | lo) + self.set_dataset("read", read) + + @kernel + def set_speed(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + f = 81.2345*MHz + n = 10 + t0 = self.core.get_rtio_counter_mu() + for i in range(n): + self.dev.set(frequency=f, phase=.33, amplitude=.89) + self.set_dataset("dt", self.core.mu_to_seconds( + self.core.get_rtio_counter_mu() - t0)/n) + + @kernel + def set_speed_mu(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + n = 10 + t0 = self.core.get_rtio_counter_mu() + for i in range(n): + self.dev.set_mu(0x12345678, 0x1234, 0x4321) + self.set_dataset("dt", self.core.mu_to_seconds( + self.core.get_rtio_counter_mu() - t0)/n) + + @kernel + def sync_window(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + err = [0] * 32 + for i in range(6): + self.sync_scan(err, win=i) + print(err) + self.core.break_realtime() + dly, win = self.dev.tune_sync_delay() + self.sync_scan(err, win=win) + # FIXME: win + 1 # tighten window by 2*75ps + # after https://github.com/sinara-hw/Urukul/issues/16 + self.set_dataset("dly", dly) + self.set_dataset("win", win) + self.set_dataset("err", err) + + @kernel + def sync_scan(self, err, win): + for in_delay in range(len(err)): + self.dev.set_sync(in_delay=in_delay, window=win) + self.dev.clear_smp_err() + # delay(10*us) # integrate SMP_ERR statistics + e = urukul_sta_smp_err(self.dev.cpld.sta_read()) + err[in_delay] = (e >> (self.dev.chip_select - 4)) & 1 + delay(50*us) # slack + + @kernel + def io_update_delay(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + bins1 = [0]*4 + bins2 = [0]*4 + self.scan_io_delay(bins1, bins2) + self.set_dataset("bins1", bins1) + self.set_dataset("bins2", bins2) + self.set_dataset("dly", self.dev.tune_io_update_delay()) + + @kernel + def scan_io_delay(self, bins1, bins2): + delay(100*us) + n = 100 + for i in range(n): + for j in range(len(bins1)): + bins1[j] += self.dev.measure_io_update_alignment(j, j + 1) + bins2[j] += self.dev.measure_io_update_alignment(j, j + 2) + delay(10*ms) + + @kernel + def sw_readback(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() 
+ self.dev.cfg_sw(0) + self.dev.sw.on() + sw_on = (self.dev.cpld.sta_read() >> (self.dev.chip_select - 4)) & 1 + delay(10*us) + self.dev.sw.off() + sw_off = (self.dev.cpld.sta_read() >> (self.dev.chip_select - 4)) & 1 + self.set_dataset("sw", (sw_on, sw_off)) + + @kernel + def profile_readback(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + for i in range(8): + self.dev.set_mu(ftw=i, profile=i) + ftw = [0] * 8 + for i in range(8): + self.dev.cpld.set_profile(i) + # If PROFILE is not alligned to SYNC_CLK a multi-bit change + # doesn't transfer cleanly. Use IO_UPDATE to load the profile + # again. + self.dev.cpld.io_update.pulse_mu(8) + ftw[i] = self.dev.read32(_AD9910_REG_FTW) + delay(100*us) + self.set_dataset("ftw", ftw) + + @kernel + def ram_write(self): + n = 1 << 10 + write = [0]*n + for i in range(n): + write[i] = i | (i << 16) + read = [0]*n + + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_cfr1(ram_enable=0) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.set_profile_ram( + start=0, end=0 + n - 1, step=1, + profile=0, mode=RAM_MODE_RAMPUP) + self.dev.cpld.set_profile(0) + self.dev.cpld.io_update.pulse_mu(8) + delay(1*ms) + self.dev.write_ram(write) + delay(1*ms) + self.dev.read_ram(read) + self.set_dataset("w", write) + self.set_dataset("r", read) + + @kernel + def ram_read_overlapping(self): + write = [0]*989 + for i in range(len(write)): + write[i] = i + read = [0]*100 + offset = 367 + + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_cfr1(ram_enable=0) + self.dev.cpld.io_update.pulse_mu(8) + + self.dev.set_profile_ram( + start=0, end=0 + len(write) - 1, step=1, + profile=0, mode=RAM_MODE_RAMPUP) + self.dev.set_profile_ram( + start=offset, end=offset + len(read) - 1, step=1, + profile=1, mode=RAM_MODE_RAMPUP) + + self.dev.cpld.set_profile(0) + self.dev.cpld.io_update.pulse_mu(8) + delay(1*ms) + self.dev.write_ram(write) + delay(1*ms) + self.dev.cpld.set_profile(1) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.read_ram(read) + + # RAM profile addresses are apparently aligned + # to the last address of the RAM + start = len(write) - offset - len(read) + end = len(write) - offset + self.set_dataset("w", write[start:end]) + self.set_dataset("r", read) + + @kernel + def ram_exec(self): + ftw0 = [0x12345678]*2 + ftw1 = [0x55aaaa55]*2 + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_cfr1(ram_enable=0) + self.dev.cpld.io_update.pulse_mu(8) + + self.dev.set_profile_ram( + start=100, end=100 + len(ftw0) - 1, step=1, + profile=3, mode=RAM_MODE_RAMPUP) + self.dev.set_profile_ram( + start=200, end=200 + len(ftw1) - 1, step=1, + profile=4, mode=RAM_MODE_RAMPUP) + + self.dev.cpld.set_profile(3) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.write_ram(ftw0) + + self.dev.cpld.set_profile(4) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.write_ram(ftw1) + + self.dev.set_cfr1(ram_enable=1, ram_destination=RAM_DEST_FTW) + self.dev.cpld.io_update.pulse_mu(8) + + self.dev.cpld.set_profile(3) + self.dev.cpld.io_update.pulse_mu(8) + ftw0r = self.dev.read32(_AD9910_REG_FTW) + delay(100*us) + + self.dev.cpld.set_profile(4) + self.dev.cpld.io_update.pulse_mu(8) + ftw1r = self.dev.read32(_AD9910_REG_FTW) + + self.set_dataset("ftw", [ftw0[0], ftw0r, ftw1[0], ftw1r]) + + @kernel + def ram_convert_frequency(self): + freq = [33*MHz]*2 + ram = [0]*len(freq) + self.dev.frequency_to_ram(freq, ram) + + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + 
self.dev.set_cfr1(ram_enable=0) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.set_profile_ram( + start=100, end=100 + len(ram) - 1, step=1, + profile=6, mode=RAM_MODE_RAMPUP) + self.dev.cpld.set_profile(6) + self.dev.cpld.io_update.pulse_mu(8) + self.dev.write_ram(ram) + self.dev.set_cfr1(ram_enable=1, ram_destination=RAM_DEST_FTW) + self.dev.cpld.io_update.pulse_mu(8) + ftw_read = self.dev.read32(_AD9910_REG_FTW) + self.set_dataset("ram", ram) + self.set_dataset("ftw_read", ftw_read) + self.set_dataset("freq", freq) + + @kernel + def ram_convert_powasf(self): + amplitude = [.1, .9] + turns = [.3, .5] + ram = [0]*2 + self.dev.turns_amplitude_to_ram(turns, amplitude, ram) + self.set_dataset("amplitude", amplitude) + self.set_dataset("turns", turns) + self.set_dataset("ram", ram) + + +class AD9910Test(ExperimentCase): + def test_instantiate(self): + self.execute(AD9910Exp, "instantiate") + + def test_init(self): + self.execute(AD9910Exp, "init") + + def test_init_fail(self): + with self.assertRaises(ValueError): + self.execute(AD9910Exp, "init_fail") + + def test_set_get(self): + self.execute(AD9910Exp, "set_get") + ftw_get = self.dataset_mgr.get("ftw_get") + ftw_set = self.dataset_mgr.get("ftw_set") + self.assertEqual(ftw_get, ftw_set) + + def test_read_write64(self): + self.execute(AD9910Exp, "read_write64") + write = self.dataset_mgr.get("write") + read = self.dataset_mgr.get("read") + self.assertEqual(hex(write), hex(read)) + + def test_set_speed(self): + self.execute(AD9910Exp, "set_speed") + dt = self.dataset_mgr.get("dt") + print(dt) + self.assertLess(dt, 70*us) + + def test_set_speed_mu(self): + self.execute(AD9910Exp, "set_speed_mu") + dt = self.dataset_mgr.get("dt") + print(dt) + self.assertLess(dt, 11*us) + + def test_sync_window(self): + self.execute(AD9910Exp, "sync_window") + err = self.dataset_mgr.get("err") + dly = self.dataset_mgr.get("dly") + win = self.dataset_mgr.get("win") + print(dly, win, err) + # make sure one tap margin on either side of optimal delay + for i in -1, 0, 1: + self.assertEqual(err[i + dly], 0) + + def test_io_update_delay(self): + self.execute(AD9910Exp, "io_update_delay") + dly = self.dataset_mgr.get("dly") + bins1 = self.dataset_mgr.get("bins1") + bins2 = self.dataset_mgr.get("bins2") + print(dly, bins1, bins2) + n = max(bins2) + # no edge at optimal delay + self.assertEqual(bins2[(dly + 1) & 3], 0) + # many edges near expected position + self.assertGreater(bins2[(dly + 3) & 3], n*.9) + + def test_sw_readback(self): + self.execute(AD9910Exp, "sw_readback") + self.assertEqual(self.dataset_mgr.get("sw"), (1, 0)) + + def test_profile_readback(self): + self.execute(AD9910Exp, "profile_readback") + self.assertEqual(self.dataset_mgr.get("ftw"), list(range(8))) + + def test_ram_write(self): + self.execute(AD9910Exp, "ram_write") + read = self.dataset_mgr.get("r") + write = self.dataset_mgr.get("w") + self.assertEqual(len(read), len(write)) + self.assertEqual(read, write) + + def test_ram_read_overlapping(self): + self.execute(AD9910Exp, "ram_read_overlapping") + read = self.dataset_mgr.get("r") + write = self.dataset_mgr.get("w") + self.assertEqual(len(read), 100) + self.assertEqual(read, write) + + def test_ram_exec(self): + self.execute(AD9910Exp, "ram_exec") + ftw = self.dataset_mgr.get("ftw") + self.assertEqual(ftw[0], ftw[1]) + self.assertEqual(ftw[2], ftw[3]) + + def test_ram_convert_frequency(self): + exp = self.execute(AD9910Exp, "ram_convert_frequency") + ram = self.dataset_mgr.get("ram") + ftw_read = self.dataset_mgr.get("ftw_read") + 
self.assertEqual(ftw_read, ram[0]) + freq = self.dataset_mgr.get("freq") + self.assertEqual(ftw_read, exp.dev.frequency_to_ftw(freq[0])) + self.assertAlmostEqual(freq[0], exp.dev.ftw_to_frequency(ftw_read), + delta=.25) + + def test_ram_convert_powasf(self): + exp = self.execute(AD9910Exp, "ram_convert_powasf") + ram = self.dataset_mgr.get("ram") + amplitude = self.dataset_mgr.get("amplitude") + turns = self.dataset_mgr.get("turns") + for i in range(len(ram)): + self.assertEqual((ram[i] >> 16) & 0xffff, + exp.dev.turns_to_pow(turns[i])) + self.assertEqual(ram[i] & 0xffff, + exp.dev.amplitude_to_asf(amplitude[i])) diff --git a/artiq/test/coredevice/test_adf5356.py b/artiq/test/coredevice/test_adf5356.py new file mode 100644 index 000000000..e98189c0f --- /dev/null +++ b/artiq/test/coredevice/test_adf5356.py @@ -0,0 +1,206 @@ +import unittest +import numpy as np + +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase +from artiq.coredevice.adf5356 import ( + calculate_pll, + split_msb_lsb_28b, + ADF5356_MODULUS1, + ADF5356_MAX_MODULUS2, +) + + +class ADF5356Exp(EnvExperiment): + def build(self, runner): + self.setattr_device("core") + self.dev = self.get_device("mirny0_ch0") + self.runner = runner + + def run(self): + getattr(self, self.runner)() + + @kernel + def instantiate(self): + pass + + @kernel + def init(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + + @kernel + def set_get_freq(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_att_mu(0) + f = 300.123456 * MHz + self.dev.set_frequency(f) + self.set_dataset("freq_set", round(f / Hz)) + self.set_dataset( + "freq_get", round(self.dev.f_vco() / self.dev.output_divider() / Hz) + ) + + @kernel + def set_too_high_frequency(self): + self.dev.set_frequency(10 * GHz) + + @kernel + def set_too_low_frequency(self): + self.dev.set_frequency(1 * MHz) + + @kernel + def muxout_lock_detect(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_att_mu(0) + f = 300.123 * MHz + self.dev.set_frequency(f) + delay(5 * ms) + self.set_dataset("muxout", self.dev.read_muxout()) + + @kernel + def muxout_lock_detect_no_lock(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + # set external SMA reference input + self.dev.cpld.write_reg(1, (1 << 4)) + self.dev.set_frequency(100 * MHz) + delay(5 * ms) + self.set_dataset("muxout", self.dev.read_muxout()) + + @kernel + def set_get_output_power(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_att_mu(0) + self.dev.set_frequency(100 * MHz) + self.set_dataset("get_power", np.full(4, np.nan)) + for n in range(4): + delay(10 * ms) + self.dev.set_output_power_mu(n) + m = self.dev.output_power_mu() + self.mutate_dataset("get_power", n, m) + + @kernel + def invalid_output_power_setting(self): + self.dev.set_output_power_mu(42) + + @kernel + def enable_disable_output(self): + self.core.break_realtime() + self.dev.cpld.init() + self.dev.init() + self.dev.set_att_mu(0) + self.dev.set_frequency(100 * MHz) + self.dev.disable_output() + delay(100 * us) + self.dev.enable_output() + + +class TestCalculateParameters(unittest.TestCase): + def setUp(self): + self.f_pfd = 50 * MHz + self.mod1 = ADF5356_MODULUS1 + + def test_split_msb_lsb(self): + a = (0x123 << 14) | 0x3456 + msb, lsb = split_msb_lsb_28b(a) + + self.assertEqual(msb, 0x123) + self.assertEqual(lsb, 0x3456) + + def test_integer_pll(self): + p_n = 30 + n, frac1, 
frac2, mod2 = calculate_pll(p_n * self.f_pfd, self.f_pfd) + + self.assertEqual(p_n, n) + self.assertEqual(frac1, 0) + self.assertEqual(frac2, (0, 0)) + self.assertNotEqual(mod2, (0, 0)) + + def test_frac1_pll(self): + p_n = 30 + p_frac1 = 1 << 22 + n, frac1, frac2, mod2 = calculate_pll( + (p_n + p_frac1 / self.mod1) * self.f_pfd, self.f_pfd + ) + + self.assertEqual(p_n, n) + self.assertEqual(p_frac1, frac1) + self.assertEqual(frac2, (0, 0)) + self.assertNotEqual(mod2, (0, 0)) + + def test_frac_pll(self): + p_n = 30 + p_frac1 = 1 << 14 + p_frac2 = 1 << 24 + p_mod2 = 1 << 25 + n, frac1, frac2, mod2 = calculate_pll( + (p_n + (p_frac1 + p_frac2 / p_mod2) / self.mod1) * self.f_pfd, self.f_pfd + ) + + self.assertEqual(p_n, n) + self.assertEqual(p_frac1, frac1) + + frac2 = (frac2[0] << 14) | frac2[1] + mod2 = (mod2[0] << 14) | mod2[1] + + self.assertNotEqual(frac2, 0) + self.assertNotEqual(mod2, 0) + self.assertLessEqual(mod2, ADF5356_MAX_MODULUS2) + + self.assertEqual( + self.mod1 // (p_frac1 + p_frac2 // p_mod2), + self.mod1 // (frac1 + frac2 // mod2), + ) + + +class ADF5356Test(ExperimentCase): + def test_instantiate(self): + self.execute(ADF5356Exp, "instantiate") + + def test_init(self): + self.execute(ADF5356Exp, "init") + + def test_set_get_freq(self): + self.execute(ADF5356Exp, "set_get_freq") + f_set = self.dataset_mgr.get("freq_set") + f_get = self.dataset_mgr.get("freq_get") + self.assertEqual(f_set, f_get) + + def test_muxout_lock_detect(self): + self.execute(ADF5356Exp, "muxout_lock_detect") + muxout = self.dataset_mgr.get("muxout") + self.assertTrue(muxout) + + def test_muxout_lock_detect_no_lock(self): + self.execute(ADF5356Exp, "muxout_lock_detect_no_lock") + muxout = self.dataset_mgr.get("muxout") + self.assertFalse(muxout) + + def test_set_too_high_frequency(self): + with self.assertRaises(ValueError): + self.execute(ADF5356Exp, "set_too_high_frequency") + + def test_set_too_low_frequency(self): + with self.assertRaises(ValueError): + self.execute(ADF5356Exp, "set_too_low_frequency") + + def test_set_get_output_power(self): + self.execute(ADF5356Exp, "set_get_output_power") + get_power = self.dataset_mgr.get("get_power") + for n in range(4): + self.assertEqual(n, get_power[n]) + + def test_invalid_output_power_setting(self): + with self.assertRaises(ValueError): + self.execute(ADF5356Exp, "invalid_output_power_setting") + + def test_enable_disable_output(self): + self.execute(ADF5356Exp, "enable_disable_output") diff --git a/artiq/test/coredevice/test_analyzer.py b/artiq/test/coredevice/test_analyzer.py index 0098e78dd..cf8a64540 100644 --- a/artiq/test/coredevice/test_analyzer.py +++ b/artiq/test/coredevice/test_analyzer.py @@ -15,17 +15,18 @@ class CreateTTLPulse(EnvExperiment): def initialize_io(self): self.core.reset() self.loop_in.input() + self.loop_out.output() + delay(1*us) self.loop_out.off() @kernel def run(self): self.core.break_realtime() with parallel: - self.loop_in.gate_both_mu(1200) with sequential: delay_mu(100) self.loop_out.pulse_mu(1000) - self.loop_in.count() + self.loop_in.count(self.loop_in.gate_both_mu(1200)) class WriteLog(EnvExperiment): @@ -59,9 +60,10 @@ class AnalyzerTest(ExperimentCase): input_messages = [msg for msg in dump.messages if isinstance(msg, InputMessage)] self.assertEqual(len(input_messages), 2) + # on Kasli systems, this has to go through the isolated DIO card self.assertAlmostEqual( abs(input_messages[0].timestamp - input_messages[1].timestamp), - 1000, delta=1) + 1000, delta=4) def test_rtio_log(self): core_host = 
self.device_mgr.get_desc("core")["arguments"]["host"] diff --git a/artiq/test/coredevice/test_cache.py b/artiq/test/coredevice/test_cache.py index 368515e0b..07452cf07 100644 --- a/artiq/test/coredevice/test_cache.py +++ b/artiq/test/coredevice/test_cache.py @@ -1,5 +1,6 @@ from artiq.experiment import * from artiq.coredevice.exceptions import CacheError +from artiq.compiler.targets import CortexA9Target from artiq.test.hardware_testbench import ExperimentCase @@ -40,6 +41,8 @@ class CacheTest(ExperimentCase): def test_borrow(self): exp = self.create(_Cache) + if exp.core.target_cls == CortexA9Target: + self.skipTest("Zynq port memory management does not need CacheError") exp.put("x4", [1, 2, 3]) with self.assertRaises(CacheError): exp.get_put("x4", []) diff --git a/artiq/test/coredevice/test_compile.py b/artiq/test/coredevice/test_compile.py index 0efb5eb8b..060cd1b8f 100644 --- a/artiq/test/coredevice/test_compile.py +++ b/artiq/test/coredevice/test_compile.py @@ -28,7 +28,7 @@ class TestCompile(ExperimentCase): with tempfile.TemporaryDirectory() as tmp: db_path = os.path.join(artiq_root, "device_db.py") subprocess.call([sys.executable, "-m", "artiq.frontend.artiq_compile", "--device-db", db_path, - "-e", "CheckLog", "-o", os.path.join(tmp, "check_log.elf"), __file__]) + "-c", "CheckLog", "-o", os.path.join(tmp, "check_log.elf"), __file__]) subprocess.call([sys.executable, "-m", "artiq.frontend.artiq_run", "--device-db", db_path, os.path.join(tmp, "check_log.elf")]) log = mgmt.get_log() diff --git a/artiq/test/coredevice/test_edge_counter.py b/artiq/test/coredevice/test_edge_counter.py new file mode 100644 index 000000000..1199af6a5 --- /dev/null +++ b/artiq/test/coredevice/test_edge_counter.py @@ -0,0 +1,97 @@ +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase + + +class EdgeCounterExp(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("loop_in_counter") + self.setattr_device("loop_out") + + @kernel + def count_pulse_edges(self, gate_fn): + self.core.break_realtime() + with parallel: + with sequential: + delay(5 * us) + self.loop_out.pulse(10 * us) + with sequential: + gate_fn(10 * us) + delay(1 * us) + gate_fn(10 * us) + return (self.loop_in_counter.fetch_count(), + self.loop_in_counter.fetch_count()) + + @kernel + def timeout_timestamp(self): + self.core.break_realtime() + timestamp_mu, _ = self.loop_in_counter.fetch_timestamped_count( + now_mu()) + return timestamp_mu + + @kernel + def gate_relative_timestamp(self): + self.core.break_realtime() + gate_end_mu = self.loop_in_counter.gate_rising(1 * us) + timestamp_mu, _ = self.loop_in_counter.fetch_timestamped_count() + return timestamp_mu - gate_end_mu + + @kernel + def many_pulses_split(self, num_pulses): + self.core.break_realtime() + + self.loop_in_counter.set_config( + count_rising=True, + count_falling=True, + send_count_event=False, + reset_to_zero=True) + + for _ in range(num_pulses): + self.loop_out.pulse(5 * us) + delay(5 * us) + + self.loop_in_counter.set_config( + count_rising=True, + count_falling=True, + send_count_event=True, + reset_to_zero=False) + + for _ in range(num_pulses): + self.loop_out.pulse(5 * us) + delay(5 * us) + + self.loop_in_counter.set_config( + count_rising=False, + count_falling=False, + send_count_event=True, + reset_to_zero=False) + + return (self.loop_in_counter.fetch_count(), + self.loop_in_counter.fetch_count()) + + +class EdgeCounterTest(ExperimentCase): + def setUp(self): + super().setUp() + self.exp = 
self.create(EdgeCounterExp) + + def test_sensitivity(self): + c = self.exp.loop_in_counter + self.assertEqual(self.exp.count_pulse_edges(c.gate_rising), (1, 0)) + self.assertEqual(self.exp.count_pulse_edges(c.gate_falling), (0, 1)) + self.assertEqual(self.exp.count_pulse_edges(c.gate_both), (1, 1)) + + def test_timeout_timestamp(self): + self.assertEqual(self.exp.timeout_timestamp(), -1) + + def test_gate_timestamp(self): + # The input event should be received at some point after it was + # requested, with some extra latency as it makes its way through the + # DRTIO machinery. (We only impose a somewhat arbitrary upper limit + # on the latency here.) + delta_mu = self.exp.gate_relative_timestamp() + self.assertGreaterEqual(delta_mu, 0) + self.assertLess(delta_mu, 100) + + def test_many_pulses_split(self): + self.assertEqual(self.exp.many_pulses_split(500), (1000, 2000)) diff --git a/artiq/test/coredevice/test_embedding.py b/artiq/test/coredevice/test_embedding.py index 26277a7c9..23d74fd7e 100644 --- a/artiq/test/coredevice/test_embedding.py +++ b/artiq/test/coredevice/test_embedding.py @@ -1,4 +1,5 @@ import numpy +import unittest from time import sleep from artiq.experiment import * @@ -22,6 +23,12 @@ class RoundtripTest(ExperimentCase): self.assertEqual(obj, objcopy) exp.roundtrip(obj, callback) + def assertArrayRoundtrip(self, obj): + exp = self.create(_Roundtrip) + def callback(objcopy): + numpy.testing.assert_array_equal(obj, objcopy) + exp.roundtrip(obj, callback) + def test_None(self): self.assertRoundtrip(None) @@ -48,9 +55,6 @@ class RoundtripTest(ExperimentCase): def test_list(self): self.assertRoundtrip([10]) - def test_array(self): - self.assertRoundtrip(numpy.array([10])) - def test_object(self): obj = object() self.assertRoundtrip(obj) @@ -58,6 +62,29 @@ class RoundtripTest(ExperimentCase): def test_object_list(self): self.assertRoundtrip([object(), object()]) + def test_list_tuple(self): + self.assertRoundtrip(([1, 2], [3, 4])) + + def test_list_mixed_tuple(self): + self.assertRoundtrip([(0x12345678, [("foo", [0.0, 1.0], [0, 1])])]) + + def test_array_1d(self): + self.assertArrayRoundtrip(numpy.array([1, 2, 3], dtype=numpy.int32)) + self.assertArrayRoundtrip(numpy.array([1.0, 2.0, 3.0])) + self.assertArrayRoundtrip(numpy.array(["a", "b", "c"])) + + def test_array_2d(self): + self.assertArrayRoundtrip(numpy.array([[1, 2], [3, 4]], dtype=numpy.int32)) + self.assertArrayRoundtrip(numpy.array([[1.0, 2.0], [3.0, 4.0]])) + self.assertArrayRoundtrip(numpy.array([["a", "b"], ["c", "d"]])) + + # FIXME: This should work, but currently passes as the argument is just + # synthesised as a call to array() without forwarding the dtype from the host + # NumPy object. 
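For context on the FIXME above: plain NumPy (independent of ARTIQ) already treats ragged input differently from a rectangular matrix, which is what the `expectedFailure` case just below feeds through the roundtrip. A host-side illustration only:

```python
import numpy

# Rectangular nested lists become a true 2-D array...
numpy.array([[1, 2], [3, 4]], dtype=numpy.int32).shape  # (2, 2)
# ...while ragged rows with dtype=object become a 1-D object array whose
# elements are Python lists, i.e. not a rectangular array at all.
numpy.array([[1, 2], [3]], dtype=object).shape          # (2,)
```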
+ @unittest.expectedFailure + def test_array_jagged(self): + self.assertArrayRoundtrip(numpy.array([[1, 2], [3]], dtype=object)) + class _DefaultArg(EnvExperiment): def build(self): @@ -80,7 +107,6 @@ class DefaultArgTest(ExperimentCase): class _RPCTypes(EnvExperiment): def build(self): self.setattr_device("core") - self.setattr_device("led") def return_bool(self) -> TBool: return True @@ -112,6 +138,12 @@ class _RPCTypes(EnvExperiment): def return_range(self) -> TRange32: return range(10) + def return_array(self) -> TArray(TInt32): + return numpy.array([1, 2]) + + def return_matrix(self) -> TArray(TInt32, 2): + return numpy.array([[1, 2], [3, 4]]) + def return_mismatch(self): return b"foo" @@ -127,6 +159,8 @@ class _RPCTypes(EnvExperiment): core_log(self.return_tuple()) core_log(self.return_list()) core_log(self.return_range()) + core_log(self.return_array()) + core_log(self.return_matrix()) def accept(self, value): pass @@ -140,9 +174,13 @@ class _RPCTypes(EnvExperiment): self.accept("foo") self.accept(b"foo") self.accept(bytearray(b"foo")) + self.accept(bytes([1, 2])) + self.accept(bytearray([1, 2])) self.accept((2, 3)) self.accept([1, 2]) self.accept(range(10)) + self.accept(numpy.array([1, 2])) + self.accept(numpy.array([[1, 2], [3, 4]])) self.accept(self) @kernel @@ -211,10 +249,29 @@ class _RPCCalls(EnvExperiment): def numpy_full(self): return numpy.full(10, 20) + @kernel + def numpy_full_matrix(self): + return numpy.full((3, 2), 13) + + @kernel + def numpy_nan(self): + return numpy.full(10, numpy.nan) + @kernel def builtin(self): sleep(1.0) + @rpc(flags={"async"}) + def async_rpc(self): + pass + + @kernel + def async_in_try(self): + try: + self.async_rpc() + except ValueError: + pass + class RPCCallsTest(ExperimentCase): def test_args(self): @@ -229,7 +286,10 @@ class RPCCallsTest(ExperimentCase): self.assertEqual(exp.numpy_things(), (numpy.int32(10), numpy.int64(20), numpy.array([42,]))) self.assertTrue((exp.numpy_full() == numpy.full(10, 20)).all()) + self.assertTrue((exp.numpy_full_matrix() == numpy.full((3, 2), 13)).all()) + self.assertTrue(numpy.isnan(exp.numpy_nan()).all()) exp.builtin() + exp.async_in_try() class _Annotation(EnvExperiment): @@ -290,3 +350,141 @@ class LargePayloadTest(ExperimentCase): def test_1MB(self): exp = self.create(_Payload1MB) exp.run() + + +class _ListTuple(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def run(self): + # Make sure lifetime for the array data in tuples of lists is managed + # correctly. This is written in a somewhat convoluted fashion to provoke + # memory corruption even in the face of compiler optimizations. 
+ for _ in range(self.get_num_iters()): + a, b = self.get_values(0, 1, 32) + c, d = self.get_values(2, 3, 64) + self.verify(a) + self.verify(c) + self.verify(b) + self.verify(d) + + @kernel + def verify(self, data): + for i in range(len(data)): + if data[i] != data[0] + i: + raise ValueError + + def get_num_iters(self) -> TInt32: + return 2 + + def get_values(self, base_a, base_b, n) -> TTuple([TList(TInt32), TList(TInt32)]): + return [numpy.int32(base_a + i) for i in range(n)], \ + [numpy.int32(base_b + i) for i in range(n)] + + +class _NestedTupleList(EnvExperiment): + def build(self): + self.setattr_device("core") + self.data = [(0x12345678, [("foo", [0.0, 1.0], [2, 3])]), + (0x76543210, [("bar", [4.0, 5.0], [6, 7])])] + + def get_data(self) -> TList(TTuple( + [TInt32, TList(TTuple([TStr, TList(TFloat), TList(TInt32)]))])): + return self.data + + @kernel + def run(self): + a = self.get_data() + if a != self.data: + raise ValueError + + +class _EmptyList(EnvExperiment): + def build(self): + self.setattr_device("core") + + def get_empty(self) -> TList(TInt32): + return [] + + @kernel + def run(self): + a = self.get_empty() + if a != []: + raise ValueError + + +class ListTupleTest(ExperimentCase): + def test_list_tuple(self): + self.create(_ListTuple).run() + + def test_nested_tuple_list(self): + self.create(_NestedTupleList).run() + + def test_empty_list(self): + self.create(_EmptyList).run() + + +class _ArrayQuoting(EnvExperiment): + def build(self): + self.setattr_device("core") + self.vec_i32 = numpy.array([0, 1], dtype=numpy.int32) + self.mat_i64 = numpy.array([[0, 1], [2, 3]], dtype=numpy.int64) + self.arr_f64 = numpy.array([[[0.0, 1.0], [2.0, 3.0]], + [[4.0, 5.0], [6.0, 7.0]]]) + self.strs = numpy.array(["foo", "bar"]) + + @kernel + def run(self): + assert self.vec_i32[0] == 0 + assert self.vec_i32[1] == 1 + + assert self.mat_i64[0, 0] == 0 + assert self.mat_i64[0, 1] == 1 + assert self.mat_i64[1, 0] == 2 + assert self.mat_i64[1, 1] == 3 + + assert self.arr_f64[0, 0, 0] == 0.0 + assert self.arr_f64[0, 0, 1] == 1.0 + assert self.arr_f64[0, 1, 0] == 2.0 + assert self.arr_f64[0, 1, 1] == 3.0 + assert self.arr_f64[1, 0, 0] == 4.0 + assert self.arr_f64[1, 0, 1] == 5.0 + assert self.arr_f64[1, 1, 0] == 6.0 + assert self.arr_f64[1, 1, 1] == 7.0 + + assert self.strs[0] == "foo" + assert self.strs[1] == "bar" + + +class ArrayQuotingTest(ExperimentCase): + def test_quoting(self): + self.create(_ArrayQuoting).run() + + +class _Assert(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def check(self, value): + assert value + + @kernel + def check_msg(self, value): + assert value, "foo" + + +class AssertTest(ExperimentCase): + def test_assert(self): + exp = self.create(_Assert) + + def check_fail(fn, msg): + with self.assertRaises(AssertionError) as ctx: + fn() + self.assertEqual(str(ctx.exception), msg) + + exp.check(True) + check_fail(lambda: exp.check(False), "AssertionError") + exp.check_msg(True) + check_fail(lambda: exp.check_msg(False), "foo") diff --git a/artiq/test/coredevice/test_i2c.py b/artiq/test/coredevice/test_i2c.py index b3d03c107..e5424efc6 100644 --- a/artiq/test/coredevice/test_i2c.py +++ b/artiq/test/coredevice/test_i2c.py @@ -24,6 +24,7 @@ class I2CSwitch(EnvExperiment): class NonexistentI2CBus(EnvExperiment): def build(self): self.setattr_device("core") + self.setattr_device("i2c_switch") # HACK: only run this test on boards with I2C self.broken_switch = PCA9548(self._HasEnvironment__device_mgr, 255) @kernel diff --git 
a/artiq/test/coredevice/test_moninj.py b/artiq/test/coredevice/test_moninj.py index 2d95e4f8e..83786a8a1 100644 --- a/artiq/test/coredevice/test_moninj.py +++ b/artiq/test/coredevice/test_moninj.py @@ -1,3 +1,4 @@ +import unittest import asyncio from artiq.coredevice.comm_moninj import * @@ -6,9 +7,17 @@ from artiq.test.hardware_testbench import ExperimentCase class MonInjTest(ExperimentCase): def test_moninj(self): - core_host = self.device_mgr.get_desc("core")["arguments"]["host"] - loop_out_channel = self.device_mgr.get_desc("loop_out")["arguments"]["channel"] - loop_in_channel = self.device_mgr.get_desc("loop_in")["arguments"]["channel"] + try: + core = self.device_mgr.get_desc("core") + loop_out = self.device_mgr.get_desc("loop_out") + loop_in = self.device_mgr.get_desc("loop_in") + except KeyError as e: + # skip if ddb does not match requirements + raise unittest.SkipTest( + "test device not available: `{}`".format(*e.args)) + core_host = core["arguments"]["host"] + loop_out_channel = loop_out["arguments"]["channel"] + loop_in_channel = loop_in["arguments"]["channel"] notifications = [] injection_statuses = [] @@ -25,8 +34,11 @@ class MonInjTest(ExperimentCase): loop.run_until_complete(moninj_comm.connect(core_host)) try: moninj_comm.get_injection_status(loop_out_channel, TTLOverride.en.value) - moninj_comm.monitor(True, loop_in_channel, TTLProbe.level.value) + moninj_comm.monitor_probe(True, loop_in_channel, TTLProbe.level.value) + moninj_comm.monitor_injection(True, loop_out_channel, TTLOverride.en.value) + loop.run_until_complete(asyncio.sleep(0.5)) moninj_comm.inject(loop_out_channel, TTLOverride.level.value, 0) + moninj_comm.inject(loop_out_channel, TTLOverride.oe.value, 1) moninj_comm.inject(loop_out_channel, TTLOverride.en.value, 1) loop.run_until_complete(asyncio.sleep(0.5)) moninj_comm.get_injection_status(loop_out_channel, TTLOverride.en.value) @@ -50,5 +62,7 @@ class MonInjTest(ExperimentCase): ]) self.assertEqual(injection_statuses, [ (loop_out_channel, TTLOverride.en.value, 0), + (loop_out_channel, TTLOverride.en.value, 0), + (loop_out_channel, TTLOverride.en.value, 1), (loop_out_channel, TTLOverride.en.value, 1) ]) diff --git a/artiq/test/coredevice/test_numpy.py b/artiq/test/coredevice/test_numpy.py new file mode 100644 index 000000000..a4b8897bd --- /dev/null +++ b/artiq/test/coredevice/test_numpy.py @@ -0,0 +1,126 @@ +from artiq.experiment import * +import numpy +import scipy.special +from artiq.test.hardware_testbench import ExperimentCase +from artiq.compiler.targets import CortexA9Target +from artiq.compiler import math_fns + + +class _RunOnDevice(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def run_on_kernel_unary(self, a, callback, numpy, scipy): + self.run(a, callback, numpy, scipy) + + @kernel + def run_on_kernel_binary(self, a, b, callback, numpy, scipy): + self.run(a, b, callback, numpy, scipy) + + +# Binary operations supported for scalars and arrays of any dimension, including +# broadcasting. 
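The comment above summarizes what the host/device comparison below covers. As a plain-NumPy illustration of the host-side reference semantics the device results are checked against (illustration only; the actual comparison uses `numpy.allclose` inside `_test_binop`/`_test_unaryop` below):

```python
import numpy

a = numpy.int32(3)
b = numpy.array([[4, 5, 6], [7, 8, 9]], dtype=numpy.int32)

a * b       # scalar broadcast across the matrix
b + b       # elementwise on equal shapes
b ** 2 - a  # the elementwise operators compose, still elementwise
```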
+ELEM_WISE_BINOPS = ["+", "*", "//", "%", "**", "-", "/"] + + +class CompareHostDeviceTest(ExperimentCase): + def _test_binop(self, op, a, b): + exp = self.create(_RunOnDevice) + exp.run = kernel_from_string(["a", "b", "callback", "numpy", "scipy"], + "callback(" + op + ")", + decorator=portable) + checked = False + + def with_host_result(host): + def with_both_results(device): + nonlocal checked + checked = True + self.assertTrue( + numpy.allclose(host, device, equal_nan=True), + "Discrepancy in binop test for '{}': Expected ({}, {}) -> {}, got {}" + .format(op, a, b, host, device)) + + exp.run_on_kernel_binary(a, b, with_both_results, numpy, scipy) + + exp.run(a, b, with_host_result, numpy, scipy) + self.assertTrue(checked, "Test did not run") + + def _test_unaryop(self, op, a): + exp = self.create(_RunOnDevice) + exp.run = kernel_from_string(["a", "callback", "numpy", "scipy"], + "callback(" + op + ")", + decorator=portable) + checked = False + + def with_host_result(host): + def with_both_results(device): + nonlocal checked + checked = True + self.assertTrue( + numpy.allclose(host, device, equal_nan=True), + "Discrepancy in unaryop test for '{}': Expected {} -> {}, got {}" + .format(op, a, host, device)) + + exp.run_on_kernel_unary(a, with_both_results, numpy, scipy) + + exp.run(a, with_host_result, numpy, scipy) + self.assertTrue(checked, "Test did not run") + + def test_scalar_scalar_binops(self): + # Some arbitrarily chosen arguments of different types. Could be turned into + # randomised tests instead. + # TODO: Provoke overflows, division by zero, etc., and compare results. + args = [(typ(a), typ(b)) for a, b in [(0, 1), (3, 2), (11, 6)] + for typ in [numpy.int32, numpy.int64, numpy.float]] + for op in ELEM_WISE_BINOPS: + for arg in args: + self._test_binop("a" + op + "b", *arg) + + def test_scalar_matrix_binops(self): + for typ in [numpy.int32, numpy.int64, numpy.float]: + scalar = typ(3) + matrix = numpy.array([[4, 5, 6], [7, 8, 9]], dtype=typ) + for op in ELEM_WISE_BINOPS: + code = "a" + op + "b" + self._test_binop(code, scalar, matrix) + self._test_binop(code, matrix, scalar) + self._test_binop(code, matrix, matrix) + + def test_unary_math_fns(self): + names = [ + a for a, _ in math_fns.unary_fp_intrinsics + math_fns.unary_fp_runtime_calls + ] + exp = self.create(_RunOnDevice) + if exp.core.target_cls != CortexA9Target: + names.remove("exp2") + names.remove("log2") + names.remove("trunc") + for name in names: + op = "numpy.{}(a)".format(name) + # Avoid 0.5, as numpy.rint's rounding mode currently doesn't match. + self._test_unaryop(op, 0.51) + self._test_unaryop(op, numpy.array([[0.3, 0.4], [0.51, 0.6]])) + + def test_unary_scipy_fns(self): + names = [name for name, _ in math_fns.scipy_special_unary_runtime_calls] + if self.create(_RunOnDevice).core.target_cls != CortexA9Target: + names.remove("gamma") + for name in names: + op = "scipy.special.{}(a)".format(name) + self._test_unaryop(op, 0.5) + self._test_unaryop(op, numpy.array([[0.3, 0.4], [0.5, 0.6]])) + + def test_binary_math_fns(self): + names = [name for name, _ in math_fns.binary_fp_runtime_calls] + exp = self.create(_RunOnDevice) + if exp.core.target_cls != CortexA9Target: + names.remove("fmax") + names.remove("fmin") + for name in names: + code = "numpy.{}(a, b)".format(name) + # Avoid 0.5, as numpy.rint's rounding mode currently doesn't match. 
+ self._test_binop(code, 1.0, 2.0) + self._test_binop(code, 1.0, numpy.array([2.0, 3.0])) + self._test_binop(code, numpy.array([1.0, 2.0]), 3.0) + self._test_binop(code, numpy.array([1.0, 2.0]), numpy.array([3.0, 4.0])) diff --git a/artiq/test/coredevice/test_performance.py b/artiq/test/coredevice/test_performance.py index 71101d508..3ca1f86ba 100644 --- a/artiq/test/coredevice/test_performance.py +++ b/artiq/test/coredevice/test_performance.py @@ -1,54 +1,295 @@ import os +import time import unittest +import numpy from artiq.experiment import * from artiq.test.hardware_testbench import ExperimentCase +# large: 1MB payload +# small: 1KB payload +bytes_large = b"\x00" * (1 << 20) +bytes_small = b"\x00" * (1 << 10) -artiq_low_latency = os.getenv("ARTIQ_LOW_LATENCY") +list_large = [123] * (1 << 18) +list_small = [123] * (1 << 8) +array_large = numpy.array(list_large, numpy.int32) +array_small = numpy.array(list_small, numpy.int32) + +byte_list_large = [True] * (1 << 20) +byte_list_small = [True] * (1 << 10) + +received_bytes = 0 +time_start = 0 +time_end = 0 class _Transfer(EnvExperiment): def build(self): self.setattr_device("core") - self.data = b"\x00"*(10**6) + self.count = 10 + self.h2d = [0.0] * self.count + self.d2h = [0.0] * self.count @rpc - def source(self) -> TBytes: - return self.data + def get_bytes(self, large: TBool) -> TBytes: + if large: + return bytes_large + else: + return bytes_small + + @rpc + def get_list(self, large: TBool) -> TList(TInt32): + if large: + return list_large + else: + return list_small + + @rpc + def get_byte_list(self, large: TBool) -> TList(TBool): + if large: + return byte_list_large + else: + return byte_list_small + + @rpc + def get_array(self, large: TBool) -> TArray(TInt32): + if large: + return array_large + else: + return array_small + + @rpc + def get_string_list(self) -> TList(TStr): + return string_list + + @rpc + def sink(self, data): + pass @rpc(flags={"async"}) - def sink(self, data): - assert data == self.data + def sink_async(self, data): + global received_bytes, time_start, time_end + if received_bytes == 0: + time_start = time.time() + received_bytes += len(data) + if received_bytes == (1024 ** 2)*128: + time_end = time.time() + + @rpc + def get_async_throughput(self) -> TFloat: + return 128.0 / (time_end - time_start) @kernel - def host_to_device(self): - t0 = self.core.get_rtio_counter_mu() - data = self.source() - t1 = self.core.get_rtio_counter_mu() - return len(data)/self.core.mu_to_seconds(t1-t0) + def test_bytes(self, large): + def inner(): + t0 = self.core.get_rtio_counter_mu() + data = self.get_bytes(large) + t1 = self.core.get_rtio_counter_mu() + self.sink(data) + t2 = self.core.get_rtio_counter_mu() + self.h2d[i] = self.core.mu_to_seconds(t1 - t0) + self.d2h[i] = self.core.mu_to_seconds(t2 - t1) + + for i in range(self.count): + inner() + return (self.h2d, self.d2h) @kernel - def device_to_host(self): - t0 = self.core.get_rtio_counter_mu() - self.sink(self.data) - t1 = self.core.get_rtio_counter_mu() - return len(self.data)/self.core.mu_to_seconds(t1-t0) + def test_byte_list(self, large): + def inner(): + t0 = self.core.get_rtio_counter_mu() + data = self.get_byte_list(large) + t1 = self.core.get_rtio_counter_mu() + self.sink(data) + t2 = self.core.get_rtio_counter_mu() + self.h2d[i] = self.core.mu_to_seconds(t1 - t0) + self.d2h[i] = self.core.mu_to_seconds(t2 - t1) + for i in range(self.count): + inner() + return (self.h2d, self.d2h) + + @kernel + def test_list(self, large): + def inner(): + t0 = 
self.core.get_rtio_counter_mu() + data = self.get_list(large) + t1 = self.core.get_rtio_counter_mu() + self.sink(data) + t2 = self.core.get_rtio_counter_mu() + self.h2d[i] = self.core.mu_to_seconds(t1 - t0) + self.d2h[i] = self.core.mu_to_seconds(t2 - t1) + + for i in range(self.count): + inner() + return (self.h2d, self.d2h) + + @kernel + def test_array(self, large): + def inner(): + t0 = self.core.get_rtio_counter_mu() + data = self.get_array(large) + t1 = self.core.get_rtio_counter_mu() + self.sink(data) + t2 = self.core.get_rtio_counter_mu() + self.h2d[i] = self.core.mu_to_seconds(t1 - t0) + self.d2h[i] = self.core.mu_to_seconds(t2 - t1) + + for i in range(self.count): + inner() + return (self.h2d, self.d2h) + + @kernel + def test_async(self): + data = self.get_bytes(True) + for _ in range(128): + self.sink_async(data) + return self.get_async_throughput() class TransferTest(ExperimentCase): - @unittest.skipUnless(artiq_low_latency, - "timings are dependent on CPU load and network conditions") - def test_host_to_device(self): - exp = self.create(_Transfer) - host_to_device_rate = exp.host_to_device() - print(host_to_device_rate, "B/s") - self.assertGreater(host_to_device_rate, 2e6) + @classmethod + def setUpClass(self): + self.results = [] - @unittest.skipUnless(artiq_low_latency, - "timings are dependent on CPU load and network conditions") - def test_device_to_host(self): + @classmethod + def tearDownClass(self): + if len(self.results) == 0: + return + max_length = max(max(len(row[0]) for row in self.results), len("Test")) + + def pad(name): + nonlocal max_length + return name + " " * (max_length - len(name)) + print() + print("| {} | Mean (MiB/s) | std (MiB/s) |".format(pad("Test"))) + print("| {} | ------------ | ------------ |".format("-" * max_length)) + for v in self.results: + print("| {} | {:>12.2f} | {:>12.2f} |".format( + pad(v[0]), v[1], v[2])) + + def test_bytes_large(self): exp = self.create(_Transfer) - device_to_host_rate = exp.device_to_host() - print(device_to_host_rate, "B/s") - self.assertGreater(device_to_host_rate, 2e6) + results = exp.test_bytes(True) + host_to_device = (1 << 20) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 20) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["Bytes (1MB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["Bytes (1MB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_bytes_small(self): + exp = self.create(_Transfer) + results = exp.test_bytes(False) + host_to_device = (1 << 10) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 10) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["Bytes (1KB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["Bytes (1KB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_byte_list_large(self): + exp = self.create(_Transfer) + results = exp.test_byte_list(True) + host_to_device = (1 << 20) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 20) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["Bytes List (1MB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["Bytes List (1MB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_byte_list_small(self): + exp = self.create(_Transfer) + results = 
exp.test_byte_list(False) + host_to_device = (1 << 10) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 10) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["Bytes List (1KB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["Bytes List (1KB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_list_large(self): + exp = self.create(_Transfer) + results = exp.test_list(True) + host_to_device = (1 << 20) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 20) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["I32 List (1MB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["I32 List (1MB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_list_small(self): + exp = self.create(_Transfer) + results = exp.test_list(False) + host_to_device = (1 << 10) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 10) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["I32 List (1KB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["I32 List (1KB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_array_large(self): + exp = self.create(_Transfer) + results = exp.test_array(True) + host_to_device = (1 << 20) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 20) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["I32 Array (1MB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["I32 Array (1MB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_array_small(self): + exp = self.create(_Transfer) + results = exp.test_array(False) + host_to_device = (1 << 10) / numpy.array(results[0], numpy.float64) + device_to_host = (1 << 10) / numpy.array(results[1], numpy.float64) + host_to_device /= 1024*1024 + device_to_host /= 1024*1024 + self.results.append(["I32 Array (1KB) H2D", host_to_device.mean(), + host_to_device.std()]) + self.results.append(["I32 Array (1KB) D2H", device_to_host.mean(), + device_to_host.std()]) + + def test_async_throughput(self): + exp = self.create(_Transfer) + results = exp.test_async() + print("Async throughput: {:>6.2f}MiB/s".format(results)) + +class _KernelOverhead(EnvExperiment): + def build(self): + self.setattr_device("core") + + def kernel_overhead(self): + n = 100 + t0 = time.monotonic() + for _ in range(n): + self.dummy_kernel() + t1 = time.monotonic() + return (t1-t0)/n + + @kernel + def dummy_kernel(self): + pass + + +class KernelOverheadTest(ExperimentCase): + def test_kernel_overhead(self): + exp = self.create(_KernelOverhead) + kernel_overhead = exp.kernel_overhead() + print(kernel_overhead, "s") + self.assertGreater(kernel_overhead, 0.001) + self.assertLess(kernel_overhead, 0.5) diff --git a/artiq/test/coredevice/test_phaser.py b/artiq/test/coredevice/test_phaser.py new file mode 100644 index 000000000..18aac6a57 --- /dev/null +++ b/artiq/test/coredevice/test_phaser.py @@ -0,0 +1,34 @@ +import unittest +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase +from artiq.language.core import kernel, delay +from artiq.language.units import us + + +class PhaserExperiment(EnvExperiment): + def build(self): + 
self.setattr_device("core") + self.setattr_device("phaser0") + + @kernel + def run(self): + self.core.reset() + # The Phaser initialization performs a comprehensive test: + # * Fastlink bringup + # * Fastlink error counter + # * Board identification + # * Hardware identification + # * SPI write, readback, timing + # * Temperature readout + # * DAC identification, IOTEST, alarm sweep, PLL configuration, FIFO + # alignment + # * DUC+Oscillator configuration, data end-to-end verification and + # readback + # * Attenuator write and readback + # * TRF bringup PLL locking + self.phaser0.init() + + +class PhaserTest(ExperimentCase): + def test(self): + self.execute(PhaserExperiment) diff --git a/artiq/test/coredevice/test_portability.py b/artiq/test/coredevice/test_portability.py index cd301d29b..a861b6833 100644 --- a/artiq/test/coredevice/test_portability.py +++ b/artiq/test/coredevice/test_portability.py @@ -10,7 +10,7 @@ def _run_on_host(k_class, *args, **kwargs): device_mgr = dict() device_mgr["core"] = sim_devices.Core(device_mgr) - k_inst = k_class((device_mgr, None, None), + k_inst = k_class((device_mgr, None, None, {}), *args, **kwargs) k_inst.run() return k_inst @@ -202,6 +202,20 @@ class _RPCExceptions(EnvExperiment): self.success = True +class _Keywords(EnvExperiment): + def build(self, value, output): + self.setattr_device("core") + self.value = value + self.output = output + + def rpc(self, kw): + self.output.append(kw) + + @kernel + def run(self): + self.rpc(kw=self.value) + + class HostVsDeviceCase(ExperimentCase): def test_primes(self): l_device, l_host = [], [] @@ -245,3 +259,18 @@ class HostVsDeviceCase(ExperimentCase): f(_RPCExceptions, catch=False) uut = self.execute(_RPCExceptions, catch=True) self.assertTrue(uut.success) + + def test_keywords(self): + for f in self.execute, _run_on_host: + output = [] + f(_Keywords, value=0, output=output) + self.assertEqual(output, [0]) + output = [] + f(_Keywords, value=1, output=output) + self.assertEqual(output, [1]) + output = [] + f(_Keywords, value=False, output=output) + self.assertEqual(output, [False]) + output = [] + f(_Keywords, value=True, output=output) + self.assertEqual(output, [True]) diff --git a/artiq/test/coredevice/test_rtio.py b/artiq/test/coredevice/test_rtio.py index 92a0d1abc..3313b5c14 100644 --- a/artiq/test/coredevice/test_rtio.py +++ b/artiq/test/coredevice/test_rtio.py @@ -12,12 +12,41 @@ from artiq.coredevice import exceptions from artiq.coredevice.comm_mgmt import CommMgmt from artiq.coredevice.comm_analyzer import (StoppedMessage, OutputMessage, InputMessage, decode_dump, get_analyzer_dump) +from artiq.compiler.targets import CortexA9Target artiq_low_latency = os.getenv("ARTIQ_LOW_LATENCY") artiq_in_devel = os.getenv("ARTIQ_IN_DEVEL") +class RTIOCounter(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def run(self): + t0 = self.core.get_rtio_counter_mu() + t1 = self.core.get_rtio_counter_mu() + self.set_dataset("dt", self.core.mu_to_seconds(t1 - t0)) + + +class InvalidCounter(Exception): + pass + + +class WaitForRTIOCounter(EnvExperiment): + def build(self): + self.setattr_device("core") + + @kernel + def run(self): + self.core.break_realtime() + target_mu = now_mu() + 10000 + self.core.wait_until_mu(target_mu) + if self.core.get_rtio_counter_mu() < target_mu: + raise InvalidCounter + + class PulseNotReceived(Exception): pass @@ -40,7 +69,7 @@ class RTT(EnvExperiment): delay(1*us) t0 = now_mu() self.ttl_inout.pulse(1*us) - t1 = 
self.ttl_inout.timestamp_mu(now_mu()) if t1 < 0: raise PulseNotReceived() self.set_dataset("rtt", self.core.mu_to_seconds(t1 - t0)) @@ -64,7 +93,7 @@ class Loopback(EnvExperiment): delay(1*us) t0 = now_mu() self.loop_out.pulse(1*us) - t1 = self.loop_in.timestamp_mu() + t1 = self.loop_in.timestamp_mu(now_mu()) if t1 < 0: raise PulseNotReceived() self.set_dataset("rtt", self.core.mu_to_seconds(t1 - t0)) @@ -81,13 +110,13 @@ class ClockGeneratorLoopback(EnvExperiment): self.core.reset() self.loop_clock_in.input() self.loop_clock_out.stop() - delay(20*us) + delay(200*us) with parallel: self.loop_clock_in.gate_rising(10*us) with sequential: delay(200*ns) self.loop_clock_out.set(1*MHz) - self.set_dataset("count", self.loop_clock_in.count()) + self.set_dataset("count", self.loop_clock_in.count(now_mu())) class PulseRate(EnvExperiment): @@ -113,25 +142,25 @@ class PulseRate(EnvExperiment): return -class PulseRateDDS(EnvExperiment): +class PulseRateAD9914DDS(EnvExperiment): def build(self): self.setattr_device("core") - self.setattr_device("core_dds") - self.setattr_device("dds0") - self.setattr_device("dds1") + self.setattr_device("ad9914dds0") + self.setattr_device("ad9914dds1") @kernel def run(self): self.core.reset() dt = self.core.seconds_to_mu(5*us) - freq = self.core_dds.frequency_to_ftw(100*MHz) + freq = self.ad9914dds0.frequency_to_ftw(100*MHz) while True: delay(10*ms) for i in range(1250): try: - with self.core_dds.batch: - self.dds0.set_mu(freq) - self.dds1.set_mu(freq) + delay_mu(-self.ad9914dds0.set_duration_mu) + self.ad9914dds0.set_mu(freq) + delay_mu(self.ad9914dds0.set_duration_mu) + self.ad9914dds1.set_mu(freq) delay_mu(dt) except RTIOUnderflow: dt += 100 @@ -142,17 +171,6 @@ class PulseRateDDS(EnvExperiment): return -class Watchdog(EnvExperiment): - def build(self): - self.setattr_device("core") - - @kernel - def run(self): - with watchdog(50*ms): - while True: - pass - - class LoopbackCount(EnvExperiment): def build(self, npulses): self.setattr_device("core") @@ -175,7 +193,69 @@ class LoopbackCount(EnvExperiment): for i in range(self.npulses): delay(25*ns) self.loop_out.pulse(25*ns) - self.set_dataset("count", self.loop_in.count()) + self.set_dataset("count", self.loop_in.count(now_mu())) + + +class IncorrectPulseTiming(Exception): + pass + + +class LoopbackGateTiming(EnvExperiment): + def build(self): + self.setattr_device("core") + self.setattr_device("loop_in") + self.setattr_device("loop_out") + + @kernel + def run(self): + # Make sure there are no leftover events. + self.core.reset() + self.loop_in.input() + self.loop_out.output() + delay_mu(500) + self.loop_out.off() + delay_mu(5000) + + # Determine loop delay. + with parallel: + self.loop_in.gate_rising_mu(10000) + with sequential: + delay_mu(5000) + out_mu = now_mu() + self.loop_out.pulse_mu(1000) + in_mu = self.loop_in.timestamp_mu(now_mu()) + if in_mu < 0: + raise PulseNotReceived("Cannot determine loop delay") + loop_delay_mu = in_mu - out_mu + + # With the exact delay known, make sure tight gate timings work. + # In the most common configuration, 24 mu == 24 ns == 3 coarse periods, + # which should be plenty of slack. + # FIXME: ZC706 with NIST_QC2 needs 48ns - hw problem? 
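A quick restatement of the arithmetic in the gate-timing comment above, as a sketch assuming the common configuration of 1 ns per machine unit and an 8 ns RTIO coarse period (125 MHz coarse clock); other gateware configurations will differ:

```python
ns_per_mu = 1          # assumed timeline granularity
coarse_period_ns = 8   # assumed RTIO coarse period (125 MHz)
gate_mu = 24
gate_mu * ns_per_mu / coarse_period_ns  # -> 3.0 coarse periods of slack
```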
+ delay_mu(10000) + + gate_start_mu = now_mu() + self.loop_in.gate_both_mu(48) # XXX + gate_end_mu = now_mu() + + # gateware latency offset between gate and input + lat_offset = 11*8 + out_mu = gate_start_mu - loop_delay_mu + lat_offset + at_mu(out_mu) + self.loop_out.pulse_mu(48) # XXX + + in_mu = self.loop_in.timestamp_mu(gate_end_mu) + print("timings: ", gate_start_mu, in_mu - lat_offset, gate_end_mu) + if in_mu < 0: + raise PulseNotReceived() + if not (gate_start_mu <= (in_mu - lat_offset) <= gate_end_mu): + raise IncorrectPulseTiming("Input event should occur during gate") + if not (-2 < (in_mu - out_mu - loop_delay_mu) < 2): + raise IncorrectPulseTiming("Loop delay should not change") + + in_mu = self.loop_in.timestamp_mu(gate_end_mu) + if in_mu > 0: + raise IncorrectPulseTiming("Only one pulse should be received") class IncorrectLevel(Exception): @@ -258,10 +338,10 @@ class SequenceError(EnvExperiment): @kernel def run(self): self.core.reset() - t = now_mu() - self.ttl_out.pulse(25*us) - at_mu(t) - self.ttl_out.pulse(25*us) + delay(55*256*us) + for _ in range(256): + self.ttl_out.pulse(25*us) + delay(-75*us) class Collision(EnvExperiment): @@ -276,6 +356,8 @@ class Collision(EnvExperiment): for i in range(16): self.ttl_out_serdes.pulse_mu(1) delay_mu(1) + while self.core.get_rtio_counter_mu() < now_mu(): + pass class AddressCollision(EnvExperiment): @@ -288,6 +370,8 @@ class AddressCollision(EnvExperiment): self.core.reset() self.loop_in.input() self.loop_in.pulse(10*us) + while self.core.get_rtio_counter_mu() < now_mu(): + pass class TimeKeepsRunning(EnvExperiment): @@ -353,12 +437,23 @@ class HandoverException(EnvExperiment): class CoredeviceTest(ExperimentCase): + def test_rtio_counter(self): + self.execute(RTIOCounter) + dt = self.dataset_mgr.get("dt") + print(dt) + self.assertGreater(dt, 50*ns) + self.assertLess(dt, 1*us) + + def test_wait_for_rtio_counter(self): + self.execute(WaitForRTIOCounter) + def test_loopback(self): self.execute(Loopback) rtt = self.dataset_mgr.get("rtt") print(rtt) - self.assertGreater(rtt, 0*ns) - self.assertLess(rtt, 60*ns) + self.assertGreater(rtt, 20*ns) + # on Kasli systems, this has to go through the isolated DIO card + self.assertLess(rtt, 170*ns) def test_clock_generator_loopback(self): self.execute(ClockGeneratorLoopback) @@ -367,15 +462,19 @@ class CoredeviceTest(ExperimentCase): def test_pulse_rate(self): """Minimum interval for sustained TTL output switching""" - self.execute(PulseRate) + exp = self.execute(PulseRate) rate = self.dataset_mgr.get("pulse_rate") print(rate) self.assertGreater(rate, 100*ns) - self.assertLess(rate, 700*ns) + if exp.core.target_cls == CortexA9Target: + # Crappy AXI PS/PL interface from Xilinx is slow. 
+ self.assertLess(rate, 810*ns) + else: + self.assertLess(rate, 480*ns) - def test_pulse_rate_dds(self): - """Minimum interval for sustained DDS frequency switching""" - self.execute(PulseRateDDS) + def test_pulse_rate_ad9914_dds(self): + """Minimum interval for sustained AD9914 DDS frequency switching""" + self.execute(PulseRateAD9914DDS) rate = self.dataset_mgr.get("pulse_rate") print(rate) self.assertGreater(rate, 1*us) @@ -387,6 +486,9 @@ class CoredeviceTest(ExperimentCase): count = self.dataset_mgr.get("count") self.assertEqual(count, npulses) + def test_loopback_gate_timing(self): + self.execute(LoopbackGateTiming) + def test_level(self): self.execute(Level) @@ -397,35 +499,24 @@ class CoredeviceTest(ExperimentCase): with self.assertRaises(RTIOUnderflow): self.execute(Underflow) + def execute_and_test_in_log(self, experiment, string): + core_addr = self.device_mgr.get_desc("core")["arguments"]["host"] + mgmt = CommMgmt(core_addr) + mgmt.clear_log() + self.execute(experiment) + log = mgmt.get_log() + self.assertIn(string, log) + mgmt.close() + def test_sequence_error(self): - with self.assertRaises(RTIOSequenceError): - self.execute(SequenceError) + self.execute_and_test_in_log(SequenceError, "RTIO sequence error") def test_collision(self): - core_addr = self.device_mgr.get_desc("core")["arguments"]["host"] - mgmt = CommMgmt(core_addr) - mgmt.clear_log() - self.execute(Collision) - log = mgmt.get_log() - self.assertIn("RTIO collision", log) - mgmt.close() + self.execute_and_test_in_log(Collision, "RTIO collision") def test_address_collision(self): - core_addr = self.device_mgr.get_desc("core")["arguments"]["host"] - mgmt = CommMgmt(core_addr) - mgmt.clear_log() - self.execute(AddressCollision) - log = mgmt.get_log() - self.assertIn("RTIO collision", log) - mgmt.close() + self.execute_and_test_in_log(AddressCollision, "RTIO collision") - def test_watchdog(self): - # watchdog only works on the device - with self.assertRaises(exceptions.WatchdogExpired): - self.execute(Watchdog) - - @unittest.skipUnless(artiq_low_latency, - "timings are dependent on CPU load and network conditions") def test_time_keeps_running(self): self.execute(TimeKeepsRunning) t1 = self.dataset_mgr.get("time_at_start") @@ -491,20 +582,24 @@ class RPCTest(ExperimentCase): class _DMA(EnvExperiment): - def build(self, trace_name="foobar"): + def build(self, trace_name="test_rtio"): self.setattr_device("core") self.setattr_device("core_dma") - self.setattr_device("ttl1") + self.setattr_device("ttl_out") self.trace_name = trace_name self.delta = np.int64(0) @kernel - def record(self): + def record(self, for_handle=True): with self.core_dma.record(self.trace_name): + # When not using the handle, retrieving the DMA trace + # in dma.playback() can be slow. Allow some time. 
+ if not for_handle: + delay(1*ms) delay(100*ns) - self.ttl1.on() + self.ttl_out.on() delay(100*ns) - self.ttl1.off() + self.ttl_out.off() @kernel def record_many(self, n): @@ -512,29 +607,33 @@ class _DMA(EnvExperiment): with self.core_dma.record(self.trace_name): for i in range(n//2): delay(100*ns) - self.ttl1.on() + self.ttl_out.on() delay(100*ns) - self.ttl1.off() + self.ttl_out.off() t2 = self.core.get_rtio_counter_mu() self.set_dataset("dma_record_time", self.core.mu_to_seconds(t2 - t1)) @kernel - def playback(self, use_handle=False): - self.core.break_realtime() - start = now_mu() + def playback(self, use_handle=True): if use_handle: handle = self.core_dma.get_handle(self.trace_name) + self.core.break_realtime() + start = now_mu() self.core_dma.playback_handle(handle) else: + self.core.break_realtime() + start = now_mu() self.core_dma.playback(self.trace_name) self.delta = now_mu() - start @kernel - def playback_many(self, n): - self.core.break_realtime() + def playback_many(self, n, add_delay=False): handle = self.core_dma.get_handle(self.trace_name) + self.core.break_realtime() t1 = self.core.get_rtio_counter_mu() for i in range(n): + if add_delay: + delay(2*us) self.core_dma.playback_handle(handle) t2 = self.core.get_rtio_counter_mu() self.set_dataset("dma_playback_time", self.core.mu_to_seconds(t2 - t1)) @@ -579,9 +678,10 @@ class DMATest(ExperimentCase): core_host = self.device_mgr.get_desc("core")["arguments"]["host"] exp = self.create(_DMA) - exp.record() + channel = exp.ttl_out.channel for use_handle in [False, True]: + exp.record(use_handle) get_analyzer_dump(core_host) # clear analyzer buffer exp.playback(use_handle) @@ -589,11 +689,11 @@ class DMATest(ExperimentCase): self.assertEqual(len(dump.messages), 3) self.assertIsInstance(dump.messages[-1], StoppedMessage) self.assertIsInstance(dump.messages[0], OutputMessage) - self.assertEqual(dump.messages[0].channel, 1) + self.assertEqual(dump.messages[0].channel, channel) self.assertEqual(dump.messages[0].address, 0) self.assertEqual(dump.messages[0].data, 1) self.assertIsInstance(dump.messages[1], OutputMessage) - self.assertEqual(dump.messages[1].channel, 1) + self.assertEqual(dump.messages[1].channel, channel) self.assertEqual(dump.messages[1].address, 0) self.assertEqual(dump.messages[1].data, 0) self.assertEqual(dump.messages[1].timestamp - @@ -603,9 +703,13 @@ class DMATest(ExperimentCase): exp = self.create(_DMA) exp.record() - for use_handle in [False, True]: - exp.playback(use_handle) - self.assertEqual(exp.delta, 200) + exp.record(False) + exp.playback(False) + self.assertEqual(exp.delta, 1000200) + + exp.record(True) + exp.playback(True) + self.assertEqual(exp.delta, 200) def test_dma_record_time(self): exp = self.create(_DMA) @@ -613,16 +717,33 @@ class DMATest(ExperimentCase): exp.record_many(count) dt = self.dataset_mgr.get("dma_record_time") print("dt={}, dt/count={}".format(dt, dt/count)) - self.assertLess(dt/count, 20*us) + self.assertLess(dt/count, 11*us) def test_dma_playback_time(self): + # Skip on Kasli until #946 is resolved. + try: + # hack to detect Kasli. 
+ self.device_mgr.get_desc("ad9914dds0") + except KeyError: + raise unittest.SkipTest("skipped on Kasli for now") + exp = self.create(_DMA) + is_zynq = exp.core.target_cls == CortexA9Target count = 20000 - exp.record() - exp.playback_many(count) + exp.record_many(40) + exp.playback_many(count, is_zynq) dt = self.dataset_mgr.get("dma_playback_time") print("dt={}, dt/count={}".format(dt, dt/count)) - self.assertLess(dt/count, 3*us) + if is_zynq: + self.assertLess(dt/count, 6.2*us) + else: + self.assertLess(dt/count, 4.5*us) + + def test_dma_underflow(self): + exp = self.create(_DMA) + exp.record() + with self.assertRaises(RTIOUnderflow): + exp.playback_many(20000) def test_handle_invalidation(self): exp = self.create(_DMA) diff --git a/artiq/test/coredevice/test_spi.py b/artiq/test/coredevice/test_spi.py index 076185c0a..17bf6eeb6 100644 --- a/artiq/test/coredevice/test_spi.py +++ b/artiq/test/coredevice/test_spi.py @@ -3,13 +3,15 @@ from artiq.experiment import * from artiq.test.hardware_testbench import ExperimentCase from artiq.language.core import (kernel, delay_mu, delay) from artiq.language.units import us -from artiq.coredevice import spi +from artiq.coredevice import spi2 as spi -_SDCARD_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_CS_POLARITY | +_SDCARD_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END | + 0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY | 0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE | 0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX) + class CardTest(EnvExperiment): def build(self): self.setattr_device("core") @@ -18,44 +20,41 @@ class CardTest(EnvExperiment): @kernel def run(self): self.core.reset() - self.core.break_realtime() - response = 0xff - self.spi_mmc.set_config(_SDCARD_SPI_CONFIG, 500*kHz, 500*kHz) - self.spi_mmc.set_xfer(0, 8, 0) + + freq = 1*MHz + cs = 1 + # run a couple of clock cycles with miso high to wake up the card + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG, 32, freq, 0) for i in range(10): self.spi_mmc.write(0xffffffff) - delay(-5*us) - - delay(100*us) - - self.spi_mmc.set_xfer(1, 8, 0) - self.spi_mmc.write(0x40000000) - delay(-5*us) - self.spi_mmc.write(0x00000000) - delay(-5*us) - self.spi_mmc.write(0x00000000) - delay(-5*us) - self.spi_mmc.write(0x00000000) - delay(-5*us) - self.spi_mmc.write(0x00000000) - delay(-5*us) - self.spi_mmc.write(0x95000000) - delay(-5*us) - - self.spi_mmc.set_xfer(1, 0, 24) + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG | spi.SPI_END, 32, freq, 0) self.spi_mmc.write(0xffffffff) - response = self.spi_mmc.read_sync() + delay(200*us) - sd_response = False - for i in range(3): - if ((response >> 8*i) & 0x0000ff) == 0x01: - sd_response = True + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG, 8, freq, cs) + self.spi_mmc.write(0x40 << 24) # CMD + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG, 32, freq, cs) + self.spi_mmc.write(0x00000000) # ARG + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG, 8, freq, cs) + self.spi_mmc.write(0x95 << 24) # CRC + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG | spi.SPI_INPUT, 8, freq, cs) + idle = False + response = 0 + for i in range(8): + self.spi_mmc.write(0xff << 24) # NCR + response = self.spi_mmc.read() + delay(100*us) + if response == 0x01: + idle = True break - self.set_dataset("sd_response", sd_response) + self.spi_mmc.set_config(_SDCARD_SPI_CONFIG | spi.SPI_END, 8, freq, cs) + self.spi_mmc.write(0xff << 24) + if not idle: + print(response) + raise ValueError("SD Card did not reply with IDLE") class SDTest(ExperimentCase): def test(self): self.execute(CardTest) - 
self.assertTrue(self.dataset_mgr.get("sd_response")) diff --git a/artiq/test/coredevice/test_stress.py b/artiq/test/coredevice/test_stress.py new file mode 100644 index 000000000..514a2d8ac --- /dev/null +++ b/artiq/test/coredevice/test_stress.py @@ -0,0 +1,26 @@ +import os +import time +import unittest + +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase + + +class _Stress(EnvExperiment): + def build(self): + self.setattr_device("core") + + @rpc(flags={"async"}) + def sink(self, data): + pass + + @kernel + def async_rpc(self, n): + for _ in range(n): + self.sink(b"") + + +class StressTest(ExperimentCase): + def test_async_rpc(self): + exp = self.create(_Stress) + exp.async_rpc(16000) diff --git a/artiq/test/coredevice/test_urukul.py b/artiq/test/coredevice/test_urukul.py new file mode 100644 index 000000000..be43e6641 --- /dev/null +++ b/artiq/test/coredevice/test_urukul.py @@ -0,0 +1,191 @@ +from artiq.experiment import * +from artiq.test.hardware_testbench import ExperimentCase +from artiq.coredevice import urukul + + +class UrukulExp(EnvExperiment): + def build(self, runner): + self.setattr_device("core") + self.dev = self.get_device("urukul_cpld") + self.runner = runner + + def run(self): + getattr(self, self.runner)() + + @kernel + def instantiate(self): + pass + + @kernel + def init(self): + self.core.break_realtime() + self.dev.init() + + @kernel + def cfg_write(self): + self.core.break_realtime() + self.dev.init() + self.dev.cfg_write(self.dev.cfg_reg) + + @kernel + def sta_read(self): + self.core.break_realtime() + self.dev.init() + sta = self.dev.sta_read() + self.set_dataset("sta", sta) + + @kernel + def switches(self): + self.core.break_realtime() + self.dev.init() + self.dev.io_rst() + self.dev.cfg_sw(0, 0) + self.dev.cfg_sw(0, 1) + self.dev.cfg_sw(3, 1) + self.dev.cfg_switches(0b1010) + + @kernel + def switch_speed(self): + self.core.break_realtime() + self.dev.init() + n = 10 + t0 = self.core.get_rtio_counter_mu() + for i in range(n): + self.dev.cfg_sw(3, i & 1) + self.set_dataset("dt", self.core.mu_to_seconds( + self.core.get_rtio_counter_mu() - t0)/n) + + @kernel + def switches_readback(self): + self.core.reset() # clear switch TTLs + self.dev.init() + sw_set = 0b1010 + self.dev.cfg_switches(sw_set) + sta_get = self.dev.sta_read() + self.set_dataset("sw_set", sw_set) + self.set_dataset("sta_get", sta_get) + + @kernel + def att(self): + self.core.break_realtime() + self.dev.init() + # clear backing state + self.dev.att_reg = 0 + att_set = 0x12345678 + self.dev.set_all_att_mu(att_set) + # confirm that we can set all attenuators and read back + att_get = self.dev.get_att_mu() + # confirm backing state + att_reg = self.dev.att_reg + self.set_dataset("att_set", att_set) + self.set_dataset("att_get", att_get) + self.set_dataset("att_reg", att_reg) + + @kernel + def att_channel(self): + self.core.break_realtime() + self.dev.init() + # clear backing state + self.dev.att_reg = 0 + att_set = int32(0x87654321) + # set individual attenuators + self.dev.set_att_mu(0, 0x21) + self.dev.set_att_mu(1, 0x43) + self.dev.set_att_mu(2, 0x65) + self.dev.set_att_mu(3, 0x87) + # confirm that we can set all attenuators and read back + att_get = self.dev.get_att_mu() + # confirm backing state + att_reg = self.dev.att_reg + self.set_dataset("att_set", att_set) + self.set_dataset("att_get", att_get) + self.set_dataset("att_reg", att_reg) + + @kernel + def att_speed(self): + self.core.break_realtime() + self.dev.init() + n = 10 + t0 = 
self.core.get_rtio_counter_mu() + for i in range(n): + self.dev.set_att(3, 30*dB) + self.set_dataset("dt", self.core.mu_to_seconds( + self.core.get_rtio_counter_mu() - t0)/n) + + @kernel + def io_update(self): + self.core.break_realtime() + self.dev.init() + self.dev.io_update.pulse_mu(8) + + @kernel + def sync(self): + self.core.break_realtime() + self.dev.init() + self.dev.set_sync_div(2) + + @kernel + def profile(self): + self.core.break_realtime() + self.dev.init() + self.dev.set_profile(7) + self.dev.set_profile(0) + + +class UrukulTest(ExperimentCase): + def test_instantiate(self): + self.execute(UrukulExp, "instantiate") + + def test_init(self): + self.execute(UrukulExp, "init") + + def test_cfg_write(self): + self.execute(UrukulExp, "cfg_write") + + def test_sta_read(self): + self.execute(UrukulExp, "sta_read") + sta = self.dataset_mgr.get("sta") + print(hex(sta)) + # self.assertEqual(urukul.urukul_sta_ifc_mode(sta), 0b0001) + + def test_switches(self): + self.execute(UrukulExp, "switches") + + def test_switch_speed(self): + self.execute(UrukulExp, "switch_speed") + dt = self.dataset_mgr.get("dt") + print(dt) + self.assertLess(dt, 5*us) + + def test_switches_readback(self): + self.execute(UrukulExp, "switches_readback") + sw_get = urukul.urukul_sta_rf_sw(self.dataset_mgr.get("sta_get")) + sw_set = self.dataset_mgr.get("sw_set") + self.assertEqual(sw_get, sw_set) + + def test_att(self): + self.execute(UrukulExp, "att") + att_set = self.dataset_mgr.get("att_set") + self.assertEqual(att_set, self.dataset_mgr.get("att_get")) + self.assertEqual(att_set, self.dataset_mgr.get("att_reg")) + + def test_att_channel(self): + self.execute(UrukulExp, "att_channel") + att_set = self.dataset_mgr.get("att_set") + self.assertEqual(att_set, self.dataset_mgr.get("att_get")) + self.assertEqual(att_set, self.dataset_mgr.get("att_reg")) + + def test_att_speed(self): + self.execute(UrukulExp, "att_speed") + dt = self.dataset_mgr.get("dt") + print(dt) + self.assertLess(dt, 5*us) + + def test_io_update(self): + self.execute(UrukulExp, "io_update") + + def test_sync(self): + self.execute(UrukulExp, "sync") + + def test_profile(self): + self.execute(UrukulExp, "profile") diff --git a/artiq/test/hardware_testbench.py b/artiq/test/hardware_testbench.py index 4ce3114c3..987a1cf6b 100644 --- a/artiq/test/hardware_testbench.py +++ b/artiq/test/hardware_testbench.py @@ -5,96 +5,17 @@ import os import sys import unittest import logging -import subprocess -import shlex -import time -import socket from artiq.master.databases import DeviceDB, DatasetDB -from artiq.master.worker_db import DeviceManager, DatasetManager +from artiq.master.worker_db import DeviceManager, DatasetManager, DeviceError from artiq.coredevice.core import CompileError from artiq.frontend.artiq_run import DummyScheduler -from artiq.protocols.pc_rpc import AutoTarget, Client artiq_root = os.getenv("ARTIQ_ROOT") logger = logging.getLogger(__name__) -class GenericControllerCase(unittest.TestCase): - def get_device_db(self): - raise NotImplementedError - - def setUp(self): - self.device_db = self.get_device_db() - self.device_mgr = DeviceManager(self.device_db) - self.controllers = {} - - def tearDown(self): - self.device_mgr.close_devices() - for name in list(self.controllers): - self.stop_controller(name) - - def start_controller(self, name, sleep=1): - if name in self.controllers: - raise ValueError("controller `{}` already started".format(name)) - try: - entry = self.device_db.get(name) - except KeyError: - raise unittest.SkipTest( - "controller 
`{}` not found".format(name)) - entry["command"] = entry["command"].format( - name=name, bind=entry["host"], port=entry["port"]) - proc = subprocess.Popen(shlex.split(entry["command"])) - self.controllers[name] = entry, proc - time.sleep(sleep) - - def stop_controller(self, name, default_timeout=1): - desc, proc = self.controllers[name] - t = desc.get("term_timeout", default_timeout) - target_name = desc.get("target_name", None) - if target_name is None: - target_name = AutoTarget - try: - try: - client = Client(desc["host"], desc["port"], target_name, t) - try: - client.terminate() - finally: - client.close_rpc() - proc.wait(t) - return - except (socket.timeout, subprocess.TimeoutExpired): - logger.warning("Controller %s failed to exit on request", name) - try: - proc.terminate() - except ProcessLookupError: - pass - try: - proc.wait(t) - return - except subprocess.TimeoutExpired: - logger.warning("Controller %s failed to exit on terminate", - name) - try: - proc.kill() - except ProcessLookupError: - pass - try: - proc.wait(t) - return - except subprocess.TimeoutExpired: - logger.warning("Controller %s failed to die on kill", name) - finally: - del self.controllers[name] - - -@unittest.skipUnless(artiq_root, "no ARTIQ_ROOT") -class ControllerCase(GenericControllerCase): - def get_device_db(self): - return DeviceDB(os.path.join(artiq_root, "device_db.py")) - - @unittest.skipUnless(artiq_root, "no ARTIQ_ROOT") class ExperimentCase(unittest.TestCase): def setUp(self): @@ -111,14 +32,14 @@ class ExperimentCase(unittest.TestCase): def create(self, cls, *args, **kwargs): try: exp = cls( - (self.device_mgr, self.dataset_mgr, None), + (self.device_mgr, self.dataset_mgr, None, {}), *args, **kwargs) - exp.prepare() - return exp - except KeyError as e: + except DeviceError as e: # skip if ddb does not match requirements raise unittest.SkipTest( - "device_db entry `{}` not found".format(*e.args)) + "test device not available: `{}`".format(*e.args)) + exp.prepare() + return exp def execute(self, cls, *args, **kwargs): expid = { diff --git a/artiq/test/libartiq_support/lib.rs b/artiq/test/libartiq_support/lib.rs index d9c609fe0..77e8f4160 100644 --- a/artiq/test/libartiq_support/lib.rs +++ b/artiq/test/libartiq_support/lib.rs @@ -34,14 +34,33 @@ mod cslice { } } } + + pub trait AsCSlice<'a, T> { + fn as_c_slice(&'a self) -> CSlice<'a, T>; + } + + impl<'a> AsCSlice<'a, u8> for str { + fn as_c_slice(&'a self) -> CSlice<'a, u8> { + CSlice { + base: self.as_ptr(), + len: self.len() as u32, + phantom: PhantomData + } + } + } } -#[path = "../../firmware/ksupport/eh.rs"] -pub mod eh; +#[path = "."] +pub mod eh { + #[path = "../../firmware/libeh/dwarf.rs"] + pub mod dwarf; +} +#[path = "../../firmware/ksupport/eh_artiq.rs"] +pub mod eh_artiq; use std::{str, process}; -fn terminate(exception: &eh::Exception, mut _backtrace: &mut [usize]) -> ! { +fn terminate(exception: &eh_artiq::Exception, mut _backtrace: &mut [usize]) -> ! { println!("Uncaught {}: {} ({}, {}, {})", str::from_utf8(exception.name.as_ref()).unwrap(), str::from_utf8(exception.message.as_ref()).unwrap(), @@ -57,14 +76,3 @@ fn terminate(exception: &eh::Exception, mut _backtrace: &mut [usize]) -> ! 
{ #[export_name = "now"] pub static mut NOW: i64 = 0; - -#[export_name = "watchdog_set"] -pub extern fn watchdog_set(ms: i64) -> i32 { - println!("watchdog_set {}", ms); - ms as i32 -} - -#[export_name = "watchdog_clear"] -pub extern fn watchdog_clear(id: i32) { - println!("watchdog_clear {}", id); -} diff --git a/artiq/test/lit/codegen/custom_comparison.py b/artiq/test/lit/codegen/custom_comparison.py new file mode 100644 index 000000000..8a7a1d32b --- /dev/null +++ b/artiq/test/lit/codegen/custom_comparison.py @@ -0,0 +1,13 @@ +# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +class Foo: + def __init__(self): + pass + +a = Foo() +b = Foo() + +# CHECK-L: ${LINE:+1}: error: Custom object comparison is not supported +a > b + diff --git a/artiq/test/lit/codegen/custom_inclusion.py b/artiq/test/lit/codegen/custom_inclusion.py new file mode 100644 index 000000000..92cd1a772 --- /dev/null +++ b/artiq/test/lit/codegen/custom_inclusion.py @@ -0,0 +1,13 @@ +# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +class Foo: + def __init__(self): + pass + +a = Foo() +b = Foo() + +# CHECK-L: ${LINE:+1}: error: Custom object inclusion test is not supported +a in b + diff --git a/artiq/test/lit/codegen/error_illegal_builtin.py b/artiq/test/lit/codegen/error_illegal_builtin.py deleted file mode 100644 index ae75ad9b7..000000000 --- a/artiq/test/lit/codegen/error_illegal_builtin.py +++ /dev/null @@ -1,5 +0,0 @@ -# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t -# RUN: OutputCheck %s --file-to-check=%t - -# CHECK-L: ${LINE:+1}: error: builtin function 'watchdog' cannot be used in this context -watchdog(1.0) diff --git a/artiq/test/lit/codegen/none_retval.py b/artiq/test/lit/codegen/none_retval.py new file mode 100644 index 000000000..ed2c9eb25 --- /dev/null +++ b/artiq/test/lit/codegen/none_retval.py @@ -0,0 +1,11 @@ +# RUN: %python -m artiq.compiler.testbench.llvmgen %s + +def make_none(): + return None + +def take_arg(arg): + pass + +def run(): + retval = make_none() + take_arg(retval) diff --git a/artiq/test/lit/constant_hoisting/device_db.py b/artiq/test/lit/constant_hoisting/device_db.py new file mode 100644 index 000000000..e39c83c09 --- /dev/null +++ b/artiq/test/lit/constant_hoisting/device_db.py @@ -0,0 +1,8 @@ +device_db = { + "core": { + "type": "local", + "module": "artiq.coredevice.core", + "class": "Core", + "arguments": {"host": None, "ref_period": 1e-9} + } +} diff --git a/artiq/test/lit/constant_hoisting/invariant_load.py b/artiq/test/lit/constant_hoisting/invariant_load.py new file mode 100644 index 000000000..62fac4202 --- /dev/null +++ b/artiq/test/lit/constant_hoisting/invariant_load.py @@ -0,0 +1,25 @@ +# RUN: env ARTIQ_DUMP_IR=%t ARTIQ_IR_NO_LOC=1 %python -m artiq.compiler.testbench.embedding +compile %s +# RUN: OutputCheck %s --file-to-check=%t.txt + +from artiq.language.core import * +from artiq.language.types import * + +# CHECK-L: %LOC.self.FLD.foo = numpy.int32 getattr('foo') %ARG.self +# CHECK-L: for.head: + +class c: + kernel_invariants = {"foo"} + + def __init__(self): + self.foo = 1 + + @kernel + def run(self): + for _ in range(10): + core_log(1.0 * self.foo) + +i = c() + +@kernel +def entrypoint(): + i.run() diff --git a/artiq/test/lit/embedding/annotation.py b/artiq/test/lit/embedding/annotation.py index 7e9c05d9f..21ae25332 100644 --- a/artiq/test/lit/embedding/annotation.py +++ b/artiq/test/lit/embedding/annotation.py @@ -11,7 +11,12 @@ 
def foo(x: TInt64, y: TInt64 = 1) -> TInt64: print(x+y) return x+y +@kernel +def bar(x: TInt64) -> None: + print(x) + @kernel def entrypoint(): print(foo(0, 2)) print(foo(1, 3)) + bar(3) diff --git a/artiq/test/lit/embedding/array_math_fns.py b/artiq/test/lit/embedding/array_math_fns.py new file mode 100644 index 000000000..d23540b48 --- /dev/null +++ b/artiq/test/lit/embedding/array_math_fns.py @@ -0,0 +1,26 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * +import numpy as np + +@kernel +def entrypoint(): + # Just make sure everything compiles. + + # LLVM intrinsic: + a = np.array([1.0, 2.0, 3.0]) + b = np.sin(a) + assert b.shape == a.shape + + # libm: + c = np.array([1.0, 2.0, 3.0]) + d = np.arctan(c) + assert d.shape == c.shape + + # libm, binary: + e = np.array([1.0, 2.0, 3.0]) + f = np.array([4.0, 5.0, 6.0]) + g = np.arctan2(e, f) + # g = np.arctan2(e, 0.0) + # g = np.arctan2(0.0, f) + assert g.shape == e.shape diff --git a/artiq/test/lit/embedding/array_transpose.py b/artiq/test/lit/embedding/array_transpose.py new file mode 100644 index 000000000..2ab44bd7d --- /dev/null +++ b/artiq/test/lit/embedding/array_transpose.py @@ -0,0 +1,22 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * +from artiq.language.types import * +import numpy as np + +@kernel +def entrypoint(): + # FIXME: This needs to be a runtime test (but numpy.* integration is + # currently embedding-only). + a = np.array([1, 2, 3]) + b = np.transpose(a) + assert a.shape == b.shape + for i in range(len(a)): + assert a[i] == b[i] + + c = np.array([[1, 2, 3], [4, 5, 6]]) + d = np.transpose(c) + assert c.shape == d.shape + for i in range(2): + for j in range(3): + assert c[i][j] == d[j][i] diff --git a/artiq/test/lit/embedding/arrays.py b/artiq/test/lit/embedding/arrays.py new file mode 100644 index 000000000..63d846585 --- /dev/null +++ b/artiq/test/lit/embedding/arrays.py @@ -0,0 +1,36 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * +from artiq.language.types import * +from numpy import array + +int_vec = array([1, 2, 3]) +float_vec = array([1.0, 2.0, 3.0]) +int_mat = array([[1, 2], [3, 4]]) +float_mat = array([[1.0, 2.0], [3.0, 4.0]]) + + +@kernel +def entrypoint(): + # TODO: These need to be runtime tests! 
+ assert int_vec.shape == (3, ) + assert int_vec[0] == 1 + assert int_vec[1] == 2 + assert int_vec[2] == 3 + + assert float_vec.shape == (3, ) + assert float_vec[0] == 1.0 + assert float_vec[1] == 2.0 + assert float_vec[2] == 3.0 + + assert int_mat.shape == (2, 2) + assert int_mat[0][0] == 1 + assert int_mat[0][1] == 2 + assert int_mat[1][0] == 3 + assert int_mat[1][1] == 4 + + assert float_mat.shape == (2, 2) + assert float_mat[0][0] == 1.0 + assert float_mat[0][1] == 2.0 + assert float_mat[1][0] == 3.0 + assert float_mat[1][1] == 4.0 diff --git a/artiq/test/lit/embedding/class_fn_direct_call.py b/artiq/test/lit/embedding/class_fn_direct_call.py new file mode 100644 index 000000000..91bdac519 --- /dev/null +++ b/artiq/test/lit/embedding/class_fn_direct_call.py @@ -0,0 +1,20 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * +from artiq.language.types import * + +class C: + @kernel + def f(self): + pass + +class D(C): + @kernel + def f(self): + # super().f() # super() not bound + C.f(self) # KeyError in compile + +di = D() +@kernel +def entrypoint(): + di.f() diff --git a/artiq/test/lit/embedding/error_specialized_annot.py b/artiq/test/lit/embedding/error_specialized_annot.py new file mode 100644 index 000000000..2f5955043 --- /dev/null +++ b/artiq/test/lit/embedding/error_specialized_annot.py @@ -0,0 +1,19 @@ +# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t +# RUN: OutputCheck %s --file-to-check=%t + +from artiq.experiment import * + +class c(): +# CHECK-L: ${LINE:+2}: error: type annotation for argument 'x', '', is not an ARTIQ type + @kernel + def hello(self, x: float): + pass + + @kernel + def run(self): + self.hello(2) + +i = c() +@kernel +def entrypoint(): + i.run() diff --git a/artiq/test/lit/embedding/eval.py b/artiq/test/lit/embedding/eval.py new file mode 100644 index 000000000..d2f91cbf9 --- /dev/null +++ b/artiq/test/lit/embedding/eval.py @@ -0,0 +1,18 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * + + +def make_incrementer(increment): + return kernel_from_string(["a"], "return a + {}".format(increment), + portable) + + +foo = make_incrementer(1) +bar = make_incrementer(2) + + +@kernel +def entrypoint(): + assert foo(4) == 5 + assert bar(4) == 6 diff --git a/artiq/test/lit/embedding/fn_ptr_list.py b/artiq/test/lit/embedding/fn_ptr_list.py new file mode 100644 index 000000000..73e6ad3be --- /dev/null +++ b/artiq/test/lit/embedding/fn_ptr_list.py @@ -0,0 +1,15 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * +from artiq.language.types import * + +@kernel +def a(): + pass + +fns = [a, a] + +@kernel +def entrypoint(): + fns[0]() + fns[1]() diff --git a/artiq/test/lit/embedding/math_fns.py b/artiq/test/lit/embedding/math_fns.py new file mode 100644 index 000000000..6f9416c8d --- /dev/null +++ b/artiq/test/lit/embedding/math_fns.py @@ -0,0 +1,31 @@ +# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding %s +# RUN: OutputCheck %s --file-to-check=%t.ll + +from artiq.language.core import * +from artiq.language.types import * +import numpy + +@kernel +def entrypoint(): + # LLVM's constant folding for transcendental functions is good enough that + # we can do a basic smoke test by just making sure the module compiles and + # all assertions are statically eliminated. 
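# (Concretely: every call below has constant arguments, so LLVM evaluates the
# intrinsic or libm call at compile time, each comparison folds to true, and the
# assertion-failure branch is removed. That is why the CHECK-NOT directive can
# simply look for "assert" in the dumped .ll file named by ARTIQ_DUMP_LLVM.)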
+ + # CHECK-NOT: assert + assert numpy.sin(0.0) == 0.0 + assert numpy.cos(0.0) == 1.0 + assert numpy.exp(0.0) == 1.0 + assert numpy.exp2(1.0) == 2.0 + assert numpy.log(numpy.exp(1.0)) == 1.0 + assert numpy.log10(10.0) == 1.0 + assert numpy.log2(2.0) == 1.0 + assert numpy.fabs(-1.0) == 1.0 + assert numpy.floor(42.5) == 42.0 + assert numpy.ceil(42.5) == 43.0 + assert numpy.trunc(41.5) == 41.0 + assert numpy.rint(41.5) == 42.0 + assert numpy.tan(0.0) == 0.0 + assert numpy.arcsin(0.0) == 0.0 + assert numpy.arccos(1.0) == 0.0 + assert numpy.arctan(0.0) == 0.0 + assert numpy.arctan2(0.0, 1.0) == 0.0 diff --git a/artiq/test/lit/embedding/tuple.py b/artiq/test/lit/embedding/tuple.py new file mode 100644 index 000000000..6f9a14a32 --- /dev/null +++ b/artiq/test/lit/embedding/tuple.py @@ -0,0 +1,9 @@ +# RUN: %python -m artiq.compiler.testbench.embedding %s + +from artiq.language.core import * + +values = (1, 2) + +@kernel +def entrypoint(): + assert values == (1, 2) diff --git a/artiq/test/lit/escape/const_string.py b/artiq/test/lit/escape/const_string.py index f5f405c86..f5c844c05 100644 --- a/artiq/test/lit/escape/const_string.py +++ b/artiq/test/lit/escape/const_string.py @@ -9,3 +9,7 @@ def foo(): @kernel def entrypoint(): foo() + + # Test reassigning strings. + a = "a" + b = a diff --git a/artiq/test/lit/inferencer/array_creation.py b/artiq/test/lit/inferencer/array_creation.py new file mode 100644 index 000000000..e3e00a254 --- /dev/null +++ b/artiq/test/lit/inferencer/array_creation.py @@ -0,0 +1,16 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +# Nothing known, as there could be several more dimensions +# hidden from view by the array being empty. +# CHECK-L: ([]:list(elt='a)):'b +array([]) + +# CHECK-L: numpy.array(elt=numpy.int?, num_dims=1) +array([1, 2, 3]) +# CHECK-L: numpy.array(elt=numpy.int?, num_dims=2) +array([[1, 2, 3], [4, 5, 6]]) + +# Jagged arrays produce runtime failure: +# CHECK-L: numpy.array(elt=numpy.int?, num_dims=2) +array([[1, 2, 3], [4, 5]]) diff --git a/artiq/test/lit/inferencer/builtin_calls.py b/artiq/test/lit/inferencer/builtin_calls.py index be1b797d9..a4b2f81fe 100644 --- a/artiq/test/lit/inferencer/builtin_calls.py +++ b/artiq/test/lit/inferencer/builtin_calls.py @@ -30,3 +30,9 @@ len([]) # CHECK-L: round:(1.0:float):numpy.int? round(1.0) + +# CHECK-L: abs:(1:numpy.int?):numpy.int? 
+abs(1) + +# CHECK-L: abs:(1.0:float):float +abs(1.0) diff --git a/artiq/test/lit/inferencer/cast.py b/artiq/test/lit/inferencer/cast.py index 0f9c4c8d9..be2ddbf4a 100644 --- a/artiq/test/lit/inferencer/cast.py +++ b/artiq/test/lit/inferencer/cast.py @@ -1,5 +1,8 @@ # RUN: %python -m artiq.compiler.testbench.inferencer +mono %s >%t # RUN: OutputCheck %s --file-to-check=%t -# CHECK-L: numpy.int64 +# CHECK-L: 2:numpy.int64 int64(2)**32 + +# CHECK-L: round:(1.0:float):numpy.int64 +int64(round(1.0)) diff --git a/artiq/test/lit/inferencer/coerce_explicit.py b/artiq/test/lit/inferencer/coerce_explicit.py new file mode 100644 index 000000000..4455c596c --- /dev/null +++ b/artiq/test/lit/inferencer/coerce_explicit.py @@ -0,0 +1,12 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer +mono %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +# CHECK-L: n:numpy.int32 = +n = 0 +# CHECK-L: a:numpy.int32 = +a = n // 1 +# CHECK-L: b:numpy.int32 = +b = n // 10 +# CHECK-L: q:numpy.int64 = +q = (a << 0) + (b << 8) +core_log(int64(q)) diff --git a/artiq/test/lit/inferencer/error_array.py b/artiq/test/lit/inferencer/error_array.py new file mode 100644 index 000000000..5e79f99e6 --- /dev/null +++ b/artiq/test/lit/inferencer/error_array.py @@ -0,0 +1,13 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer +diag %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +# CHECK-L: ${LINE:+1}: error: array cannot be invoked with the arguments () +a = array() + +b = array([1, 2, 3]) + +# CHECK-L: ${LINE:+1}: error: too many indices for array of dimension 1 +b[1, 2] + +# CHECK-L: ${LINE:+1}: error: array attributes cannot be assigned to +b.shape = (5, ) diff --git a/artiq/test/lit/inferencer/error_array_ops.py b/artiq/test/lit/inferencer/error_array_ops.py new file mode 100644 index 000000000..4f85290c1 --- /dev/null +++ b/artiq/test/lit/inferencer/error_array_ops.py @@ -0,0 +1,12 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer +diag %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +a = array([[1, 2], [3, 4]]) +b = array([7, 8]) + +# NumPy supports implicit broadcasting over axes, which we don't (yet). +# Make sure there is a nice error message. 
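# For comparison, stock NumPy broadcasts the length-2 vector across the rows of
# the 2x2 matrix instead of rejecting the expression; the diagnostics checked
# below are what the ARTIQ compiler currently emits in place of that behaviour:
#
#     >>> import numpy as np
#     >>> np.array([[1, 2], [3, 4]]) + np.array([7, 8])
#     array([[ 8, 10],
#            [10, 12]])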
+# CHECK-L: ${LINE:+3}: error: dimensions of '+' array operands must match +# CHECK-L: ${LINE:+2}: note: operand of dimension 2 +# CHECK-L: ${LINE:+1}: note: operand of dimension 1 +a + b diff --git a/artiq/test/lit/inferencer/error_builtin_calls.py b/artiq/test/lit/inferencer/error_builtin_calls.py index f3d8b7df8..643011f2d 100644 --- a/artiq/test/lit/inferencer/error_builtin_calls.py +++ b/artiq/test/lit/inferencer/error_builtin_calls.py @@ -9,3 +9,9 @@ list(1) # CHECK-L: ${LINE:+1}: error: the arguments of min() must be of a numeric type min([1], [1]) + +# CHECK-L: ${LINE:+1}: error: the arguments of abs() must be of a numeric type +abs([1.0]) + +# CHECK-L: ${LINE:+1}: error: strings currently cannot be constructed +str(1) diff --git a/artiq/test/lit/inferencer/error_call.py b/artiq/test/lit/inferencer/error_call.py index 1c3df3a6a..1497c7821 100644 --- a/artiq/test/lit/inferencer/error_call.py +++ b/artiq/test/lit/inferencer/error_call.py @@ -18,3 +18,6 @@ f(1, x=1) # CHECK-L: ${LINE:+1}: error: mandatory argument 'x' is not passed f() + +# CHECK: ${LINE:+1}: error: this function of type .* does not accept argument 'q' +f(1, q=1) diff --git a/artiq/test/lit/inferencer/error_matmult.py b/artiq/test/lit/inferencer/error_matmult.py new file mode 100644 index 000000000..2586aec31 --- /dev/null +++ b/artiq/test/lit/inferencer/error_matmult.py @@ -0,0 +1,11 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer +diag %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +# CHECK-L: ${LINE:+1}: error: expected matrix multiplication operand to be of array type +1 @ 2 + +# CHECK-L: ${LINE:+1}: error: expected matrix multiplication operand to be of array type +[1] @ [2] + +# CHECK-L: ${LINE:+1}: error: expected matrix multiplication operand to be 1- or 2-dimensional +array([[[0]]]) @ array([[[1]]]) diff --git a/artiq/test/lit/inferencer/error_subscript.py b/artiq/test/lit/inferencer/error_subscript.py index 0aadb3289..d8332ab09 100644 --- a/artiq/test/lit/inferencer/error_subscript.py +++ b/artiq/test/lit/inferencer/error_subscript.py @@ -3,7 +3,7 @@ x = [] -# CHECK-L: ${LINE:+1}: error: multi-dimensional slices are not supported +# CHECK-L: ${LINE:+1}: error: multi-dimensional indexing only supported for arrays x[1,2] # CHECK-L: ${LINE:+1}: error: multi-dimensional slices are not supported diff --git a/artiq/test/lit/inferencer/matmult.py b/artiq/test/lit/inferencer/matmult.py new file mode 100644 index 000000000..e8e982c57 --- /dev/null +++ b/artiq/test/lit/inferencer/matmult.py @@ -0,0 +1,17 @@ +# RUN: %python -m artiq.compiler.testbench.inferencer %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +vec = array([0, 1]) +mat = array([[0, 1], [2, 3]]) + +# CHECK-L: ):numpy.int? +vec @ vec + +# CHECK-L: ):numpy.array(elt=numpy.int?, num_dims=1) +vec @ mat + +# CHECK-L: ):numpy.array(elt=numpy.int?, num_dims=1) +mat @ vec + +# CHECK-L: ):numpy.array(elt=numpy.int?, num_dims=2) +mat @ mat diff --git a/artiq/test/lit/inferencer/unify.py b/artiq/test/lit/inferencer/unify.py index e4fea57fe..8dcb1e423 100644 --- a/artiq/test/lit/inferencer/unify.py +++ b/artiq/test/lit/inferencer/unify.py @@ -64,7 +64,7 @@ kb = bytearray(b"x") # CHECK-L: kb:bytearray l = array([1]) -# CHECK-L: l:numpy.array(elt=numpy.int?) 
+# CHECK-L: l:numpy.array(elt=numpy.int?, num_dims=1) IndexError() # CHECK-L: IndexError:():IndexError diff --git a/artiq/test/lit/integration/abs.py b/artiq/test/lit/integration/abs.py new file mode 100644 index 000000000..ba279ab4b --- /dev/null +++ b/artiq/test/lit/integration/abs.py @@ -0,0 +1,7 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s +# RUN: %python %s + +assert abs(1234) == 1234 +assert abs(-1234) == 1234 +assert abs(1234.5) == 1234.5 +assert abs(-1234.5) == 1234.5 diff --git a/artiq/test/lit/integration/array.py b/artiq/test/lit/integration/array.py deleted file mode 100644 index 34463a799..000000000 --- a/artiq/test/lit/integration/array.py +++ /dev/null @@ -1,8 +0,0 @@ -# RUN: %python -m artiq.compiler.testbench.jit %s -# REQUIRES: exceptions - -ary = array([1, 2, 3]) -assert [x*x for x in ary] == [1, 4, 9] - -assert [1] + [2] == [1, 2] -assert [1] * 3 == [1, 1, 1] diff --git a/artiq/test/lit/integration/array_binops.py b/artiq/test/lit/integration/array_binops.py new file mode 100644 index 000000000..e60052277 --- /dev/null +++ b/artiq/test/lit/integration/array_binops.py @@ -0,0 +1,52 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s + +a = array([1, 2, 3]) +b = array([4, 5, 6]) + +c = a + b +assert c[0] == 5 +assert c[1] == 7 +assert c[2] == 9 + +c = b - a +assert c[0] == 3 +assert c[1] == 3 +assert c[2] == 3 + +c = a * b +assert c[0] == 4 +assert c[1] == 10 +assert c[2] == 18 + +c = b // a +assert c[0] == 4 +assert c[1] == 2 +assert c[2] == 2 + +c = a ** b +assert c[0] == 1 +assert c[1] == 32 +assert c[2] == 729 + +c = b % a +assert c[0] == 0 +assert c[1] == 1 +assert c[2] == 0 + +cf = b / a +assert cf[0] == 4.0 +assert cf[1] == 2.5 +assert cf[2] == 2.0 + +cf2 = cf + a +assert cf2[0] == 5.0 +assert cf2[1] == 4.5 +assert cf2[2] == 5.0 + +d = array([[1, 2], [3, 4]]) +e = array([[5, 6], [7, 8]]) +f = d + e +assert f[0][0] == 6 +assert f[0][1] == 8 +assert f[1][0] == 10 +assert f[1][1] == 12 diff --git a/artiq/test/lit/integration/array_broadcast.py b/artiq/test/lit/integration/array_broadcast.py new file mode 100644 index 000000000..d7cbc5998 --- /dev/null +++ b/artiq/test/lit/integration/array_broadcast.py @@ -0,0 +1,55 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s + +a = array([1, 2, 3]) + +c = a + 1 +assert c[0] == 2 +assert c[1] == 3 +assert c[2] == 4 + +c = 1 - a +assert c[0] == 0 +assert c[1] == -1 +assert c[2] == -2 + +c = a * 1 +assert c[0] == 1 +assert c[1] == 2 +assert c[2] == 3 + +c = a // 2 +assert c[0] == 0 +assert c[1] == 1 +assert c[2] == 1 + +c = a ** 2 +assert c[0] == 1 +assert c[1] == 4 +assert c[2] == 9 + +c = 2 ** a +assert c[0] == 2 +assert c[1] == 4 +assert c[2] == 8 + +c = a % 2 +assert c[0] == 1 +assert c[1] == 0 +assert c[2] == 1 + +cf = a / 2 +assert cf[0] == 0.5 +assert cf[1] == 1.0 +assert cf[2] == 1.5 + +cf2 = 2 / array([1, 2, 4]) +assert cf2[0] == 2.0 +assert cf2[1] == 1.0 +assert cf2[2] == 0.5 + +d = array([[1, 2], [3, 4]]) +e = d + 1 +assert e[0][0] == 2 +assert e[0][1] == 3 +assert e[1][0] == 4 +assert e[1][1] == 5 diff --git a/artiq/test/lit/integration/array_creation.py b/artiq/test/lit/integration/array_creation.py new file mode 100644 index 000000000..512382cda --- /dev/null +++ b/artiq/test/lit/integration/array_creation.py @@ -0,0 +1,50 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s +# REQUIRES: exceptions + +ary = array([1, 2, 3]) +assert len(ary) == 3 +assert ary.shape == (3,) +assert [x * x for x in ary] == [1, 4, 9] + +# Reassign to an existing value to disambiguate type of empty array. 
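# (The first assignment below pins the element type of `empty_array` to int, so
# the otherwise unconstrained type of the empty `array([])` literal, which the
# inferencer test above leaves as a fresh type variable, can be unified with it
# on reassignment.)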
+empty_array = array([1]) +empty_array = array([]) +assert len(empty_array) == 0 +assert empty_array.shape == (0,) +assert [x * x for x in empty_array] == [] + +# Creating arrays from generic iterables, rectangularity is assumed (and ensured +# with runtime checks). +list_of_lists = [[1, 2], [3, 4]] +array_of_lists = array(list_of_lists) +assert array_of_lists.shape == (2, 2) +assert [[y for y in x] for x in array_of_lists] == list_of_lists + +matrix = array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) +assert len(matrix) == 2 +assert matrix.shape == (2, 3) +# FIXME: Need to decide on a solution for array comparisons — +# NumPy returns an array of bools! +# assert [x for x in matrix] == [array([1.0, 2.0, 3.0]), array([4.0, 5.0, 6.0])] +assert matrix[0, 0] == 1.0 +assert matrix[0, 1] == 2.0 +assert matrix[0, 2] == 3.0 +assert matrix[1, 0] == 4.0 +assert matrix[1, 1] == 5.0 +assert matrix[1, 2] == 6.0 + +matrix[0, 0] = 7.0 +matrix[1, 1] = 8.0 +assert matrix[0, 0] == 7.0 +assert matrix[0, 1] == 2.0 +assert matrix[0, 2] == 3.0 +assert matrix[1, 0] == 4.0 +assert matrix[1, 1] == 8.0 +assert matrix[1, 2] == 6.0 + +array_of_matrices = array([matrix, matrix]) +assert array_of_matrices.shape == (2, 2, 3) + +three_tensor = array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]) +assert len(three_tensor) == 1 +assert three_tensor.shape == (1, 2, 3) diff --git a/artiq/test/lit/integration/array_matmult.py b/artiq/test/lit/integration/array_matmult.py new file mode 100644 index 000000000..7519c10ff --- /dev/null +++ b/artiq/test/lit/integration/array_matmult.py @@ -0,0 +1,25 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s + +mat23 = array([[1, 2, 3], [4, 5, 6]]) +mat32 = array([[1, 2], [3, 4], [5, 6]]) +vec2 = array([1, 2]) +vec3 = array([1, 2, 3]) + +assert vec3 @ vec3 == 14 + +a = mat23 @ mat32 +assert a.shape == (2, 2) +assert a[0][0] == 22 +assert a[0][1] == 28 +assert a[1][0] == 49 +assert a[1][1] == 64 + +b = mat23 @ vec3 +assert b.shape == (2,) +assert b[0] == 14 +assert b[1] == 32 + +b = vec3 @ mat32 +assert b.shape == (2,) +assert b[0] == 22 +assert b[1] == 28 diff --git a/artiq/test/lit/integration/array_unaryops.py b/artiq/test/lit/integration/array_unaryops.py new file mode 100644 index 000000000..e55b6f733 --- /dev/null +++ b/artiq/test/lit/integration/array_unaryops.py @@ -0,0 +1,11 @@ +# RUN: %python -m artiq.compiler.testbench.jit %s + +a = array([1, 2]) + +b = +a +assert b[0] == 1 +assert b[1] == 2 + +b = -a +assert b[0] == -1 +assert b[1] == -2 diff --git a/artiq/test/lit/integration/instance.py b/artiq/test/lit/integration/instance.py index bf255d88f..5acea8721 100644 --- a/artiq/test/lit/integration/instance.py +++ b/artiq/test/lit/integration/instance.py @@ -6,9 +6,9 @@ class c: i = c() -assert i.a == 1 - def f(): c = None assert i.a == 1 + +assert i.a == 1 f() diff --git a/artiq/test/lit/integration/list.py b/artiq/test/lit/integration/list.py index 06f08e426..97fad6a6c 100644 --- a/artiq/test/lit/integration/list.py +++ b/artiq/test/lit/integration/list.py @@ -7,3 +7,13 @@ assert (x, y) == (1, 2) lst = [1, 2, 3] assert [x*x for x in lst] == [1, 4, 9] + +assert [0] == [0] +assert [0] != [1] +assert [[0]] == [[0]] +assert [[0]] != [[1]] +assert [[[0]]] == [[[0]]] +assert [[[0]]] != [[[1]]] + +assert [1] + [2] == [1, 2] +assert [1] * 3 == [1, 1, 1] diff --git a/artiq/test/lit/integration/str.py b/artiq/test/lit/integration/str.py index 45b430d2a..9d75399e9 100644 --- a/artiq/test/lit/integration/str.py +++ b/artiq/test/lit/integration/str.py @@ -2,4 +2,9 @@ # RUN: %python %s assert "xy" == "xy" 
+assert not ("xy" == "xz") + +assert "xy" != "xz" +assert not ("xy" != "xy") + assert ("x" + "y") == "xy" diff --git a/artiq/test/lit/integration/tuple.py b/artiq/test/lit/integration/tuple.py index 5d6c153dd..44564c151 100644 --- a/artiq/test/lit/integration/tuple.py +++ b/artiq/test/lit/integration/tuple.py @@ -5,3 +5,9 @@ x, y = 2, 1 x, y = y, x assert x == 1 and y == 2 assert (1, 2) + (3.0,) == (1, 2, 3.0) + +assert (0,) == (0,) +assert (0,) != (1,) + +assert ([0],) == ([0],) +assert ([0],) != ([1],) diff --git a/artiq/test/lit/lit.cfg b/artiq/test/lit/lit.cfg index c31490091..693383297 100644 --- a/artiq/test/lit/lit.cfg +++ b/artiq/test/lit/lit.cfg @@ -2,7 +2,6 @@ import os import sys -import subprocess import lit.util import lit.formats @@ -27,14 +26,7 @@ not_ = "{} {}".format(sys.executable, os.path.join(root, "lit", "not.py")) config.substitutions.append( ("%not", not_) ) if os.name == "posix": - support_build = os.path.join(root, "libartiq_support") - if subprocess.call(["rustc", os.path.join(support_build, "lib.rs"), - "--out-dir", support_build, - "-Cpanic=abort", "-g"]) != 0: - lit_config.fatal("Unable to build JIT support library") - - support_lib = os.path.join(support_build, "libartiq_support.so") - config.environment["LIBARTIQ_SUPPORT"] = support_lib + config.environment["LIBARTIQ_SUPPORT"] = os.getenv("LIBARTIQ_SUPPORT") config.environment["RUST_BACKTRACE"] = "1" config.available_features.add("exceptions") diff --git a/artiq/test/lit/local_demotion/closure.py b/artiq/test/lit/local_demotion/closure.py new file mode 100644 index 000000000..5786eb20e --- /dev/null +++ b/artiq/test/lit/local_demotion/closure.py @@ -0,0 +1,15 @@ +# RUN: %python -m artiq.compiler.testbench.irgen %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +def x(y): pass + +# CHECK-L: NoneType input.a(environment(...) %ARG.ENV, NoneType %ARG.self) { +# CHECK-L: setlocal('self') %ENV, NoneType %ARG.self +# CHECK-NOT-L: call (y:NoneType)->NoneType %LOC.x, NoneType %ARG.self + +def a(self): + def b(): + pass + x(self) + +a(None) diff --git a/artiq/test/lit/local_demotion/demotion.py b/artiq/test/lit/local_demotion/demotion.py new file mode 100644 index 000000000..c9b4ed0c5 --- /dev/null +++ b/artiq/test/lit/local_demotion/demotion.py @@ -0,0 +1,13 @@ +# RUN: %python -m artiq.compiler.testbench.irgen %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +def x(y): pass + +# CHECK-L: NoneType input.a(environment(...) 
%ARG.ENV, NoneType %ARG.self) { +# CHECK-NOT-L: getlocal('self') %ENV +# CHECK-L: call (y:NoneType)->NoneType %LOC.x, NoneType %ARG.self + +def a(self): + x(self) + +a(None) diff --git a/artiq/test/lit/monomorphism/bug_1242.py b/artiq/test/lit/monomorphism/bug_1242.py new file mode 100644 index 000000000..8918a30cc --- /dev/null +++ b/artiq/test/lit/monomorphism/bug_1242.py @@ -0,0 +1,8 @@ +# RUN: %python -m artiq.compiler.testbench.signature %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +x = 0x100000000 +# CHECK-L: x: numpy.int64 + +y = int32(x) +# CHECK-L: y: numpy.int32 diff --git a/artiq/test/lit/monomorphism/bug_1252.py b/artiq/test/lit/monomorphism/bug_1252.py new file mode 100644 index 000000000..e00a899ab --- /dev/null +++ b/artiq/test/lit/monomorphism/bug_1252.py @@ -0,0 +1,8 @@ +# RUN: %python -m artiq.compiler.testbench.irgen %s >%t +# RUN: OutputCheck %s --file-to-check=%t + +# CHECK-L: %BLT.round = numpy.int64 builtin(round) float +def frequency_to_ftw(frequency): + return int64(round(1e-9*frequency)) + +frequency_to_ftw(1e9) diff --git a/artiq/test/lit/time/watchdog.py b/artiq/test/lit/time/watchdog.py deleted file mode 100644 index ccd3048bc..000000000 --- a/artiq/test/lit/time/watchdog.py +++ /dev/null @@ -1,36 +0,0 @@ -# RUN: %python -m artiq.compiler.testbench.jit %s >%t -# RUN: OutputCheck %s --file-to-check=%t -# REQUIRES: time - -def f(): - with watchdog(1.0): - pass - -def g(): - with watchdog(2.0): - raise Exception() - -def h(): - try: - g() - except: - pass - -def i(): - try: - with watchdog(3.0): - raise Exception() - except: - pass - -# CHECK-L: watchdog_set 1000 -# CHECK-L: watchdog_clear 1000 -f() - -# CHECK-L: watchdog_set 2000 -# CHECK-L: watchdog_clear 2000 -h() - -# CHECK-L: watchdog_set 3000 -# CHECK-L: watchdog_clear 3000 -i() diff --git a/artiq/test/test_arguments.py b/artiq/test/test_arguments.py new file mode 100644 index 000000000..884c8982f --- /dev/null +++ b/artiq/test/test_arguments.py @@ -0,0 +1,79 @@ +import unittest +import numbers + + +from artiq.language.environment import BooleanValue, EnumerationValue, \ + NumberValue, DefaultMissing + + +class NumberValueCase(unittest.TestCase): + def setUp(self): + self.default_value = NumberValue() + self.int_value = NumberValue(42, step=1, ndecimals=0) + self.float_value = NumberValue(42) + + def test_invalid_default(self): + with self.assertRaises(ValueError): + _ = NumberValue("invalid") + + with self.assertRaises(TypeError): + _ = NumberValue(1.+1j) + + def test_no_default(self): + with self.assertRaises(DefaultMissing): + self.default_value.default() + + def test_integer_default(self): + self.assertIsInstance(self.int_value.default(), numbers.Integral) + + def test_default_to_float(self): + self.assertIsInstance(self.float_value.default(), numbers.Real) + self.assertNotIsInstance(self.float_value.default(), numbers.Integral) + + def test_invalid_unit(self): + with self.assertRaises(KeyError): + _ = NumberValue(unit="invalid") + + def test_default_scale(self): + self.assertEqual(self.default_value.scale, 1.) 
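These argument processors are normally consumed through ``HasEnvironment.setattr_argument`` rather than instantiated directly; the following is a minimal sketch of that usage, mirroring the constructor shapes exercised by the tests (the experiment and argument names are illustrative only):

    from artiq.experiment import (EnvExperiment, BooleanValue,
                                  EnumerationValue, NumberValue)

    class ArgumentDemo(EnvExperiment):
        def build(self):
            # same constructor shapes as in the test cases above
            self.setattr_argument("count", NumberValue(42, step=1, ndecimals=0))
            self.setattr_argument("amplitude", NumberValue(42))
            self.setattr_argument("enabled", BooleanValue(True))
            self.setattr_argument("mode", EnumerationValue(["a", "b", "c"], "a"))

        def run(self):
            print(self.count, self.amplitude, self.enabled, self.mode)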
+ + +class BooleanValueCase(unittest.TestCase): + def setUp(self): + self.default_value = BooleanValue() + self.true_value = BooleanValue(True) + self.false_value = BooleanValue(False) + + def test_default(self): + self.assertIs(self.true_value.default(), True) + self.assertIs(self.false_value.default(), False) + + def test_no_default(self): + with self.assertRaises(DefaultMissing): + self.default_value.default() + + def test_invalid_default(self): + with self.assertRaises(ValueError): + _ = BooleanValue(1) + + with self.assertRaises(ValueError): + _ = BooleanValue("abc") + + +class EnumerationValueCase(unittest.TestCase): + def setUp(self): + self.default_value = EnumerationValue(["abc"]) + + def test_no_default(self): + with self.assertRaises(DefaultMissing): + self.default_value.default() + + def test_invalid_default(self): + with self.assertRaises(ValueError): + _ = EnumerationValue("abc", "d") + + def test_valid_default(self): + try: + _ = EnumerationValue("abc", "a") + except ValueError: + self.fail("Unexpected ValueError") diff --git a/artiq/test/test_ctlmgr.py b/artiq/test/test_ctlmgr.py deleted file mode 100644 index 825fad38c..000000000 --- a/artiq/test/test_ctlmgr.py +++ /dev/null @@ -1,72 +0,0 @@ -import os -import sys -import unittest -import logging -import asyncio - -from artiq.devices.ctlmgr import Controllers -from artiq.protocols.pc_rpc import AsyncioClient - -logger = logging.getLogger(__name__) - - -class ControllerCase(unittest.TestCase): - def setUp(self): - if os.name == "nt": - self.loop = asyncio.ProactorEventLoop() - else: - self.loop = asyncio.new_event_loop() - asyncio.set_event_loop(self.loop) - self.addCleanup(self.loop.close) - - self.controllers = Controllers() - self.controllers.host_filter = "::1" - self.addCleanup( - self.loop.run_until_complete, self.controllers.shutdown()) - - async def start(self, name, entry): - self.controllers[name] = entry - await self.controllers.queue.join() - await self.wait_for_ping(entry["host"], entry["port"]) - - async def get_client(self, host, port): - remote = AsyncioClient() - await remote.connect_rpc(host, port, None) - targets, _ = remote.get_rpc_id() - await remote.select_rpc_target(targets[0]) - self.addCleanup(remote.close_rpc) - return remote - - async def wait_for_ping(self, host, port, retries=5, timeout=2): - dt = timeout/retries - while timeout > 0: - try: - remote = await self.get_client(host, port) - ok = await asyncio.wait_for(remote.ping(), dt) - if not ok: - raise ValueError("unexcepted ping() response from " - "controller: `{}`".format(ok)) - return ok - except asyncio.TimeoutError: - timeout -= dt - except (ConnectionAbortedError, ConnectionError, - ConnectionRefusedError, ConnectionResetError): - await asyncio.sleep(dt) - timeout -= dt - raise asyncio.TimeoutError - - def test_start_ping_stop_controller(self): - entry = { - "type": "controller", - "host": "::1", - "port": 3253, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_lda " - + "-p {port} --simulation") - } - async def test(): - await self.start("lda_sim", entry) - remote = await self.get_client(entry["host"], entry["port"]) - await remote.ping() - - self.loop.run_until_complete(test()) diff --git a/artiq/test/test_datasets.py b/artiq/test/test_datasets.py new file mode 100644 index 000000000..871568a2a --- /dev/null +++ b/artiq/test/test_datasets.py @@ -0,0 +1,105 @@ +"""Tests for the (Env)Experiment-facing dataset interface.""" + +import copy +import unittest + +from sipyco.sync_struct import process_mod + +from 
artiq.experiment import EnvExperiment +from artiq.master.worker_db import DatasetManager + + +class MockDatasetDB: + def __init__(self): + self.data = dict() + + def get(self, key): + return self.data[key][1] + + def update(self, mod): + # Copy mod before applying to avoid sharing references to objects + # between this and the DatasetManager, which would lead to mods being + # applied twice. + process_mod(self.data, copy.deepcopy(mod)) + + def delete(self, key): + del self.data[key] + + +class TestExperiment(EnvExperiment): + def get(self, key): + return self.get_dataset(key) + + def set(self, key, value, **kwargs): + self.set_dataset(key, value, **kwargs) + + def append(self, key, value): + self.append_to_dataset(key, value) + + +KEY = "foo" + + +class ExperimentDatasetCase(unittest.TestCase): + def setUp(self): + # Create an instance of TestExperiment locally in this process and a + # mock dataset db to back it. When used from the master, the worker IPC + # connection would marshal updates between dataset_mgr and dataset_db. + self.dataset_db = MockDatasetDB() + self.dataset_mgr = DatasetManager(self.dataset_db) + self.exp = TestExperiment((None, self.dataset_mgr, None, None)) + + def test_set_local(self): + with self.assertRaises(KeyError): + self.exp.get(KEY) + + for i in range(2): + self.exp.set(KEY, i) + self.assertEqual(self.exp.get(KEY), i) + with self.assertRaises(KeyError): + self.dataset_db.get(KEY) + + def test_set_broadcast(self): + with self.assertRaises(KeyError): + self.exp.get(KEY) + + self.exp.set(KEY, 0, broadcast=True) + self.assertEqual(self.exp.get(KEY), 0) + self.assertEqual(self.dataset_db.get(KEY), 0) + + self.exp.set(KEY, 1, broadcast=False) + self.assertEqual(self.exp.get(KEY), 1) + with self.assertRaises(KeyError): + self.dataset_db.get(KEY) + + def test_append_local(self): + self.exp.set(KEY, []) + self.exp.append(KEY, 0) + self.assertEqual(self.exp.get(KEY), [0]) + self.exp.append(KEY, 1) + self.assertEqual(self.exp.get(KEY), [0, 1]) + + def test_append_broadcast(self): + self.exp.set(KEY, [], broadcast=True) + self.exp.append(KEY, 0) + self.assertEqual(self.dataset_db.data[KEY][1], [0]) + self.exp.append(KEY, 1) + self.assertEqual(self.dataset_db.data[KEY][1], [0, 1]) + + def test_append_array(self): + for broadcast in (True, False): + self.exp.set(KEY, [], broadcast=broadcast) + self.exp.append(KEY, []) + self.exp.append(KEY, []) + self.assertEqual(self.exp.get(KEY), [[], []]) + + def test_append_scalar_fails(self): + for broadcast in (True, False): + with self.assertRaises(AttributeError): + self.exp.set(KEY, 0, broadcast=broadcast) + self.exp.append(KEY, 1) + + def test_append_nonexistent_fails(self): + with self.assertRaises(KeyError): + self.exp.append(KEY, 0) + diff --git a/artiq/test/test_frontends.py b/artiq/test/test_frontends.py new file mode 100644 index 000000000..a52d4a17c --- /dev/null +++ b/artiq/test/test_frontends.py @@ -0,0 +1,27 @@ +"""Generic tests for frontend commands.""" +import subprocess +import sys +import unittest + + +class TestFrontends(unittest.TestCase): + def test_help(self): + """Test --help as a simple smoke test against catastrophic breakage.""" + commands = { + "aqctl": [ + "corelog" + ], + "artiq": [ + "client", "compile", "coreanalyzer", "coremgmt", + "flash", "master", "mkfs", "route", + "rtiomon", "run", "session", "browser", "dashboard" + ] + } + + for module in (prefix + "_" + name + for prefix, names in commands.items() + for name in names): + subprocess.check_call( + [sys.executable, "-m", "artiq.frontend." 
+ module, "--help"], + stdout=subprocess.DEVNULL, + stderr=subprocess.STDOUT) diff --git a/artiq/test/test_korad_ka3005p.py b/artiq/test/test_korad_ka3005p.py deleted file mode 100644 index 5469a8f46..000000000 --- a/artiq/test/test_korad_ka3005p.py +++ /dev/null @@ -1,38 +0,0 @@ -import sys -import unittest - -from artiq.test.hardware_testbench import GenericControllerCase, ControllerCase - - -class GenericKoradKA3005PTest: - def test_parameters_readback(self): - - # check device ID baked into firmware - ids = self.driver.get_id() - self.assertEqual(ids, "KORADKA3005PV2.0") - - -class TestKoradKA3005P(GenericKoradKA3005PTest, ControllerCase): - def setUp(self): - ControllerCase.setUp(self) - self.start_controller("koradka3005p") - self.driver = self.device_mgr.get("koradka3005p") - - -class TestKoradKA3005P(GenericKoradKA3005PTest, GenericControllerCase): - def get_device_db(self): - return { - "korad_ka3005p": { - "type": "controller", - "host": "::1", - "port": 3256, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_korad_ka3005p " - + "-p {port} --simulation") - } - } - - def setUp(self): - GenericControllerCase.setUp(self) - self.start_controller("korad_ka3005p") - self.driver = self.device_mgr.get("korad_ka3005p") diff --git a/artiq/test/test_lda.py b/artiq/test/test_lda.py deleted file mode 100644 index 76c9e01bd..000000000 --- a/artiq/test/test_lda.py +++ /dev/null @@ -1,44 +0,0 @@ -import unittest -import sys - -from artiq.devices.lda.driver import Ldasim -from artiq.language.units import dB -from artiq.test.hardware_testbench import GenericControllerCase, ControllerCase - - -class GenericLdaTest: - def test_attenuation(self): - step = self.cont.get_att_step_size() - attmax = self.cont.get_att_max() - test_vector = [i*step*dB for i in range(0, int(attmax*int(1/step)+1))] - for i in test_vector: - with self.subTest(i=i): - self.cont.set_attenuation(i) - j = self.cont.get_attenuation() - self.assertEqual(i, j) - - -class TestLda(ControllerCase, GenericLdaTest): - def setUp(self): - ControllerCase.setUp(self) - self.start_controller("lda") - self.cont = self.device_mgr.get("lda") - - -class TestLdaSim(GenericControllerCase, GenericLdaTest): - def get_device_db(self): - return { - "lda": { - "type": "controller", - "host": "::1", - "port": 3253, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_lda " - + "-p {port} --simulation") - } - } - - def setUp(self): - GenericControllerCase.setUp(self) - self.start_controller("lda") - self.cont = self.device_mgr.get("lda") diff --git a/artiq/test/test_novatech409b.py b/artiq/test/test_novatech409b.py deleted file mode 100644 index f14b28a8e..000000000 --- a/artiq/test/test_novatech409b.py +++ /dev/null @@ -1,46 +0,0 @@ -import sys -import unittest - -from artiq.test.hardware_testbench import GenericControllerCase, ControllerCase - - -class GenericNovatech409BTest: - def test_parameters_readback(self): - # write sample data and read it back - for i in range(4): - self.driver.set_freq(i, 1e6) - self.driver.set_phase(i, 0.5) - self.driver.set_gain(i, 0.25) - result = self.driver.get_status() - - # check for expected status message; ignore all but first 23 bytes - # compare with previous result extracted from Novatech - for i in range(4): - r = result[i] - self.assertEqual(r[0:23], "00989680 2000 01F5 0000") - - -class TestNovatech409B(GenericNovatech409BTest, ControllerCase): - def setUp(self): - ControllerCase.setUp(self) - self.start_controller("novatech409b") - self.driver = 
self.device_mgr.get("novatech409b") - - -class TestNovatech409BSim(GenericNovatech409BTest, GenericControllerCase): - def get_device_db(self): - return { - "novatech409b": { - "type": "controller", - "host": "::1", - "port": 3254, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_novatech409b " - + "-p {port} --simulation") - } - } - - def setUp(self): - GenericControllerCase.setUp(self) - self.start_controller("novatech409b") - self.driver = self.device_mgr.get("novatech409b") diff --git a/artiq/test/test_pc_rpc.py b/artiq/test/test_pc_rpc.py deleted file mode 100644 index bd8ed96fd..000000000 --- a/artiq/test/test_pc_rpc.py +++ /dev/null @@ -1,134 +0,0 @@ -import unittest -import sys -import subprocess -import asyncio -import time - -import numpy as np - -from artiq.protocols import pc_rpc, fire_and_forget - - -test_address = "::1" -test_port = 7777 -test_object = [5, 2.1, None, True, False, - {"a": 5, 2: np.linspace(0, 10, 1)}, - (4, 5), (10,), "ab\nx\"'"] - - -class RPCCase(unittest.TestCase): - def _run_server_and_test(self, test, *args): - # running this file outside of unittest starts the echo server - with subprocess.Popen([sys.executable, - sys.modules[__name__].__file__]) as proc: - try: - test(*args) - finally: - try: - proc.wait(timeout=1) - except subprocess.TimeoutExpired: - proc.kill() - raise - - def _blocking_echo(self, target): - for attempt in range(100): - time.sleep(.2) - try: - remote = pc_rpc.Client(test_address, test_port, - target) - except ConnectionRefusedError: - pass - else: - break - try: - test_object_back = remote.echo(test_object) - self.assertEqual(test_object, test_object_back) - test_object_back = remote.async_echo(test_object) - self.assertEqual(test_object, test_object_back) - with self.assertRaises(AttributeError): - remote.non_existing_method - remote.terminate() - finally: - remote.close_rpc() - - def test_blocking_echo(self): - self._run_server_and_test(self._blocking_echo, "test") - - def test_blocking_echo_autotarget(self): - self._run_server_and_test(self._blocking_echo, pc_rpc.AutoTarget) - - async def _asyncio_echo(self, target): - remote = pc_rpc.AsyncioClient() - for attempt in range(100): - await asyncio.sleep(.2) - try: - await remote.connect_rpc(test_address, test_port, target) - except ConnectionRefusedError: - pass - else: - break - try: - test_object_back = await remote.echo(test_object) - self.assertEqual(test_object, test_object_back) - test_object_back = await remote.async_echo(test_object) - self.assertEqual(test_object, test_object_back) - with self.assertRaises(AttributeError): - await remote.non_existing_method - await remote.terminate() - finally: - remote.close_rpc() - - def _loop_asyncio_echo(self, target): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - loop.run_until_complete(self._asyncio_echo(target)) - finally: - loop.close() - - def test_asyncio_echo(self): - self._run_server_and_test(self._loop_asyncio_echo, "test") - - def test_asyncio_echo_autotarget(self): - self._run_server_and_test(self._loop_asyncio_echo, pc_rpc.AutoTarget) - - -class FireAndForgetCase(unittest.TestCase): - def _set_ok(self): - self.ok = True - - def test_fire_and_forget(self): - self.ok = False - p = fire_and_forget.FFProxy(self) - p._set_ok() - with self.assertRaises(AttributeError): - p.non_existing_method - p.ff_join() - self.assertTrue(self.ok) - - -class Echo: - def echo(self, x): - return x - - async def async_echo(self, x): - await asyncio.sleep(0.01) - return x - - -def 
run_server(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - echo = Echo() - server = pc_rpc.Server({"test": echo}, builtin_terminate=True) - loop.run_until_complete(server.start(test_address, test_port)) - try: - loop.run_until_complete(server.wait_terminate()) - finally: - loop.run_until_complete(server.stop()) - finally: - loop.close() - -if __name__ == "__main__": - run_server() diff --git a/artiq/test/test_pipe_ipc.py b/artiq/test/test_pipe_ipc.py deleted file mode 100644 index b066d4276..000000000 --- a/artiq/test/test_pipe_ipc.py +++ /dev/null @@ -1,80 +0,0 @@ -import unittest -import sys -import asyncio -import os - -from artiq.protocols import pipe_ipc - - -class IPCCase(unittest.TestCase): - def setUp(self): - if os.name == "nt": - self.loop = asyncio.ProactorEventLoop() - else: - self.loop = asyncio.new_event_loop() - asyncio.set_event_loop(self.loop) - - def tearDown(self): - self.loop.close() - - async def _coro_test(self, child_asyncio): - ipc = pipe_ipc.AsyncioParentComm() - await ipc.create_subprocess(sys.executable, - sys.modules[__name__].__file__, - str(child_asyncio), - ipc.get_address()) - for i in range(10): - ipc.write("{}\n".format(i).encode()) - await ipc.drain() - s = (await ipc.readline()).decode() - self.assertEqual(int(s), i+1) - ipc.write(b"-1\n") - await ipc.process.wait() - - def test_blocking(self): - self.loop.run_until_complete(self._coro_test(False)) - - def test_asyncio(self): - self.loop.run_until_complete(self._coro_test(True)) - - -def run_child_blocking(): - child_comm = pipe_ipc.ChildComm(sys.argv[2]) - while True: - x = int(child_comm.readline().decode()) - if x < 0: - break - child_comm.write((str(x+1) + "\n").encode()) - child_comm.close() - - -async def coro_child(): - child_comm = pipe_ipc.AsyncioChildComm(sys.argv[2]) - await child_comm.connect() - while True: - x = int((await child_comm.readline()).decode()) - if x < 0: - break - child_comm.write((str(x+1) + "\n").encode()) - await child_comm.drain() - child_comm.close() - - -def run_child_asyncio(): - if os.name == "nt": - loop = asyncio.ProactorEventLoop() - asyncio.set_event_loop(loop) - else: - loop = asyncio.get_event_loop() - loop.run_until_complete(coro_child()) - loop.close() - - -def run_child(): - if sys.argv[1] == "True": - run_child_asyncio() - else: - run_child_blocking() - -if __name__ == "__main__": - run_child() diff --git a/artiq/test/test_rpctool.py b/artiq/test/test_rpctool.py deleted file mode 100644 index fb74395bf..000000000 --- a/artiq/test/test_rpctool.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import sys -import asyncio -import unittest - -from artiq.protocols.pc_rpc import Server - - -class Target: - def output_value(self): - return 4125380 - - -class TestRPCTool(unittest.TestCase): - async def check_value(self): - proc = await asyncio.create_subprocess_exec( - sys.executable, "-m", "artiq.frontend.artiq_rpctool", "::1", "7777", "call", "output_value", - stdout = asyncio.subprocess.PIPE) - (value, err) = await proc.communicate() - self.assertEqual(value.decode('ascii').rstrip(), '4125380') - await proc.wait() - - async def do_test(self): - server = Server({"target": Target()}) - await server.start("::1", 7777) - await self.check_value() - await server.stop() - - def test_rpc(self): - if os.name == "nt": - loop = asyncio.ProactorEventLoop() - else: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - loop.run_until_complete(self.do_test()) - finally: - loop.close() - diff --git a/artiq/test/test_scheduler.py 
b/artiq/test/test_scheduler.py index b95a97d9b..ad4f243bd 100644 --- a/artiq/test/test_scheduler.py +++ b/artiq/test/test_scheduler.py @@ -28,7 +28,18 @@ class BackgroundExperiment(EnvExperiment): sleep(0.2) except TerminationRequested: self.set_dataset("termination_ok", True, - broadcast=True, save=False) + broadcast=True, archive=False) + + +class CheckPauseBackgroundExperiment(EnvExperiment): + def build(self): + self.setattr_device("scheduler") + + def run(self): + while True: + while not self.scheduler.check_pause(): + sleep(0.2) + self.scheduler.pause() def _get_expid(name): @@ -42,7 +53,7 @@ def _get_expid(name): def _get_basic_steps(rid, expid, priority=0, flush=False): return [ - {"action": "setitem", "key": rid, "value": + {"action": "setitem", "key": rid, "value": {"pipeline": "main", "status": "pending", "priority": priority, "expid": expid, "due_date": None, "flush": flush, "repo_msg": None}, @@ -117,6 +128,161 @@ class SchedulerCase(unittest.TestCase): scheduler.notifier.publish = None loop.run_until_complete(scheduler.stop()) + def test_pending_priority(self): + """Check due dates take precedence over priorities when waiting to + prepare.""" + loop = self.loop + handlers = {} + scheduler = Scheduler(_RIDCounter(0), handlers, None) + handlers["scheduler_check_pause"] = scheduler.check_pause + + expid_empty = _get_expid("EmptyExperiment") + + expid_bg = _get_expid("CheckPauseBackgroundExperiment") + # Suppress the SystemExit backtrace when worker process is killed. + expid_bg["log_level"] = logging.CRITICAL + + high_priority = 3 + middle_priority = 2 + low_priority = 1 + late = time() + 100000 + early = time() + 1 + + expect = [ + { + "path": [], + "action": "setitem", + "value": { + "repo_msg": None, + "priority": low_priority, + "pipeline": "main", + "due_date": None, + "status": "pending", + "expid": expid_bg, + "flush": False + }, + "key": 0 + }, + { + "path": [], + "action": "setitem", + "value": { + "repo_msg": None, + "priority": high_priority, + "pipeline": "main", + "due_date": late, + "status": "pending", + "expid": expid_empty, + "flush": False + }, + "key": 1 + }, + { + "path": [], + "action": "setitem", + "value": { + "repo_msg": None, + "priority": middle_priority, + "pipeline": "main", + "due_date": early, + "status": "pending", + "expid": expid_empty, + "flush": False + }, + "key": 2 + }, + { + "path": [0], + "action": "setitem", + "value": "preparing", + "key": "status" + }, + { + "path": [0], + "action": "setitem", + "value": "prepare_done", + "key": "status" + }, + { + "path": [0], + "action": "setitem", + "value": "running", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "preparing", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "prepare_done", + "key": "status" + }, + { + "path": [0], + "action": "setitem", + "value": "paused", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "running", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "run_done", + "key": "status" + }, + { + "path": [0], + "action": "setitem", + "value": "running", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "analyzing", + "key": "status" + }, + { + "path": [2], + "action": "setitem", + "value": "deleting", + "key": "status" + }, + { + "path": [], + "action": "delitem", + "key": 2 + }, + ] + done = asyncio.Event() + expect_idx = 0 + def notify(mod): + nonlocal expect_idx + self.assertEqual(mod, expect[expect_idx]) + expect_idx += 1 + if expect_idx 
>= len(expect): + done.set() + scheduler.notifier.publish = notify + + scheduler.start() + + scheduler.submit("main", expid_bg, low_priority) + scheduler.submit("main", expid_empty, high_priority, late) + scheduler.submit("main", expid_empty, middle_priority, early) + + loop.run_until_complete(done.wait()) + scheduler.notifier.publish = None + loop.run_until_complete(scheduler.stop()) + def test_pause(self): loop = self.loop @@ -152,7 +318,7 @@ class SchedulerCase(unittest.TestCase): if mod == {"path": [0], "value": "deleting", "key": "status", - "action": "setitem"}: + "action": "setitem"}: background_completed.set() if mod == {"path": [1], "value": "prepare_done", @@ -185,6 +351,49 @@ class SchedulerCase(unittest.TestCase): loop.run_until_complete(scheduler.stop()) + def test_close_with_active_runs(self): + """Check scheduler exits with experiments still running""" + loop = self.loop + + scheduler = Scheduler(_RIDCounter(0), {}, None) + + expid_bg = _get_expid("BackgroundExperiment") + # Suppress the SystemExit backtrace when worker process is killed. + expid_bg["log_level"] = logging.CRITICAL + expid = _get_expid("EmptyExperiment") + + background_running = asyncio.Event() + empty_ready = asyncio.Event() + background_completed = asyncio.Event() + def notify(mod): + if mod == {"path": [0], + "value": "running", + "key": "status", + "action": "setitem"}: + background_running.set() + if mod == {"path": [0], + "value": "deleting", + "key": "status", + "action": "setitem"}: + background_completed.set() + if mod == {"path": [1], + "value": "prepare_done", + "key": "status", + "action": "setitem"}: + empty_ready.set() + scheduler.notifier.publish = notify + + scheduler.start() + scheduler.submit("main", expid_bg, -99, None, False) + loop.run_until_complete(background_running.wait()) + + scheduler.submit("main", expid, 0, None, False) + loop.run_until_complete(empty_ready.wait()) + + # At this point, (at least) BackgroundExperiment is still running; make + # sure we can stop the scheduler without hanging. 
+ loop.run_until_complete(scheduler.stop()) + def test_flush(self): loop = self.loop scheduler = Scheduler(_RIDCounter(0), dict(), None) diff --git a/artiq/test/test_serialization.py b/artiq/test/test_serialization.py deleted file mode 100644 index e54234e00..000000000 --- a/artiq/test/test_serialization.py +++ /dev/null @@ -1,54 +0,0 @@ -import unittest -import json -from fractions import Fraction - -import numpy as np - -from artiq.protocols import pyon - - -_pyon_test_object = { - (1, 2): [(3, 4.2), (2, )], - "slice": slice(3), - Fraction(3, 4): np.linspace(5, 10, 1), - "set": {"testing", "sets"}, - "a": np.int8(9), "b": np.int16(-98), "c": np.int32(42), "d": np.int64(-5), - "e": np.uint8(8), "f": np.uint16(5), "g": np.uint32(4), "h": np.uint64(9), - "x": np.float16(9.0), "y": np.float32(9.0), "z": np.float64(9.0), - 1j: 1-9j, - "q": np.complex128(1j), -} - - -class PYON(unittest.TestCase): - def test_encdec(self): - for enc in pyon.encode, lambda x: pyon.encode(x, True): - with self.subTest(enc=enc): - self.assertEqual(pyon.decode(enc(_pyon_test_object)), - _pyon_test_object) - - def test_encdec_array(self): - orig = {k: (np.array(v), np.array([v])) - for k, v in _pyon_test_object.items() - if np.isscalar(v)} - for enc in pyon.encode, lambda x: pyon.encode(x, True): - result = pyon.decode(enc(orig)) - for k in orig: - with self.subTest(enc=enc, k=k, v=orig[k]): - np.testing.assert_equal(result[k], orig[k]) - - -_json_test_object = { - "a": "b", - "x": [1, 2, {}], - "foo\nbaz\\qux\"\r2": ["bar", 1.2, {"x": "y"}], - "bar": [True, False, None] -} - - -class JSONPYON(unittest.TestCase): - def test_encdec(self): - for enc in pyon.encode, lambda x: pyon.encode(x, True), json.dumps: - for dec in pyon.decode, json.loads: - self.assertEqual(dec(enc(_json_test_object)), - _json_test_object) diff --git a/artiq/test/test_sync_struct.py b/artiq/test/test_sync_struct.py deleted file mode 100644 index fbc199e0f..000000000 --- a/artiq/test/test_sync_struct.py +++ /dev/null @@ -1,72 +0,0 @@ -import unittest -import asyncio -import numpy as np - -from artiq.protocols import sync_struct - -test_address = "::1" -test_port = 7777 - - -def write_test_data(test_dict): - test_values = [5, 2.1, None, True, False, - {"a": 5, 2: np.linspace(0, 10, 1)}, - (4, 5), (10,), "ab\nx\"'"] - for i in range(10): - test_dict[str(i)] = i - for key, value in enumerate(test_values): - test_dict[key] = value - test_dict[1.5] = 1.5 - test_dict["list"] = [] - test_dict["list"][:] = [34, 31] - test_dict["list"].append(42) - test_dict["list"].insert(1, 1) - test_dict[100] = 0 - test_dict[100] = 1 - test_dict[101] = 1 - test_dict.pop(101) - test_dict[102] = 1 - del test_dict[102] - test_dict["array"] = np.zeros(1) - test_dict["array"][0] = 10 - test_dict["finished"] = True - - -class SyncStructCase(unittest.TestCase): - def init_test_dict(self, init): - self.received_dict = init - return init - - def notify(self, mod): - if ((mod["action"] == "init" and "finished" in mod["struct"]) - or (mod["action"] == "setitem" and mod["key"] == "finished")): - self.receiving_done.set() - - def setUp(self): - self.loop = asyncio.new_event_loop() - asyncio.set_event_loop(self.loop) - - async def _do_test_recv(self): - self.receiving_done = asyncio.Event() - - test_dict = sync_struct.Notifier(dict()) - publisher = sync_struct.Publisher({"test": test_dict}) - await publisher.start(test_address, test_port) - - subscriber = sync_struct.Subscriber("test", self.init_test_dict, - self.notify) - await subscriber.connect(test_address, test_port) - - 
write_test_data(test_dict) - await self.receiving_done.wait() - - await subscriber.close() - await publisher.stop() - - self.assertEqual(self.received_dict, test_dict.read) - - def test_recv(self): - self.loop.run_until_complete(self._do_test_recv()) - - def tearDown(self): - self.loop.close() diff --git a/artiq/test/test_thorlabs_tcube.py b/artiq/test/test_thorlabs_tcube.py deleted file mode 100644 index b1adf6208..000000000 --- a/artiq/test/test_thorlabs_tcube.py +++ /dev/null @@ -1,183 +0,0 @@ -import time -import sys -import unittest - -from artiq.language.units import V -from artiq.test.hardware_testbench import ControllerCase, GenericControllerCase - - -class GenericTdcTest: - def test_pot_parameters(self): - test_vector = 1, 2, 3, 4, 5, 6, 7, 8 - self.cont.set_pot_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_pot_parameters()) - - def test_position_counter(self): - test_vector = 42 - self.cont.set_position_counter(test_vector) - self.assertEqual(test_vector, self.cont.get_position_counter()) - - def test_encoder_counter(self): - test_vector = 43 - self.cont.set_encoder_counter(test_vector) - self.assertEqual(test_vector, self.cont.get_encoder_counter()) - - def test_velocity_parameters(self): - test_vector = 44, 45 - self.cont.set_velocity_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_velocity_parameters()) - - def test_jog_parameters(self): - test_vector = 46, 47, 48, 49, 50 - self.cont.set_jog_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_jog_parameters()) - - def test_gen_move_parameters(self): - test_vector = 51 - self.cont.set_gen_move_parameters(test_vector) - self.assertEqual(test_vector, self.cont.get_gen_move_parameters()) - - def test_moverelparams(self): - test_vector = 52 - self.cont.set_move_relative_parameters(test_vector) - self.assertEqual(test_vector, self.cont.get_move_relative_parameters()) - - def test_move_absolute_parameters(self): - test_vector = 53 - self.cont.set_move_absolute_parameters(test_vector) - self.assertEqual(test_vector, self.cont.get_move_absolute_parameters()) - - def test_home_parameters(self): - test_vector = 54 - self.cont.set_home_parameters(test_vector) - self.assertEqual(test_vector, self.cont.get_home_parameters()) - - def test_limit_switch_parameters(self): - test_vector = 2, 1 - self.cont.set_limit_switch_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_limit_switch_parameters()) - - def test_dc_pid_parameters(self): - test_vector = 57, 58, 59, 60, 0x0f - self.cont.set_dc_pid_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_dc_pid_parameters()) - - def test_av_modes(self): - for i in range(1): - for j in range(1): - for k in range(1): - with self.subTest(i=i): - with self.subTest(j=j): - with self.subTest(k=k): - test_vector = i << 2 + j << 1 + k - self.cont.set_av_modes(test_vector) - self.assertEqual(test_vector, - self.cont.get_av_modes()) - - def test_button_parameters(self): - test_vector = 2, 3, 4 - self.cont.set_button_parameters(*test_vector) - self.assertEqual(test_vector, self.cont.get_button_parameters()) - - -class GenericTpzTest: - def test_position_control_mode(self): - test_vector = 1 - self.cont.set_position_control_mode(test_vector) - self.assertEqual(test_vector, self.cont.get_position_control_mode()) - - def test_ouput_volts(self): - for voltage in 5*V, 10*V, 15*V, \ - round(self.cont.get_tpz_io_settings()[0])*V: - with self.subTest(voltage=voltage): - test_vector = voltage - 
self.cont.set_output_volts(test_vector) - time.sleep(1) # Wait for the output voltage to converge - self.assertAlmostEqual(test_vector, - self.cont.get_output_volts(), - delta=0.03*V) - - def test_output_position(self): - test_vector = 31000 - self.cont.set_output_position(test_vector) - self.assertEqual(test_vector, self.cont.get_output_position()) - - def test_input_volts_source(self): - for i in range(3): - test_vector = i - self.cont.set_input_volts_source(i) - with self.subTest(i=i): - self.assertEqual(test_vector, - self.cont.get_input_volts_source()) - - def test_pi_constants(self): - test_vector = 42, 43 - self.cont.set_pi_constants(*test_vector) - self.assertEqual(test_vector, self.cont.get_pi_constants()) - - def test_tpz_display_settings(self): - for intensity in 0, 10, 30, 50, 100, 150, 254: - with self.subTest(intensity=intensity): - test_vector = intensity - self.cont.set_tpz_display_settings(test_vector) - self.assertEqual(test_vector, - self.cont.get_tpz_display_settings()) - - def test_tpz_io_settings(self): - for v in 75*V, 100*V, 150*V: - with self.subTest(v=v): - test_vector = v, 1 - self.cont.set_tpz_io_settings(*test_vector) - self.assertEqual(test_vector, self.cont.get_tpz_io_settings()) - - -class TestTdc(ControllerCase, GenericTdcTest): - def setUp(self): - ControllerCase.setUp(self) - self.start_controller("tdc") - self.cont = self.device_mgr.get("tdc") - - -class TestTdcSim(GenericControllerCase, GenericTdcTest): - def get_device_db(self): - return { - "tdc": { - "type": "controller", - "host": "::1", - "port": 3255, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_thorlabs_tcube " - + "-p {port} -P tdc001 --simulation") - } - } - - def setUp(self): - GenericControllerCase.setUp(self) - self.start_controller("tdc") - self.cont = self.device_mgr.get("tdc") - - -class TestTpz(ControllerCase, GenericTpzTest): - def setUp(self): - ControllerCase.setUp(self) - self.start_controller("tpz") - self.cont = self.device_mgr.get("tpz") - - -class TestTpzSim(GenericControllerCase, GenericTpzTest): - def get_device_db(self): - return { - "tpz": { - "type": "controller", - "host": "::1", - "port": 3255, - "command": (sys.executable.replace("\\", "\\\\") - + " -m artiq.frontend.aqctl_thorlabs_tcube " - + "-p {port} -P tpz001 --simulation") - } - } - - def setUp(self): - GenericControllerCase.setUp(self) - self.start_controller("tpz") - self.cont = self.device_mgr.get("tpz") diff --git a/artiq/test/test_worker.py b/artiq/test/test_worker.py index 73c09b352..88e3ead82 100644 --- a/artiq/test/test_worker.py +++ b/artiq/test/test_worker.py @@ -90,9 +90,9 @@ class WorkerCase(unittest.TestCase): with self.assertLogs() as logs: with self.assertRaises(WorkerInternalException): _run_experiment("ExceptionTermination") - self.assertEqual(len(logs.records), 1) + self.assertGreater(len(logs.records), 0) self.assertIn("Terminating with exception (TypeError)", - logs.output[0]) + logs.output[-1]) def test_watchdog_no_timeout(self): _run_experiment("WatchdogNoTimeout") diff --git a/artiq/tools.py b/artiq/tools.py index 95685100f..23095f251 100644 --- a/artiq/tools.py +++ b/artiq/tools.py @@ -1,26 +1,22 @@ +import asyncio import importlib.machinery import logging -import sys -import asyncio -import collections import os -import atexit import string -import random +import sys import numpy as np -from artiq.language.environment import is_experiment -from artiq.protocols import pyon -from artiq.appdirs import user_config_dir +from sipyco import pyon + from artiq 
import __version__ as artiq_version +from artiq.appdirs import user_config_dir +from artiq.language.environment import is_experiment __all__ = ["parse_arguments", "elide", "short_format", "file_import", - "get_experiment", "verbosity_args", "simple_network_args", - "multiline_log_config", "init_logger", "bind_address_from_args", - "atexit_register_coroutine", "exc_to_warning", - "asyncio_wait_or_cancel", "TaskObject", "Condition", + "get_experiment", + "exc_to_warning", "asyncio_wait_or_cancel", "get_windows_drives", "get_user_config_dir"] @@ -93,9 +89,9 @@ def file_import(filename, prefix="file_import_"): return module -def get_experiment(module, experiment=None): - if experiment: - return getattr(module, experiment) +def get_experiment(module, class_name=None): + if class_name: + return getattr(module, class_name) exps = [(k, v) for k, v in module.__dict__.items() if k[0] != "_" and is_experiment(v)] @@ -106,76 +102,6 @@ def get_experiment(module, experiment=None): return exps[0][1] -def verbosity_args(parser): - group = parser.add_argument_group("verbosity") - group.add_argument("-v", "--verbose", default=0, action="count", - help="increase logging level") - group.add_argument("-q", "--quiet", default=0, action="count", - help="decrease logging level") - - -def simple_network_args(parser, default_port): - group = parser.add_argument_group("network server") - group.add_argument( - "--bind", default=[], action="append", - help="additional hostname or IP addresse to bind to; " - "use '*' to bind to all interfaces (default: %(default)s)") - group.add_argument( - "--no-localhost-bind", default=False, action="store_true", - help="do not implicitly also bind to localhost addresses") - if isinstance(default_port, int): - group.add_argument("-p", "--port", default=default_port, type=int, - help="TCP port to listen on (default: %(default)d)") - else: - for name, purpose, default in default_port: - h = ("TCP port to listen on for {} connections (default: {})" - .format(purpose, default)) - group.add_argument("--port-" + name, default=default, type=int, - help=h) - - -class MultilineFormatter(logging.Formatter): - def __init__(self): - logging.Formatter.__init__( - self, "%(levelname)s:%(name)s:%(message)s") - - def format(self, record): - r = logging.Formatter.format(self, record) - linebreaks = r.count("\n") - if linebreaks: - i = r.index(":") - r = r[:i] + "<" + str(linebreaks + 1) + ">" + r[i:] - return r - - -def multiline_log_config(level): - root_logger = logging.getLogger() - root_logger.setLevel(level) - handler = logging.StreamHandler() - handler.setFormatter(MultilineFormatter()) - root_logger.addHandler(handler) - - -def init_logger(args): - multiline_log_config( - level=logging.WARNING + args.quiet*10 - args.verbose*10) - - -def bind_address_from_args(args): - if "*" in args.bind: - return None - if args.no_localhost_bind: - return args.bind - else: - return ["127.0.0.1", "::1"] + args.bind - - -def atexit_register_coroutine(coroutine, loop=None): - if loop is None: - loop = asyncio.get_event_loop() - atexit.register(lambda: loop.run_until_complete(coroutine())) - - async def exc_to_warning(coro): try: await coro @@ -198,45 +124,6 @@ async def asyncio_wait_or_cancel(fs, **kwargs): return fs -class TaskObject: - def start(self): - self.task = asyncio.ensure_future(self._do()) - - async def stop(self): - self.task.cancel() - try: - await asyncio.wait_for(self.task, None) - except asyncio.CancelledError: - pass - del self.task - - async def _do(self): - raise NotImplementedError - - 
-class Condition: - def __init__(self, *, loop=None): - if loop is not None: - self._loop = loop - else: - self._loop = asyncio.get_event_loop() - self._waiters = collections.deque() - - async def wait(self): - """Wait until notified.""" - fut = asyncio.Future(loop=self._loop) - self._waiters.append(fut) - try: - await fut - finally: - self._waiters.remove(fut) - - def notify(self): - for fut in self._waiters: - if not fut.done(): - fut.set_result(False) - - def get_windows_drives(): from ctypes import windll @@ -254,54 +141,3 @@ def get_user_config_dir(): dir = user_config_dir("artiq", "m-labs", major) os.makedirs(dir, exist_ok=True) return dir - - -class SSHClient: - def __init__(self, host): - self.host = host - self.ssh = None - self.sftp = None - - tmpname = "".join([random.Random().choice("ABCDEFGHIJKLMNOPQRSTUVWXYZ") - for _ in range(6)]) - self.tmp = "/tmp/artiq" + tmpname - - def get_ssh(self): - if self.ssh is None: - import paramiko - logging.getLogger("paramiko").setLevel(logging.WARNING) - self.ssh = paramiko.SSHClient() - self.ssh.load_system_host_keys() - self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - self.ssh.connect(self.host) - logger.debug("Connecting to {}".format(self.host)) - return self.ssh - - def get_transport(self): - return self.get_ssh().get_transport() - - def get_sftp(self): - if self.sftp is None: - self.sftp = self.get_ssh().open_sftp() - self.sftp.mkdir(self.tmp) - atexit.register(lambda: self.run_command("rm -rf {tmp}")) - return self.sftp - - def spawn_command(self, cmd, get_pty=False, **kws): - chan = self.get_transport().open_session() - chan.set_combine_stderr(True) - if get_pty: - chan.get_pty() - logger.debug("Executing {}".format(cmd)) - chan.exec_command(cmd.format(tmp=self.tmp, **kws)) - return chan - - def drain(self, chan): - while True: - char = chan.recv(1) - if char == b"": - break - sys.stderr.write(char.decode("utf-8", errors='replace')) - - def run_command(self, cmd, **kws): - self.drain(self.spawn_command(cmd, **kws)) diff --git a/conda/artiq-dev.yaml b/conda/artiq-dev.yaml deleted file mode 100644 index 04db33865..000000000 --- a/conda/artiq-dev.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# conda description for an environment to build and develop artiq -# $ conda env create -f conda/artiq-dev.yaml -name: artiq-dev -channels: - - m-labs/label/dev - - m-labs/label/main - - defaults - - conda-forge/label/main -dependencies: - - artiq-dev diff --git a/conda/artiq-dev/meta.yaml b/conda/artiq-dev/meta.yaml deleted file mode 100644 index 91fffb4bf..000000000 --- a/conda/artiq-dev/meta.yaml +++ /dev/null @@ -1,50 +0,0 @@ -package: - name: artiq-dev - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - git_url: ../.. 
- -build: - noarch: python - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - -requirements: - run: - - python >=3.5.3,<3.6 - - setuptools 33.1.1 - - migen 0.6.dev py35_50+git82b06ee - - misoc 0.8.dev py35_15+git5c201712 - - jesd204b 0.4 - - binutils-or1k-linux >=2.27 - - llvm-or1k - - llvmlite-artiq 0.12.0 - - rust-core-or1k 1.20.0 16 - - cargo 0.11.0 - - openocd 0.10.0+git1 - - lit - - outputcheck - - coverage - - scipy - - numpy - - prettytable - - asyncserial - - sphinx 1.4.8 - - sphinx-argparse - - sphinxcontrib-wavedrom - - sphinx_rtd_theme - - h5py - - python-dateutil - - pyqt >=5.5 - - quamash - - pyqtgraph - - pygit2 - - aiohttp - - pythonparser >=1.1 - - levenshtein - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'ARTIQ development metapackage' diff --git a/conda/artiq-doc.yaml b/conda/artiq-doc.yaml deleted file mode 100644 index 8e8cb2e36..000000000 --- a/conda/artiq-doc.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# conda description for an environment to build artiq documentation -# $ conda env create -f conda/artiq-doc.yaml -name: artiq-doc -channels: - - m-labs/label/dev - - m-labs/label/main - - defaults - - conda-forge/label/main -dependencies: - - python>=3.5.3,<3.6 - - sphinx=1.4.8 - - sphinx-argparse - - sphinxcontrib-wavedrom - - sphinx_rtd_theme - - pythonparser>=1.1 diff --git a/conda/artiq-kc705-nist_clock/build.sh b/conda/artiq-kc705-nist_clock/build.sh deleted file mode 100644 index f0275be02..000000000 --- a/conda/artiq-kc705-nist_clock/build.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -SOC_PREFIX=$PREFIX/lib/python3.5/site-packages/artiq/binaries/kc705-nist_clock -mkdir -p $SOC_PREFIX - -V=1 $PYTHON -m artiq.gateware.targets.kc705_dds -H nist_clock -cp misoc_nist_clock_kc705/gateware/top.bit $SOC_PREFIX -cp misoc_nist_clock_kc705/software/bios/bios.bin $SOC_PREFIX -cp misoc_nist_clock_kc705/software/runtime/runtime.fbi $SOC_PREFIX diff --git a/conda/artiq-kc705-nist_clock/meta.yaml b/conda/artiq-kc705-nist_clock/meta.yaml deleted file mode 100644 index f6aebd686..000000000 --- a/conda/artiq-kc705-nist_clock/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -package: - name: artiq-kc705-nist_clock - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - git_url: ../.. 
- -build: - noarch: generic - ignore_prefix_files: True - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - -requirements: - build: - - artiq-dev {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - run: - - artiq {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'Bitstream, BIOS and runtime for NIST_CLOCK on the KC705 board' diff --git a/conda/artiq-kc705-nist_qc2/build.sh b/conda/artiq-kc705-nist_qc2/build.sh deleted file mode 100644 index b60f39b3e..000000000 --- a/conda/artiq-kc705-nist_qc2/build.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -SOC_PREFIX=$PREFIX/lib/python3.5/site-packages/artiq/binaries/kc705-nist_qc2 -mkdir -p $SOC_PREFIX - -V=1 $PYTHON -m artiq.gateware.targets.kc705_dds -H nist_qc2 -cp misoc_nist_qc2_kc705/gateware/top.bit $SOC_PREFIX -cp misoc_nist_qc2_kc705/software/bios/bios.bin $SOC_PREFIX -cp misoc_nist_qc2_kc705/software/runtime/runtime.fbi $SOC_PREFIX diff --git a/conda/artiq-kc705-nist_qc2/meta.yaml b/conda/artiq-kc705-nist_qc2/meta.yaml deleted file mode 100644 index 7caf0169a..000000000 --- a/conda/artiq-kc705-nist_qc2/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -package: - name: artiq-kc705-nist_qc2 - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - git_url: ../.. - -build: - noarch: generic - ignore_prefix_files: True - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - -requirements: - build: - - artiq-dev {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - run: - - artiq {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'Bitstream, BIOS and runtime for NIST_QC2 on the KC705 board' diff --git a/conda/artiq-sayma_amc-standalone/build.sh b/conda/artiq-sayma_amc-standalone/build.sh deleted file mode 100644 index aa116befb..000000000 --- a/conda/artiq-sayma_amc-standalone/build.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -SOC_PREFIX=$PREFIX/lib/python3.5/site-packages/artiq/binaries/sayma_amc-standalone -mkdir -p $SOC_PREFIX - -V=1 $PYTHON -m artiq.gateware.targets.sayma_amc_standalone --rtm-csr-csv $SP_DIR/artiq/binaries/sayma_rtm/sayma_rtm_csr.csv -cp misoc_standalone_sayma_amc/gateware/top.bit $SOC_PREFIX -cp misoc_standalone_sayma_amc/software/bios/bios.bin $SOC_PREFIX -cp misoc_standalone_sayma_amc/software/runtime/runtime.fbi $SOC_PREFIX diff --git a/conda/artiq-sayma_amc-standalone/meta.yaml b/conda/artiq-sayma_amc-standalone/meta.yaml deleted file mode 100644 index 543c04bed..000000000 --- a/conda/artiq-sayma_amc-standalone/meta.yaml +++ /dev/null @@ -1,24 +0,0 @@ -package: - name: artiq-sayma_amc-standalone - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - 
git_url: ../.. - -build: - noarch: generic - ignore_prefix_files: True - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - -requirements: - build: - - artiq-dev {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - - artiq-sayma_rtm {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - run: - - artiq {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'Bitstream, BIOS and runtime for stand-alone Sayma AMC' diff --git a/conda/artiq-sayma_rtm/build.sh b/conda/artiq-sayma_rtm/build.sh deleted file mode 100644 index 5b9476ac7..000000000 --- a/conda/artiq-sayma_rtm/build.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -BUILD_SETTINGS_FILE=$HOME/.m-labs/build_settings.sh -[ -f $BUILD_SETTINGS_FILE ] && . $BUILD_SETTINGS_FILE - -SOC_PREFIX=$PREFIX/lib/python3.5/site-packages/artiq/binaries/sayma_rtm -mkdir -p $SOC_PREFIX - -$PYTHON -m artiq.gateware.targets.sayma_rtm -cp artiq_sayma_rtm/top.bit $SOC_PREFIX -cp artiq_sayma_rtm/sayma_rtm_csr.csv $SOC_PREFIX diff --git a/conda/artiq-sayma_rtm/meta.yaml b/conda/artiq-sayma_rtm/meta.yaml deleted file mode 100644 index a792ba439..000000000 --- a/conda/artiq-sayma_rtm/meta.yaml +++ /dev/null @@ -1,23 +0,0 @@ -package: - name: artiq-sayma_rtm - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - git_url: ../.. - -build: - noarch: generic - ignore_prefix_files: True - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - -requirements: - build: - - artiq-dev {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - run: - - artiq {{ "{tag} py_{number}+git{hash}".format(tag=environ.get("GIT_DESCRIBE_TAG"), number=environ.get("GIT_DESCRIBE_NUMBER"), hash=environ.get("GIT_FULL_HASH", "")[:8]) if "GIT_DESCRIBE_TAG" in environ else "" }} - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'Bitstream and CSR map for Sayma RTM' diff --git a/conda/artiq/meta.yaml b/conda/artiq/meta.yaml deleted file mode 100644 index 00690d38d..000000000 --- a/conda/artiq/meta.yaml +++ /dev/null @@ -1,55 +0,0 @@ -package: - name: artiq - version: {{ environ.get("GIT_DESCRIBE_TAG", "") }} - -source: - git_url: ../.. 
- -{% set data = load_setup_py_data() %} - -build: - noarch: python - number: {{ environ.get("GIT_DESCRIBE_NUMBER", 0) }} - string: py_{{ environ.get("GIT_DESCRIBE_NUMBER", 0) }}+git{{ environ.get("GIT_FULL_HASH", "")[:8] }} - entry_points: - # NOTE: conda-build cannot distinguish between console and gui scripts - {% for entry_point_type, entry_points in data.get("entry_points", dict()).items() -%} - {% for entry_point in entry_points -%} - - {{ entry_point }} - {% endfor %} - {% endfor %} - script: $PYTHON setup.py install --no-compile --single-version-externally-managed --record=record.txt - -requirements: - build: - - python >=3.5.3,<3.6 - - setuptools 33.1.1 - run: - - python >=3.5.3,<3.6 - - llvmlite-artiq 0.12.0 - - binutils-or1k-linux - - pythonparser >=1.1 - - openocd 0.10.0+git1 - - lit - - outputcheck - - scipy - - numpy - - prettytable - - asyncserial - - h5py - - python-dateutil - - pyqt >=5.5 - - quamash - - pyqtgraph 0.10.0 - - pygit2 - - aiohttp - - levenshtein - -test: - imports: - - artiq - -about: - home: https://m-labs.hk/artiq - license: LGPL - summary: 'ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is a next-generation control system for quantum information experiments. It is being developed in partnership with the Ion Storage Group at NIST, and its applicability reaches beyond ion trapping.' diff --git a/doc/manual/conf.py b/doc/manual/conf.py index 9605e9015..cb00b7afb 100644 --- a/doc/manual/conf.py +++ b/doc/manual/conf.py @@ -15,37 +15,47 @@ import sys import os +from unittest.mock import Mock import sphinx_rtd_theme -from unittest.mock import MagicMock # Hack-patch Sphinx so that ARTIQ-Python types are correctly printed # See: https://github.com/m-labs/artiq/issues/741 from sphinx.ext import autodoc -autodoc.repr = str +from sphinx.util import inspect +autodoc.repr = inspect.repr = str -class Mock(MagicMock): - @classmethod - def __getattr__(cls, name): - if name == "_mock_methods": - return None - return Mock() - - -mock_modules = ["artiq.gui.moninj", - "artiq.gui.waitingspinnerwidget", +# we cannot use autodoc_mock_imports (does not help with argparse) +mock_modules = ["artiq.gui.waitingspinnerwidget", "artiq.gui.flowlayout", - "quamash", "pyqtgraph", "matplotlib", + "artiq.gui.state", + "artiq.gui.log", + "artiq.gui.models", + "artiq.compiler.module", + "artiq.compiler.embedding", + "artiq.dashboard", + "qasync", "pyqtgraph", "matplotlib", "numpy", "dateutil", "dateutil.parser", "prettytable", "PyQt5", - "h5py", "serial", "scipy", "scipy.interpolate", "asyncserial", - "llvmlite_artiq", "Levenshtein", "aiohttp"] + "h5py", "serial", "scipy", "scipy.interpolate", + "llvmlite_artiq", "Levenshtein", "pythonparser", + "sipyco", "sipyco.pc_rpc", "sipyco.sync_struct", + "sipyco.asyncio_tools", "sipyco.logging_tools", + "sipyco.broadcast", "sipyco.packed_exceptions"] for module in mock_modules: sys.modules[module] = Mock() +# https://stackoverflow.com/questions/29992444/sphinx-autodoc-skips-classes-inherited-from-mock +class MockApplets: + class AppletsDock: + pass + +sys.modules["artiq.gui.applets"] = MockApplets + + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
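For context on the hunk above: the module mocking in ``conf.py`` works because Python's import machinery consults ``sys.modules`` before looking for a real package, and a ``unittest.mock.Mock`` placed there satisfies both the import itself and any later attribute access performed by autodoc. A minimal self-contained sketch of the idea, using a hypothetical ``heavy_gui_toolkit`` dependency instead of the actual ARTIQ module list::

    import sys
    from unittest.mock import Mock

    # "heavy_gui_toolkit" is a made-up name used only for illustration;
    # in conf.py the real list of GUI/compiler dependencies is mocked instead.
    sys.modules["heavy_gui_toolkit"] = Mock()

    import heavy_gui_toolkit                 # resolves to the Mock in sys.modules
    widget = heavy_gui_toolkit.SomeWidget()  # attribute access and calls return further Mocks

This is only a sketch of the mechanism, not part of the patch; the patch additionally works around classes inherited from mocked modules (see the ``MockApplets`` stub above).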
@@ -70,6 +80,8 @@ extensions = [ 'sphinxcontrib.wavedrom', # see also below for config ] +mathjax_path = "https://m-labs.hk/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML.js" + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -84,7 +96,7 @@ master_doc = 'index' # General information about the project. project = 'ARTIQ' -copyright = '2014-2017, M-Labs Limited' +copyright = '2014-2020, M-Labs Limited' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -238,7 +250,7 @@ latex_elements = { # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ARTIQ.tex', 'ARTIQ Documentation', - 'M-Labs / NIST Ion Storage Group', 'manual'), + 'M-Labs and contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -268,7 +280,7 @@ latex_documents = [ # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'artiq', 'ARTIQ Documentation', - ['M-Labs / NIST Ion Storage Group'], 1) + ['M-Labs and contributors'], 1) ] # If true, show URL addresses after external links. @@ -282,7 +294,7 @@ man_pages = [ # dir menu entry, description, category) texinfo_documents = [ ('index', 'ARTIQ', 'ARTIQ Documentation', - 'M-Labs / NIST Ion Storage Group', 'ARTIQ', 'One line description of project.', + 'M-Labs and contributors', 'ARTIQ', 'A leading-edge control system for quantum information experiments.', 'Miscellaneous'), ] diff --git a/doc/manual/core_device.rst b/doc/manual/core_device.rst index ed5eaf448..9488e4d33 100644 --- a/doc/manual/core_device.rst +++ b/doc/manual/core_device.rst @@ -17,7 +17,7 @@ This storage area is used to store the core device MAC address, IP address and e The flash storage area is one sector (typically 64 kB) large and is organized as a list of key-value records. -This flash storage space can be accessed by using ``artiq_coreconfig`` (see: :ref:`core-device-configuration-tool`). +This flash storage space can be accessed by using ``artiq_coremgmt`` (see: :ref:`core-device-management-tool`). .. _board-ports: @@ -26,16 +26,45 @@ FPGA board ports All boards have a serial interface running at 115200bps 8-N-1 that can be used for debugging. +Kasli +----- + +`Kasli `_ is a versatile core device designed for ARTIQ as part of the `Sinara `_ family of boards. All variants support interfacing to various EEM daughterboards (TTL, DDS, ADC, DAC...) connected directly to it. + +Standalone variants ++++++++++++++++++++ + +Kasli is connected to the network using a 1000Base-X SFP module. `No-name `_ BiDi (1000Base-BX) modules have been used successfully. The SFP module for the network should be installed into the SFP0 cage. +The other SFP cages are not used. + +The RTIO clock frequency is 125MHz or 150MHz, which is generated by the Si5324. + +DRTIO master variants ++++++++++++++++++++++ + +Kasli can be used as a DRTIO master that provides local RTIO channels and can additionally control one DRTIO satellite. + +The RTIO clock frequency is 125MHz or 150MHz, which is generated by the Si5324. The DRTIO line rate is 2.5Gbps or 3Gbps. + +As with the standalone configuration, the SFP module for the Ethernet network should be installed into the SFP0 cage. The DRTIO connections are on SFP1 and SFP2, and optionally on the SATA connector. 
+ +DRTIO satellite/repeater variants ++++++++++++++++++++++++++++++++++ + +Kasli can be used as a DRTIO satellite with a 125MHz or 150MHz RTIO clock and a 2.5Gbps or 3Gbps DRTIO line rate. + +The DRTIO upstream connection is on SFP0 or optionally on the SATA connector, and the remaining SFPs are downstream ports. + KC705 ----- -The main target board for the ARTIQ core device is the KC705 development board from Xilinx. It supports the NIST CLOCK and QC2 hardware (FMC). +An alternative target board for the ARTIQ core device is the KC705 development board from Xilinx. It supports the NIST CLOCK and QC2 hardware (FMC). Common problems +++++++++++++++ * The SW13 switches on the board need to be set to 00001. -* When connected, CLOCK adapter breaks the JTAG chain due to TDI not being connect to TDO on the FMC mezzanine. +* When connected, the CLOCK adapter breaks the JTAG chain due to TDI not being connected to TDO on the FMC mezzanine. * On some boards, the JTAG USB connector is not correctly soldered. VADJ @@ -68,38 +97,25 @@ With the CLOCK hardware, the TTL lines are mapped as follows: +--------------------+-----------------------+--------------+ | 21 | LA32_P | Clock | +--------------------+-----------------------+--------------+ -| 27 | FMCDIO_DIRCTL_CLK | Output | -+--------------------+-----------------------+--------------+ -| 28 | FMCDIO_DIRCTL_SER | Output | -+--------------------+-----------------------+--------------+ -| 29 | FMCDIO_DIRCTL_LATCH | Output | -+--------------------+-----------------------+--------------+ -| 31 | ZOTINO_LDAC | Output | -+--------------------+-----------------------+--------------+ The board has RTIO SPI buses mapped as follows: -+--------------+--------------+--------------+--------------+------------+ -| RTIO channel | CS_N | MOSI | MISO | CLK | -+==============+==============+==============+==============+============+ -| 22 | AMS101_CS_N | AMS101_MOSI | | AMS101_CLK | -+--------------+--------------+--------------+--------------+------------+ -| 23 | SPI0_CS_N | SPI0_MOSI | SPI0_MISO | SPI0_CLK | -+--------------+--------------+--------------+--------------+------------+ -| 24 | SPI1_CS_N | SPI1_MOSI | SPI1_MISO | SPI1_CLK | -+--------------+--------------+--------------+--------------+------------+ -| 25 | SPI2_CS_N | SPI2_MOSI | SPI2_MISO | SPI2_CLK | -+--------------+--------------+--------------+--------------+------------+ -| 26 | MMC_SPI_CS_N | MMC_SPI_MOSI | MMC_SPI_MISO | MMC_SPI_CLK| -+--------------+--------------+--------------+--------------+------------+ -| 30 | ZOTINO_CS_N | ZOTINO_MOSI | ZOTINO_MISO | ZOTINO_CLK | -+--------------+--------------+--------------+--------------+------------+ ++--------------+------------------+--------------+--------------+------------+ +| RTIO channel | CS_N | MOSI | MISO | CLK | ++==============+==================+==============+==============+============+ +| 22 | AMS101_CS_N | AMS101_MOSI | | AMS101_CLK | ++--------------+------------------+--------------+--------------+------------+ +| 23 | SPI0_CS_N | SPI0_MOSI | SPI0_MISO | SPI0_CLK | ++--------------+------------------+--------------+--------------+------------+ +| 24 | SPI1_CS_N | SPI1_MOSI | SPI1_MISO | SPI1_CLK | ++--------------+------------------+--------------+--------------+------------+ +| 25 | SPI2_CS_N | SPI2_MOSI | SPI2_MISO | SPI2_CLK | ++--------------+------------------+--------------+--------------+------------+ +| 26 | MMC_SPI_CS_N | MMC_SPI_MOSI | MMC_SPI_MISO | MMC_SPI_CLK| 
++--------------+------------------+--------------+--------------+------------+ -The DDS bus is on channel 32. +The DDS bus is on channel 27. -This configuration supports a Zotino connected to the KC705 FMC HPC through a FMC DIO 32ch LVDS v1.2 and a VHDCI breakout board rev 1.0. On the VHDCI breakout board, the VHDCI cable to the KC705 should be plugged into to the bottom connector, and the EEM cable to the Zotino should be connected to J41. - -The shift registers on the FMC card should be configured to set the directions of its LVDS buffers, using :mod:`artiq.coredevice.shiftreg`. NIST QC2 ++++++++ @@ -144,3 +160,10 @@ The QC2 hardware uses TCA6424A I2C I/O expanders to define the directions of its To avoid I/O contention, the startup kernel should first program the TCA6424A expanders and then call ``output()`` on all ``TTLInOut`` channels that should be configured as outputs. See :mod:`artiq.coredevice.i2c` for more details. + +Clocking +++++++++ + +The KC705 supports an internal 125MHz RTIO clock (based on its crystal oscillator) and an external clock, that can be selected using the ``rtio_clock`` configuration entry. Valid values are ``i`` and ``e``, and the default is ``i``. The selected option can be observed in the core device boot logs. + +On Kasli, when set to ``e``, the ``rtio_clock`` setting overrides the built-in (and variant-dependent) Si5324 synthesizer configuration and disables the Si5324. The user must apply a clock at the RTIO frequency on the Kasli front panel SMA. As the Si5324 is bypassed in this mode, its skew is deterministic, which is useful to distribute clocks externally to Kasli and Urukul when Urukul phase synchronization is desired. diff --git a/doc/manual/core_drivers_reference.rst b/doc/manual/core_drivers_reference.rst index 6def86ae8..0fc5a8e8f 100644 --- a/doc/manual/core_drivers_reference.rst +++ b/doc/manual/core_drivers_reference.rst @@ -3,68 +3,185 @@ Core drivers reference These drivers are for the core device and the peripherals closely integrated into it, which do not use the controller mechanism. + +System drivers +-------------- + :mod:`artiq.coredevice.core` module ------------------------------------ ++++++++++++++++++++++++++++++++++++ .. automodule:: artiq.coredevice.core :members: -:mod:`artiq.coredevice.ttl` module ----------------------------------- - -.. automodule:: artiq.coredevice.ttl - :members: - -:mod:`artiq.coredevice.dds` module ----------------------------------- - -.. automodule:: artiq.coredevice.dds - :members: - -:mod:`artiq.coredevice.dma` module ----------------------------------- - -.. automodule:: artiq.coredevice.dma - :members: - -:mod:`artiq.coredevice.spi` module ----------------------------------- - -.. automodule:: artiq.coredevice.spi - :members: - -:mod:`artiq.coredevice.ad5360` module -------------------------------------- - -.. automodule:: artiq.coredevice.ad5360 - :members: - -:mod:`artiq.coredevice.i2c` module ----------------------------------- - -.. automodule:: artiq.coredevice.i2c - :members: - -:mod:`artiq.coredevice.cache` module ------------------------------------- - -.. automodule:: artiq.coredevice.cache - :members: - :mod:`artiq.coredevice.exceptions` module ------------------------------------------ ++++++++++++++++++++++++++++++++++++++++++ .. automodule:: artiq.coredevice.exceptions :members: +:mod:`artiq.coredevice.dma` module +++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.dma + :members: + +:mod:`artiq.coredevice.cache` module +++++++++++++++++++++++++++++++++++++ + +.. 
automodule:: artiq.coredevice.cache + :members: + + +Digital I/O drivers +------------------- + +:mod:`artiq.coredevice.ttl` module +++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.ttl + :members: + +:mod:`artiq.coredevice.edge_counter` module +++++++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.edge_counter + :members: + +:mod:`artiq.coredevice.shiftreg` module ++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.shiftreg + :members: + +:mod:`artiq.coredevice.spi2` module ++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.spi2 + :members: + +:mod:`artiq.coredevice.i2c` module +++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.i2c + :members: + +:mod:`artiq.coredevice.pcf8574a` module ++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.pcf8574a + :members: + + +RF generation drivers +--------------------- + +:mod:`artiq.coredevice.urukul` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.urukul + :members: + +:mod:`artiq.coredevice.ad9910` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.ad9910 + :members: + +:mod:`artiq.coredevice.ad9912` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.ad9912 + :members: + +:mod:`artiq.coredevice.ad9914` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.ad9914 + :members: + +:mod:`artiq.coredevice.mirny` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.mirny + :members: + +:mod:`artiq.coredevice.adf5356` module ++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.adf5356 + :members: + :mod:`artiq.coredevice.spline` module -------------------------------------- ++++++++++++++++++++++++++++++++++++++ .. automodule:: artiq.coredevice.spline :members: :mod:`artiq.coredevice.sawg` module ------------------------------------ ++++++++++++++++++++++++++++++++++++ .. automodule:: artiq.coredevice.sawg :members: + + +:mod:`artiq.coredevice.basemod_att` module +++++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.basemod_att + :members: + +:mod:`artiq.coredevice.phaser` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.phaser + :members: + +DAC/ADC drivers +--------------- + +:mod:`artiq.coredevice.ad53xx` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.ad53xx + :members: + +:mod:`artiq.coredevice.zotino` module ++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.zotino + :members: + +:mod:`artiq.coredevice.sampler` module +++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.sampler + :members: + +:mod:`artiq.coredevice.novogorny` module +++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.novogorny + :members: + +:mod:`artiq.coredevice.fastino` module +++++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.fastino + :members: + + +Miscellaneous +------------- + +:mod:`artiq.coredevice.suservo` module +++++++++++++++++++++++++++++++++++++++ + +.. automodule:: artiq.coredevice.suservo + :members: + + +:mod:`artiq.coredevice.grabber` module +++++++++++++++++++++++++++++++++++++++ + +.. 
automodule:: artiq.coredevice.grabber + :members: diff --git a/doc/manual/core_language_reference.rst b/doc/manual/core_language_reference.rst index 8982f9231..228d33590 100644 --- a/doc/manual/core_language_reference.rst +++ b/doc/manual/core_language_reference.rst @@ -1,7 +1,7 @@ Core language reference ======================= -The most commonly used features from the ARTIQ language modules and from the core device modules are bundled together in ``artiq.experiment`` and can be imported with ``from artiq.experiment import *``. +The most commonly used features from the ARTIQ language modules and from the core device modules are bundled together in :mod:`artiq.experiment` and can be imported with ``from artiq.experiment import *``. :mod:`artiq.language.core` module --------------------------------- diff --git a/doc/manual/default_network_ports.rst b/doc/manual/default_network_ports.rst index fb3da836d..df7c33dd9 100644 --- a/doc/manual/default_network_ports.rst +++ b/doc/manual/default_network_ports.rst @@ -1,38 +1,52 @@ Default network ports ===================== -+--------------------------------+--------------+ -| Component | Default port | -+================================+==============+ -| Core device (management) | 1380 | -+--------------------------------+--------------+ -| Core device (main) | 1381 | -+--------------------------------+--------------+ -| Core device (analyzer) | 1382 | -+--------------------------------+--------------+ -| Core device (mon/inj) | 1383 | -+--------------------------------+--------------+ -| Master (logging input) | 1066 | -+--------------------------------+--------------+ -| Master (broadcasts) | 1067 | -+--------------------------------+--------------+ -| Core device logging controller | 1068 | -+--------------------------------+--------------+ -| InfluxDB bridge | 3248 | -+--------------------------------+--------------+ -| Controller manager | 3249 | -+--------------------------------+--------------+ -| Master (notifications) | 3250 | -+--------------------------------+--------------+ -| Master (control) | 3251 | -+--------------------------------+--------------+ -| PDQ2 (out-of-tree) | 3252 | -+--------------------------------+--------------+ -| LDA | 3253 | -+--------------------------------+--------------+ -| Novatech 409B | 3254 | -+--------------------------------+--------------+ -| Thorlabs T-Cube | 3255 | -+--------------------------------+--------------+ -| Korad KA3005P | 3256 | -+--------------------------------+--------------+ ++---------------------------------+--------------+ +| Component | Default port | ++=================================+==============+ +| Core device (management) | 1380 | ++---------------------------------+--------------+ +| Core device (main) | 1381 | ++---------------------------------+--------------+ +| Core device (analyzer) | 1382 | ++---------------------------------+--------------+ +| Core device (mon/inj) | 1383 | ++---------------------------------+--------------+ +| Master (logging input) | 1066 | ++---------------------------------+--------------+ +| Master (broadcasts) | 1067 | ++---------------------------------+--------------+ +| Core device logging controller | 1068 | ++---------------------------------+--------------+ +| InfluxDB bridge | 3248 | ++---------------------------------+--------------+ +| Controller manager | 3249 | ++---------------------------------+--------------+ +| Master (notifications) | 3250 | ++---------------------------------+--------------+ +| Master (control) | 3251 | 
++---------------------------------+--------------+ +| PDQ2 (out-of-tree) | 3252 | ++---------------------------------+--------------+ +| LDA (out-of-tree) | 3253 | ++---------------------------------+--------------+ +| Novatech 409B (out-of-tree) | 3254 | ++---------------------------------+--------------+ +| Thorlabs T-Cube (out-of-tree) | 3255 | ++---------------------------------+--------------+ +| Korad KA3005P (out-of-tree) | 3256 | ++---------------------------------+--------------+ +| Newfocus 8742 (out-of-tree) | 3257 | ++---------------------------------+--------------+ +| PICam (out-of-tree) | 3258 | ++---------------------------------+--------------+ +| PTB Drivers (out-of-tree) | 3259-3270 | ++---------------------------------+--------------+ +| HUT2 (out-of-tree) | 3271 | ++---------------------------------+--------------+ +| TOPTICA Laser SDK (out-of-tree) | 3272 | ++---------------------------------+--------------+ +| HighFinesse (out-of-tree) | 3273 | ++---------------------------------+--------------+ +| InfluxDB schedule bridge | 3275 | ++---------------------------------+--------------+ diff --git a/doc/manual/developing.rst b/doc/manual/developing.rst index d6600d841..7dcde3822 100644 --- a/doc/manual/developing.rst +++ b/doc/manual/developing.rst @@ -1,292 +1,25 @@ Developing ARTIQ ^^^^^^^^^^^^^^^^ -We describe two different approaches to creating a development environment for ARTIQ. - -The first method uses existing pre-compiled Anaconda packages and the ``artiq-dev`` meta-package for the development environment. -This is fast and convenient because it avoids compiling the entire toolchain. -Consequently, some ARTIQ developers as well as the buildbot that's used for continuous integration all employ this method to build the ``artiq`` Anaconda packages and the bitstreams. -It is completely sufficient to develop and tweak the ARTIQ code and to build -bitstreams. - -But with the meta-pakage developing individual components within the toolchain requires extra care. -Consequently, the second method builds most components in the toolchain from their sources. -This takes time and care to reproduce accurately but it gives absolute control over the components and an immediate handle at developing them. -Some ARTIQ developers use this second method of building the entire toolchain -from sources. -It is only recommended for developers and advanced users. - -.. _develop-from-conda: - -ARTIQ Anaconda development environment -====================================== - - 1. Install ``git`` as recommended for your operating system and distribution. - 2. Obtain ARTIQ:: - - $ git clone --recursive https://github.com/m-labs/artiq ~/artiq-dev/artiq - $ cd ~/artiq-dev/artiq - - Add ``-b release-X`` to the ``git clone`` command if you are building a stable branch of ARTIQ. Replace ``X`` with the major release. The default will fetch the development ``master`` branch. - 3. :ref:`Install Anaconda or Miniconda ` - 4. Create and activate a conda environment named ``artiq-dev`` and install the ``artiq-dev`` package which pulls in all the packages required to develop ARTIQ:: - - $ conda env create -f conda/artiq-dev.yaml - $ source activate artiq-dev - 5. Add the ARTIQ source tree to the environment's search path:: - - $ pip install -e . - 6. :ref:`Install Vivado ` - 7. :ref:`Configure OpenOCD ` - 8. :ref:`Build target binaries ` - 9. :ref:`Flash target binaries ` - -.. 
_install-from-source: - -Installing ARTIQ from source -============================ - -Preparing the build environment for the core device ---------------------------------------------------- - -These steps are required to generate code that can run on the core -device. They are necessary both for building the MiSoC BIOS -and the ARTIQ kernels. - -* Install required host packages: :: - - $ sudo apt-get install python3.5 pip3 build-essential cmake cargo - -* Create a development directory: :: - - $ mkdir ~/artiq-dev - -* Clone ARTIQ repository: :: - - $ cd ~/artiq-dev - $ git clone --recursive https://github.com/m-labs/artiq - - Add ``-b release-X`` to the ``git clone`` command if you are building a stable branch of ARTIQ (the default will fetch the development ``master`` branch). - -* Install OpenRISC binutils (or1k-linux-...): :: - - $ cd ~/artiq-dev - $ wget https://ftp.gnu.org/gnu/binutils/binutils-2.27.tar.bz2 - $ tar xvf binutils-2.27.tar.bz2 - $ cd binutils-2.27 - $ curl -L 'https://raw.githubusercontent.com/m-labs/conda-recipes/c3effbc26e96c6e246d6e8035f8a07bc52d8ded1/conda/binutils-or1k-linux/fix-R_OR1K_GOTOFF-relocations.patch' | patch -p1 - - $ mkdir build - $ cd build - $ ../configure --target=or1k-linux --prefix=/usr/local - $ make -j4 - $ sudo make install - -.. note:: - We're using an ``or1k-linux`` target because it is necessary to enable - shared library support in ``ld``, not because Linux is involved. - -* Install LLVM and Clang: :: - - $ cd ~/artiq-dev - $ git clone -b artiq-3.9 https://github.com/m-labs/llvm-or1k - $ cd llvm-or1k - $ git clone -b artiq-3.9 https://github.com/m-labs/clang-or1k tools/clang - - $ mkdir build - $ cd build - $ cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local/llvm-or1k -DLLVM_TARGETS_TO_BUILD="OR1K;X86" -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_INSTALL_UTILS=ON - $ make -j4 - $ sudo make install - -* Install Rust: :: - - $ cd ~/artiq-dev - $ git clone -b artiq-1.20.0 https://github.com/m-labs/rust - $ cd rust - $ git submodule update --init - $ mkdir build - $ cd build - $ ../configure --prefix=/usr/local/rust-or1k --llvm-root=/usr/local/llvm-or1k --disable-manage-submodules --disable-docs - $ sudo mkdir /usr/local/rust-or1k - $ sudo chown $USER.$USER /usr/local/rust-or1k - $ make install - - $ libs="libcore libstd_unicode liballoc liblibc_mini libunwind" - $ rustc="/usr/local/rust-or1k/bin/rustc --target or1k-unknown-none -g -C target-feature=+mul,+div,+ffl1,+cmov,+addc -C opt-level=s -L ." - $ destdir="/usr/local/rust-or1k/lib/rustlib/or1k-unknown-none/lib/" - $ mkdir ../build-or1k - $ cd ../build-or1k - $ for lib in ${libs}; do ${rustc} ../src/${lib}/lib.rs; done - $ ${rustc} -Cpanic=abort ../src/libpanic_abort/lib.rs - $ ${rustc} -Cpanic=unwind ../src/libpanic_unwind/lib.rs --cfg llvm_libunwind - $ mkdir -p ${destdir} - $ cp *.rlib ${destdir} - -.. note:: - Compilation of LLVM can take more than 30 min on some machines. Compilation of Rust can take more than two hours. - -Preparing the core device FPGA board ------------------------------------- - -These steps are required to generate gateware bitstream (``.bit``) files, build the MiSoC BIOS and ARTIQ runtime, and flash FPGA boards. If the board is already flashed, you may skip those steps and go directly to `Installing the host-side software`. - -.. _install-xilinx: - -* Install the FPGA vendor tools (i.e. Vivado): - - * Get Vivado from http://www.xilinx.com/support/download/index.htm. 
- - * During the Vivado installation, uncheck ``Install cable drivers`` (they are not required as we use better and open source alternatives). - -* Install Migen: :: - - $ cd ~/artiq-dev - $ git clone https://github.com/m-labs/migen - $ cd migen - $ python3 setup.py develop --user - -.. note:: - The options ``develop`` and ``--user`` are for setup.py to install Migen in ``~/.local/lib/python3.5``. - -.. _install-bscan-spi: - -* Install the required flash proxy gateware bitstreams: - - The purpose of the flash proxy gateware bitstream is to give programming software fast JTAG access to the flash connected to the FPGA. - - * KC705: - - :: - - $ cd ~/artiq-dev - $ wget https://raw.githubusercontent.com/jordens/bscan_spi_bitstreams/master/bscan_spi_xc7k325t.bit - - Then move ``~/artiq-dev/bscan_spi_xc7k325t.bit`` to ``~/.migen``, ``/usr/local/share/migen``, or ``/usr/share/migen``. - -* :ref:`Download and install OpenOCD `. - -* Download and install ``asyncserial``: :: - - $ cd ~/artiq-dev - $ git clone https://www.github.com/m-labs/asyncserial - $ cd asyncserial - $ python3 setup.py develop --user - -* Download and install MiSoC: :: - - $ cd ~/artiq-dev - $ git clone --recursive https://github.com/m-labs/misoc - $ cd misoc - $ python3 setup.py develop --user - -* Download and install ``pythonparser``: :: - - $ cd ~/artiq-dev - $ git clone https://www.github.com/m-labs/pythonparser - $ cd pythonparser - $ python3 setup.py develop --user - -* Download and install ARTIQ: :: - - $ cd ~/artiq-dev - $ git clone --recursive https://github.com/m-labs/artiq - $ cd artiq - $ python3 setup.py develop --user - -.. note:: - If you have any trouble during ARTIQ setup about ``pygit2`` installation, - refer to the section dealing with - :ref:`installing the host-side software `. - - -* Build the gateware bitstream, BIOS and runtime by running: - :: - - $ cd ~/artiq-dev - $ export PATH=/usr/local/llvm-or1k/bin:$PATH - - .. note:: Make sure that ``/usr/local/llvm-or1k/bin`` is first in your ``PATH``, so that the ``clang`` command you just built is found instead of the system one, if any. - -.. _build-target-binaries: - - * For KC705:: - - $ python3 -m artiq.gateware.targets.kc705_dds -H nist_clock # or nist_qc2 - - .. note:: Add ``--toolchain ise`` if you wish to use ISE instead of Vivado. ISE needs a separate installation step. - -.. _flash-target-binaries: - -* Then, gather the binaries and flash them: :: - - $ mkdir binaries - $ cp misoc_nist_qcX_/gateware/top.bit binaries - $ cp misoc_nist_qcX_/software/bios/bios.bin binaries - $ cp misoc_nist_qcX_/software/runtime/runtime.fbi binaries - $ artiq_flash -d binaries - -* Check that the board boots by running a serial terminal program (you may need to press its FPGA reconfiguration button or power-cycle it to load the gateware bitstream that was newly written into the flash): :: - - $ flterm /dev/ttyUSB1 - MiSoC BIOS http://m-labs.hk - [...] - Booting from flash... - Loading xxxxx bytes from flash... - Executing booted program. - ARTIQ runtime built - -.. note:: flterm is part of MiSoC. If you installed MiSoC with ``setup.py develop --user``, the flterm launcher is in ``~/.local/bin``. - -The communication parameters are 115200 8-N-1. Ensure that your user has access -to the serial device (``sudo adduser $USER dialout`` assuming standard setup). - -.. 
_installing-the-host-side-software: - -Installing the host-side software ---------------------------------- - -* Install the llvmlite Python bindings: :: - - $ cd ~/artiq-dev - $ git clone https://github.com/m-labs/llvmlite - $ cd llvmlite - $ git checkout artiq-3.9 - $ LLVM_CONFIG=/usr/local/llvm-or1k/bin/llvm-config python3 setup.py install --user - -* Install ARTIQ: :: - - $ cd ~/artiq-dev - $ git clone --recursive https://github.com/m-labs/artiq # if not already done - $ cd artiq - $ python3 setup.py develop --user - -.. note:: - If you have any trouble during ARTIQ setup about ``pygit2`` installation, - you can install it by using ``pip``: - - On Ubuntu 14.04:: - - $ python3 `which pip3` install --user pygit2==0.19.1 - - On Ubuntu 14.10:: - - $ python3 `which pip3` install --user pygit2==0.20.3 - - On Ubuntu 15.04 and 15.10:: - - $ python3 `which pip3` install --user pygit2==0.22.1 - - On Ubuntu 16.04:: - - $ python3 `which pip3` install --user pygit2==0.24.1 - - The rationale behind this is that pygit2 and libgit2 must have the same - major.minor version numbers. - - See http://www.pygit2.org/install.html#version-numbers - -* Build the documentation: :: - - $ cd ~/artiq-dev/artiq/doc/manual - $ make html +.. warning:: + This section is only for software or FPGA developers who want to modify ARTIQ. The steps described here are not required if you simply want to run experiments with ARTIQ. If you purchased a system from M-Labs or QUARTIQ, we normally provide board binaries for you. + +The easiest way to obtain an ARTIQ development environment is via the Nix package manager on Linux. The Nix system is used on the `M-Labs Hydra server `_ to build ARTIQ and its dependencies continuously; it ensures that all build instructions are up-to-date and allows binary packages to be used on developers' machines, in particular for large tools such as the Rust compiler. +ARTIQ itself does not depend on Nix, and it is also possible to compile everything from source (look into the ``.nix`` files from the ``nix-scripts`` repository and run the commands manually) - but Nix makes the process a lot easier. + +* Install the `Nix package manager `_ and Git (e.g. ``$ nix-shell -p git``). +* Set up the M-Labs Hydra channels (:ref:`same procedure as the user section `) to allow binaries to be downloaded. Otherwise, tools such as LLVM and the Rust compiler will be compiled on your machine, which uses a lot of CPU time, memory, and disk space. Simply setting up the channels is sufficient, Nix will automatically detect when a binary can be downloaded instead of being compiled locally. +* Clone the repositories https://github.com/m-labs/artiq and https://git.m-labs.hk/m-labs/nix-scripts. +* Run ``$ nix-shell -I artiqSrc=path_to_artiq_sources shell-dev.nix`` to obtain an environment containing all the required development tools (e.g. Migen, MiSoC, Clang, Rust, OpenOCD...) in addition to the ARTIQ user environment. ``artiqSrc`` should point to the root of the cloned ``artiq`` repository, and ``shell-dev.nix`` can be found in the ``artiq-fast`` folder of the ``nix-scripts`` repository. +* This enters a FHS chroot environment that simplifies the installation and patching of Xilinx Vivado. +* Download Vivado from Xilinx and install it (by running the official installer in the FHS chroot environment). If you do not want to write to ``/opt``, you can install it in a folder of your home directory. The "appropriate" Vivado version to use for building the bitstream can vary. 
Some versions contain bugs that lead to hidden or visible failures, while others work fine. Refer to the `M-Labs Hydra logs `_ to determine which version is currently used when building the binary packages. +* During the Vivado installation, uncheck ``Install cable drivers`` (they are not required as we use better and open source alternatives). +* You can then build the firmware and gateware with a command such as ``$ python -m artiq.gateware.targets.kasli``. +* If you did not install Vivado in ``/opt``, add a command line option such as ``--gateware-toolchain-path ~/Xilinx/Vivado``. +* Flash the binaries into the FPGA board with a command such as ``$ artiq_flash --srcbuild artiq_kasli -V <your_variant>``. You need to configure OpenOCD as explained :ref:`in the user section `. OpenOCD is already part of the shell started by ``shell-dev.nix``. +* Check that the board boots and examine the UART messages by running a serial terminal program, e.g. ``$ flterm /dev/ttyUSB1`` (``flterm`` is part of MiSoC and installed by ``shell-dev.nix``). Leave the terminal running while you are flashing the board, so that you see the startup messages when the board boots immediately after flashing. You can also restart the board (without reflashing it) with ``$ artiq_flash start``. +* The communication parameters are 115200 8-N-1. Ensure that your user has access to the serial device (``$ sudo adduser $USER dialout`` assuming standard setup). +* If you are modifying a dependency of ARTIQ, in addition to updating the relevant part of ``nix-scripts``, rebuild and upload the corresponding Conda packages manually, and update their version numbers in ``conda-artiq.nix``. For Conda, only the main ARTIQ package and the board packages are handled automatically on Hydra. + +.. warning:: + Nix will make a read-only copy of the ARTIQ source to use in the shell environment. Therefore, any modifications that you make to the source after the shell is started will not be taken into account. A solution applicable to ARTIQ (and several other Python packages such as Migen and MiSoC) is to prepend the ARTIQ source directory to the ``PYTHONPATH`` environment variable after entering the shell. If you want this to be done by default, edit ``profile`` in ``artiq-dev.nix``. diff --git a/doc/manual/developing_a_ndsp.rst b/doc/manual/developing_a_ndsp.rst index a48b7aa2b..be8809c58 100644 --- a/doc/manual/developing_a_ndsp.rst +++ b/doc/manual/developing_a_ndsp.rst @@ -1,7 +1,7 @@ -Developing a network device support package -=========================================== +Developing a Network Device Support Package (NDSP) +================================================== -Most ARTIQ devices are interfaced through "controllers" that expose RPC interfaces to the network (based on :class:`artiq.protocols.pc_rpc`). The master never does direct I/O to the devices, but issues RPCs to the controllers when needed. As opposed to running everything on the master, this architecture has those main advantages: +Most ARTIQ devices are interfaced through "controllers" that expose RPC interfaces to the network (based on SiPyCo). The master never does direct I/O to the devices, but issues RPCs to the controllers when needed. As opposed to running everything on the master, this architecture has these main advantages: * Each driver can be run on a different machine, which alleviates cabling issues and OS compatibility problems. * Reduces the impact of driver crashes. @@ -15,13 +15,13 @@ A network device support package (NDSP) is composed of several parts: 1. 
The `driver`, which contains the Python API functions to be called over the network, and performs the I/O to the device. The top-level module of the driver is called ``artiq.devices.XXX.driver``. 2. The `controller`, which instantiates, initializes and terminates the driver, and sets up the RPC server. The controller is a front-end command-line tool to the user and is called ``artiq.frontend.aqctl_XXX``. A ``setup.py`` entry must also be created to install it. -3. An optional `client`, which connects to the controller and exposes the functions of the driver as a command-line interface. Clients are front-end tools (called ``artiq.frontend.aqcli_XXX``) that have ``setup.py`` entries. In most cases, a custom client is not needed and the generic ``artiq_rpctool`` utility can be used instead. Custom clients are only required when large amounts of data must be transferred over the network API, that would be unwieldy to pass as ``artiq_rpctool`` command-line parameters. +3. An optional `client`, which connects to the controller and exposes the functions of the driver as a command-line interface. Clients are front-end tools (called ``artiq.frontend.aqcli_XXX``) that have ``setup.py`` entries. In most cases, a custom client is not needed and the generic ``sipyco_rpctool`` utility can be used instead. Custom clients are only required when large amounts of data must be transferred over the network API, that would be unwieldy to pass as ``sipyco_rpctool`` command-line parameters. 4. An optional `mediator`, which is code executed on the client that supplements the network API. A mediator may contain kernels that control real-time signals such as TTL lines connected to the device. Simple devices use the network API directly and do not have a mediator. Mediator modules are called ``artiq.devices.XXX.mediator`` and their public classes are exported at the ``artiq.devices.XXX`` level (via ``__init__.py``) for direct import and use by the experiments. The driver and controller ------------------------- -A controller is a piece of software that receives commands from a client over the network (or the ``localhost`` interface), drives a device, and returns information about the device to the client. The mechanism used is remote procedure calls (RPCs) using :class:`artiq.protocols.pc_rpc`, which makes the network layers transparent for the driver's user. +A controller is a piece of software that receives commands from a client over the network (or the ``localhost`` interface), drives a device, and returns information about the device to the client. The mechanism used is remote procedure calls (RPCs) using ``sipyco.pc_rpc``, which makes the network layers transparent for the driver's user. The controller we will develop is for a "device" that is very easy to work with: the console from which the controller is run. The operation that the driver will implement is writing a message to that console. @@ -33,9 +33,9 @@ For using RPC, the functions that a driver provides must be the methods of a sin For a more complex driver, you would put this class definition into a separate Python module called ``driver``. -To turn it into a server, we use :class:`artiq.protocols.pc_rpc`. Import the function we will use: :: +To turn it into a server, we use ``sipyco.pc_rpc``. 
Import the function we will use: :: - from artiq.protocols.pc_rpc import simple_server_loop + from sipyco.pc_rpc import simple_server_loop and add a ``main`` function that is run when the program is executed: :: @@ -68,24 +68,24 @@ and verify that you can connect to the TCP port: :: :tip: Use the key combination Ctrl-AltGr-9 to get the ``telnet>`` prompt, and enter ``close`` to quit Telnet. Quit the controller with Ctrl-C. -Also verify that a target (service) named "hello" (as passed in the first argument to ``simple_server_loop``) exists using the ``artiq_rpctool`` program from the ARTIQ front-end tools: :: +Also verify that a target (service) named "hello" (as passed in the first argument to ``simple_server_loop``) exists using the ``sipyco_rpctool`` program from the ARTIQ front-end tools: :: - $ artiq_rpctool ::1 3249 list-targets + $ sipyco_rpctool ::1 3249 list-targets Target(s): hello The client ---------- -Clients are small command-line utilities that expose certain functionalities of the drivers. The ``artiq_rpctool`` utility contains a generic client that can be used in most cases, and developing a custom client is not required. Try these commands :: +Clients are small command-line utilities that expose certain functionalities of the drivers. The ``sipyco_rpctool`` utility contains a generic client that can be used in most cases, and developing a custom client is not required. Try these commands :: - $ artiq_rpctool ::1 3249 list-methods - $ artiq_rpctool ::1 3249 call message test + $ sipyco_rpctool ::1 3249 list-methods + $ sipyco_rpctool ::1 3249 call message test In case you are developing a NDSP that is complex enough to need a custom client, we will see how to develop one. Create a ``aqcli_hello.py`` file with the following contents: :: #!/usr/bin/env python3 - from artiq.protocols.pc_rpc import Client + from sipyco.pc_rpc import Client def main(): @@ -112,11 +112,11 @@ Command-line arguments Use the Python ``argparse`` module to make the bind address(es) and port configurable on the controller, and the server address, port and message configurable on the client. -We suggest naming the controller parameters ``--bind`` (which adds a bind address in addition to a default binding to localhost), ``--no-bind-localhost`` (which disables the default binding to localhost), and ``--port``, so that those parameters stay consistent across controllers. Use ``-s/--server`` and ``--port`` on the client. The ``artiq.tools.simple_network_args`` library function adds such arguments for the controller, and the ``artiq.tools.bind_address_from_args`` function processes them. +We suggest naming the controller parameters ``--bind`` (which adds a bind address in addition to a default binding to localhost), ``--no-bind-localhost`` (which disables the default binding to localhost), and ``--port``, so that those parameters stay consistent across controllers. Use ``-s/--server`` and ``--port`` on the client. The ``sipyco.common_args.simple_network_args`` library function adds such arguments for the controller, and the ``sipyco.common_args.bind_address_from_args`` function processes them. 
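Putting these pieces together, a complete minimal controller could look like the sketch below. It is only an illustration: the ``Hello`` class name is a placeholder, the ``"hello"`` target name and port 3249 mirror the values used earlier in this section, and ``simple_network_args``/``bind_address_from_args`` are assumed to behave as just described. The manual's own snippets around this point show the same argument handling in isolation. ::

    #!/usr/bin/env python3

    import argparse

    from sipyco.pc_rpc import simple_server_loop
    from sipyco.common_args import simple_network_args, bind_address_from_args


    class Hello:
        """Driver object: every public method is callable over RPC."""
        def message(self, msg):
            print("message: " + msg)


    def get_argparser():
        parser = argparse.ArgumentParser(description="Hello world controller")
        # Adds --bind, --no-bind-localhost and --port, with 3249 as the default port.
        simple_network_args(parser, 3249)
        return parser


    def main():
        args = get_argparser().parse_args()
        # Serve the driver as RPC target "hello" on the requested addresses and port.
        simple_server_loop({"hello": Hello()},
                           bind_address_from_args(args), args.port)


    if __name__ == "__main__":
        main()
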
The controller's code would contain something similar to this: :: - from artiq.tools import simple_network_args + from sipyco.common_args import simple_network_args def get_argparser(): parser = argparse.ArgumentParser(description="Hello world controller") @@ -132,14 +132,14 @@ We suggest that you define a function ``get_argparser`` that returns the argumen Logging ------- -For the debug, information and warning messages, use the ``logging`` Python module and print the log on the standard error output (the default setting). The logging level is by default "WARNING", meaning that only warning messages and more critical messages will get printed (and no debug nor information messages). By calling the ``verbosity_args()`` with the parser as argument, you add support for the ``--verbose`` (``-v``) and ``--quiet`` (``-q``) arguments in the parser. Each occurence of ``-v`` (resp. ``-q``) in the arguments will increase (resp. decrease) the log level of the logging module. For instance, if only one ``-v`` is present in the arguments, then more messages (info, warning and above) will get printed. If only one ``-q`` is present in the arguments, then only errors and critical messages will get printed. If ``-qq`` is present in the arguments, then only critical messages will get printed, but no debug/info/warning/error. +For the debug, information and warning messages, use the ``logging`` Python module and print the log on the standard error output (the default setting). The logging level is by default "WARNING", meaning that only warning messages and more critical messages will get printed (and no debug nor information messages). By calling ``sipyco.common_args.verbosity_args`` with the parser as argument, you add support for the ``--verbose`` (``-v``) and ``--quiet`` (``-q``) arguments in the parser. Each occurrence of ``-v`` (resp. ``-q``) in the arguments will increase (resp. decrease) the log level of the logging module. For instance, if only one ``-v`` is present in the arguments, then more messages (info, warning and above) will get printed. If only one ``-q`` is present in the arguments, then only errors and critical messages will get printed. If ``-qq`` is present in the arguments, then only critical messages will get printed, but no debug/info/warning/error. The program below exemplifies how to use logging: :: import argparse import logging - from artiq.tools import verbosity_args, init_logger + from sipyco.common_args import verbosity_args, init_logger_from_args # get a logger that prints the module name @@ -151,13 +151,13 @@ The program below exemplifies how to use logging: :: parser.add_argument("--someargument", help="some argument") # [...] - verbosity_args(parser) # This adds the -q and -v handling + verbosity_args(parser) # This adds the -q and -v handling return parser def main(): args = get_argparser().parse_args() - init_logger(args) # This initializes logging system log level according to -v/-q args + init_logger_from_args(args) # This initializes logging system log level according to -v/-q args logger.debug("this is a debug message") logger.info("this is an info message") @@ -172,7 +172,7 @@ The program below exemplifies how to use logging: :: Remote execution support ------------------------ -If you wish to support remote execution in your controller, you may do so by simply replacing ``simple_server_loop`` with :class:`artiq.protocols.remote_exec.simple_rexec_server_loop`. 
+If you wish to support remote execution in your controller, you may do so by simply replacing ``simple_server_loop`` with :class:`sipyco.remote_exec.simple_rexec_server_loop`. General guidelines ------------------ @@ -184,5 +184,10 @@ General guidelines * Controllers must be able to operate in "simulation" mode, where they behave properly even if the associated hardware is not connected. For example, they can print the data to the console instead of sending it to the device, or dump it into a file. * The simulation mode is entered whenever the ``--simulation`` option is specified. * Keep command line parameters consistent across clients/controllers. When adding new command line options, look for a client/controller that does a similar thing and follow its use of ``argparse``. If the original client/controller could use ``argparse`` in a better way, improve it. -* Use docstrings for all public methods of the driver (note that those will be retrieved by ``artiq_rpctool``). +* Use docstrings for all public methods of the driver (note that those will be retrieved by ``sipyco_rpctool``). * Choose a free default TCP port and add it to the default port list in this manual. + +Hosting your code +----------------- + +We suggest that you create a Git repository for your code, and publish it on https://git.m-labs.hk/, GitLab, GitHub, or a similar website of your choosing. Then send us a message or pull request for your NDSP to be added to the list in this manual. diff --git a/doc/manual/drtio.rst b/doc/manual/drtio.rst index 199d6ea4d..350a4b8b4 100644 --- a/doc/manual/drtio.rst +++ b/doc/manual/drtio.rst @@ -22,12 +22,72 @@ The lower layers of DRTIO are similar to White Rabbit, with the following main d From ARTIQ kernels, DRTIO channels are used in the same way as local RTIO channels. +.. _using-drtio: + Using DRTIO ----------- -Remote RTIO channels are accessed in the same was as local ones. Bits 16-24 of the RTIO channel number are used to select between local RTIO channels or one of the connected DRTIO satellites. Bits 0-15 of the RTIO channel number select the channel within one device (local or remote). +Terminology ++++++++++++ -This scheme will be expanded later with the introduction of DRTIO switches. +In a system of interconnected DRTIO devices, each RTIO core (driving RTIO PHYs; for example a RTIO core would connect to a large bank of TTL signals) is assigned a number and is called a *destination*. One DRTIO device normally contains one RTIO core. + +On one DRTIO device, the immediate path that a RTIO request must take is called a *hop*: the request can be sent to the local RTIO core, or to another device downstream. Each possible hop is assigned a number. Hop 0 is normally the local RTIO core, and hops 1 and above correspond to the respective downstream ports of the device. + +DRTIO devices are arranged in a tree topology, with the core device at the root. For each device, its distance from the root (in number of devices that are crossed) is called its *rank*. The root has rank 0, the devices immediately connected to it have rank 1, and so on. + +The routing table ++++++++++++++++++ + +The routing table defines, for each destination, the list of hops ("route") that must be taken from the root in order to reach it. + +It is stored in a binary format that can be manipulated with the :ref:`artiq_route utility `. The binary file is then programmed into the flash storage of the core device under the ``routing_table`` key. 
It is automatically distributed to downstream devices when the connections are established. Modifying the routing table requires rebooting the core device for the new table to be taken into account. + +All routes must end with the local RTIO core of the last device (0). + +The local RTIO core of the core device is a destination like any other, and it needs to be explicitly part of the routing table for kernels to be able to access it. + +If no routing table is programmed, the core device takes a default routing table for a star topology (i.e. with no devices of rank 2 or above), with destination 0 being the core device's local RTIO core and destinations 1 and above corresponding to devices on the respective downstream ports. + +Here is an example of creating and programming a routing table for a chain of 3 devices: :: + + # create an empty routing table + $ artiq_route rt.bin init + + # set destination 0 to the local RTIO core + $ artiq_route rt.bin set 0 0 + + # for destination 1, first use hop 1 (the first downstream port) + # then use the local RTIO core of that second device. + $ artiq_route rt.bin set 1 1 0 + + # for destination 2, use hop 1 and reach the second device as + # before, then use hop 1 on that device to reach the third + # device, and finally use the local RTIO core (hop 0) of the + # third device. + $ artiq_route rt.bin set 2 1 1 0 + + $ artiq_route rt.bin show + 0: 0 + 1: 1 0 + 2: 1 1 0 + + $ artiq_coremgmt config write -f routing_table rt.bin + +Addressing distributed RTIO cores from kernels +++++++++++++++++++++++++++++++++++++++++++++++ + +Remote RTIO channels are accessed in the same way as local ones. Bits 16-24 of the RTIO channel number define the destination. Bits 0-15 of the RTIO channel number select the channel within the destination. + +Link establishment +++++++++++++++++++ + +After devices have booted, it takes several seconds for all links in a DRTIO system to become established (especially with the long locking times of low-bandwidth PLLs that are used for jitter reduction purposes). Kernels should not attempt to access destinations until all required links are up (when this happens, the ``RTIODestinationUnreachable`` exception is raised). ARTIQ provides the method :meth:`~artiq.coredevice.core.Core.get_rtio_destination_status` that determines whether a destination can be reached. We recommend calling it in a loop in your startup kernel for each important destination, to delay startup until they all can be reached. + +Latency ++++++++ + +Each hop increases the RTIO latency of a destination by a significant amount; that latency is however constant and can be compensated for in kernels. To limit latency in a system, fully utilize the downstream ports of devices to reduce the depth of the tree, instead of creating chains. Internal details ---------------- @@ -38,14 +98,14 @@ Real-time and auxiliary packets DRTIO is a packet-based protocol that uses two types of packets: * real-time packets, which are transmitted at high priority at a high bandwidth and are used for the bulk of RTIO commands and data. In the ARTIQ DRTIO implementation, real-time packets are processed entirely in gateware. -* auxiliary packets, which are lower-bandwidth and are used for ancilliary tasks such as housekeeping and monitoring/injection. Auxiliary packets are low-priority and their transmission has no impact on the timing of real-time packets (however, transmission of real-time packets slows down the transmission of auxiliary packets). 
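To tie together the addressing scheme and the link-establishment check described in the user-facing paragraphs above, a startup kernel along the following lines can delay startup until a remote destination becomes reachable. This is only a sketch: it assumes destination 1 from the routing table example, and relies on :meth:`~artiq.coredevice.core.Core.get_rtio_destination_status` behaving as described above. ::

    from artiq.experiment import *


    class WaitForDRTIO(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        @kernel
        def run(self):
            self.core.reset()
            # Wait for destination 1, as set up in the routing table example above.
            # (An RTIO channel number carries the destination in bits 16-24,
            # e.g. channel 0x10003 is channel 3 on destination 1.)
            while not self.core.get_rtio_destination_status(1):
                pass
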
In the ARTIQ DRTIO implementation, the contents of the auxiliary packets are read and written directly by the firmware, with the gateware simply handling the transmission of the raw data. +* auxiliary packets, which are lower-bandwidth and are used for ancillary tasks such as housekeeping and monitoring/injection. Auxiliary packets are low-priority and their transmission has no impact on the timing of real-time packets (however, transmission of real-time packets slows down the transmission of auxiliary packets). In the ARTIQ DRTIO implementation, the contents of the auxiliary packets are read and written directly by the firmware, with the gateware simply handling the transmission of the raw data. Link layer ++++++++++ The lower layer of the DRTIO protocol stack is the link layer, which is responsible for delimiting real-time and auxiliary packets, and assisting with the establishment of a fixed-latency high speed serial transceiver link. -DRTIO uses the IBM (Widmer and Franaszek) 8b/10b encoding. The two types of 8b/10b codes are used: D characters, that always transmit real-time packet data, and K characters, that are used for idling and transmitting auxiliary packet data. +DRTIO uses the IBM (Widmer and Franaszek) 8b/10b encoding. D characters (the encoded 8b symbols) always transmit real-time packet data, whereas K characters are used for idling and transmitting auxiliary packet data. At every logic clock cycle, the high-speed transceiver hardware transmits some amount N of 8b/10b characters (typically, N is 2 or 4) and receives the same amount. With DRTIO, those characters must be all of the D type or all of the K type; mixing D and K characters in the same logic clock cycle is not allowed. @@ -59,7 +119,7 @@ The series of K selection words is then used to form auxiliary packets and the i Both real-time traffic and K selection words are scrambled in order to make the generated electromagnetic interference practically independent from the DRTIO traffic. A multiplicative scrambler is used and its state is shared between the real-time traffic and K selection words, so that real-time data can be descrambled immediately after the scrambler has been synchronized from the K characters. Another positive effect of the scrambling is that commas always appear regularly in the absence of any traffic (and in practice also appear regularly on a busy link). This makes a receiver always able to synchronize itself to an idling transmitter, which removes the need for relatively complex link initialization states. -Due to the use of K characters both as delimiters for real-time packets and as information carrier for auxiliary packets, auxiliary traffic is guaranteed a minimum bandwith simply by having a maximum size limit on real-time packets. +Due to the use of K characters both as delimiters for real-time packets and as information carrier for auxiliary packets, auxiliary traffic is guaranteed a minimum bandwidth simply by having a maximum size limit on real-time packets. Clocking ++++++++ @@ -74,7 +134,7 @@ As part of the DRTIO link initialization, a real-time packet is sent by the core RTIO outputs ++++++++++++ -Controlling a remote RTIO output involves placing the RTIO event into the FIFO of the remote device. The core device maintains a cache of the space available in each channel FIFO of the remote device. 
If, according to the cache, there is space available, then a packet containing the event information (timestamp, address, channel, data) is sent immediately and the cached value is decremented by one. If, according to the cache, no space is available, then the core device sends a request for the space available in the remote FIFO and updates the cache. The process repeats until at least one FIFO entry is available for the event, at which point a packet containing the event information is sent as before. +Controlling a remote RTIO output involves placing the RTIO event into the buffer of the destination. The core device maintains a cache of the buffer space available in each destination. If, according to the cache, there is space available, then a packet containing the event information (timestamp, address, channel, data) is sent immediately and the cached value is decremented by one. If, according to the cache, no space is available, then the core device sends a request for the space available in the destination and updates the cache. The process repeats until at least one remote buffer entry is available for the event, at which point a packet containing the event information is sent as before. Detecting underflow conditions is the responsibility of the core device; should an underflow occur then no DRTIO packet is transmitted. Sequence errors are handled similarly. diff --git a/doc/manual/environment.rst b/doc/manual/environment.rst index 924c54509..cf2cf7308 100644 --- a/doc/manual/environment.rst +++ b/doc/manual/environment.rst @@ -10,7 +10,7 @@ The device database The device database contains information about the devices available in a ARTIQ installation, what drivers to use, what controllers to use and on what machine, and where the devices are connected. -The master (or ``artiq_run``) instantiates the device drivers (and the RPC clients in the case of controllers) for the experiments based on the contents of the device database. +The master (or :mod:`~artiq.frontend.artiq_run`) instantiates the device drivers (and the RPC clients in the case of controllers) for the experiments based on the contents of the device database. The device database is stored in the memory of the master and is generated by a Python script typically called ``device_db.py``. That script must define a global variable ``device_db`` with the contents of the database. The device database is a Python dictionary whose keys are the device names, and values can have several types. @@ -22,9 +22,9 @@ Local device entries are dictionaries that contain a ``type`` field set to ``loc Controllers +++++++++++ -Controller entries are dictionaries whose ``type`` field is set to ``controller``. When an experiment requests such a device, a RPC client (see :class:`artiq.protocols.pc_rpc`) is created and connected to the appropriate controller. Controller entries are also used by controller managers to determine what controllers to run. +Controller entries are dictionaries whose ``type`` field is set to ``controller``. When an experiment requests such a device, a RPC client (see ``sipyco.pc_rpc``) is created and connected to the appropriate controller. Controller entries are also used by controller managers to determine what controllers to run. -The ``best_effort`` field is a boolean that determines whether to use :class:`artiq.protocols.pc_rpc.Client` or :class:`artiq.protocols.pc_rpc.BestEffortClient`. The ``host`` and ``port`` fields configure the TCP connection. 
The ``target`` field contains the name of the RPC target to use (you may use ``artiq_rpctool`` on a controller to list its targets). Controller managers run the ``command`` field in a shell to launch the controller, after replacing ``{port}`` and ``{bind}`` by respectively the TCP port the controller should listen to (matches the ``port`` field) and an appropriate bind address for the controller's listening socket. +The ``best_effort`` field is a boolean that determines whether to use ``sipyco.pc_rpc.Client`` or ``sipyco.pc_rpc.BestEffortClient``. The ``host`` and ``port`` fields configure the TCP connection. The ``target`` field contains the name of the RPC target to use (you may use ``sipyco_rpctool`` on a controller to list its targets). Controller managers run the ``command`` field in a shell to launch the controller, after replacing ``{port}`` and ``{bind}`` by respectively the TCP port the controller should listen to (matches the ``port`` field) and an appropriate bind address for the controller's listening socket. Aliases +++++++ diff --git a/doc/manual/faq.rst b/doc/manual/faq.rst index 8df0258d8..817b64130 100644 --- a/doc/manual/faq.rst +++ b/doc/manual/faq.rst @@ -88,7 +88,13 @@ The preferred way to specify a serial device is to make use of the ``hwgrep://`` URL: it allows to select the serial device by its USB vendor ID, product ID and/or serial number. Those never change, unlike the device file name. -See the :ref:`TDC001 documentation ` for an example of ``hwgrep://`` usage. +For instance, if you want to specify the Vendor/Product ID and the USB Serial Number, you can do: + +``-d "hwgrep://<vendor_id>:<product_id> SNR=<serial_number>"``. +For example: + +``-d "hwgrep://0403:faf0 SNR=83852734"`` + run unit tests? --------------- @@ -107,3 +113,9 @@ The core device tests require the following TTL devices and connections: * ``loop_clock_in``: any input-capable TTL. Must be physically connected to ``loop_clock_out``. If TTL devices are missing, the corresponding tests are skipped. + +find where the dashboard and browser configuration files are stored? +---------------------------------------------------------------------- + +:: + python -c "from artiq.tools import get_user_config_dir; print(get_user_config_dir())" diff --git a/doc/manual/getting_started_core.rst b/doc/manual/getting_started_core.rst index eca62fd03..a17e0633e 100644 --- a/doc/manual/getting_started_core.rst +++ b/doc/manual/getting_started_core.rst @@ -21,7 +21,7 @@ As a very first step, we will turn on a LED on the core device. Create a file `` self.core.reset() self.led.on() -The central part of our code is our ``LED`` class, that derives from :class:`artiq.language.environment.EnvExperiment`. Among other features, ``EnvExperiment`` calls our ``build`` method and provides the ``setattr_device`` method that interfaces to the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The ``@kernel`` decorator tells the system that the ``run`` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using ``setattr_device`` like any other. +The central part of our code is our ``LED`` class, which derives from :class:`artiq.language.environment.EnvExperiment`. 
Among other features, :class:`~artiq.language.environment.EnvExperiment` calls our :meth:`~artiq.language.environment.Experiment.build` method and provides the :meth:`~artiq.language.environment.HasEnvironment.setattr_device` method that interfaces to the device database to create the appropriate device drivers and make those drivers accessible as ``self.core`` and ``self.led``. The :func:`~artiq.language.core.kernel` decorator (``@kernel``) tells the system that the :meth:`~artiq.language.environment.Experiment.run` method must be compiled for and executed on the core device (instead of being interpreted and executed as regular Python code on the host). The decorator uses ``self.core`` internally, which is why we request the core device using :meth:`~artiq.language.environment.HasEnvironment.setattr_device` like any other. Copy the file ``device_db.py`` (containing the device database) from the ``examples/master`` folder of ARTIQ into the same directory as ``led.py`` (alternatively, you can use the ``--device-db`` option of ``artiq_run``). You will probably want to set the IP address of the core device in ``device_db.py`` so that the computer can connect to it (it is the ``host`` parameter of the ``comm`` entry). See :ref:`device-db` for more information. The example device database is designed for the ``nist_clock`` hardware adapter on the KC705; see :ref:`board-ports` for RTIO channel assignments if you need to adapt the device database to a different hardware platform. @@ -69,18 +69,18 @@ You can then turn the LED off and on by entering 0 or 1 at the prompt that appea $ artiq_run led.py Enter desired LED state: 0 -What happens is the ARTIQ compiler notices that the ``input_led_state`` function does not have a ``@kernel`` decorator and thus must be executed on the host. When the core device calls it, it sends a request to the host to execute it. The host displays the prompt, collects user input, and sends the result back to the core device, which sets the LED state accordingly. +What happens is the ARTIQ compiler notices that the :meth:`input_led_state` function does not have a ``@kernel`` decorator (:func:`~artiq.language.core.kernel`) and thus must be executed on the host. When the core device calls it, it sends a request to the host to execute it. The host displays the prompt, collects user input, and sends the result back to the core device, which sets the LED state accordingly. RPC functions must always return a value of the same type. When they return a value that is not ``None``, the compiler should be informed in advance of the type of the value, which is what the ``-> TBool`` annotation is for. -Without the ``break_realtime()`` call, the RTIO events emitted by ``self.led.on()`` or ``self.led.off()`` would be scheduled at a fixed and very short delay after entering ``run()``. -These events would fail because the RPC to ``input_led_state()`` can take an arbitrary amount of time and therefore the deadline for submission of RTIO events would have long passed when ``self.led.on()`` or ``self.led.off()`` are called. -The ``break_realtime()`` call is necessary to waive the real-time requirements of the LED state change. +Without the :meth:`~artiq.coredevice.core.Core.break_realtime` call, the RTIO events emitted by :func:`self.led.on()` or :func:`self.led.off()` would be scheduled at a fixed and very short delay after entering :meth:`~artiq.language.environment.Experiment.run()`. 
+These events would fail because the RPC to :meth:`input_led_state()` can take an arbitrary amount of time and therefore the deadline for submission of RTIO events would have long passed when :func:`self.led.on()` or :func:`self.led.off()` are called. +The :meth:`~artiq.coredevice.core.Core.break_realtime` call is necessary to waive the real-time requirements of the LED state change. It advances the timeline far enough to ensure that events can meet the submission deadline. -Real-time I/O (RTIO) --------------------- +Real-time Input/Output (RTIO) +----------------------------- The point of running code on the core device is the ability to meet demanding real-time constraints. In particular, the core device can respond to an incoming stimulus or the result of a measurement with a low and predictable latency. We will see how to use inputs later; first, we must familiarize ourselves with how time is managed in kernels. @@ -102,11 +102,11 @@ Create a new file ``rtio.py`` containing the following: :: delay(2*us) self.ttl0.pulse(2*us) -In its ``build()`` method, the experiment obtains the core device and a TTL device called ``ttl0`` as defined in the device database. +In its :meth:`~artiq.language.environment.Experiment.build` method, the experiment obtains the core device and a TTL device called ``ttl0`` as defined in the device database. In ARTIQ, TTL is used roughly synonymous with "a single generic digital signal" and does not refer to a specific signaling standard or voltage/current levels. -When ``run()``, the experiment first ensures that ``ttl0`` is in output mode and actively driving the device it is connected to. -Bidirectional TTL channels (i.e. ``TTLInOut``) are in input (high impedance) mode by default, output-only TTL channels (``TTLOut``) are always in output mode. +When :meth:`~artiq.language.environment.Experiment.run`, the experiment first ensures that ``ttl0`` is in output mode and actively driving the device it is connected to. +Bidirectional TTL channels (i.e. :class:`~artiq.coredevice.ttl.TTLInOut`) are in input (high impedance) mode by default, output-only TTL channels (:class:`~artiq.coredevice.ttl.TTLOut`) are always in output mode. There are no input-only TTL channels. The experiment then drives one million 2 µs long pulses separated by 2 µs each. @@ -118,7 +118,7 @@ Any asymmetry in the overhead would manifest itself in a distorted and variable Instead, inside the core device, output timing is generated by the gateware and the CPU only programs switching commands with certain timestamps that the CPU computes. -This guarantees precise timing as long as the CPU can keep generating timestamps that are increasing fast enough. In case it fails to do that (and attempts to program an event with a timestamp smaller than the current RTIO clock timestamp), a :class:`artiq.coredevice.exceptions.RTIOUnderflow` exception is raised. The kernel causing it may catch it (using a regular ``try... except...`` construct), or it will be propagated to the host. +This guarantees precise timing as long as the CPU can keep generating timestamps that are increasing fast enough. In case it fails to do that (and attempts to program an event with a timestamp smaller than the current RTIO clock timestamp), a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` exception is raised. The kernel causing it may catch it (using a regular ``try... except...`` construct), or it will be propagated to the host. 
Try reducing the period of the generated waveform until the CPU cannot keep up with the generation of switching events and the underflow exception is raised. Then try catching it: :: @@ -161,7 +161,7 @@ ARTIQ can implement ``with parallel`` blocks without having to resort to any of It simply remembers the position on the timeline when entering the ``parallel`` block and then seeks back to that position after submitting the events generated by each statement. In other words, the statements in the ``parallel`` block are actually executed sequentially, only the RTIO events generated by them are scheduled to be executed in parallel. Note that if a statement takes a lot of CPU time to execute (this is different from the events scheduled by a statement taking a long time), it may cause a subsequent statement to miss the deadline for timely submission of its events. -This then causes a ``RTIOUnderflow`` exception to be raised. +This then causes a :exc:`~artiq.coredevice.exceptions.RTIOUnderflow` exception to be raised. Within a parallel block, some statements can be made sequential again using a ``with sequential`` construct. Observe the pulses generated by this code: :: @@ -174,6 +174,13 @@ Within a parallel block, some statements can be made sequential again using a `` self.ttl1.pulse(4*us) delay(4*us) +Particular care needs to be taken when working with ``parallel`` blocks in cases where a large number of RTIO events are generated, as it is possible to create sequence errors (`RTIO sequence error`). Sequence errors do not halt execution of the kernel for performance reasons and instead are reported in the core log. If the ``aqctl_corelog`` process has been started with ``artiq_ctlmgr``, then these errors will be posted to the master log. However, if an experiment is executed through ``artiq_run``, these errors will not be visible outside of the core log. + +A sequence error is caused when the scalable event dispatcher (SED) cannot queue an RTIO event due to its timestamp being the same as or earlier than another event in its queue. By default, the SED has 8 lanes, which allows ``parallel`` events to work without sequence errors in most cases; however, if many (>8) events are queued with conflicting timestamps, this error can surface. + +These errors can usually be overcome by reordering the generation of the events. Alternatively, the number of SED lanes can be increased in the gateware. + +.. _rtio-analyzer-example: RTIO analyzer ------------- @@ -199,8 +206,8 @@ The core device records the real-time I/O waveforms into a circular buffer. It i Afterwards, the recorded data can be extracted and written to a VCD file using ``artiq_coreanalyzer -w rtio.vcd`` (see: :ref:`core-device-rtio-analyzer-tool`). VCD files can be viewed using third-party tools such as GtkWave. -DMA ---- +Direct Memory Access (DMA) +-------------------------- DMA allows you to store fixed sequences of pulses in system memory, and have the DMA core in the FPGA play them back at high speed. Pulse sequences that are too fast for the CPU (i.e. would cause RTIO underflows) can still be generated using DMA. The only modification of the sequence that the DMA core supports is shifting it in time (so it can be played back at any position of the timeline), everything else is fixed at the time of recording the sequence. @@ -220,7 +227,7 @@ Try this: :: with self.core_dma.record("pulses"): # all RTIO operations now go to the "pulses" # DMA buffer, instead of being executed immediately. 
- for i in range(100): + for i in range(50): self.ttl0.pulse(100*ns) delay(100*ns) diff --git a/doc/manual/getting_started_mgmt.rst b/doc/manual/getting_started_mgmt.rst index e667abe02..953a7fbd9 100644 --- a/doc/manual/getting_started_mgmt.rst +++ b/doc/manual/getting_started_mgmt.rst @@ -157,7 +157,7 @@ Plotting in the ARTIQ dashboard is achieved by programs called "applets". Applet Applets are configured through their command line to select parameters such as the names of the datasets to plot. The list of command-line options can be retrieved using the ``-h`` option; for example you can run ``python3 -m artiq.applets.plot_xy -h`` in a terminal. -In our case, create a new applet from the XY template by right-clicking on the applet list, and edit the applet command line so that it retrieves the ``parabola`` dataset. Run the experiment again, and observe how the points are added one by one to the plot. +In our case, create a new applet from the XY template by right-clicking on the applet list, and edit the applet command line so that it retrieves the ``parabola`` dataset (the command line should now be ``${artiq_applet}plot_xy parabola``). Run the experiment again, and observe how the points are added one by one to the plot. After the experiment has finished executing, the results are written to a HDF5 file that resides in ``~/artiq-master/results//``. Open that file with HDFView or h5dump, and observe the data we just generated as well as the Git commit ID of the experiment (a hexadecimal hash such as ``947acb1f90ae1b8862efb489a9cc29f7d4e0c645`` that represents the data at a particular time in the Git repository). The list of Git commit IDs can be found using the ``git log`` command in ``~/artiq-work``. diff --git a/doc/manual/index.rst b/doc/manual/index.rst index 5d3fcdd98..c7ecde30f 100644 --- a/doc/manual/index.rst +++ b/doc/manual/index.rst @@ -20,8 +20,7 @@ Contents: drtio core_language_reference core_drivers_reference - protocols_reference - ndsp_reference + list_of_ndsps developing_a_ndsp utilities default_network_ports diff --git a/doc/manual/installing.rst b/doc/manual/installing.rst index e9a82226c..e91163808 100644 --- a/doc/manual/installing.rst +++ b/doc/manual/installing.rst @@ -1,201 +1,295 @@ -.. _install-from-conda: - Installing ARTIQ ================ -The preferred way of installing ARTIQ is through the use of the conda package manager. -The conda package contains pre-built binaries that you can directly flash to your board. +ARTIQ can be installed using the Nix (on Linux) or Conda (on Windows or Linux) package managers. + +Nix is an innovative, robust, fast, and high-quality solution that comes with a larger collection of packages and features than Conda. However, Windows support is poor (using it with Windows Subsystem for Linux still has many problems) and Nix can be harder to learn. + +Conda has a more traditional approach to package management, is much more limited, slow, and lower-quality than Nix, but it supports Windows and it is simpler to use when it functions correctly. + +In the current state of affairs, we recommend that Linux users install ARTIQ via Nix and Windows users install it via Conda. + +.. _installing-nix-users: + +Installing via Nix (Linux) +-------------------------- + +.. note:: + Make sure you are using a 64-bit x86 Linux system. If you are using other systems, such as 32-bit x86, Nix will attempt to compile a number of dependencies from source on your machine. 
This may work, but the installation process will use a lot of CPU time, memory, and disk space. + +First, install the Nix package manager. Some distributions provide a package for the Nix package manager, otherwise, it can be installed via the script on the `Nix website `_. + +Once Nix is installed, add the M-Labs package channel with: :: + + $ nix-channel --add https://nixbld.m-labs.hk/channel/custom/artiq/full-beta/artiq-full + +Those channels track `nixpkgs 20.09 `_. You can check the latest status through the `Hydra interface `_. As the Nix package manager default installation uses the development version of nixpkgs, we need to tell it to switch to the release: :: + + $ nix-channel --remove nixpkgs + $ nix-channel --add https://nixos.org/channels/nixos-20.09 nixpkgs + +Finally, make all the channel changes effective: :: + + $ nix-channel --update + +Nix won't install packages without verifying their cryptographic signature. Add the M-Labs public key by creating the file ``~/.config/nix/nix.conf`` with the following contents: + +:: + + substituters = https://cache.nixos.org https://nixbld.m-labs.hk + trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= nixbld.m-labs.hk-1:5aSRVA5b320xbNvu30tqxVPXpld73bhtOeH6uAjRyHc= + +The easiest way to obtain ARTIQ is then to install it into the user environment with ``$ nix-env -iA artiq-full.artiq-env``. This provides a minimal installation of ARTIQ where the usual commands (``artiq_master``, ``artiq_dashboard``, ``artiq_run``, etc.) are available. + +This installation is however quite limited, as Nix creates a dedicated Python environment for the ARTIQ commands alone. This means that other useful Python packages that you may want (pandas, matplotlib, ...) are not available to them, and this restriction also applies to the M-Labs packages containing board binaries, which means that ``artiq_flash`` will not automatically find them. + +Installing multiple packages and making them visible to the ARTIQ commands requires using the Nix language. Create a file ``my-artiq-env.nix`` with the following contents: + +:: + + let + # pkgs contains the NixOS package collection. ARTIQ depends on some of them, and + # you may want some additional packages from there. + pkgs = import {}; + artiq-full = import { inherit pkgs; }; + in + pkgs.mkShell { + buildInputs = [ + (pkgs.python3.withPackages(ps: [ + # List desired Python packages here. + + # You probably want these two. + artiq-full.artiq + artiq-full.artiq-comtools + + # You need a board support package if and only if you intend to flash + # a board (those packages contain only board firmware). + # The lines below are only examples, you need to select appropriate + # packages for your boards. + #artiq-full.artiq-board-kc705-nist_clock + #artiq-full.artiq-board-kasli-wipm + #ps.paramiko # needed if and only if flashing boards remotely (artiq_flash -H) + + # The NixOS package collection contains many other packages that you may find + # interesting for your research. Here are some examples: + #ps.pandas + #ps.numpy + #ps.scipy + #ps.numba + #(ps.matplotlib.override { enableQt = true; }) + #ps.bokeh + #ps.cirq + #ps.qiskit + #ps.qutip + ])) + + # List desired non-Python packages here + #artiq-full.openocd # needed if and only if flashing boards + # Other potentially interesting packages from the NixOS package collection: + #pkgs.gtkwave + #pkgs.spyder + #pkgs.R + #pkgs.julia + ]; + } + +Then spawn a shell containing the packages with ``$ nix-shell my-artiq-env.nix``. 
The ARTIQ commands with all the additional packages should now be available. + +You can exit the shell by typing Control-D. The next time ``$ nix-shell my-artiq-env.nix`` is invoked, Nix uses the cached packages so the shell startup is fast. + +You can edit this file according to your needs, and also create multiple ``.nix`` files that correspond to different sets of packages. If you are familiar with Conda, using Nix in this way is similar to having multiple Conda environments. + +If your favorite package is not available with Nix, contact us. + +Installing via Conda (Windows, Linux) +------------------------------------- .. warning:: - NIST users on Linux need to pay close attention to their ``umask``. - The sledgehammer called ``secureconfig`` leaves you (and root) with umask 027 and files created by root (for example through ``sudo make install``) inaccessible to you. - The usual umask is 022. + For Linux users, the Nix package manager is preferred, as it is more reliable and faster than Conda. - -.. warning:: - Conda packages are supported for Linux (64-bit) and Windows (64-bit). - Users of other operating systems (32-bit Linux or Windows, BSD, OSX ...) should and can :ref:`install from source `. - -.. _install-anaconda: - -Installing Anaconda or Miniconda --------------------------------- - -You can either install Anaconda (choose Python 3.5) from https://store.continuum.io/cshop/anaconda/ or install the more minimalistic Miniconda (choose Python 3.5) from http://conda.pydata.org/miniconda.html +First, install `Anaconda `_ or the more minimalistic `Miniconda `_. After installing either Anaconda or Miniconda, open a new terminal (also known as command line, console, or shell and denoted here as lines starting with ``$``) and verify the following command works:: $ conda -Executing just ``conda`` should print the help of the ``conda`` command [1]_. +Executing just ``conda`` should print the help of the ``conda`` command. If your shell does not find the ``conda`` command, make sure that the Conda binaries are in your ``$PATH``. If ``$ echo $PATH`` does not show the Conda directories, add them: execute ``$ export PATH=$HOME/miniconda3/bin:$PATH`` if you installed Conda into ``~/miniconda3``. -Installing the ARTIQ packages ------------------------------ +Controllers for third-party devices (e.g. Thorlabs TCube, Lab Brick Digital Attenuator, etc.) that are not shipped with ARTIQ can also be installed with this script. Browse `Hydra `_ or see the list of NDSPs in this manual to find the names of the corresponding packages, and list them at the beginning of the script. + +Set up the Conda channel and install ARTIQ into a new Conda environment: :: + + $ conda config --prepend channels https://conda.m-labs.hk/artiq-beta + $ conda config --append channels conda-forge + $ conda create -n artiq artiq .. note:: - On a system with a pre-existing conda installation, it is recommended to update conda to the latest version prior to installing ARTIQ. + If you do not need to flash boards, the ``artiq`` package is sufficient. The packages named ``artiq-board-*`` contain only firmware for the FPGA board, and you should not install them unless you are reflashing an FPGA board. Controllers for third-party devices (e.g. Thorlabs TCube, Lab Brick Digital Attenuator, etc.) that are not shipped with ARTIQ can also be installed with Conda. Browse `Hydra `_ or see the list of NDSPs in this manual to find the names of the corresponding packages. 
-First add the conda-forge repository containing ARTIQ dependencies to your conda configuration:: +After the installation, activate the newly created environment by name. :: - $ conda config --prepend channels http://conda.anaconda.org/conda-forge/label/main - -Then add the M-Labs ``main`` Anaconda package repository containing stable releases and release candidates:: - - $ conda config --prepend channels http://conda.anaconda.org/m-labs/label/main - -.. note:: - To use the development versions of ARTIQ, also add the ``dev`` label (http://conda.anaconda.org/m-labs/label/dev). - Development versions are built for every change and contain more features, but are not as well-tested and are more likely to contain more bugs or inconsistencies than the releases in the ``main`` label. - -Then prepare to create a new conda environment with the ARTIQ package and the matching binaries for your hardware: -choose a suitable name for the environment, for example ``artiq-main`` if you intend to track the main label, ``artiq-3`` for the 3.x release series, or ``artiq-2016-04-01`` if you consider the environment a snapshot of ARTIQ on 2016-04-01. -Choose the package containing the binaries for your hardware: - - * ``artiq-kc705-nist_clock`` for the KC705 board with the NIST "clock" FMC backplane and AD9914 DDS chips. - * ``artiq-kc705-nist_qc2`` for the KC705 board with the NIST QC2 FMC backplane and AD9914 DDS chips. - -Conda will create the environment, automatically resolve, download, and install the necessary dependencies and install the packages you select:: - - $ conda create -n artiq-main artiq-kc705-nist_clock - -After the installation, activate the newly created environment by name. -On Unix:: - - $ source activate artiq-main - -On Windows:: - - $ activate artiq-main + $ conda activate artiq This activation has to be performed in every new shell you open to make the ARTIQ tools from that environment available. .. note:: - Some ARTIQ examples also require matplotlib and numba, and they must be installed manually for running those examples. They are available in conda. + Some ARTIQ examples also require matplotlib and numba, and they must be installed manually for running those examples. They are available in Conda. +Upgrading ARTIQ (with Nix) +-------------------------- -Upgrading ARTIQ ---------------- +Run ``$ nix-channel --update`` to retrieve information about the latest versions, and then either reinstall ARTIQ into the user environment (``$ nix-env -i python3.6-artiq``) or re-run the ``nix-shell`` command. -When upgrading ARTIQ or when testing different versions it is recommended that new environments are created instead of upgrading the packages in existing environments. +To rollback to the previous version, use ``$ nix-channel --rollback`` and then re-do the second step. You can switch between versions by passing a parameter to ``--rollback`` (see the ``nix-channel`` documentation). + +You may need to reflash the gateware and firmware of the core device to keep it synchronized with the software. + +Upgrading ARTIQ (with Conda) +---------------------------- + +When upgrading ARTIQ or when testing different versions it is recommended that new Conda environments are created instead of upgrading the packages in existing environments. Keep previous environments around until you are certain that they are not needed anymore and a new environment is known to work correctly. 
-You can create a new conda environment specifically to test a certain version of ARTIQ:: - $ conda create -n artiq-test-1.0rc2 artiq-kc705-nist_clock=1.0rc2 +To install the latest version, just select a different environment name and run the installation command again. + +Switching between Conda environments using commands such as ``$ conda deactivate artiq-6`` and ``$ conda activate artiq-5`` is the recommended way to roll back to previous versions of ARTIQ. + +You may need to reflash the gateware and firmware of the core device to keep it synchronized with the software. -Switching between conda environments using ``$ source deactivate artiq-1.0rc2`` and ``$ source activate artiq-1.0rc1`` is the recommended way to roll back to previous versions of ARTIQ. You can list the environments you have created using:: $ conda env list -See also the `conda documentation `_ for managing environments. +Flashing gateware and firmware into the core device +--------------------------------------------------- -Preparing the core device FPGA board ------------------------------------- +.. note:: + If you have purchased a pre-assembled system from M-Labs or QUARTIQ, the gateware and firmware are already flashed and you can skip those steps, unless you want to replace them with a different version of ARTIQ. You now need to write three binary images onto the FPGA board: 1. The FPGA gateware bitstream -2. The BIOS -3. The ARTIQ runtime +2. The bootloader +3. The ARTIQ runtime or satellite manager -They are all shipped in the conda packages, along with the required flash proxy gateware bitstreams. - -.. _install-openocd: +They are all shipped in the Nix and Conda packages, along with the required flash proxy gateware bitstreams. Installing OpenOCD ^^^^^^^^^^^^^^^^^^ OpenOCD can be used to write the binary images into the core device FPGA board's flash memory. -The ``artiq`` or ``artiq-dev`` conda packages install ``openocd`` automatically but it can also be installed explicitly using conda on both Linux and Windows:: + +With Nix, add ``artiq-full.openocd`` to the shell packages. Be careful not to add ``pkgs.openocd`` instead - this would install OpenOCD from the NixOS package collection, which does not support ARTIQ boards. + +With Conda, the ``artiq`` package installs ``openocd`` automatically but it can also be installed explicitly on both Linux and Windows:: $ conda install openocd -.. _setup-openocd: +.. _configuring-openocd: Configuring OpenOCD ^^^^^^^^^^^^^^^^^^^ Some additional steps are necessary to ensure that OpenOCD can communicate with the FPGA board. -On Linux, first ensure that the current user belongs to the ``plugdev`` group. If it does not, run ``sudo adduser $USER plugdev`` and relogin. Afterwards:: +On Linux, first ensure that the current user belongs to the ``plugdev`` group (i.e. ``plugdev`` shown when you run ``$ groups``). If it does not, run ``$ sudo adduser $USER plugdev`` and re-login. 
- $ wget https://raw.githubusercontent.com/ntfreak/openocd/406f4d1c68330e3bf8d9db4e402fd8802a5c79e2/contrib/99-openocd.rules - $ sudo cp 99-openocd.rules /etc/udev/rules.d - $ sudo adduser $USER plugdev - $ sudo udevadm trigger +If you installed OpenOCD on Linux using Nix, use the ``which`` command to determine the path to OpenOCD, and then copy the udev rules: :: + + $ which openocd + /nix/store/2bmsssvk3d0y5hra06pv54s2324m4srs-openocd-mlabs-0.10.0/bin/openocd + $ sudo cp /nix/store/2bmsssvk3d0y5hra06pv54s2324m4srs-openocd-mlabs-0.10.0/share/openocd/contrib/60-openocd.rules /etc/udev/rules.d + $ sudo udevadm trigger + +NixOS users should of course configure OpenOCD through ``/etc/nixos/configuration.nix`` instead. + +If you installed OpenOCD on Linux using Conda and are using the Conda environment ``artiq``, then execute the statements below. If you are using a different environment, you will have to replace ``artiq`` with the name of your environment:: + + $ sudo cp ~/.conda/envs/artiq/share/openocd/contrib/60-openocd.rules /etc/udev/rules.d + $ sudo udevadm trigger On Windows, a third-party tool, `Zadig `_, is necessary. Use it as follows: 1. Make sure the FPGA board's JTAG USB port is connected to your computer. 2. Activate Options → List All Devices. -3. Select the "Digilent Adept USB Device (Interface 0)" device from the drop-down list. +3. Select the "Digilent Adept USB Device (Interface 0)" or "FTDI Quad-RS232 HS" (or similar) + device from the drop-down list. 4. Select WinUSB from the spinner list. 5. Click "Install Driver" or "Replace Driver". You may need to repeat these steps every time you plug the FPGA board into a port where it has not been plugged into previously on the same system. -.. _flashing-core-device: +Writing the flash +^^^^^^^^^^^^^^^^^ -Flashing the core device -^^^^^^^^^^^^^^^^^^^^^^^^ +Then, you can write the flash: -Then, you can flash the board: +* For Kasli:: -* For the KC705 board (selecting the appropriate hardware peripheral):: + $ artiq_flash -V [your system variant] - $ artiq_flash -t kc705 -m [nist_clock/nist_qc2] +The JTAG adapter is integrated into the Kasli board; for flashing (and debugging) you simply need to connect your computer to the micro-USB connector on the Kasli front panel. - The SW13 switches also need to be set to 00001. +* For the KC705 board:: -The next step is to flash the MAC and IP addresses to the board. See :ref:`those instructions `. + $ artiq_flash -t kc705 -V [nist_clock/nist_qc2] -.. _configuring-core-device: + The SW13 switches need to be set to 00001. -Configuring the core device ---------------------------- +Setting up the core device IP networking +---------------------------------------- -This should be done after either installation method (conda or source). +For Kasli, insert a SFP/RJ45 transceiver (normally included with purchases from M-Labs and QUARTIQ) into the SFP0 port and connect it to an Ethernet port in your network. If the port is 10Mbps or 100Mbps and not 1000Mbps, make sure that the SFP/RJ45 transceiver supports the lower rate. Many SFP/RJ45 transceivers only support the 1000Mbps rate. If you do not have a SFP/RJ45 transceiver that supports 10Mbps and 100Mbps rates, you may instead use a gigabit Ethernet switch in the middle to perform rate conversion. -.. _flash-mac-ip-addr: +You can also insert other types of SFP transceivers into Kasli if you wish to use it directly in e.g. an optical fiber Ethernet network. 
-* Set the MAC and IP address in the :ref:`core device configuration flash storage ` (see above for the ``-t`` and ``-m`` options to ``artiq_flash`` that may be required): :: +If you purchased a Kasli device from M-Labs, it usually comes with the IP address ``192.168.1.75``. Once you can reach this IP, it can be changed with: :: - $ artiq_mkfs flash_storage.img -s mac xx:xx:xx:xx:xx:xx -s ip xx.xx.xx.xx - $ artiq_flash -t [board] -m [adapter] -f flash_storage.img proxy storage start + $ artiq_coremgmt -D 192.168.1.75 config write -s ip [new IP] -* (optional) Flash the idle kernel +and then reboot the device (with ``artiq_flash start`` or a power cycle). + +In other cases, install OpenOCD as before, and flash the IP (and, if necessary, MAC) addresses directly: :: + + $ artiq_mkfs flash_storage.img -s mac xx:xx:xx:xx:xx:xx -s ip xx.xx.xx.xx + $ artiq_flash -t [board] -V [variant] -f flash_storage.img storage start + +For Kasli devices, flashing a MAC address is not necessary as they can obtain it from their EEPROM. + +Check that you can ping the device. If ping fails, check that the Ethernet link LED is ON - on Kasli, it is the LED next to the SFP0 connector. As a next step, look at the messages emitted on the UART during boot. Use a program such as flterm or PuTTY to connect to the device's serial port at 115200bps 8-N-1 and reboot the device. On Kasli, the serial port is on FTDI channel 2 with v1.1 hardware (with channel 0 being JTAG) and on FTDI channel 1 with v1.0 hardware. + +If you want to use IPv6, the device also has a link-local address that corresponds to its EUI-64, and an additional arbitrary IPv6 address can be defined by using the ``ip6`` configuration key. All IPv4 and IPv6 addresses can be used at the same time. + +Miscellaneous configuration of the core device +---------------------------------------------- + +Those steps are optional. The core device usually needs to be restarted for changes to take effect. + +* Load the idle kernel The idle kernel is the kernel (some piece of code running on the core device) which the core device runs whenever it is not connected to a PC via Ethernet. This kernel is therefore stored in the :ref:`core device configuration flash storage `. -To flash the idle kernel: - * Compile the idle experiment: - The idle experiment's ``run()`` method must be a kernel: it must be decorated with the ``@kernel`` decorator (see :ref:`next topic ` for more information about kernels). +To flash the idle kernel, first compile the idle experiment. The idle experiment's ``run()`` method must be a kernel: it must be decorated with the ``@kernel`` decorator (see :ref:`next topic ` for more information about kernels). Since the core device is not connected to the PC, RPCs (calling Python code running on the PC from the kernel) are forbidden in the idle experiment. Then write it into the core device configuration flash storage: :: - Since the core device is not connected to the PC, RPCs (calling Python code running on the PC from the kernel) are forbidden in the idle experiment. - :: + $ artiq_compile idle.py + $ artiq_coremgmt config write -f idle_kernel idle.elf - $ artiq_compile idle.py +.. note:: You can find more information about how to use the ``artiq_coremgmt`` utility on the :ref:`Utilities ` page. - * Write it into the core device configuration flash storage: :: +* Load the startup kernel - $ artiq_coreconfig write -f idle_kernel idle.elf +The startup kernel is executed once when the core device powers up. 
It should initialize DDSes, set up TTL directions, etc. Proceed as with the idle kernel, but using the ``startup_kernel`` key in the ``artiq_coremgmt`` command. -.. note:: You can find more information about how to use the ``artiq_coreconfig`` utility on the :ref:`Utilities ` page. +For DRTIO systems, the startup kernel should wait until the desired destinations (including local RTIO) are up, using :meth:`artiq.coredevice.Core.get_rtio_destination_status`. -* (optional) Flash the startup kernel +* Load the DRTIO routing table -The startup kernel is executed once when the core device powers up. It should initialize DDSes, set up TTL directions, etc. Proceed as with the idle kernel, but using the ``startup_kernel`` key in ``artiq_coreconfig``. +If you are using DRTIO and the default routing table (for a star topology) is not suitable to your needs, prepare and load a different routing table. See :ref:`Using DRTIO `. -* (optional) Select the startup clock +* Select the RTIO clock source (KC705 and Kasli) -The core device may use either an external clock signal or its internal clock. This clock can be switched dynamically after the PC is connected using the ``external_clock`` parameter of the core device driver; however, one may want to select the clock at power-up so that it is used for the startup and idle kernels. Use one of these commands: :: +The KC705 may use either an external clock signal or its internal clock. The clock is selected at power-up. For Kasli, setting the RTIO clock source to "external" would bypass the Si5324 synthesiser, requiring that an input clock be present. To select the source, use one of these commands: :: - $ artiq_coreconfig write -s startup_clock i # internal clock (default) - $ artiq_coreconfig write -s startup_clock e # external clock - - -.. rubric:: Footnotes - -.. [1] [Linux] If your shell does not find the ``conda`` command, make sure that the conda binaries are in your ``$PATH``: - If ``$ echo $PATH`` does not show the conda directories, add them: execute ``$ export PATH=$HOME/miniconda3/bin:$PATH`` if you installed conda into ``~/miniconda3``. + $ artiq_coremgmt config write -s rtio_clock i # internal clock (default) + $ artiq_coremgmt config write -s rtio_clock e # external clock diff --git a/doc/manual/introduction.rst b/doc/manual/introduction.rst index c5513f070..6336ba4ee 100644 --- a/doc/manual/introduction.rst +++ b/doc/manual/introduction.rst @@ -10,13 +10,11 @@ It is maintained and developed by `M-Labs `_ and the initial The system features a high-level programming language that helps describing complex experiments, which is compiled and executed on dedicated hardware with nanosecond timing resolution and sub-microsecond latency. It includes graphical user interfaces to parametrize and schedule experiments and to visualize and explore the results. -ARTIQ uses FPGA hardware to perform its time-critical tasks. -It is designed to be portable to hardware platforms from different vendors and FPGA manufacturers. -Currently, several different configurations of a `high-end FPGA evaluation kit `_ are used and supported. This FPGA platform can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort. +ARTIQ uses FPGA hardware to perform its time-critical tasks. The `Sinara hardware `_, and in particular the Kasli FPGA carrier, is designed to work with ARTIQ. +ARTIQ is designed to be portable to hardware platforms from different vendors and FPGA manufacturers. 
+Several different configurations of a `high-end FPGA evaluation kit `_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort. -Custom hardware components with widely extended capabilities and advanced support for scalable and fully distributed real-time control of experiments `are being designed `_. - -ARTIQ and its dependencies are available in the form of `conda packages `_ for both Linux and Windows. +ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and Conda packages (for Windows and Linux). Packages containing pre-compiled binary images to be loaded onto the hardware platforms are supplied for each configuration. Like any open source software ARTIQ can equally be built and installed directly from `source `_. @@ -29,4 +27,4 @@ Website: https://m-labs.hk/artiq `Cite ARTIQ `_ as ``Bourdeauducq, Sébastien et al. (2016). ARTIQ 1.0. Zenodo. 10.5281/zenodo.51303``. -Copyright (C) 2014-2017 M-Labs Limited. Licensed under GNU LGPL version 3+. +Copyright (C) 2014-2020 M-Labs Limited. Licensed under GNU LGPL version 3+. diff --git a/doc/manual/list_of_ndsps.rst b/doc/manual/list_of_ndsps.rst new file mode 100644 index 000000000..74237b22a --- /dev/null +++ b/doc/manual/list_of_ndsps.rst @@ -0,0 +1,28 @@ +List of available NDSPs +======================= + +The following network device support packages are available for ARTIQ. If you would like to add yours to this list, just send us an email or a pull request. + ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Equipment | Nix package | Conda package | Documentation | URL | ++=================================+===================================+==================================+=====================================================================================================+==============================================+ +| PDQ2 | Not available | Not available | `HTML `_ | https://github.com/m-labs/pdq | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Lab Brick Digital Attenuator | ``lda`` | ``lda`` | `HTML `_ | https://github.com/m-labs/lda | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Novatech 409B | ``novatech409b`` | ``novatech409b`` | `HTML `_ | https://github.com/m-labs/novatech409b | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Thorlabs T-Cubes | ``thorlabs_tcube`` | ``thorlabs_tcube`` | `HTML `_ | https://github.com/m-labs/thorlabs_tcube | 
++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Korad KA3005P | ``korad_ka3005p`` | ``korad_ka3005p`` | `HTML `_ | https://github.com/m-labs/korad_ka3005p | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Newfocus 8742 | ``newfocus8742`` | ``newfocus8742`` | `HTML `_ | https://github.com/quartiq/newfocus8742 | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Princeton Instruments PICam | Not available | Not available | Not available | https://github.com/quartiq/picam | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| Anel HUT2 power distribution | ``hut2`` | ``hut2`` | `HTML `_ | https://github.com/quartiq/hut2 | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| TOPTICA lasers | ``toptica-lasersdk-artiq`` | ``toptica-lasersdk-artiq`` | Not available | https://github.com/quartiq/lasersdk-artiq | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ +| HighFinesse wavemeters | ``highfinesse-net`` | ``highfinesse-net`` | `HTML `_ | https://github.com/quartiq/highfinesse-net | ++---------------------------------+-----------------------------------+----------------------------------+-----------------------------------------------------------------------------------------------------+----------------------------------------------+ diff --git a/doc/manual/make.bat b/doc/manual/make.bat new file mode 100644 index 000000000..5567edd05 --- /dev/null +++ b/doc/manual/make.bat @@ -0,0 +1,263 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +REM TODO: missing latexpdf and latexpdfja cmds of ./Makefile + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set BUILDDIR=_build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . +set I18NSPHINXOPTS=%SPHINXOPTS% . +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% + set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. singlehtml to make a single large HTML file + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. 
qthelp to make HTML files and a qthelp project + echo. devhelp to make HTML files and a Devhelp project + echo. epub to make an epub + echo. epub3 to make an epub3 + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. text to make text files + echo. man to make manual pages + echo. texinfo to make Texinfo files + echo. gettext to make PO message catalogs + echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 1>NUL 2>NUL +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "singlehtml" ( + %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + REM echo.^> qcollectiongenerator %BUILDDIR%\qthelp\ARTIQ.qhcp + REM echo.To view the help file: + REM echo.^> assistant -collectionFile %BUILDDIR%\qthelp\ARTIQ.ghc + goto end +) + +if "%1" == "devhelp" ( + %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. + goto end +) + +if "%1" == "epub" ( + %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub file is in %BUILDDIR%/epub. 
+ goto end +) + +if "%1" == "epub3" ( + %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + if errorlevel 1 exit /b 1 + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "text" ( + %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The text files are in %BUILDDIR%/text. + goto end +) + +if "%1" == "man" ( + %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The manual pages are in %BUILDDIR%/man. + goto end +) + +if "%1" == "texinfo" ( + %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. + goto end +) + +if "%1" == "gettext" ( + %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The message catalogs are in %BUILDDIR%/locale. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + if errorlevel 1 exit /b 1 + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + if errorlevel 1 exit /b 1 + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + if errorlevel 1 exit /b 1 + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. + goto end +) + +:end diff --git a/doc/manual/management_system.rst b/doc/manual/management_system.rst index cd771b226..aafbb0361 100644 --- a/doc/manual/management_system.rst +++ b/doc/manual/management_system.rst @@ -1,7 +1,7 @@ Management system ================= -The management system described below is optional: experiments can be run one by one using ``artiq_run``, and the controllers can run stand-alone (without a controller manager). For their very first steps with ARTIQ or in simple or particular cases, users do not need to deploy the management system. +The management system described below is optional: experiments can be run one by one using :mod:`~artiq.frontend.artiq_run`, and the controllers can run stand-alone (without a controller manager). 
For their very first steps with ARTIQ or in simple or particular cases, users do not need to deploy the management system. Components ********** @@ -9,14 +9,14 @@ Components Master ------ -The master is responsible for managing the parameter and device databases, the experiment repository, scheduling and running experiments, archiving results, and distributing real-time results. +The :ref:`master ` is responsible for managing the parameter and device databases, the experiment repository, scheduling and running experiments, archiving results, and distributing real-time results. The master is a headless component, and one or several clients (command-line or GUI) use the network to interact with it. Controller manager ------------------ -Controller managers are responsible for running and stopping controllers on a machine. There is one controller manager per network node that runs controllers. +Controller managers (started using the ``artiq_ctlmgr`` command that is part of the ``artiq-comtools`` package) are responsible for running and stopping controllers on a machine. There is one controller manager per network node that runs controllers. A controller manager connects to the master and uses the device database to determine what controllers need to be run. Changes in the device database are tracked by the manager and controllers are started and stopped accordingly. @@ -27,12 +27,12 @@ Controller managers use the local network address of the connection to the maste Command-line client ------------------- -The command-line client connects to the master and permits modification and monitoring of the databases, monitoring the experiment schedule and log, and submitting experiments. +The :ref:`command-line client ` connects to the master and permits modification and monitoring of the databases, monitoring the experiment schedule and log, and submitting experiments. Dashboard --------- -The dashboard connects to the master and is the main way of interacting with it. The main features of the dashboard are scheduling of experiments, setting of their arguments, examining the schedule, displaying real-time results, and debugging TTL and DDS channels in real time. +The :ref:`dashboard ` connects to the master and is the main way of interacting with it. The main features of the dashboard are scheduling of experiments, setting of their arguments, examining the schedule, displaying real-time results, and debugging TTL and DDS channels in real time. Experiment scheduling ********************* @@ -40,18 +40,25 @@ Experiment scheduling Basics ------ -To use hardware resources more efficiently, potentially compute-intensive pre-computation and analysis phases of other experiments is executed in parallel with the body of the current experiment that accesses the hardware. +To use hardware resources more efficiently, potentially compute-intensive pre-computation and analysis phases of other experiments are executed in parallel with the body of the current experiment that accesses the hardware. + +.. seealso:: These steps are implemented in :class:`~artiq.language.environment.Experiment`. However, user-written experiments should usually derive from (sub-class) :class:`artiq.language.environment.EnvExperiment`. Experiments are divided into three phases that are programmed by the user: -1. The preparation stage, that pre-fetches and pre-computes any data that necessary to run the experiment. Users may implement this stage by overloading the ``prepare`` method. 
It is not permitted to access hardware in this stage, as doing so may conflict with other experiments using the same devices. -2. The running stage, that corresponds to the body of the experiment, and typically accesses hardware. Users must implement this stage and overload the ``run`` method. -3. The analysis stage, where raw results collected in the running stage are post-processed and may lead to updates of the parameter database. This stage may be implemented by overloading the ``analyze`` method. +1. The **preparation** stage, that pre-fetches and pre-computes any data that is necessary to run the experiment. Users may implement this stage by overloading the :meth:`~artiq.language.environment.Experiment.prepare` method. It is not permitted to access hardware in this stage, as doing so may conflict with other experiments using the same devices. +2. The **running** stage, that corresponds to the body of the experiment, and typically accesses hardware. Users must implement this stage and overload the :meth:`~artiq.language.environment.Experiment.run` method. +3. The **analysis** stage, where raw results collected in the running stage are post-processed and may lead to updates of the parameter database. This stage may be implemented by overloading the :meth:`~artiq.language.environment.Experiment.analyze` method. -.. note:: Only the ``run`` method implementation is mandatory; if the experiment does not fit into the pipelined scheduling model, it can leave one or both of the other methods empty (which is the default). +.. note:: Only the :meth:`~artiq.language.environment.Experiment.run` method implementation is mandatory; if the experiment does not fit into the pipelined scheduling model, it can leave one or both of the other methods empty (which is the default). The three phases of several experiments are then executed in a pipelined manner by the scheduler in the ARTIQ master: experiment A executes its preparation stage, then experiment A executes its running stage while experiment B executes its preparation stage, and so on. +.. note:: + The next experiment (B) may start :meth:`~artiq.language.environment.Experiment.run`\ ing before all events placed into (core device) RTIO buffers by the previous experiment (A) have been executed. These events can then execute while experiment B is :meth:`~artiq.language.environment.Experiment.run`\ ing. Using :meth:`~artiq.coredevice.core.Core.reset` clears the RTIO buffers, discarding pending events, including those left over from A. + + Interactions between events of different experiments can be avoided by preventing the :meth:`~artiq.language.environment.Experiment.run` method of experiment A from returning until all events have been executed. This is discussed in the section on RTIO :ref:`rtio-handover-synchronization`. + Priorities and timed runs ------------------------- @@ -70,18 +77,18 @@ If there are other experiments with higher priority (e.g. a high-priority timed Otherwise, ``pause()`` returns immediately. To check whether ``pause()`` would in fact *not* return immediately, use :meth:`artiq.master.scheduler.Scheduler.check_pause`. -The experiment must place the hardware in a safe state and disconnect from the core device (typically, by using ``self.core.comm.close()``) before calling ``pause``. +The experiment must place the hardware in a safe state and disconnect from the core device (typically, by calling ``self.core.comm.close()`` from the kernel, which is equivalent to :meth:`artiq.coredevice.core.Core.close`) before calling ``pause()``.
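To make the pipelining and pausing rules concrete, here is a minimal sketch of an experiment that implements all three phases and yields to higher-priority runs between scan points. It is not taken from the manual: the ``core`` and ``scheduler`` device names follow the conventions used throughout this documentation (the ``scheduler`` virtual device is described just below), while ``frequencies``, ``measure_point`` and the scan itself are purely illustrative. ::

    from artiq.experiment import *


    class ScanSketch(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("scheduler")

        def prepare(self):
            # preparation stage: pre-compute data, no hardware access allowed
            self.frequencies = [10*MHz + i*kHz for i in range(100)]
            self.results = []

        @kernel
        def measure_point(self, f):
            # running stage, one scan point: access hardware with some slack
            self.core.break_realtime()
            # ... program DDSes/TTLs at frequency f and acquire one data point ...
            return 0

        def run(self):
            for f in self.frequencies:
                self.results.append(self.measure_point(f))
                if self.scheduler.check_pause():
                    # put the hardware in a safe state, disconnect, then yield
                    self.core.comm.close()
                    self.scheduler.pause()

        def analyze(self):
            # analysis stage: post-process self.results on the host
            pass

After ``pause()`` returns, i.e. when the scheduler resumes this experiment, the next kernel call re-establishes the connection to the core device.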
-Accessing the ``pause`` and ``check_pause`` methods is done through a virtual device called ``scheduler`` that is accessible to all experiments. The scheduler virtual device is requested like regular devices using ``get_device`` or ``attr_device``. +Accessing the ``pause()`` and :meth:`~artiq.master.scheduler.Scheduler.check_pause` methods is done through a virtual device called ``scheduler`` that is accessible to all experiments. The scheduler virtual device is requested like regular devices using :meth:`~artiq.language.environment.HasEnvironment.get_device` (``self.get_device()``) or :meth:`~artiq.language.environment.HasEnvironment.setattr_device` (``self.setattr_device()``). -``check_pause`` can be called (via RPC) from a kernel, but ``pause`` must not. +:meth:`~artiq.master.scheduler.Scheduler.check_pause` can be called (via RPC) from a kernel, but ``pause()`` must not. Multiple pipelines ------------------ Multiple pipelines can operate in parallel inside the same master. It is the responsibility of the user to ensure that experiments scheduled in one pipeline will never conflict with those of another pipeline over resources (e.g. same devices). -Pipelines are identified by their name, and are automatically created (when an experiment is scheduled with a pipeline name that does not exist) and destroyed (when it runs empty). +Pipelines are identified by their name, and are automatically created (when an experiment is scheduled with a pipeline name that does not exist) and destroyed (when they run empty). Git integration @@ -136,25 +143,44 @@ CCBs are used by experiments to configure applets in the dashboard, for example .. autoclass:: artiq.dashboard.applets_ccb.AppletsCCBDock :members: + Front-end tool reference ************************ + +.. _frontend-artiq-master: + +artiq_master +------------ + .. argparse:: :ref: artiq.frontend.artiq_master.get_argparser :prog: artiq_master -.. argparse:: - :ref: artiq.frontend.artiq_ctlmgr.get_argparser - :prog: artiq_ctlmgr + +.. _frontend-artiq-client: + +artiq_client +------------ .. argparse:: :ref: artiq.frontend.artiq_client.get_argparser :prog: artiq_client + +.. _frontend-artiq-dashboard: + +artiq_dashboard +--------------- + .. argparse:: :ref: artiq.frontend.artiq_dashboard.get_argparser :prog: artiq_dashboard + +artiq_session +------------- + .. argparse:: :ref: artiq.frontend.artiq_session.get_argparser :prog: artiq_session diff --git a/doc/manual/ndsp_reference.rst b/doc/manual/ndsp_reference.rst deleted file mode 100644 index 721c15abe..000000000 --- a/doc/manual/ndsp_reference.rst +++ /dev/null @@ -1,165 +0,0 @@ -Network device support packages reference -========================================= - -Core device logging controller ------------------------------- - -.. argparse:: - :ref: artiq.frontend.aqctl_corelog.get_argparser - :prog: aqctl_corelog - -Lab Brick Digital Attenuator (LDA) ----------------------------------- - -Driver -++++++ - -.. automodule:: artiq.devices.lda.driver - :members: - -Controller -++++++++++ - -On Linux, you need to give your user access to the USB device. - -You can do that by creating a file under ``/etc/udev/rules.d/`` named -``99-lda.rules`` with the following content:: - - SUBSYSTEM=="usb", ATTR{idVendor}=="041f", MODE="0666" - -Then you need to tell udev to reload its rules:: - - $ sudo invoke-rc.d udev reload - -You must also unplug/replug your device if it was already plugged in. 
- -Then, to run the Lab Brick Digital Attenuator (LDA) controller:: - - $ aqctl_lda -d SN:xxxxx - -The serial number must contain exactly 5 digits, prepend it with the necessary number of 0s. -Also, the ``SN:`` prefix is mandatory. - -You can choose the LDA model with the ``-P`` parameter. The default is LDA-102. - -.. argparse:: - :ref: artiq.frontend.aqctl_lda.get_argparser - :prog: aqctl_lda - -Korad KA3005P -------------- - -Driver -++++++ - -.. automodule:: artiq.devices.korad_ka3005p.driver - :members: - -Controller -++++++++++ - -.. argparse:: - :ref: artiq.frontend.aqctl_korad_ka3005p.get_argparser - :prog: aqctl_korad_ka3005p - -Novatech 409B -------------- - -Driver -++++++ - -.. automodule:: artiq.devices.novatech409b.driver - :members: - -Controller -++++++++++ - -.. argparse:: - :ref: artiq.frontend.aqctl_novatech409b.get_argparser - :prog: aqctl_novatech409b - -Thorlabs T-Cube ---------------- - -.. note:: - When power is applied before the USB connection, some devices will enter a state where they fail to report the completion of commands. When using the ARTIQ controller, this cause certain function calls to never return and freeze the controller. To prevent this, connect USB first and then power up the device. When a device has entered the problematic state, power-cycling it while keeping the USB connection active also resolves the problem. - -TDC001 Driver -+++++++++++++ - -.. autoclass:: artiq.devices.thorlabs_tcube.driver.Tdc - :members: - -TPZ001 Driver -+++++++++++++ - -.. autoclass:: artiq.devices.thorlabs_tcube.driver.Tpz - :members: - -Controller -++++++++++ - -.. argparse:: - :ref: artiq.frontend.aqctl_thorlabs_tcube.get_argparser - :prog: aqctl_thorlabs - -.. _tdc001-controller-usage-example: - -TDC001 controller usage example -+++++++++++++++++++++++++++++++ - -First, run the TDC001 controller:: - - $ aqctl_thorlabs_tcube -P TDC001 -d /dev/ttyUSBx - -.. note:: - On Windows the serial port (the ``-d`` argument) will be of the form ``COMx``. - -.. note:: - Anything compatible with `serial_for_url `_ - can be given as a device in ``-d`` argument. - - For instance, if you want to specify the Vendor/Product ID and the USB Serial Number, you can do: - - ``-d "hwgrep://: SNR="``. - for instance: - - ``-d "hwgrep://0403:faf0 SNR=83852734"`` - - The hwgrep URL works on both Linux and Windows. - -Then, send commands to it via the ``artiq_rpctool`` utility:: - - $ artiq_rpctool ::1 3255 list-targets - Target(s): tdc001 - $ artiq_rpctool ::1 3255 call move_relative 10000 # will move forward - $ artiq_rpctool ::1 3255 call move_relative -10000 # will move backward - $ artiq_rpctool ::1 3255 call move_absolute 20000 # absolute move to 20000 - $ artiq_rpctool ::1 3255 call move_home # will go back to home position - $ artiq_rpctool ::1 3255 call close # close the device - -TPZ001 controller usage example -+++++++++++++++++++++++++++++++ - -First, run the TPZ001 controller:: - - $ aqctl_thorlabs_tcube -P TPZ001 -d /dev/ttyUSBx - -.. note:: - On Windows the serial port (the ``-d`` argument) will be of the form ``COMx``. - -.. note:: - See the :ref:`TDC001 documentation ` for - how to specify the USB Serial Number of the device instead of the - /dev/ttyUSBx (or the COMx name). 
- -Then, send commands to it via the ``artiq_rpctool`` utility:: - - $ artiq_rpctool ::1 3255 list-targets - Target(s): tpz001 - $ artiq_rpctool ::1 3255 call set_output_volts 15 # set output voltage to 15 V - $ artiq_rpctool ::1 3255 call get_output_volts # read back output voltage - 15 - $ artiq_rpctool ::1 3255 call set_tpz_io_settings 150 1 # set maximum output voltage to 150 V - $ artiq_rpctool ::1 3255 call set_output_volts 150 # set output voltage to 150 V - $ artiq_rpctool ::1 3255 call close # close the device diff --git a/doc/manual/protocols_reference.rst b/doc/manual/protocols_reference.rst deleted file mode 100644 index 247d834b7..000000000 --- a/doc/manual/protocols_reference.rst +++ /dev/null @@ -1,43 +0,0 @@ -Protocols reference -=================== - -:mod:`artiq.protocols.asyncio_server` module --------------------------------------------- - -.. automodule:: artiq.protocols.asyncio_server - :members: - - -:mod:`artiq.protocols.pyon` module ----------------------------------- - -.. automodule:: artiq.protocols.pyon - :members: - - -:mod:`artiq.protocols.pc_rpc` module ------------------------------------- - -.. automodule:: artiq.protocols.pc_rpc - :members: - - -:mod:`artiq.protocols.fire_and_forget` module ---------------------------------------------- - -.. automodule:: artiq.protocols.fire_and_forget - :members: - - -:mod:`artiq.protocols.sync_struct` module ------------------------------------------ - -.. automodule:: artiq.protocols.sync_struct - :members: - - -:mod:`artiq.protocols.remote_exec` module ------------------------------------------ - -.. automodule:: artiq.protocols.remote_exec - :members: diff --git a/doc/manual/rtio.rst b/doc/manual/rtio.rst index a08edad32..67c8bdcaf 100644 --- a/doc/manual/rtio.rst +++ b/doc/manual/rtio.rst @@ -55,28 +55,29 @@ Then later, when the wall clock reaches the respective timestamps the RTIO gatew The following diagram shows what is going on at the different levels of the software and gateware stack (assuming one machine unit of time is 1 ns): .. wavedrom:: + { - signal: [ - {name: 'kernel', wave: 'x32.3x', data: ['on()', 'delay(2*us)', 'off()'], node: '..A.XB'}, - {name: 'now', wave: '2...2.', data: ['7000', '9000'], node: '..P..Q'}, + "signal": [ + {"name": "kernel", "wave": "x32.3x", "data": ["on()", "delay(2*us)", "off()"], "node": "..A.XB"}, + {"name": "now", "wave": "2...2.", "data": ["7000", "9000"], "node": "..P..Q"}, {}, - {name: 'slack', wave: 'x2x.2x', data: ['4400', '5800']}, + {"name": "slack", "wave": "x2x.2x", "data": ["4400", "5800"]}, {}, - {name: 'rtio_counter', wave: 'x2x|2x|2x2x', data: ['2600', '3200', '7000', '9000'], node: ' V.W'}, - {name: 'ttl', wave: 'x1.0', node: ' R.S', phase: -6.5}, - { node: ' T.U', phase: -6.5} - ], - edge: [ - 'A~>R', 'P~>R', 'V~>R', 'B~>S', 'Q~>S', 'W~>S', - 'R-T', 'S-U', 'T<->U 2µs' + {"name": "rtio_counter", "wave": "x2x|2x|2x2x", "data": ["2600", "3200", "7000", "9000"], "node": " V.W"}, + {"name": "ttl", "wave": "x1.0", "node": " R.S", "phase": -6.5}, + { "node": " T.U", "phase": -6.5} ], + "edge": [ + "A~>R", "P~>R", "V~>R", "B~>S", "Q~>S", "W~>S", + "R-T", "S-U", "T<->U 2µs" + ] } The sequence is exactly equivalent to:: ttl.pulse(2*us) -The :meth:`artiq.coredevice.ttl.TTLOut.pulse` method advances the timeline cursor (using ``delay()``) while other methods such as :meth:`artiq.coredevice.ttl.TTLOut.on`, :meth:`artiq.coredevice.ttl.TTLOut.off`, :meth:`artiq.coredevice.dds._DDSGeneric.set`, or the ``set_*()`` methods of :class:`artiq.coredevice.spi.SPIMaster` do not. 
The latter are called *zero-duration* methods. +The :meth:`artiq.coredevice.ttl.TTLOut.pulse` method advances the timeline cursor (using ``delay()``) while other methods such as :meth:`artiq.coredevice.ttl.TTLOut.on`, :meth:`artiq.coredevice.ttl.TTLOut.off`, or :meth:`artiq.coredevice.ad9914.set` do not. The latter are called *zero-duration* methods. Underflow exceptions -------------------- @@ -95,19 +96,20 @@ The experiment attempts to handle the exception by moving the cursor forward and ttl.on() .. wavedrom:: + { - signal: [ - {name: 'kernel', wave: 'x34..2.3x', data: ['on()', 'RTIOUnderflow', 'delay()', 'on()'], node: '..AB....C', phase: -3}, - {name: 'now_mu', wave: '2.....2', data: ['t0', 't1'], node: '.D.....E', phase: -4}, + "signal": [ + {"name": "kernel", "wave": "x34..2.3x", "data": ["on()", "RTIOUnderflow", "delay()", "on()"], "node": "..AB....C", "phase": -3}, + {"name": "now_mu", "wave": "2.....2", "data": ["t0", "t1"], "node": ".D.....E", "phase": -4}, {}, - {name: 'slack', wave: '2x....2', data: ['< 0', '> 0'], node: '.T', phase: -4}, + {"name": "slack", "wave": "2x....2", "data": ["< 0", "> 0"], "node": ".T", "phase": -4}, {}, - {name: 'rtio_counter', wave: 'x2x.2x....2x2', data: ['t0', '> t0', '< t1', 't1'], node: '............P'}, - {name: 'tll', wave: 'x...........1', node: '.R..........S', phase: -.5} + {"name": "rtio_counter", "wave": "x2x.2x....2x2", "data": ["t0", "> t0", "< t1", "t1"], "node": "............P"}, + {"name": "ttl", "wave": "x...........1", "node": ".R..........S", "phase": -0.5} ], - edge: [ - 'A-~>R forbidden', 'D-~>R', 'T-~B exception', - 'C~>S allowed', 'E~>S', 'P~>S' + "edge": [ + "A-~>R forbidden", "D-~>R", "T-~B exception", + "C~>S allowed", "E~>S", "P~>S" ] } @@ -117,6 +119,24 @@ To track down ``RTIOUnderflows`` in an experiment there are a few approaches: code. * The :any:`integrated logic analyzer ` shows the timeline context that lead to the exception. The analyzer is always active and supports plotting of RTIO slack. RTIO slack is the difference between timeline cursor and wall clock time (``now - rtio_counter``). +Sequence errors +--------------- +A sequence error happens when the sequence of coarse timestamps cannot be supported by the gateware. For example, there may have been too many timeline rewinds. + +Internally, the gateware stores output events in an array of FIFO buffers (the "lanes") and the timestamps in each lane must be strictly increasing. If an event with a decreasing or equal timestamp is submitted, the gateware selects the next lane, wrapping around if the final lane is reached. If this lane also contains an event with a timestamp beyond the one being submitted then a sequence error occurs. See `this issue `_ for a real-life example of how this works. + +Notes: + +* Strictly increasing timestamps never cause sequence errors. +* Configuring the gateware with more lanes for the RTIO core reduces the frequency of sequence errors. +* The number of lanes is a hard limit on the number of simultaneous RTIO output events. +* Whether a particular sequence of timestamps causes a sequence error or not is fully deterministic (starting from a known RTIO state, e.g. after a reset). Adding a constant offset to the whole sequence does not affect the result. +* Zero-duration methods (such as :meth:`artiq.coredevice.ttl.TTLOut.on()`) do not advance the timeline and so will consume additional lanes if they are scheduled simultaneously. Adding a tiny delay will prevent this (e.g.
``delay_mu(self.core.ref_multiplier)``, at least one coarse rtio cycle). + +The offending event is discarded and the RTIO core keeps operating. + +This error is reported asynchronously via the core device log: for performance reasons with DRTIO, the CPU does not wait for an error report from the satellite after writing an event. Therefore, it is not possible to raise an exception precisely. + Collisions ---------- A collision happens when more than one event is submitted on a given channel with the same coarse timestamp, and that channel does not implement replacement behavior or the fine timestamps are different. @@ -141,31 +161,30 @@ Input channels and events Input channels detect events, timestamp them, and place them in a buffer for the experiment to read out. The following example counts the rising edges occurring during a precisely timed 500 ns interval. -If more than 20 rising edges were received it outputs a pulse:: +If more than 20 rising edges are received, it outputs a pulse:: - input.gate_rising(500*ns) - if input.count() > 20: + if input.count(input.gate_rising(500*ns)) > 20: delay(2*us) output.pulse(500*ns) -The :meth:`artiq.coredevice.ttl.TTLInOut.count` method of an input channel can lead to a situation of negative slack (timeline cursor ``now`` smaller than the current wall clock ``rtio_counter``): -The :meth:`artiq.coredevice.ttl.TTLInOut.gate_rising` method leaves the timeline cursor at the closure time of the gate and ``count()`` must necessarily wait until the gate closing event has actually been executed which is sometime with ``rtio_counter > now``. -In these situations where ``count()`` leads to a synchronization of timeline cursor and wall clock, a ``delay()`` is necessary to reestablish positive slack so that output events can be placed. +The :meth:`artiq.coredevice.ttl.TTLInOut.count` method of an input channel will often lead to a situation of negative slack (timeline cursor ``now`` smaller than the current wall clock ``rtio_counter``): +The :meth:`artiq.coredevice.ttl.TTLInOut.gate_rising` method leaves the timeline cursor at the closing time of the gate. ``count()`` must necessarily wait until the gate closing event has actually been executed, at which point ``rtio_counter > now``: ``count()`` synchronizes timeline cursor (``now``) and wall clock (``rtio_counter``). In these situations, a ``delay()`` is necessary to re-establish positive slack so that further output events can be placed. Similar situations arise with methods such as :meth:`artiq.coredevice.ttl.TTLInOut.sample_get` and :meth:`artiq.coredevice.ttl.TTLInOut.watch_done`. .. 
wavedrom:: + { - signal: [ - {name: 'kernel', wave: '3..5.|2.3..x..', data: ['gate_rising()', 'count()', 'delay()', 'pulse()'], node: '.A.B..C.ZD.E'}, - {name: 'now_mu', wave: '2.2..|..2.2.', node: '.P.Q....XV.W'}, + "signal": [ + {"name": "kernel", "wave": "3..5.|2.3..x..", "data": ["gate_rising()", "count()", "delay()", "pulse()"], "node": ".A.B..C.ZD.E"}, + {"name": "now_mu", "wave": "2.2..|..2.2.", "node": ".P.Q....XV.W"}, {}, {}, - {name: 'input gate', wave: 'x1.0', node: '.T.U', phase: -2.5}, - {name: 'output', wave: 'x1.0', node: '.R.S', phase: -10.5} + {"name": "input gate", "wave": "x1.0", "node": ".T.U", "phase": -2.5}, + {"name": "output", "wave": "x1.0", "node": ".R.S", "phase": -10.5} ], - edge: [ - 'A~>T', 'P~>T', 'B~>U', 'Q~>U', 'U~>C', 'D~>R', 'E~>S', 'V~>R', 'W~>S' + "edge": [ + "A~>T", "P~>T", "B~>U", "Q~>U", "U~>C", "D~>R", "E~>S", "V~>R", "W~>S" ] } @@ -199,45 +218,55 @@ This is demonstrated in the following example where a pulse is split across two Here, ``run()`` calls ``k1()`` which exits leaving the cursor one second after the rising edge and ``k2()`` then submits a falling edge at that position. .. wavedrom:: + { - signal: [ - {name: 'kernel', wave: '3.2..2..|3.', data: ['k1: on()', 'k1: delay(dt)', 'k1->k2 swap', 'k2: off()'], node: '..A........B'}, - {name: 'now', wave: '2....2...|.', data: ['t', 't+dt'], node: '..P........Q'}, + "signal": [ + {"name": "kernel", "wave": "3.2..2..|3.", "data": ["k1: on()", "k1: delay(dt)", "k1->k2 swap", "k2: off()"], "node": "..A........B"}, + {"name": "now", "wave": "2....2...|.", "data": ["t", "t+dt"], "node": "..P........Q"}, {}, {}, - {name: 'rtio_counter', wave: 'x......|2xx|2', data: ['t', 't+dt'], node: '........V...W'}, - {name: 'ttl', wave: 'x1...0', node: '.R...S', phase: -7.5}, - { node: ' T...U', phase: -7.5} - ], - edge: [ - 'A~>R', 'P~>R', 'V~>R', 'B~>S', 'Q~>S', 'W~>S', - 'R-T', 'S-U', 'T<->U dt' + {"name": "rtio_counter", "wave": "x......|2xx|2", "data": ["t", "t+dt"], "node": "........V...W"}, + {"name": "ttl", "wave": "x1...0", "node": ".R...S", "phase": -7.5}, + { "node": " T...U", "phase": -7.5} ], + "edge": [ + "A~>R", "P~>R", "V~>R", "B~>S", "Q~>S", "W~>S", + "R-T", "S-U", "T<->U dt" + ] } +.. _rtio-handover-synchronization: + Synchronization --------------- The seamless handover of the timeline (cursor and events) across kernels and experiments implies that a kernel can exit long before the events it has submitted have been executed. If a previous kernel sets timeline cursor far in the future this effectively locks the system. -When a kernel should wait until all the events on a particular channel have been executed, use the :meth:`artiq.coredevice.ttl.TTLOut.sync` method of a channel: + +When a kernel should wait until all the events have been executed, use the :meth:`artiq.coredevice.core.Core.wait_until_mu` with a timestamp after (or at) the last event: .. 
wavedrom:: + { - signal: [ - {name: 'kernel', wave: 'x3x.|5.|x', data: ['on()', 'sync()'], node: '..A.....Y'}, - {name: 'now', wave: '2..', data: ['7000'], node: '..P'}, + "signal": [ + {"name": "kernel", "wave": "x3x.|5...|x", "data": ["on()", "wait_until_mu(7000)"], "node": "..A.....Y"}, + {"name": "now", "wave": "2..", "data": ["7000"], "node": "..P"}, {}, {}, - {name: 'rtio_counter', wave: 'x2x.|..2x', data: ['2000', '7000'], node: ' ....V'}, - {name: 'ttl', wave: 'x1', node: ' R', phase: -6.5}, - ], - edge: [ - 'A~>R', 'P~>R', 'V~>R', 'V~>Y' + {"name": "rtio_counter", "wave": "x2x.|..2x..", "data": ["2000", "7000"], "node": " ....V"}, + {"name": "ttl", "wave": "x1", "node": " R", "phase": -6.5} ], + "edge": [ + "A~>R", "P~>R", "V~>R", "V~>Y" + ] } +In many cases, :meth:`~artiq.language.core.now_mu` will return an appropriate timestamp:: + + self.core.wait_until_mu(now_mu()) + + RTIO reset ----------- diff --git a/doc/manual/utilities.rst b/doc/manual/utilities.rst index 1a049b94f..9e056e55e 100644 --- a/doc/manual/utilities.rst +++ b/doc/manual/utilities.rst @@ -12,71 +12,6 @@ Local running tool :ref: artiq.frontend.artiq_run.get_argparser :prog: artiq_run -Remote Procedure Call tool --------------------------- - -.. argparse:: - :ref: artiq.frontend.artiq_rpctool.get_argparser - :prog: artiq_rpctool - -This tool is the preferred way of handling simple ARTIQ controllers. -Instead of writing a client for very simple cases you can just use this tool -in order to call remote functions of an ARTIQ controller. - -* Listing existing targets - - The ``list-targets`` sub-command will print to standard output the - target list of the remote server:: - - $ artiq_rpctool hostname port list-targets - -* Listing callable functions - - The ``list-methods`` sub-command will print to standard output a sorted - list of the functions you can call on the remote server's target. - - The list will contain function names, signatures (arguments) and - docstrings. - - If the server has only one target, you can do:: - - $ artiq_rpctool hostname port list-methods - - Otherwise you need to specify the target, using the ``-t target`` - option:: - - $ artiq_rpctool hostname port list-methods -t target_name - -* Remotely calling a function - - The ``call`` sub-command will call a function on the specified remote - server's target, passing the specified arguments. - Like with the previous sub-command, you only need to provide the target - name (with ``-t target``) if the server hosts several targets. - - The following example will call the ``set_attenuation`` method of the - Lda controller with the argument ``5``:: - - $ artiq_rpctool ::1 3253 call -t lda set_attenuation 5 - - In general, to call a function named ``f`` with N arguments named - respectively ``x1, x2, ..., xN`` you can do:: - - $ artiq_rpctool hostname port call -t target f x1 x2 ... xN - - You can use Python syntax to compute arguments as they will be passed - to the ``eval()`` primitive. The numpy package is available in the namespace - as ``np``. 
Beware to use quotes to separate arguments which use spaces:: - - $ artiq_rpctool hostname port call -t target f '3 * 4 + 2' True '[1, 2]' - $ artiq_rpctool ::1 3256 call load_sample_values 'np.array([1.0, 2.0], dtype=float)' - - If the called function has a return value, it will get printed to - the standard output if the value is not None like in the standard - python interactive console:: - - $ artiq_rpctool ::1 3253 call get_attenuation - 5.0 dB Static compiler --------------- @@ -104,77 +39,95 @@ Flashing/Loading tool :ref: artiq.frontend.artiq_flash.get_argparser :prog: artiq_flash -.. _core-device-configuration-tool: +.. _core-device-management-tool: -Core device configuration tool ------------------------------- +Core device management tool +--------------------------- -The artiq_coreconfig utility gives remote access to the :ref:`core-device-flash-storage`. +The artiq_coremgmt utility gives remote access to the core device logs, the :ref:`core-device-flash-storage`, and other management functions. -To use this tool, you need to specify a ``device_db.py`` device database file which contains a ``comm`` device (an example is provided in ``examples/master/device_db.py``). This tells the tool how to connect to the core device and with which parameters (e.g. IP address, TCP port). When not specified, the artiq_coreconfig utility will assume that there is a file named ``device_db.py`` in the current directory. +To use this tool, you need to specify a ``device_db.py`` device database file which contains a ``comm`` device (an example is provided in ``examples/master/device_db.py``). This tells the tool how to connect to the core device and with which parameters (e.g. IP address, TCP port). When not specified, the artiq_coremgmt utility will assume that there is a file named ``device_db.py`` in the current directory. + +To read core device logs:: + + $ artiq_coremgmt log + +To set core device log level and UART log level (possible levels are ``TRACE``, ``DEBUG``, ``INFO``, ``WARN`` and ``ERROR``):: + + $ artiq_coremgmt log set_level LEVEL + $ artiq_coremgmt log set_uart_level LEVEL + +Note that enabling the ``TRACE`` log level results in small core device slowdown, and printing large amounts of log messages to the UART results in significant core device slowdown. To read the record whose key is ``mac``:: - $ artiq_coreconfig read mac + $ artiq_coremgmt config read mac To write the value ``test_value`` in the key ``my_key``:: - $ artiq_coreconfig write -s my_key test_value - $ artiq_coreconfig read my_key + $ artiq_coremgmt config write -s my_key test_value + $ artiq_coremgmt config read my_key b'test_value' You can also write entire files in a record using the ``-f`` parameter. 
This is useful for instance to write the startup and idle kernels in the flash storage:: - $ artiq_coreconfig write -f idle_kernel idle.elf - $ artiq_coreconfig read idle_kernel | head -c9 + $ artiq_coremgmt config write -f idle_kernel idle.elf + $ artiq_coremgmt config read idle_kernel | head -c9 b'\x7fELF You can write several records at once:: - $ artiq_coreconfig write -s key1 value1 -f key2 filename -s key3 value3 + $ artiq_coremgmt config write -s key1 value1 -f key2 filename -s key3 value3 To remove the previously written key ``my_key``:: - $ artiq_coreconfig delete my_key + $ artiq_coremgmt config delete my_key You can remove several keys at once:: - $ artiq_coreconfig delete key1 key2 + $ artiq_coremgmt config delete key1 key2 To erase the entire flash storage area:: - $ artiq_coreconfig erase + $ artiq_coremgmt config erase You do not need to remove a record in order to change its value, just overwrite it:: - $ artiq_coreconfig write -s my_key some_value - $ artiq_coreconfig write -s my_key some_other_value - $ artiq_coreconfig read my_key + $ artiq_coremgmt config write -s my_key some_value + $ artiq_coremgmt config write -s my_key some_other_value + $ artiq_coremgmt config read my_key b'some_other_value' .. argparse:: - :ref: artiq.frontend.artiq_coreconfig.get_argparser - :prog: artiq_coreconfig + :ref: artiq.frontend.artiq_coremgmt.get_argparser + :prog: artiq_coremgmt -Core device log download tool ------------------------------ +Core device logging controller +------------------------------ .. argparse:: - :ref: artiq.frontend.artiq_corelog.get_argparser - :prog: artiq_corelog + :ref: artiq.frontend.aqctl_corelog.get_argparser + :prog: aqctl_corelog .. _core-device-rtio-analyzer-tool: Core device RTIO analyzer tool ------------------------------ +:mod:`~artiq.frontend.artiq_coreanalyzer` is a tool to convert core device RTIO logs to VCD waveform files that are readable by third-party tools such as GtkWave. This tool extracts pre-recorded data from an ARTIQ core device buffer (or from a file with the ``-r`` option), and converts it to a standard VCD file format. See :ref:`rtio-analyzer-example` for an example, or :mod:`artiq.test.coredevice.test_analyzer` for a relevant unit test. + .. argparse:: :ref: artiq.frontend.artiq_coreanalyzer.get_argparser :prog: artiq_coreanalyzer -Data to InfluxDB bridge ------------------------ +.. note:: + The RTIO analyzer does not support SAWG. + +.. _routing-table-tool: + +DRTIO routing table manipulation tool +------------------------------------- .. 
argparse:: - :ref: artiq.frontend.artiq_influxdb.get_argparser - :prog: artiq_influxdb + :ref: artiq.frontend.artiq_route.get_argparser + :prog: artiq_route diff --git a/doc/slides/artiq_overview.tex b/doc/slides/artiq_overview.tex index e20655e5b..9f9851aeb 100644 --- a/doc/slides/artiq_overview.tex +++ b/doc/slides/artiq_overview.tex @@ -90,8 +90,8 @@ inner sep=.3mm] at (current page.south east) {% \footnotesize \begin{minted}[frame=leftline]{python} -trigger.sync() # wait for trigger input -start = now_mu() # capture trigger time +# wait for trigger input and capture timestamp +start = trigger.timestamp_mu(trigger.gate_rising(100*ms)) for i in range(3): delay(5*us) dds.pulse(900*MHz, 7*us) # first pulse 5 µs after trigger diff --git a/doc/slides/taaccs.tex b/doc/slides/taaccs.tex index df497c578..7b7f5dbc3 100644 --- a/doc/slides/taaccs.tex +++ b/doc/slides/taaccs.tex @@ -106,8 +106,8 @@ inner sep=.3mm] at (current page.south east) {% \footnotesize \begin{minted}[frame=leftline]{python} -trigger.sync() # wait for trigger input -start = now_mu() # capture trigger time +# wait for trigger input and capture timestamp +start = trigger.timestamp_mu(trigger.gate_rising(100*ms)) for i in range(3): delay(5*us) dds.pulse(900*MHz, 7*us) # first pulse 5 µs after trigger diff --git a/doc/wrpll_diagram.png b/doc/wrpll_diagram.png new file mode 100644 index 000000000..1085e0b15 Binary files /dev/null and b/doc/wrpll_diagram.png differ diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index c8d846cbf..000000000 --- a/setup.cfg +++ /dev/null @@ -1,7 +0,0 @@ -[versioneer] -VCS = git -style = pep440 -versionfile_source = artiq/_version.py -versionfile_build = artiq/_version.py -tag_prefix = -parentdir_prefix = artiq- diff --git a/setup.py b/setup.py index ae3c59cdc..b8b5d7e1f 100755 --- a/setup.py +++ b/setup.py @@ -6,15 +6,15 @@ import sys import versioneer -if not (3, 5, 3) <= sys.version_info[:3] < (3, 6, 0): - raise Exception("You need Python 3.5.3+ (but not 3.6+)") +if sys.version_info[:3] < (3, 5, 3): + raise Exception("You need Python 3.5.3+") # Depends on PyQt5, but setuptools cannot check for it. 
requirements = [ - "asyncserial", "numpy", "scipy", + "numpy", "scipy", "python-dateutil", "prettytable", "h5py", - "quamash", "pyqtgraph", "pygit2", "aiohttp", + "qasync", "pyqtgraph", "pygit2", "llvmlite_artiq", "pythonparser", "python-Levenshtein", ] @@ -22,25 +22,17 @@ console_scripts = [ "artiq_client = artiq.frontend.artiq_client:main", "artiq_compile = artiq.frontend.artiq_compile:main", "artiq_coreanalyzer = artiq.frontend.artiq_coreanalyzer:main", - "artiq_coreconfig = artiq.frontend.artiq_coreconfig:main", - "artiq_corelog = artiq.frontend.artiq_corelog:main", - "artiq_coreboot = artiq.frontend.artiq_coreboot:main", - "artiq_ctlmgr = artiq.frontend.artiq_ctlmgr:main", - "artiq_devtool = artiq.frontend.artiq_devtool:main", - "artiq_pcap = artiq.frontend.artiq_pcap:main", - "artiq_influxdb = artiq.frontend.artiq_influxdb:main", + "artiq_coremgmt = artiq.frontend.artiq_coremgmt:main", + "artiq_ddb_template = artiq.frontend.artiq_ddb_template:main", "artiq_master = artiq.frontend.artiq_master:main", "artiq_mkfs = artiq.frontend.artiq_mkfs:main", + "artiq_rtiomon = artiq.frontend.artiq_rtiomon:main", + "artiq_sinara_tester = artiq.frontend.artiq_sinara_tester:main", "artiq_session = artiq.frontend.artiq_session:main", - "artiq_rpctool = artiq.frontend.artiq_rpctool:main", + "artiq_route = artiq.frontend.artiq_route:main", "artiq_run = artiq.frontend.artiq_run:main", "artiq_flash = artiq.frontend.artiq_flash:main", - "aqctl_corelog = artiq.frontend.aqctl_corelog:main", - "aqctl_korad_ka3005p = artiq.frontend.aqctl_korad_ka3005p:main", - "aqctl_lda = artiq.frontend.aqctl_lda:main", - "aqctl_novatech409b = artiq.frontend.aqctl_novatech409b:main", - "aqctl_thorlabs_tcube = artiq.frontend.aqctl_thorlabs_tcube:main", ] gui_scripts = [ diff --git a/versioneer.py b/versioneer.py index f115f7998..93f1c661d 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,1543 +1,32 @@ - -# Version: 0.18 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. 
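For context on the ``console_scripts`` changes in the setup.py hunk above (new commands such as ``artiq_coremgmt``, ``artiq_ddb_template`` and ``artiq_route`` replacing the removed ones): an entry of the form ``name = package.module:callable`` is turned by setuptools into a small wrapper script. A rough, hedged sketch of that standard behaviour (illustrative only, not ARTIQ code; running it requires an ARTIQ installation)::

    # Sketch of what a console_scripts entry point such as
    # "artiq_coremgmt = artiq.frontend.artiq_coremgmt:main" resolves to.
    import sys
    from importlib import import_module

    def run_entry_point(spec):
        """Resolve a 'package.module:callable' spec and invoke it."""
        module_name, _, func_name = spec.partition(":")
        func = getattr(import_module(module_name), func_name)
        sys.exit(func())

    if __name__ == "__main__":
        run_entry_point("artiq.frontend.artiq_coremgmt:main")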
- - -## Quick Install - -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. 
- -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. - -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. `setup.py` and `.git/` are *not* siblings). 
The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other langauges) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. - -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. - -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - -### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. 
- - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - - -## License - -To make Versioneer easier to embed, all its code is dedicated to the public -domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . - -""" - -from __future__ import print_function -try: - import configparser -except ImportError: - import ConfigParser as configparser -import errno -import json import os -import re -import subprocess import sys - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_root(): - """Get the project root directory. - - We require that all commands are run from the project root, i.e. the - directory that contains setup.py, setup.cfg, and versioneer.py . - """ - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. 
- me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . - setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): - cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -LONG_VERSION_PY['git'] = ''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. 
Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. 
We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. 
"2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", "--abbrev=8", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:8] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: - pass - if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. 
- -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) +VERSION_FILE = """ +def get_version(): + return "{version}" """ - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except EnvironmentError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. - """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. 
- - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - print("unable to compute version") - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} - - def get_version(): - """Get the short version string for this project.""" - return get_versions()["version"] + override = os.getenv("VERSIONEER_OVERRIDE") + if override: + return override + srcroot = os.path.dirname(os.path.abspath(__file__)) + with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f: + version = f.read().strip() + version += ".unknown" + if os.path.exists(os.path.join(srcroot, "BETA")): + version += ".beta" + return version + +def write_to_version_file(filename, version): + os.unlink(filename) + with open(filename, "w") as f: + f.write(VERSION_FILE.format(version=version)) def get_cmdclass(): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. 
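For readability, the replacement version lookup added in the hunk above boils down to the following standalone sketch. ``MAJOR_VERSION`` and ``BETA`` are plain marker files at the repository root, and ``VERSIONEER_OVERRIDE`` lets a packager pin the exact version string; the explicit ``srcroot`` argument here stands in for the ``__file__``-relative lookup used in the patch::

    # Standalone sketch of the simplified get_version() introduced above.
    import os

    def get_version(srcroot):
        override = os.getenv("VERSIONEER_OVERRIDE")
        if override:
            # Packagers can pin the exact version string via the environment.
            return override
        with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f:
            version = f.read().strip()
        version += ".unknown"   # no VCS metadata is consulted in this scheme
        if os.path.exists(os.path.join(srcroot, "BETA")):
            version += ".beta"  # a BETA marker file flags a pre-release tree
        return version

    # With MAJOR_VERSION containing e.g. "6" and a BETA file present,
    # get_version(srcroot) returns "6.unknown.beta".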
- # Also see https://github.com/warner/python-versioneer/issues/52 - cmds = {} - # we add "version" to both distutils and setuptools - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? - # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? - # we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py @@ -1546,78 +35,14 @@ def get_cmdclass(): class cmd_build_py(_build_py): def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() + version = get_version() _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) + target_versionfile = os.path.join(self.build_lib, + "artiq", "_version.py") + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, version) cmds["build_py"] = cmd_build_py - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. - # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? 
- try: - from py2exe.distutils_buildexe import py2exe as _py2exe # py3 - except ImportError: - from py2exe.build_exe import py2exe as _py2exe # py2 - - class cmd_py2exe(_py2exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _py2exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: @@ -1627,196 +52,19 @@ def get_cmdclass(): class cmd_sdist(_sdist): def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions + version = get_version() + self._versioneer_generated_version = version # unless we update this, the command will keep using the old # version - self.distribution.metadata.version = versions["version"] + self.distribution.metadata.version = version return _sdist.run(self) def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) + target_versionfile = os.path.join(base_dir, "artiq", "_version.py") print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + self._versioneer_generated_version) cmds["sdist"] = cmd_sdist return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. 
- -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -INIT_PY_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - - -def do_setup(): - """Main VCS-independent setup function for installing Versioneer.""" - root = get_root() - try: - cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except EnvironmentError: - old = "" - if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-subst keyword - # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - """Validate the contents of setup.py against Versioneer's expectations.""" - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1)
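Note for reviewers: the replacement logic introduced earlier in this file drops all VCS interrogation in favour of static files. Below is a minimal standalone sketch of how the new version string is resolved, assuming `MAJOR_VERSION` holds a bare version number and `BETA` is an empty marker file next to it; the helper name, the explicit arguments, and the example values are illustrative only and are not part of this patch.

```python
# Sketch of the simplified version resolution added above (not part of the
# patch itself). Names and example values here are assumptions for clarity.
import os

def resolve_version(srcroot, override=None):
    # An explicit override (VERSIONEER_OVERRIDE in the patch) wins outright.
    if override:
        return override
    # Otherwise the base version comes from the MAJOR_VERSION file...
    with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f:
        version = f.read().strip()
    # ...always suffixed with ".unknown", since no VCS metadata is consulted...
    version += ".unknown"
    # ...plus ".beta" when a BETA marker file is present alongside it.
    if os.path.exists(os.path.join(srcroot, "BETA")):
        version += ".beta"
    return version

# Example with hypothetical contents: MAJOR_VERSION holding "6" plus a BETA
# marker yields "6.unknown.beta"; passing override="6.7" (i.e. setting
# VERSIONEER_OVERRIDE=6.7 in the environment) yields "6.7" instead.
```

The trade-off, as far as this diff shows, is giving up per-commit version strings in exchange for a predictable, VCS-independent scheme, with the VERSIONEER_OVERRIDE environment variable taking precedence when an exact version must be supplied.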