forked from M-Labs/artiq
Compare commits
690 Commits
Author | SHA1 | Date |
---|---|---|
Simon Renblad | 76fba538b1 | |
Sebastien Bourdeauducq | 8dd8cfa6b0 | |
Sebastien Bourdeauducq | 5df0721811 | |
Sebastien Bourdeauducq | 6326051052 | |
Sebastien Bourdeauducq | 44a95b5dda | |
Sebastien Bourdeauducq | 645b9b8c5f | |
Sebastien Bourdeauducq | 858f0479ba | |
Sebastien Bourdeauducq | 133b26b6ce | |
Sebastien Bourdeauducq | d96213dbbc | |
Sebastien Bourdeauducq | 413d33c3d1 | |
Sebastien Bourdeauducq | c2b53ecb43 | |
Sebastien Bourdeauducq | ede0b37c6e | |
Sebastien Bourdeauducq | 795c4372fa | |
Sebastien Bourdeauducq | 402a5d3376 | |
Sebastien Bourdeauducq | 85850ad9e8 | |
Sebastien Bourdeauducq | 7a863b4f5e | |
Sebastien Bourdeauducq | a26cee6ca7 | |
Sebastien Bourdeauducq | be08862606 | |
Sebastien Bourdeauducq | 05a9422e67 | |
Simon Renblad | b09a39c82e | |
mwojcik | 49267671f9 | |
Sebastien Bourdeauducq | 8ca75a3fb9 | |
Florian Agbuya | 8381b34a79 | |
Sebastien Bourdeauducq | d458fc27bf | |
mwojcik | 9f4b8db2de | |
Florian Agbuya | 1108cebd75 | |
Florian Agbuya | cf7cbd0c3b | |
mwojcik | 1a28069aa2 | |
Sebastien Bourdeauducq | 56418e342e | |
Sebastien Bourdeauducq | 77c6553725 | |
Sebastien Bourdeauducq | e81e8f28cf | |
mwojcik | de10e584f6 | |
Florian Agbuya | 875666f3ec | |
Sebastien Bourdeauducq | 3ad3fac828 | |
Simon Renblad | 49afa116b3 | |
Simon Renblad | 363afb5fc9 | |
Simon Renblad | e7af219505 | |
linuswck | ec2b86b08d | |
linuswck | 8f7d138dbd | |
Sebastien Bourdeauducq | bbe6ff8cac | |
Sebastien Bourdeauducq | c0a6252e77 | |
mwojcik | 6640bf0e82 | |
mwojcik | b3c0d084d4 | |
linuswck | bb0b8a6c00 | |
Sebastien Bourdeauducq | ce80bf5717 | |
Florian Agbuya | 378dd0e5ca | |
jfniedermeyer | 9c68451cae | |
linuswck | 93c9d8bcdf | |
mwojcik | e480bbe8d8 | |
mwojcik | b168f0bb4b | |
Sebastien Bourdeauducq | 6705c9fbfb | |
mwojcik | 5f445f6b92 | |
occheung | 363f7327f1 | |
Sebastien Bourdeauducq | f7abc156cb | |
linuswck | de41bd6655 | |
Simon Renblad | 96941d7c04 | |
mwojcik | f3c79e71e1 | |
Simon Renblad | 333b81f789 | |
Sebastien Bourdeauducq | d070826911 | |
Sebastien Bourdeauducq | 9c90f923d2 | |
Sebastien Bourdeauducq | e23e4d39d7 | |
David Nadlinger | 08eea09d44 | |
mwojcik | 7ab52af603 | |
mwojcik | 973fd88b27 | |
mwojcik | 8d7194941e | |
mwojcik | 0a750c77e8 | |
mwojcik | 1a0fc317df | |
mwojcik | e05be2f8e4 | |
mwojcik | 6f4b8c641e | |
mwojcik | b42816582e | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | 76f1318bc0 | |
Sebastien Bourdeauducq | 0131a8bef2 | |
mwojcik | e63e2a2897 | |
Simon Renblad | 47fc640f75 | |
Simon Renblad | bb7caacb5f | |
Simon Renblad | da9f7cb58a | |
occheung | 43926574da | |
Simon Renblad | 4f3e58db52 | |
Simon Renblad | 13271cea64 | |
occheung | 0e8fa8933f | |
David Nadlinger | 2eb89cb168 | |
occheung | a772dee1cc | |
Simon Renblad | bafb85a274 | |
mwojcik | 0e8aa33979 | |
mwojcik | fcf6c90ba2 | |
linuswck | 0c1b572872 | |
linuswck | ab0d4c41c3 | |
Jonathan Coates | 6eb81494c5 | |
Jonathan Coates | 586d97c6cb | |
David Nadlinger | 892b0eaca2 | |
linuswck | eedac7cf71 | |
linuswck | a61bbf5618 | |
occheung | b7b8f0efa2 | |
occheung | b52f253dbd | |
occheung | 73ab71f443 | |
linuswck | ab8247b3d7 | |
mwojcik | 36b3678853 | |
mwojcik | af77885dfc | |
mwojcik | eb57b3b393 | |
Simon Renblad | 40ac2e03ab | |
occheung | a2fbcb8bfd | |
occheung | 5c64eac8d2 | |
occheung | 477a7b693c | |
occheung | f2694f25eb | |
occheung | 9e1447d104 | |
occheung | 870020bc9f | |
occheung | c2d136f669 | |
occheung | 06426e0ed9 | |
occheung | e443e06e62 | |
occheung | 55150ebdbb | |
occheung | eb08c55abe | |
occheung | 67b6588d95 | |
occheung | 1bb7e9ceef | |
Florian Agbuya | c02a14ba37 | |
Simon Renblad | 1f3b2ef645 | |
linuswck | 372008cb66 | |
linuswck | 85abb1da2c | |
David Nadlinger | 9e5b62a6b1 | |
David Nadlinger | 22ab62324c | |
David Nadlinger | fc74b78a45 | |
Simon Renblad | f01e654b9c | |
David Nadlinger | e45dc948e9 | |
David Mak | 460cbf4499 | |
Florian Agbuya | 6df85478e4 | |
Jonathan Coates | 5c85cef0c2 | |
linuswck | ccb140a929 | |
linuswck | 7c8073c1ce | |
Florian Agbuya | 2f3329181c | |
Sebastien Bourdeauducq | 1ec1ab0502 | |
linuswck | b49fb841ce | |
Florian Agbuya | a619c9f3c2 | |
Florian Agbuya | 0188f31f3a | |
Florian Agbuya | 4e770509db | |
occheung | 7f63bb322d | |
occheung | 5e5d671f4c | |
occheung | 98904ef4c3 | |
Sebastien Bourdeauducq | 73ac414912 | |
occheung | 838cc80922 | |
Simon Renblad | 904afe1632 | |
Simon Renblad | 01d777c977 | |
Sebastien Bourdeauducq | 9556ca53de | |
occheung | df99450faa | |
Sebastien Bourdeauducq | 1f58cd505c | |
linuswck | ddb2b5e3a1 | |
linuswck | b56f7e429a | |
Sebastien Bourdeauducq | 3452d0c423 | |
Sebastien Bourdeauducq | 2139456f80 | |
Sebastien Bourdeauducq | a2a780a3f2 | |
Sebastien Bourdeauducq | 3620358f12 | |
Sebastien Bourdeauducq | 72b0a17542 | |
Sebastien Bourdeauducq | f5cbca9c29 | |
linuswck | 737ff79ae7 | |
linuswck | dc97d3aee6 | |
Sebastien Bourdeauducq | 5d38db19d0 | |
Sebastien Bourdeauducq | 9bee4b9697 | |
linuswck | cd22e42cb4 | |
linuswck | b7bac8c9d8 | |
mwojcik | e8818c812c | |
occheung | 68dd0e029f | |
occheung | 64d3f867a0 | |
Sebastien Bourdeauducq | df662c4262 | |
Sebastien Bourdeauducq | d2ac6aceb3 | |
Sebastien Bourdeauducq | 9b94a09477 | |
David Nadlinger | efbae51f9d | |
David Nadlinger | 8acfa82586 | |
David Nadlinger | 4d636ea593 | |
Sebastien Bourdeauducq | 3ed7e0ed06 | |
Simon Renblad | c4259dab18 | |
mwojcik | c46ac6f87d | |
linuswck | 758b97426a | |
linuswck | c206e92f29 | |
linuswck | cb547c8a46 | |
linuswck | 72a5231493 | |
Denis Ovchinnikov | 07714be8a7 | |
Simon Renblad | 361088ae72 | |
Simon Renblad | a384df17a4 | |
Simon Renblad | 6592b6ea1d | |
Simon Renblad | 2fb085f1a2 | |
Simon Renblad | a7569a0b2d | |
Simon Renblad | 4fbff1648c | |
Simon Renblad | 8f4c8387f9 | |
Simon Renblad | a2d62e6006 | |
Simon Renblad | 3d0feef614 | |
Simon Renblad | 59ad873831 | |
Simon Renblad | 8589da0723 | |
Simon Renblad | 94e076e976 | |
Simon Renblad | a0094aafbb | |
Simon Renblad | 0befadee96 | |
sven-oxionics | b3dc199e6a | |
Florian Agbuya | d73889fb27 | |
Simon Renblad | 9f8bb6445f | |
Simon Renblad | 068a2d1663 | |
Simon Renblad | 6c588b83d7 | |
Simon Renblad | c17f69a51b | |
Simon Renblad | ac504069d2 | |
Simon Renblad | b6a83904b5 | |
Simon Renblad | 25959d0cd6 | |
Simon Renblad | 5695e9f77e | |
Simon Renblad | fe0f6d8a2c | |
Simon Renblad | d1f2727126 | |
Simon Renblad | 16a3ce274f | |
Simon Renblad | af7622d7ab | |
Jonathan Coates | 9a84575649 | |
Simon Renblad | faf85e815a | |
Simon Renblad | 3663a6b8e8 | |
Simon Renblad | 91442e2914 | |
Simon Renblad | 50a6dac178 | |
Simon Renblad | 5292a8de82 | |
Sebastien Bourdeauducq | 7791f85a1a | |
Sebastien Bourdeauducq | 48bc8a2ecc | |
Denis Ovchinnikov | 93882eb3ce | |
Simon Renblad | 7ca02a119d | |
Simon Renblad | 373fe3dbe7 | |
Simon Renblad | 1af98727b7 | |
Simon Renblad | 376f36c965 | |
Simon Renblad | e710d4badd | |
Simon Renblad | bfbe13e51b | |
Simon Renblad | bf38fc8b0f | |
Simon Renblad | 337273acb6 | |
Simon Renblad | 748707e157 | |
Leon Riesebos | 833fd8760e | |
Florian Agbuya | 454597915a | |
Sebastien Bourdeauducq | 77293d53e3 | |
Sebastien Bourdeauducq | a792bc5456 | |
Sebastien Bourdeauducq | 20d4712815 | |
Spaqin | 82bd913f63 | |
Sebastien Bourdeauducq | 115415d120 | |
Florian Agbuya | d140c960bb | |
Egor Savkin | c25c0bd55a | |
Egor Savkin | 30ef8d8cb4 | |
Florian Agbuya | 7ad32d903a | |
Florian Agbuya | bf46ce4a92 | |
den512is | 1f306a2859 | |
Florian Agbuya | 150d325fc1 | |
Florian Agbuya | c298ec4c2e | |
Sebastien Bourdeauducq | 69bf2dfb81 | |
mwojcik | 29cb7e785d | |
Sebastien Bourdeauducq | b97f6a9e44 | |
Sebastien Bourdeauducq | e0ebc1b21d | |
Sebastien Bourdeauducq | c6ddd3af17 | |
Florian Agbuya | e12219e803 | |
Sebastien Bourdeauducq | ff11b5df71 | |
Sebastien Bourdeauducq | c8dc2cbf09 | |
Sebastien Bourdeauducq | c6b29b30fb | |
Sebastien Bourdeauducq | b20d09aad5 | |
Sebastien Bourdeauducq | 6276182c96 | |
Sebastien Bourdeauducq | d103cbea31 | |
Sebastien Bourdeauducq | 9a6bc6dc7b | |
Sebastien Bourdeauducq | fabe88065b | |
Egor Savkin | 748969c21e | |
Sebastien Bourdeauducq | 75f6bdb6a1 | |
Sebastien Bourdeauducq | 41caec797e | |
Sebastien Bourdeauducq | 953a8a9555 | |
Sebastien Bourdeauducq | 444bab2186 | |
Sebastien Bourdeauducq | 0941d3a29a | |
Denis Ovchinnikov | 22e2514ce6 | |
mwojcik | a4895b591a | |
Sebastien Bourdeauducq | ef2cc2cc12 | |
Sebastien Bourdeauducq | 779810163f | |
Sebastien Bourdeauducq | b9c7905b20 | |
Charles Baynham | c2b0c97640 | |
Sebastien Bourdeauducq | 58cc3b8d0a | |
Sebastien Bourdeauducq | 598c7b1d25 | |
Jonathan Coates | ea9fe9b4e1 | |
mwojcik | c1d6fd4bbe | |
mwojcik | ab52748cac | |
mwojcik | ddfe51e7ac | |
mwojcik | 6c96033d41 | |
mwojcik | 0b03126038 | |
mwojcik | fdca1ab7fc | |
mwojcik | c36b6b3b65 | |
mwojcik | c0ca27e6cf | |
Jonathan Coates | 3ca47537b8 | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | df15f53ee9 | |
Sebastien Bourdeauducq | e015483e48 | |
Sebastien Bourdeauducq | c53d333d46 | |
Sebastien Bourdeauducq | 5b94ce82e4 | |
Sebastien Bourdeauducq | 45cd438fb8 | |
Sebastien Bourdeauducq | 0e7e30d46e | |
Sebastien Bourdeauducq | d5a7755584 | |
Sebastien Bourdeauducq | 3ff0be6540 | |
Sebastien Bourdeauducq | 8409a6bb94 | |
Sebastien Bourdeauducq | 2c1438c4b9 | |
Egor Savkin | 5199bea353 | |
mwojcik | a533f2a0cd | |
Jonathan Coates | 0bf57f4ebd | |
Sebastien Bourdeauducq | 4417acd13b | |
Sebastien Bourdeauducq | 4056168875 | |
Egor Savkin | 9331911139 | |
Spaqin | 2f35869eb1 | |
Egor Savkin | aed47d79ff | |
mwojcik | 918d30b900 | |
Egor Savkin | b5d9062ba9 | |
Egor Savkin | 8984f5104a | |
Egor Savkin | d0b8818688 | |
Sebastien Bourdeauducq | 757c00b0fe | |
Sebastien Bourdeauducq | c1474c134a | |
Sebastien Bourdeauducq | dc3db8bb66 | |
Sebastien Bourdeauducq | 97161a3df2 | |
Ikko Eltociear Ashimine | 7ba06bfe61 | |
Spaqin | b225717ddb | |
mwojcik | 696bda5c03 | |
mwojcik | 9150230ea7 | |
Spaqin | e9a153b985 | |
David Nadlinger | 8b1f38b015 | |
Egor Savkin | bbf80875fb | |
Egor Savkin | 1ca09b9484 | |
Spaqin | 84e7515721 | |
Ikko Eltociear Ashimine | 15c18bdc81 | |
Sebastien Bourdeauducq | a9360823b1 | |
Egor Savkin | 1ec0abbfcf | |
mwojcik | 90a6fe1c35 | |
mwojcik | d0437f5672 | |
Michael Hartmann | 07d684a35d | |
Michael Hartmann | 2371c825f5 | |
Egor Savkin | 394138f00f | |
Sebastien Bourdeauducq | 3f5cc4aa10 | |
Sebastien Bourdeauducq | e9c65abebe | |
Sebastien Bourdeauducq | 20e8f17b3d | |
Sebastien Bourdeauducq | 57e87c9717 | |
Sebastien Bourdeauducq | 248cd69673 | |
Sebastien Bourdeauducq | b8968262d7 | |
Sebastien Bourdeauducq | babbbfadb3 | |
Sebastien Bourdeauducq | 514ac953ce | |
Sebastien Bourdeauducq | 0a37a1a4c1 | |
Sebastien Bourdeauducq | 6d37d9d52c | |
Sebastien Bourdeauducq | 5f77d4f5fa | |
Sebastien Bourdeauducq | 2f289c552f | |
Sebastien Bourdeauducq | 9e8bb3c701 | |
Sebastien Bourdeauducq | d872c3ab4d | |
Sebastien Bourdeauducq | f8d93813e9 | |
Sebastien Bourdeauducq | 628b671433 | |
Sebastien Bourdeauducq | daad3d263a | |
Sebastien Bourdeauducq | 80f261437a | |
Sebastien Bourdeauducq | 7fd6dead8f | |
Sebastien Bourdeauducq | 73a4ef89ec | |
mwojcik | 70edc9c5c6 | |
mwojcik | 9042426872 | |
mwojcik | cd860beda2 | |
mwojcik | 627504b60e | |
Sebastien Bourdeauducq | c8ab6c1b2b | |
Sebastien Bourdeauducq | a96bbd8508 | |
Sebastien Bourdeauducq | 6cfd1480a7 | |
Sebastien Bourdeauducq | c401559ed5 | |
Sebastien Bourdeauducq | ea21f474a7 | |
Sebastien Bourdeauducq | cee9f3f44e | |
Sebastien Bourdeauducq | b9bfe090f4 | |
mwojcik | eb3742fb08 | |
Egor Savkin | 070fed755b | |
Sebastien Bourdeauducq | 63f1a6d197 | |
Sebastien Bourdeauducq | 7dafdfe2f7 | |
Sebastien Bourdeauducq | ec893222a4 | |
Sebastien Bourdeauducq | 573a895c1e | |
Sebastien Bourdeauducq | cf2a4972f7 | |
Sebastien Bourdeauducq | 668997a451 | |
Sebastien Bourdeauducq | 5da9794895 | |
Spaqin | 3838dfc1d1 | |
Sebastien Bourdeauducq | 1be7e2a2e1 | |
Sebastien Bourdeauducq | 1bf7188dec | |
mwojcik | bdae594c79 | |
mwojcik | 8dc6902c23 | |
Norman Krackow | dbb77b5356 | |
Sebastien Bourdeauducq | 1fc127c770 | |
David Nadlinger | 88684dbd2a | |
David Nadlinger | b9f13d48aa | |
David Nadlinger | 4bb2a3b9e0 | |
David Nadlinger | f5c408d8d9 | |
Sebastien Bourdeauducq | 4be7f302e4 | |
Spaqin | 17efc28dbe | |
David Nadlinger | 1e0102379b | |
David Nadlinger | ceabeb8d84 | |
SingularitySurfer | 8e476dd502 | |
David Nadlinger | 874d298ceb | |
Egor Savkin | d75ade7be6 | |
Egor Savkin | 2a58981822 | |
Egor Savkin | e80442811e | |
Egor Savkin | 12649720f1 | |
Egor Savkin | 454ae39c5d | |
David Nadlinger | 3c7a394eff | |
David Nadlinger | 740543d4e2 | |
Egor Savkin | b2b559e73b | |
Egor Savkin | 1852491102 | |
Egor Savkin | c591e7e305 | |
David Nadlinger | 261dc6b933 | |
David Nadlinger | 1abedba6dc | |
Egor Savkin | aa2febca53 | |
Egor Savkin | d60a96a715 | |
wlph17 | 3f93f16955 | |
Sebastien Bourdeauducq | 3735b7ea9d | |
Sebastien Bourdeauducq | 195d2aea6a | |
Sebastien Bourdeauducq | 6d179b2bf5 | |
Sebastien Bourdeauducq | 275b00bfc2 | |
Jonathan Coates | b8b6ce14cc | |
Nico Pulido | 88c5109627 | |
David Nadlinger | dee154b35b | |
David Nadlinger | 950b9ac4d6 | |
Egor Savkin | 6c47aac760 | |
mwojcik | f2c1e663a7 | |
Egor Savkin | f7f027001e | |
David Nadlinger | 0b3c232819 | |
Etienne Wodey | d45f9b6950 | |
Sebastien Bourdeauducq | 2fe02cee6f | |
Sebastien Bourdeauducq | 404f24af6b | |
David Nadlinger | 3d25092cbd | |
David Nadlinger | dbbe8e8ed4 | |
David Nadlinger | 8740ec3dd5 | |
David Nadlinger | 6caa779c74 | |
David Nadlinger | 4819016a3c | |
David Nadlinger | 00a27b105a | |
David Nadlinger | beff15de5e | |
火焚 富良 | defc69d9c3 | |
火焚 富良 | e2178f6c86 | |
Sebastien Bourdeauducq | f3f068036a | |
mwojcik | ad000609ce | |
mwojcik | af0b94bb34 | |
mwojcik | 5cd57e8688 | |
mwojcik | f8eb695c0f | |
mwojcik | 458bd8a927 | |
mwojcik | a6856a5e4a | |
mwojcik | 1eb87164be | |
Sebastien Bourdeauducq | f75ddf78b0 | |
Sebastien Bourdeauducq | e0b1098bc0 | |
Robert Jördens | e5c621751f | |
Robert Jördens | 07db770423 | |
Robert Jördens | eb7a0714b3 | |
Robert Jördens | e15b5b50d8 | |
Robert Jördens | 1820e1f715 | |
Robert Jördens | 118b7aca1d | |
Fabian Schwartau | d5e267fadf | |
Sebastien Bourdeauducq | 286f151d9a | |
Sebastien Bourdeauducq | 19b8d28a2e | |
Sebastien Bourdeauducq | 3ffbc5681e | |
Sebastien Bourdeauducq | 192cab887f | |
wlph17 | 9846ee653c | |
fanmingyu212 | 56e6b1428c | |
Michael Birtwell | b895846322 | |
Robert Jördens | a1a4545ed4 | |
Robert Jördens | a0053f7a2b | |
Robert Jördens | 740f3d220b | |
Robert Jördens | 513f9f00f3 | |
Robert Jördens | 5cfa8d9a42 | |
Robert Jördens | 0e4a87826c | |
Sebastien Bourdeauducq | 1709cf9717 | |
Sebastien Bourdeauducq | 4266beeb9c | |
mwojcik | c955ac15ed | |
mwojcik | 81ef484864 | |
mwojcik | f2c3f95040 | |
mwojcik | 616ed3dcc2 | |
Robert Jördens | aedcf205c7 | |
Robert Jördens | 14ab1d4bbc | |
Sebastien Bourdeauducq | a028b5c9f7 | |
Sebastien Bourdeauducq | 6085fe3319 | |
Robert Jördens | af28bf3550 | |
Robert Jördens | 4df880faf6 | |
Robert Jördens | 857fb4ecec | |
Robert Jördens | a91836e5fe | |
Robert Jördens | c5c5c30617 | |
Robert Jördens | 27e3c044ed | |
Robert Jördens | c26fa5eb90 | |
Sebastien Bourdeauducq | 411afbdc23 | |
Sebastien Bourdeauducq | b4287ac9f4 | |
Robert Jördens | 1cc57e2345 | |
Robert Jördens | 263c2751b3 | |
Robert Jördens | 876f26ee30 | |
Robert Jördens | fa3678f8a3 | |
Robert Jördens | f4d325112c | |
Robert Jördens | b6586cd7e4 | |
Robert Jördens | 3809ac5470 | |
Robert Jördens | b9727fdfce | |
Robert Jördens | d6d0c2c866 | |
Robert Jördens | 0df2cadcd3 | |
Robert Jördens | 25c0dc4688 | |
Robert Jördens | cf48232a90 | |
Robert Jördens | a20087848d | |
Robert Jördens | 31663556b8 | |
Robert Jördens | 47f90a58cc | |
Mikołaj Sowiński | 3c7ab498d1 | |
Deepskyhunter | 7c306d5609 | |
mwojcik | b705862ecd | |
fanmingyu212 | 20cb99061e | |
Sebastien Bourdeauducq | 5ef94d30dd | |
kk1050 | 3c72b8d646 | |
Sebastien Bourdeauducq | 27397625ba | |
cc78078 | 3535d0f1ae | |
cc78078 | 185c91f522 | |
Deepskyhunter | f31279411e | |
Alex Wong Tat Hang | a3ae82502c | |
Deepskyhunter | 0cdb06fdf5 | |
Deepskyhunter | 2a7a72b27a | |
kk1050 | 748e28be38 | |
Sebastien Bourdeauducq | 4b1715c80b | |
Robert Jördens | 5985595845 | |
Robert Jördens | a8f498b478 | |
Sebastien Bourdeauducq | db4bccda7e | |
Sebastien Bourdeauducq | 5c461443e4 | |
Sebastien Bourdeauducq | cb711e0ee3 | |
Sebastien Bourdeauducq | 9ba239b8b2 | |
Robert Jördens | 4ea11f4609 | |
SingularitySurfer | 57ac6ec003 | |
Robert Jördens | d2dacc6433 | |
Sebastien Bourdeauducq | 734b2a6747 | |
Deepskyhunter | c7394802bd | |
kk1050 | 7aa6104872 | |
mwojcik | 46f2842d38 | |
mwojcik | c9fb7b410f | |
Spaqin | 8be945d5c7 | |
SingularitySurfer | 9c8ffa54b2 | |
Sebastien Bourdeauducq | d17675e9b5 | |
Sebastien Bourdeauducq | 388b81af19 | |
Deepskyhunter | 02b086c9e5 | |
SingularitySurfer | 953dd899fd | |
SingularitySurfer | 689a2ef8ba | |
SingularitySurfer | d8cfe22501 | |
Deepskyhunter | b4f24dd326 | |
Deepskyhunter | da6d35e7c6 | |
Deepskyhunter | 745f440597 | |
SingularitySurfer | 2e834cf406 | |
SingularitySurfer | 3f8a221c76 | |
SingularitySurfer | ab097b8ef9 | |
SingularitySurfer | 24b4ec46bd | |
Norman Krackow | 56c59e38f0 | |
SingularitySurfer | c0581178d6 | |
SingularitySurfer | 43c94577ce | |
SingularitySurfer | ce4055db3b | |
SingularitySurfer | b67a70392d | |
SingularitySurfer | 57176fedb2 | |
SingularitySurfer | 8bea821f93 | |
SingularitySurfer | 0388161754 | |
SingularitySurfer | 751af3144e | |
SingularitySurfer | 5df766e6da | |
David Nadlinger | e1f9feae8b | |
David Nadlinger | dd928fc014 | |
Sebastien Bourdeauducq | 48cb111035 | |
hartytp | d8597e9dc8 | |
David Nadlinger | 32db6ff978 | |
David Nadlinger | dbc87f08ff | |
David Nadlinger | c4068e6896 | |
David Nadlinger | 85895ab89b | |
kk1050 | 46fb8916bb | |
David Nadlinger | 2d6fc154db | |
David Nadlinger | 4c42f65909 | |
David Nadlinger | f4d639242d | |
SingularitySurfer | d09153411f | |
Norman Krackow | dc49372d57 | |
Norman Krackow | 2044dc3ae5 | |
SingularitySurfer | ae3f1c1c71 | |
Sebastien Bourdeauducq | bf3b155a31 | |
SingularitySurfer | 1bddadc6e2 | |
SingularitySurfer | b0f9fd9c4c | |
Michael Birtwell | 69c4026d2b | |
Deepskyhunter | e47834d82e | |
Spaqin | 4ede14b14d | |
kk1050 | 4ddd2739ee | |
Sebastien Bourdeauducq | e702624720 | |
Sebastien Bourdeauducq | 68ef0073ea | |
Sebastien Bourdeauducq | 71a37bb408 | |
occheung | f79f7db3a2 | |
occheung | 872f8f039f | |
occheung | 50495097e5 | |
Sebastien Bourdeauducq | ca614a3eea | |
Sebastien Bourdeauducq | 8bf6bc4d1f | |
occheung | 6d46c886d7 | |
Sebastien Bourdeauducq | a5b7e958f8 | |
Sebastien Bourdeauducq | 667f36a2e7 | |
Sebastien Bourdeauducq | 7cff63e539 | |
Sebastien Bourdeauducq | df1b19082c | |
Sebastien Bourdeauducq | d478086119 | |
Sebastien Bourdeauducq | 18a08954c1 | |
Sebastien Bourdeauducq | 57086e2349 | |
mwojcik | cf8e583847 | |
mwojcik | d24a36a02a | |
mwojcik | 4bdb4c8e11 | |
Sebastien Bourdeauducq | 8599be5550 | |
Sebastien Bourdeauducq | 9896d78e07 | |
kk1050 | 70503bee6f | |
Laurent Stephenson | 16393efa7c | |
David Nadlinger | 8a7af3f75c | |
Spaqin | 35f30ddf05 | |
Sebastien Bourdeauducq | c440f9fe1b | |
Sebastien Bourdeauducq | 69b6426800 | |
Michael Birtwell | 50dbda4f43 | |
Michael Birtwell | 95378cf9c9 | |
Michael Birtwell | 671453938b | |
Michael Birtwell | 1fe59d27dc | |
Michael Birtwell | 73082d116f | |
Michael Birtwell | 596b9a265c | |
Michael Birtwell | 6ffb1f83ee | |
Michael Birtwell | c60de48a30 | |
Suthep Pomjaksilp | 06ad76b6ab | |
David Nadlinger | b2b84b1fd6 | |
David Nadlinger | 6b5c390d48 | |
David Nadlinger | 2cb08814e8 | |
Sebastien Bourdeauducq | 58b59b99ff | |
Sebastien Bourdeauducq | fa3ee8ad23 | |
Michael Birtwell | cab9d90d01 | |
Sebastien Bourdeauducq | 0a029748ee | |
Leon Riesebos | 386391e3f9 | |
Leon Riesebos | b5dc9fd640 | |
Sebastien Bourdeauducq | c82c358f3a | |
Sebastien Bourdeauducq | 723f41c78b | |
Sebastien Bourdeauducq | 866a83796a | |
Timothy Ballance | f91e106586 | |
Timothy Ballance | a289d69883 | |
Sebastien Bourdeauducq | f89275b02a | |
Sebastien Bourdeauducq | 65d2dd0173 | |
Sebastien Bourdeauducq | 6b33f3b719 | |
Sebastien Bourdeauducq | 80d412a8bf | |
Sebastien Bourdeauducq | 922d2b1619 | |
Sebastien Bourdeauducq | d644e982c8 | |
Sebastien Bourdeauducq | ec1efd7af9 | |
Sebastien Bourdeauducq | 735133a2b4 | |
Sebastien Bourdeauducq | 207717c740 | |
Sebastien Bourdeauducq | 6d92e539b1 | |
Sebastien Bourdeauducq | 6a49b8cb58 | |
Sebastien Bourdeauducq | df1513f0e9 | |
Sebastien Bourdeauducq | d3073022ac | |
Sebastien Bourdeauducq | bbb2c75194 | |
Sebastien Bourdeauducq | 710786388c | |
Sebastien Bourdeauducq | aff569b2c3 | |
Sebastien Bourdeauducq | a159ef642d | |
Sebastien Bourdeauducq | 1a26eb8cf2 | |
Sebastien Bourdeauducq | c1c2d21ba7 | |
Sebastien Bourdeauducq | e5e4d55f84 | |
Sebastien Bourdeauducq | 71e8b49246 | |
pca006132 | ebfeb1869f | |
pca006132 | eb6817c8f1 | |
Sebastien Bourdeauducq | 8415151866 | |
ciciwu | 67ca48fa84 | |
ciciwu | 9a96387dfe | |
Sebastien Bourdeauducq | b02abc2bf4 | |
Sebastien Bourdeauducq | ac55da81d8 | |
spaqin | 232f28c0e8 | |
spaqin | 51fa1b5e5e | |
spaqin | 17ecd35530 | |
Spaqin | a85b4d5f5e | |
David Nadlinger | 9bfbd39fa3 | |
Sebastien Bourdeauducq | 338bb189b4 | |
Leon Riesebos | c4292770f8 | |
Sebastien Bourdeauducq | 2b918ac6f7 | |
Michael Birtwell | 1b80746f48 | |
Michael Birtwell | 2d6215158f | |
mwojcik | c000af9985 | |
mwojcik | 35f91aef68 | |
Sebastien Bourdeauducq | 0da7b83176 | |
Steve Fan | ad656d1e53 | |
Sebastien Bourdeauducq | 69ce09c7c0 | |
Sebastien Bourdeauducq | 6a586c2e4d | |
Sebastien Bourdeauducq | e84056f7e0 | |
Mike Birtwell | a106ed0295 | |
Robert Jördens | c8b9eed9c9 | |
Robert Jördens | 08b65470cd | |
Sebastien Bourdeauducq | 65eab31f23 | |
Sebastien Bourdeauducq | 6dfc854673 | |
Sebastien Bourdeauducq | 5a8928fbf3 | |
Sebastien Bourdeauducq | b3b73948a2 | |
Sebastien Bourdeauducq | 8433cc6731 | |
Sebastien Bourdeauducq | 0649e69d94 | |
Sebastien Bourdeauducq | bbfa926fa6 | |
Sebastien Bourdeauducq | 9e37fb95d6 | |
Sebastien Bourdeauducq | 034a0fdb35 | |
Sebastien Bourdeauducq | 0e178e40ac | |
Sebastien Bourdeauducq | a0070d4396 | |
Sebastien Bourdeauducq | 03a367f565 | |
Sebastien Bourdeauducq | b893d97d7b | |
Sebastien Bourdeauducq | b6f5ba8b5b | |
Sebastien Bourdeauducq | cc69482dad | |
Sebastien Bourdeauducq | 833acb6925 | |
occheung | d5eec652ee | |
occheung | a74196aa27 | |
Steve Fan | 798a412c6f | |
David Nadlinger | e45cb217be | |
Sebastien Bourdeauducq | 8866ab301a | |
Sebastien Bourdeauducq | 3cddb14174 | |
Sebastien Bourdeauducq | 245fe6e9ea | |
Sebastien Bourdeauducq | ef25640937 | |
Sebastien Bourdeauducq | dd3279e506 | |
Sebastien Bourdeauducq | afb98a1903 | |
Steve Fan | 34008b7a21 | |
pca006132 | 93328ad8ee | |
Steve Fan | 234a82aaa9 | |
Sebastien Bourdeauducq | ee511758ce | |
Sebastien Bourdeauducq | e6c18364ae | |
pca006132 | 9d43762695 | |
pca006132 | 4132c450a5 | |
pca006132 | 536b3e0c26 | |
pca006132 | ba34700798 | |
pca006132 | 6ec003c1c9 | |
pca006132 | da4ff44377 | |
pca006132 | 4644e105b1 | |
hartytp | 715bff3ebf |

@@ -29,6 +29,7 @@ __pycache__/
 /repository/
 /results
 /last_rid.pyon
-/dataset_db.pyon
+/dataset_db.mdb
+/dataset_db.mdb-lock
 /device_db*.py
 /test*

@@ -26,7 +26,6 @@ report if possible:
 * Operating System
 * ARTIQ version (with recent versions of ARTIQ, run ``artiq_client --version``)
 * Version of the gateware and runtime loaded in the core device (in the output of ``artiq_coremgmt -D .... log``)
-* If using Conda, output of `conda list`
 * Hardware involved


@@ -1 +0,0 @@
-7

@@ -5,3 +5,4 @@ include versioneer.py
 include artiq/_version.py
 include artiq/coredevice/coredevice_generic.schema.json
 include artiq/compiler/kernel.ld
+include artiq/afws.pem

@@ -13,7 +13,7 @@ ARTIQ uses FPGA hardware to perform its time-critical tasks. The `Sinara hardwar
 ARTIQ is designed to be portable to hardware platforms from different vendors and FPGA manufacturers.
 Several different configurations of a `FPGA evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-k7-kc705-g.html>`_ and of a `Zynq evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-z7-zc706-g.html>`_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort.

-ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and Conda packages (for Windows and Linux). See `the manual <https://m-labs.hk/experiment-control/resources/>`_ for installation instructions.
+ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and MSYS2 packages (for Windows). See `the manual <https://m-labs.hk/experiment-control/resources/>`_ for installation instructions.
 Packages containing pre-compiled binary images to be loaded onto the hardware platforms are supplied for each configuration.
 Like any open source software ARTIQ can equally be built and installed directly from `source <https://github.com/m-labs/artiq>`_.

@@ -29,7 +29,7 @@ Website: https://m-labs.hk/artiq
 License
 =======

-Copyright (C) 2014-2021 M-Labs Limited.
+Copyright (C) 2014-2023 M-Labs Limited.

 ARTIQ is free software: you can redistribute it and/or modify
 it under the terms of the GNU Lesser General Public License as published by

@@ -3,39 +3,151 @@
 Release notes
 =============

+ARTIQ-8 (Unreleased)
+--------------------
+
+Highlights:
+
+* New hardware support:
+  - Support for Shuttler, a 16-channel 125MSPS DAC card intended for ion transport.
+    Waveform generator and user API are similar to the NIST PDQ.
+  - Implemented Phaser-servo. This requires recent gateware on Phaser.
+  - Almazny v1.2 with finer RF switch control.
+  - Metlino and Sayma support has been dropped due to complications with synchronous RTIO clocking.
+  - More user LEDs are exposed to RTIO on Kasli.
+  - Implemented Phaser-MIQRO support. This requires the proprietary Phaser MIQRO gateware
+    variant from QUARTIQ.
+  - Sampler: fixed ADC MU to Volt conversion factor for Sampler v2.2+.
+    For earlier hardware versions, specify the hardware version in the device
+    database file (e.g. ``"hw_rev": "v2.1"``) to use the correct conversion factor.
+* Support for distributed DMA, where DMA is run directly on satellites for corresponding
+  RTIO events, increasing bandwidth in scenarios with heavy satellite usage.
+* Support for subkernels, where kernels are run on satellite device CPUs to offload some
+  of the processing and RTIO operations.
+* CPU (on softcore platforms) and AXI bus (on Zynq) are now clocked synchronously with the RTIO
+  clock, to facilitate implementation of local processing on DRTIO satellites, and to slightly
+  reduce RTIO latency.
+* Support for DRTIO-over-EEM, used with Shuttler.
+* Added channel names to RTIO error messages.
+* GUI:
+  - Implemented Applet Request Interfaces which allow applets to modify datasets and set the
+    current values of widgets in the dashboard's experiment windows.
+  - Implemented a new EntryArea widget which allows argument entry widgets to be used in applets.
+  - The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets,
+    making it a convenient way to clean up after exploratory work without destroying a
+    carefully arranged default workspace.
+  - Hotkeys now organize experiment windows in the order they were last interacted with:
+    + CTRL+SHIFT+T tiles experiment windows
+    + CTRL+SHIFT+C cascades experiment windows
+* Persistent datasets are now stored in a LMDB database for improved performance.
+* Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on
+  kernel functions.
+* Full Python 3.10 support.
+* MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to
+  support legacy installations, but may be removed in a future release.
+
+Breaking changes:
+
+* ``SimpleApplet`` now calls widget constructors with an additional ``ctl`` parameter for control
+  operations, which includes dataset operations. It can be ignored if not needed. For an example usage,
+  refer to the ``big_number.py`` applet.
+* ``SimpleApplet`` and ``TitleApplet`` now call ``data_changed`` with additional parameters. Derived applets
+  should change the function signature as below:
+
+  ::
+
+    # SimpleApplet
+    def data_changed(self, value, metadata, persist, mods)
+    # SimpleApplet (old version)
+    def data_changed(self, data, mods)
+    # TitleApplet
+    def data_changed(self, value, metadata, persist, mods, title)
+    # TitleApplet (old version)
+    def data_changed(self, data, mods, title)
+
+  Accesses to the data argument should be replaced as below:
+
+  ::
+
+    data[key][0] ==> persist[key]
+    data[key][1] ==> value[key]
+
+* The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``.
+  Parameters after and including ``scale`` in both constructors are now keyword-only.
+  Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage.
+* Almazny v1.2 is incompatible with the legacy versions and is the default.
+  To use legacy versions, specify ``almazny_hw_rev`` in the JSON description.
+* kasli_generic.py has been merged into kasli.py, and the demonstration designs without JSON descriptions
+  have been removed. The base classes remain present in kasli.py to support third-party flows without
+  JSON descriptions.
+* Legacy PYON databases should be converted to LMDB with the script below:
+
+  ::
+
+    from sipyco import pyon
+    import lmdb
+
+    old = pyon.load_file("dataset_db.pyon")
+    new = lmdb.open("dataset_db.mdb", subdir=False, map_size=2**30)
+    with new.begin(write=True) as txn:
+        for key, value in old.items():
+            txn.put(key.encode(), pyon.encode((value, {})).encode())
+    new.close()
+
+* ``artiq.wavesynth`` has been removed.
+
 ARTIQ-7
 -------

 Highlights:

 * New hardware support:
-  - Kasli-SoC, a new EEM carrier based on a Zynq SoC, enabling much faster kernel execution.
+  - Kasli-SoC, a new EEM carrier based on a Zynq SoC, enabling much faster kernel execution
+    (see: https://arxiv.org/abs/2111.15290).
+  - DRTIO support on Zynq-based devices (Kasli-SoC and ZC706).
+  - DRTIO support on KC705.
   - HVAMP_8CH 8 channel HV amplifier for Fastino / Zotinos
   - Almazny mezzanine board for Mirny
+  - Phaser: improved documentation, exposed the DAC coarse mixer and ``sif_sync``, exposed upconverter calibration
+    and enabling/disabling of upconverter LO & RF outputs, added helpers to align Phaser updates to the
+    RTIO timeline (``get_next_frame_mu()``).
+  - Urukul: ``get()``, ``get_mu()``, ``get_att()``, and ``get_att_mu()`` functions added for AD9910 and AD9912.
 * Softcore targets now use the RISC-V architecture (VexRiscv) instead of OR1K (mor1kx).
+* Gateware FPU is supported on KC705 and Kasli 2.0.
 * Faster compilation for large arrays/lists.
-* Phaser:
-  - Improved documentation
-  - Expose the DAC coarse mixer and ``sif_sync``
-  - Exposes upconverter calibration and enabling/disabling of upconverter LO & RF outputs.
-  - Add helpers to align Phaser updates to the RTIO timeline (``get_next_frame_mu()``)
-* ``get()``, ``get_mu()``, ``get_att()``, and ``get_att_mu()`` functions added for AD9910 and AD9912
-* On Kasli, the number of FIFO lanes in the scalable events dispatcher (SED) can now be configured in
-  the JSON hardware description file.
-* ``artiq_ddb_template`` generates edge-counter keys that start with the key of the corresponding
-  TTL device (e.g. ``"ttl_0_counter"`` for the edge counter on TTL device``"ttl_0"``)
-* ``artiq_master`` now has an ``--experiment-subdir`` option to scan only a subdirectory of the
-  repository when building the list of experiments.
+* Faster exception handling.
+* Several exception handling bugs fixed.
+* Support for a simpler shared library system with faster calls into the runtime. This is only used by the NAC3
+  compiler (nac3ld) and improves RTIO output performance (test_pulse_rate) by 9-10%.
+* Moninj improvements:
+  - Urukul monitoring and frequency setting (through dashboard) is now supported.
+  - Core device moninj is now proxied via the ``aqctl_moninj_proxy`` controller.
 * The configuration entry ``rtio_clock`` supports multiple clocking settings, deprecating the usage
   of compile-time options.
-* DRTIO: added support for 100MHz clock.
+* Added support for 100MHz RTIO clock in DRTIO.
 * Previously detected RTIO async errors are reported to the host after each kernel terminates and a
-  warning is logged. The warning is additional to the one already printed in the core device log upon
-  detection of the error.
-* HDF5 options can now be passed when creating datasets with ``set_dataset``. This allows
-  in particular to use transparent compression filters as follows:
-  ``set_dataset(name, value, hdf5_options={"compression": "gzip"})``.
-* Removed worker DB warning for writing a dataset that is also in the archive
+  warning is logged. The warning is additional to the one already printed in the core device log
+  immediately upon detection of the error.
+* Extended Kasli gateware JSON description with configuration for SPI over DIO.
+* TTL outputs can be now configured to work as a clock generator from the JSON.
+* On Kasli, the number of FIFO lanes in the scalable events dispatcher (SED) can now be configured in
+  the JSON.
+* ``artiq_ddb_template`` generates edge-counter keys that start with the key of the corresponding
+  TTL device (e.g. ``ttl_0_counter`` for the edge counter on TTL device ``ttl_0``).
+* ``artiq_master`` now has an ``--experiment-subdir`` option to scan only a subdirectory of the
+  repository when building the list of experiments.
+* Experiments can now be submitted by-content.
+* The master can now optionally log all experiments submitted into a CSV file.
+* Removed worker DB warning for writing a dataset that is also in the archive.
+* Experiments can now call ``scheduler.check_termination()`` to test if the user
+  has requested graceful termination.
+* ARTIQ command-line programs and controllers now exit cleanly on Ctrl-C.
+* ``artiq_coremgmt reboot`` now reloads gateware as well, providing a more thorough and reliable
+  device reset (7-series FPGAs only).
+* Firmware and gateware can now be built on-demand on the M-Labs server using ``afws_client``
+  (subscribers only). Self-compilation remains possible.
+* Easier-to-use packaging via Nix Flakes.
+* Python 3.10 support (experimental).

 Breaking changes:

@@ -48,12 +160,11 @@ Breaking changes:
   generated for some configurations.
 * Phaser: fixed coarse mixer frequency configuration
 * Mirny: Added extra delays in ``ADF5356.sync()``. This avoids the need of an extra delay before
-  calling `ADF5356.init()`.
-* DRTIO: Changed message alignment from 32-bits to 64-bits.
+  calling ``ADF5356.init()``.
 * The deprecated ``set_dataset(..., save=...)`` is no longer supported.
-* The internal dataset representation was changed to support tracking HDF5 options like e.g.
-  a compression method. This requires changes to code reading the dataset persistence file
-  (``dataset_db.pyon``) and to custom applets.
+* The ``PCA9548`` I2C switch class was renamed to ``I2CSwitch``, to accommodate support for PCA9547,
+  and possibly other switches in future. Readback has been removed, and now only one channel per
+  switch is supported.


 ARTIQ-6
@@ -129,7 +240,6 @@ Breaking changes:
 * Experiment classes with underscore-prefixed names are now ignored when ``artiq_client``
   determines which experiment to submit (consistent with ``artiq_run``).

-
 ARTIQ-5
 -------

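As a point of reference for the ``data_changed`` migration described in the release notes above, here is a minimal sketch of a custom applet written against the new API; the widget class, dataset name and argument name are placeholders, not part of the upstream changes. The ``req`` object is the Applet Request Interface and can simply be ignored by read-only applets.

::

    #!/usr/bin/env python3
    # Sketch of a read-only applet using the ARTIQ-8 applet API.
    from PyQt5 import QtWidgets

    from artiq.applets.simple import SimpleApplet


    class TextWidget(QtWidgets.QLabel):
        def __init__(self, args, req):
            QtWidgets.QLabel.__init__(self)
            self.dataset_name = args.dataset
            self.req = req  # request interface; only needed to write datasets back

        def data_changed(self, value, metadata, persist, mods):
            # value/metadata/persist are dictionaries keyed by dataset name
            try:
                self.setText(str(value[self.dataset_name]))
            except KeyError:
                self.setText("---")


    def main():
        applet = SimpleApplet(TextWidget)
        applet.add_dataset("dataset", "dataset to display")
        applet.run()


    if __name__ == "__main__":
        main()
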
@@ -1,13 +1,7 @@
 import os

+def get_rev():
+    return os.getenv("VERSIONEER_REV", default="unknown")
+
 def get_version():
-    override = os.getenv("VERSIONEER_OVERRIDE")
-    if override:
-        return override
-    srcroot = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
-    with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f:
-        version = f.read().strip()
-    version += ".unknown"
-    if os.path.exists(os.path.join(srcroot, "BETA")):
-        version += ".beta"
-    return version
+    return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta")

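A quick sketch of how the simplified version helpers above behave; it assumes an installed ``artiq`` package, and the override strings are arbitrary examples:

::

    import os

    os.environ["VERSIONEER_OVERRIDE"] = "8.0+local"
    os.environ["VERSIONEER_REV"] = "0123abcd"

    from artiq import _version

    print(_version.get_version())  # "8.0+local"; "8.0+unknown.beta" when the variable is unset
    print(_version.get_rev())      # "0123abcd"; "unknown" when the variable is unset
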
@@ -1,22 +1,96 @@
 #!/usr/bin/env python3

-from PyQt5 import QtWidgets
+from PyQt5 import QtWidgets, QtCore, QtGui

 from artiq.applets.simple import SimpleApplet
+from artiq.tools import scale_from_metadata
+from artiq.gui.tools import LayoutWidget


-class NumberWidget(QtWidgets.QLCDNumber):
-    def __init__(self, args):
-        QtWidgets.QLCDNumber.__init__(self)
-        self.setDigitCount(args.digit_count)
+class QResponsiveLCDNumber(QtWidgets.QLCDNumber):
+    doubleClicked = QtCore.pyqtSignal()
+
+    def mouseDoubleClickEvent(self, event):
+        self.doubleClicked.emit()
+
+
+class QCancellableLineEdit(QtWidgets.QLineEdit):
+    editCancelled = QtCore.pyqtSignal()
+
+    def keyPressEvent(self, event):
+        if event.key() == QtCore.Qt.Key_Escape:
+            self.editCancelled.emit()
+        else:
+            super().keyPressEvent(event)
+
+
+class NumberWidget(LayoutWidget):
+    def __init__(self, args, req):
+        LayoutWidget.__init__(self)
         self.dataset_name = args.dataset
+        self.req = req
+        self.metadata = dict()

-    def data_changed(self, data, mods):
+        self.number_area = QtWidgets.QStackedWidget()
+        self.addWidget(self.number_area, 0, 0)
+
+        self.unit_area = QtWidgets.QLabel()
+        self.unit_area.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
+        self.addWidget(self.unit_area, 0, 1)
+
+        self.lcd_widget = QResponsiveLCDNumber()
+        self.lcd_widget.setDigitCount(args.digit_count)
+        self.lcd_widget.doubleClicked.connect(self.start_edit)
+        self.number_area.addWidget(self.lcd_widget)
+
+        self.edit_widget = QCancellableLineEdit()
+        self.edit_widget.setValidator(QtGui.QDoubleValidator())
+        self.edit_widget.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
+        self.edit_widget.editCancelled.connect(self.cancel_edit)
+        self.edit_widget.returnPressed.connect(self.confirm_edit)
+        self.number_area.addWidget(self.edit_widget)
+
+        font = QtGui.QFont()
+        font.setPointSize(60)
+        self.edit_widget.setFont(font)
+
+        unit_font = QtGui.QFont()
+        unit_font.setPointSize(20)
+        self.unit_area.setFont(unit_font)
+
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def start_edit(self):
+        # QLCDNumber value property contains the value of zero
+        # if the displayed value is not a number.
+        self.edit_widget.setText(str(self.lcd_widget.value()))
+        self.edit_widget.selectAll()
+        self.edit_widget.setFocus()
+        self.number_area.setCurrentWidget(self.edit_widget)
+
+    def confirm_edit(self):
+        scale = scale_from_metadata(self.metadata)
+        val = float(self.edit_widget.text())
+        val *= scale
+        self.req.set_dataset(self.dataset_name, val, **self.metadata)
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def cancel_edit(self):
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            n = float(data[self.dataset_name]["value"])
+            self.metadata = metadata[self.dataset_name]
+            # This applet will degenerate other scalar types to native float on edit
+            # Use the dashboard ChangeEditDialog for consistent type casting
+            val = float(value[self.dataset_name])
+            scale = scale_from_metadata(self.metadata)
+            val /= scale
         except (KeyError, ValueError, TypeError):
-            n = "---"
-        self.display(n)
+            val = "---"
+
+        unit = self.metadata.get("unit", "")
+        self.unit_area.setText(unit)
+        self.lcd_widget.display(val)


 def main():

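The reworked widget above divides the displayed value by the scale derived from the dataset metadata (via ``scale_from_metadata``) and multiplies it back when an edited value is written through ``req.set_dataset``. A sketch of an experiment publishing a dataset with unit metadata for it to pick up; it assumes the ARTIQ-8 ``set_dataset`` metadata arguments described in the release notes, and the names are examples:

::

    from artiq.experiment import EnvExperiment, MHz


    class PublishDetuning(EnvExperiment):
        def build(self):
            pass

        def run(self):
            # stored in SI units; the applet shows "1.5" with "MHz" next to the LCD
            self.set_dataset("detuning", 1.5*MHz, unit="MHz", broadcast=True)
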
@@ -7,13 +7,13 @@ from artiq.applets.simple import SimpleApplet


 class Image(pyqtgraph.ImageView):
-    def __init__(self, args):
+    def __init__(self, args, req):
         pyqtgraph.ImageView.__init__(self)
         self.args = args

-    def data_changed(self, data, mods):
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            img = data[self.args.img]["value"]
+            img = value[self.args.img]
         except KeyError:
             return
         self.setImage(img)

@@ -8,20 +8,20 @@ from artiq.applets.simple import TitleApplet


 class HistogramPlot(pyqtgraph.PlotWidget):
-    def __init__(self, args):
+    def __init__(self, args, req):
         pyqtgraph.PlotWidget.__init__(self)
         self.args = args
         self.timer = QTimer()
         self.timer.setSingleShot(True)
         self.timer.timeout.connect(self.length_warning)

-    def data_changed(self, data, mods, title):
+    def data_changed(self, value, metadata, persist, mods, title):
         try:
-            y = data[self.args.y]["value"]
+            y = value[self.args.y]
             if self.args.x is None:
                 x = None
             else:
-                x = data[self.args.x]["value"]
+                x = value[self.args.x]
         except KeyError:
             return
         if x is None:

@@ -6,11 +6,10 @@ from PyQt5.QtCore import QTimer
 import pyqtgraph

 from artiq.applets.simple import TitleApplet
-from artiq.master.databases import make_dataset as empty_dataset


 class XYPlot(pyqtgraph.PlotWidget):
-    def __init__(self, args):
+    def __init__(self, args, req):
         pyqtgraph.PlotWidget.__init__(self)
         self.args = args
         self.timer = QTimer()
@@ -20,16 +19,16 @@ class XYPlot(pyqtgraph.PlotWidget):
             'Error bars': False,
             'Fit values': False}

-    def data_changed(self, data, mods, title):
+    def data_changed(self, value, metadata, persist, mods, title):
         try:
-            y = data[self.args.y]["value"]
+            y = value[self.args.y]
         except KeyError:
             return
-        x = data.get(self.args.x, empty_dataset())["value"]
+        x = value.get(self.args.x, (False, None))
         if x is None:
             x = np.arange(len(y))
-        error = data.get(self.args.error, empty_dataset())["value"]
-        fit = data.get(self.args.fit, empty_dataset())["value"]
+        error = value.get(self.args.error, (False, None))
+        fit = value.get(self.args.fit, (False, None))

         if not len(y) or len(y) != len(x):
             self.mismatch['X values'] = True

@@ -22,7 +22,7 @@ def _compute_ys(histogram_bins, histograms_counts):
 # pyqtgraph.GraphicsWindow fails to behave like a regular Qt widget
 # and breaks embedding. Do not use as top widget.
 class XYHistPlot(QtWidgets.QSplitter):
-    def __init__(self, args):
+    def __init__(self, args, req):
         QtWidgets.QSplitter.__init__(self)
         self.resize(1000, 600)
         self.setWindowTitle("XY/Histogram")
@@ -124,11 +124,11 @@ class XYHistPlot(QtWidgets.QSplitter):
             return False
         return True

-    def data_changed(self, data, mods):
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            xs = data[self.args.xs]["value"]
-            histogram_bins = data[self.args.histogram_bins]["value"]
-            histograms_counts = data[self.args.histograms_counts]["value"]
+            xs = value[self.args.xs]
+            histogram_bins = value[self.args.histogram_bins]
+            histograms_counts = value[self.args.histograms_counts]
         except KeyError:
             return
         if len(xs) != histograms_counts.shape[0]:

@@ -0,0 +1,34 @@
+#!/usr/bin/env python3
+
+from PyQt5 import QtWidgets
+
+from artiq.applets.simple import SimpleApplet
+
+
+class ProgressWidget(QtWidgets.QProgressBar):
+    def __init__(self, args, req):
+        QtWidgets.QProgressBar.__init__(self)
+        self.setMinimum(args.min)
+        self.setMaximum(args.max)
+        self.dataset_value = args.value
+
+    def data_changed(self, value, metadata, persist, mods):
+        try:
+            val = round(value[self.dataset_value])
+        except (KeyError, ValueError, TypeError):
+            val = 0
+        self.setValue(val)
+
+
+def main():
+    applet = SimpleApplet(ProgressWidget)
+    applet.add_dataset("value", "counter")
+    applet.argparser.add_argument("--min", type=int, default=0,
+                                  help="minimum (left) value of the bar")
+    applet.argparser.add_argument("--max", type=int, default=100,
+                                  help="maximum (right) value of the bar")
+    applet.run()
+
+
+if __name__ == "__main__":
+    main()

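The new applet maps a numeric dataset onto a progress bar between ``--min`` and ``--max``. A sketch of an experiment driving it; the dataset name ``counter`` is only an example, it is whatever is passed as the applet's ``value`` argument (e.g. ``${artiq_applet}progress_bar counter`` in a dashboard applet command):

::

    import time

    from artiq.experiment import EnvExperiment


    class ProgressDemo(EnvExperiment):
        def build(self):
            pass

        def run(self):
            for i in range(101):
                self.set_dataset("counter", i, broadcast=True)
                time.sleep(0.05)
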
@@ -7,14 +7,112 @@ import string
 from qasync import QEventLoop, QtWidgets, QtCore

 from sipyco.sync_struct import Subscriber, process_mod
+from sipyco.pc_rpc import AsyncioClient as RPCClient
 from sipyco import pyon
 from sipyco.pipe_ipc import AsyncioChildComm

-from artiq.master.databases import make_dataset as empty_dataset
+from artiq.language.scan import ScanObject


 logger = logging.getLogger(__name__)


+class _AppletRequestInterface:
+    def __init__(self):
+        raise NotImplementedError
+
+    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
+        """
+        Set a dataset.
+        See documentation of ``artiq.language.environment.set_dataset``.
+        """
+        raise NotImplementedError
+
+    def mutate_dataset(self, key, index, value):
+        """
+        Mutate a dataset.
+        See documentation of ``artiq.language.environment.mutate_dataset``.
+        """
+        raise NotImplementedError
+
+    def append_to_dataset(self, key, value):
+        """
+        Append to a dataset.
+        See documentation of ``artiq.language.environment.append_to_dataset``.
+        """
+        raise NotImplementedError
+
+    def set_argument_value(self, expurl, name, value):
+        """
+        Temporarily set the value of an argument in a experiment in the dashboard.
+        The value resets to default value when recomputing the argument.
+
+        :param expurl: Experiment URL identifying the experiment in the dashboard. Example: 'repo:ArgumentsDemo'.
+        :param name: Name of the argument in the experiment.
+        :param value: Object representing the new temporary value of the argument. For ``Scannable`` arguments, this parameter
+            should be a ``ScanObject``. The type of the ``ScanObject`` will be set as the selected type when this function is called.
+        """
+        raise NotImplementedError
+
+
+class AppletRequestIPC(_AppletRequestInterface):
+    def __init__(self, ipc):
+        self.ipc = ipc
+
+    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
+        metadata = {}
+        if unit is not None:
+            metadata["unit"] = unit
+        if scale is not None:
+            metadata["scale"] = scale
+        if precision is not None:
+            metadata["precision"] = precision
+        self.ipc.set_dataset(key, value, metadata, persist)
+
+    def mutate_dataset(self, key, index, value):
+        mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
+        self.ipc.update_dataset(mod)
+
+    def append_to_dataset(self, key, value):
+        mod = {"action": "append", "path": [key, 1], "x": value}
+        self.ipc.update_dataset(mod)
+
+    def set_argument_value(self, expurl, name, value):
+        if isinstance(value, ScanObject):
+            value = value.describe()
+        self.ipc.set_argument_value(expurl, name, value)
+
+
+class AppletRequestRPC(_AppletRequestInterface):
+    def __init__(self, loop, dataset_ctl):
+        self.loop = loop
+        self.dataset_ctl = dataset_ctl
+        self.background_tasks = set()
+
+    def _background(self, coro, *args, **kwargs):
+        task = self.loop.create_task(coro(*args, **kwargs))
+        self.background_tasks.add(task)
+        task.add_done_callback(self.background_tasks.discard)
+
+    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
+        metadata = {}
+        if unit is not None:
+            metadata["unit"] = unit
+        if scale is not None:
+            metadata["scale"] = scale
+        if precision is not None:
+            metadata["precision"] = precision
+        self._background(self.dataset_ctl.set, key, value, metadata=metadata, persist=persist)
+
+    def mutate_dataset(self, key, index, value):
+        mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
+        self._background(self.dataset_ctl.update, mod)
+
+    def append_to_dataset(self, key, value):
+        mod = {"action": "append", "path": [key, 1], "x": value}
+        self._background(self.dataset_ctl.update, mod)
+
+
 class AppletIPCClient(AsyncioChildComm):
     def set_close_cb(self, close_cb):
         self.close_cb = close_cb
@@ -65,12 +163,30 @@ class AppletIPCClient(AsyncioChildComm):
                          exc_info=True)
             self.close_cb()

-    def subscribe(self, datasets, init_cb, mod_cb):
+    def subscribe(self, datasets, init_cb, mod_cb, dataset_prefixes=[], *, loop):
         self.write_pyon({"action": "subscribe",
-                         "datasets": datasets})
+                         "datasets": datasets,
+                         "dataset_prefixes": dataset_prefixes})
         self.init_cb = init_cb
         self.mod_cb = mod_cb
-        asyncio.ensure_future(self.listen())
+        self.listen_task = loop.create_task(self.listen())
+
+    def set_dataset(self, key, value, metadata, persist=None):
+        self.write_pyon({"action": "set_dataset",
+                         "key": key,
+                         "value": value,
+                         "metadata": metadata,
+                         "persist": persist})
+
+    def update_dataset(self, mod):
+        self.write_pyon({"action": "update_dataset",
+                         "mod": mod})
+
+    def set_argument_value(self, expurl, name, value):
+        self.write_pyon({"action": "set_argument_value",
+                         "expurl": expurl,
+                         "name": name,
+                         "value": value})


 class SimpleApplet:
@@ -92,8 +208,11 @@ class SimpleApplet:
             "for dataset notifications "
             "(ignored in embedded mode)")
         group.add_argument(
-            "--port", default=3250, type=int,
-            help="TCP port to connect to")
+            "--port-notify", default=3250, type=int,
+            help="TCP port to connect to for notifications (ignored in embedded mode)")
+        group.add_argument(
+            "--port-control", default=3251, type=int,
+            help="TCP port to connect to for control (ignored in embedded mode)")

         self._arggroup_datasets = self.argparser.add_argument_group("datasets")

@@ -114,6 +233,9 @@
         self.embed = os.getenv("ARTIQ_APPLET_EMBED")
         self.datasets = {getattr(self.args, arg.replace("-", "_"))
                          for arg in self.dataset_args}
+        # Optional prefixes (dataset sub-trees) to match subscriptions against;
+        # currently only used by out-of-tree subclasses (ndscan).
+        self.dataset_prefixes = []

     def qasync_init(self):
         app = QtWidgets.QApplication([])
@@ -129,8 +251,21 @@
         if self.embed is not None:
             self.ipc.close()

+    def req_init(self):
+        if self.embed is None:
+            dataset_ctl = RPCClient()
+            self.loop.run_until_complete(dataset_ctl.connect_rpc(
+                self.args.server, self.args.port_control, "master_dataset_db"))
+            self.req = AppletRequestRPC(self.loop, dataset_ctl)
+        else:
+            self.req = AppletRequestIPC(self.ipc)
+
+    def req_close(self):
+        if self.embed is None:
+            self.req.dataset_ctl.close_rpc()
+
     def create_main_widget(self):
-        self.main_widget = self.main_widget_class(self.args)
+        self.main_widget = self.main_widget_class(self.args, self.req)
         if self.embed is not None:
             self.ipc.set_close_cb(self.main_widget.close)
         if os.name == "nt":
@@ -163,6 +298,14 @@
         self.data = data
         return data

+    def is_dataset_subscribed(self, key):
+        if key in self.datasets:
|
||||||
|
return True
|
||||||
|
for prefix in self.dataset_prefixes:
|
||||||
|
if key.startswith(prefix):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
def filter_mod(self, mod):
|
def filter_mod(self, mod):
|
||||||
if self.embed is not None:
|
if self.embed is not None:
|
||||||
# the parent already filters for us
|
# the parent already filters for us
|
||||||
|
@ -171,14 +314,19 @@ class SimpleApplet:
|
||||||
if mod["action"] == "init":
|
if mod["action"] == "init":
|
||||||
return True
|
return True
|
||||||
if mod["path"]:
|
if mod["path"]:
|
||||||
return mod["path"][0] in self.datasets
|
return self.is_dataset_subscribed(mod["path"][0])
|
||||||
elif mod["action"] in {"setitem", "delitem"}:
|
elif mod["action"] in {"setitem", "delitem"}:
|
||||||
return mod["key"] in self.datasets
|
return self.is_dataset_subscribed(mod["key"])
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
def emit_data_changed(self, data, mod_buffer):
|
def emit_data_changed(self, data, mod_buffer):
|
||||||
self.main_widget.data_changed(data, mod_buffer)
|
persist = dict()
|
||||||
|
value = dict()
|
||||||
|
metadata = dict()
|
||||||
|
for k, d in data.items():
|
||||||
|
persist[k], value[k], metadata[k] = d
|
||||||
|
self.main_widget.data_changed(value, metadata, persist, mod_buffer)
|
||||||
|
|
||||||
def flush_mod_buffer(self):
|
def flush_mod_buffer(self):
|
||||||
self.emit_data_changed(self.data, self.mod_buffer)
|
self.emit_data_changed(self.data, self.mod_buffer)
|
||||||
|
@ -193,7 +341,7 @@ class SimpleApplet:
|
||||||
self.mod_buffer.append(mod)
|
self.mod_buffer.append(mod)
|
||||||
else:
|
else:
|
||||||
self.mod_buffer = [mod]
|
self.mod_buffer = [mod]
|
||||||
asyncio.get_event_loop().call_later(self.args.update_delay,
|
self.loop.call_later(self.args.update_delay,
|
||||||
self.flush_mod_buffer)
|
self.flush_mod_buffer)
|
||||||
else:
|
else:
|
||||||
self.emit_data_changed(self.data, [mod])
|
self.emit_data_changed(self.data, [mod])
|
||||||
|
@ -203,9 +351,11 @@ class SimpleApplet:
|
||||||
self.subscriber = Subscriber("datasets",
|
self.subscriber = Subscriber("datasets",
|
||||||
self.sub_init, self.sub_mod)
|
self.sub_init, self.sub_mod)
|
||||||
self.loop.run_until_complete(self.subscriber.connect(
|
self.loop.run_until_complete(self.subscriber.connect(
|
||||||
self.args.server, self.args.port))
|
self.args.server, self.args.port_notify))
|
||||||
else:
|
else:
|
||||||
self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod)
|
self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod,
|
||||||
|
dataset_prefixes=self.dataset_prefixes,
|
||||||
|
loop=self.loop)
|
||||||
|
|
||||||
def unsubscribe(self):
|
def unsubscribe(self):
|
||||||
if self.embed is None:
|
if self.embed is None:
|
||||||
|
@ -216,6 +366,8 @@ class SimpleApplet:
|
||||||
self.qasync_init()
|
self.qasync_init()
|
||||||
try:
|
try:
|
||||||
self.ipc_init()
|
self.ipc_init()
|
||||||
|
try:
|
||||||
|
self.req_init()
|
||||||
try:
|
try:
|
||||||
self.create_main_widget()
|
self.create_main_widget()
|
||||||
self.subscribe()
|
self.subscribe()
|
||||||
|
@ -223,6 +375,8 @@ class SimpleApplet:
|
||||||
self.loop.run_forever()
|
self.loop.run_forever()
|
||||||
finally:
|
finally:
|
||||||
self.unsubscribe()
|
self.unsubscribe()
|
||||||
|
finally:
|
||||||
|
self.req_close()
|
||||||
finally:
|
finally:
|
||||||
self.ipc_close()
|
self.ipc_close()
|
||||||
finally:
|
finally:
|
||||||
|
@ -252,7 +406,7 @@ class TitleApplet(SimpleApplet):
|
||||||
|
|
||||||
def emit_data_changed(self, data, mod_buffer):
|
def emit_data_changed(self, data, mod_buffer):
|
||||||
if self.args.title is not None:
|
if self.args.title is not None:
|
||||||
title_values = {k.replace(".", "/"): data.get(k, empty_dataset())["value"]
|
title_values = {k.replace(".", "/"): data.get(k, (False, None))[1]
|
||||||
for k in self.dataset_title}
|
for k in self.dataset_title}
|
||||||
try:
|
try:
|
||||||
title = self.args.title.format(**title_values)
|
title = self.args.title.format(**title_values)
|
||||||
|
@ -261,4 +415,9 @@ class TitleApplet(SimpleApplet):
|
||||||
title = self.args.title
|
title = self.args.title
|
||||||
else:
|
else:
|
||||||
title = None
|
title = None
|
||||||
self.main_widget.data_changed(data, mod_buffer, title)
|
persist = dict()
|
||||||
|
value = dict()
|
||||||
|
metadata = dict()
|
||||||
|
for k, d in data.items():
|
||||||
|
persist[k], value[k], metadata[k] = d
|
||||||
|
self.main_widget.data_changed(value, metadata, persist, mod_buffer, title)
|
||||||
|
|
|
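Note (added commentary, not part of the diff): with the changes above, SimpleApplet now hands the main widget both the request interface and the dataset triples unpacked into separate dicts. A minimal sketch of a widget written against the new calling convention could look like the following; the widget name and the --dataset argument are hypothetical, and error handling is omitted.

from PyQt5 import QtWidgets

from artiq.applets.simple import SimpleApplet


class DemoWidget(QtWidgets.QLabel):
    def __init__(self, args, req):
        QtWidgets.QLabel.__init__(self)
        self.dataset_name = args.dataset  # filled in by SimpleApplet.add_dataset() below
        self.req = req  # AppletRequestIPC (embedded) or AppletRequestRPC (standalone)

    def data_changed(self, value, metadata, persist, mods):
        # value/metadata/persist are dicts keyed by dataset name,
        # built by emit_data_changed() from (persist, value, metadata) triples.
        self.setText(str(value.get(self.dataset_name)))


def main():
    applet = SimpleApplet(DemoWidget)
    applet.add_dataset("dataset", "dataset to display")
    applet.run()


if __name__ == "__main__":
    main()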
@@ -20,11 +20,46 @@ class Model(DictSyncTreeSepModel):
         DictSyncTreeSepModel.__init__(self, ".", ["Dataset", "Value"], init)

     def convert(self, k, v, column):
-        return short_format(v[1])
+        return short_format(v[1], v[2])


+class DatasetCtl:
+    def __init__(self, master_host, master_port):
+        self.master_host = master_host
+        self.master_port = master_port

+    async def _execute_rpc(self, op_name, key_or_mod, value=None, persist=None, metadata=None):
+        logger.info("Starting %s operation on %s", op_name, key_or_mod)
+        try:
+            remote = RPCClient()
+            await remote.connect_rpc(self.master_host, self.master_port,
+                                     "master_dataset_db")
+            try:
+                if op_name == "set":
+                    await remote.set(key_or_mod, value, persist, metadata)
+                elif op_name == "update":
+                    await remote.update(key_or_mod)
+                else:
+                    logger.error("Invalid operation: %s", op_name)
+                    return
+            finally:
+                remote.close_rpc()
+        except:
+            logger.error("Failed %s operation on %s", op_name,
+                         key_or_mod, exc_info=True)
+        else:
+            logger.info("Finished %s operation on %s", op_name,
+                        key_or_mod)

+    async def set(self, key, value, persist=None, metadata=None):
+        await self._execute_rpc("set", key, value, persist, metadata)

+    async def update(self, mod):
+        await self._execute_rpc("update", mod)


 class DatasetsDock(QtWidgets.QDockWidget):
-    def __init__(self, datasets_sub, master_host, master_port):
+    def __init__(self, dataset_sub, dataset_ctl):
         QtWidgets.QDockWidget.__init__(self, "Datasets")
         self.setObjectName("Datasets")
         self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -62,10 +97,9 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table.addAction(upload_action)

         self.set_model(Model(dict()))
-        datasets_sub.add_setmodel_callback(self.set_model)
+        dataset_sub.add_setmodel_callback(self.set_model)

-        self.master_host = master_host
-        self.master_port = master_port
+        self.dataset_ctl = dataset_ctl

     def _search_datasets(self):
         if hasattr(self, "table_model_filter"):

@@ -82,30 +116,14 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table_model_filter.setSourceModel(self.table_model)
         self.table.setModel(self.table_model_filter)

-    async def _upload_dataset(self, name, value,):
-        logger.info("Uploading dataset '%s' to master...", name)
-        try:
-            remote = RPCClient()
-            await remote.connect_rpc(self.master_host, self.master_port,
-                                     "master_dataset_db")
-            try:
-                await remote.set(name, value)
-            finally:
-                remote.close_rpc()
-        except:
-            logger.error("Failed uploading dataset '%s'",
-                         name, exc_info=True)
-        else:
-            logger.info("Finished uploading dataset '%s'", name)

     def upload_clicked(self):
         idx = self.table.selectedIndexes()
         if idx:
             idx = self.table_model_filter.mapToSource(idx[0])
             key = self.table_model.index_to_key(idx)
             if key is not None:
-                dataset = self.table_model.backing_store[key]
-                asyncio.ensure_future(self._upload_dataset(key, dataset["value"]))
+                persist, value, metadata = self.table_model.backing_store[key]
+                asyncio.ensure_future(self.dataset_ctl.set(key, value, metadata=metadata))

     def save_state(self):
         return bytes(self.table.header().saveState())

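Note (added commentary, not part of the diff): DatasetCtl wraps the master's master_dataset_db RPC target so that dock widgets can push values back to the master with a single awaitable call. A rough usage sketch, with illustrative keys and values, assuming a DatasetCtl instance built from the master host and control port:

    # dataset_ctl = DatasetCtl(master_host, master_port)
    await dataset_ctl.set("demo.brightness", 1.0, persist=True, metadata={"unit": "V"})
    # mod dicts follow the same shape as mutate_dataset() in the applet code above
    await dataset_ctl.update({"action": "setitem", "path": ["demo.trace", 1], "key": 0, "value": 42})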
@@ -10,22 +10,14 @@ import h5py
 from sipyco import pyon

 from artiq import __artiq_dir__ as artiq_dir
-from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
+from artiq.gui.tools import (LayoutWidget, WheelFilter,
+                             log_level_to_name, get_open_file_name)
 from artiq.gui.entries import procdesc_to_entry
 from artiq.master.worker import Worker, log_worker_exception

 logger = logging.getLogger(__name__)


-class _WheelFilter(QtCore.QObject):
-    def eventFilter(self, obj, event):
-        if (event.type() == QtCore.QEvent.Wheel and
-                event.modifiers() != QtCore.Qt.NoModifier):
-            event.ignore()
-            return True
-        return False


 class _ArgumentEditor(QtWidgets.QTreeWidget):
     def __init__(self, dock):
         QtWidgets.QTreeWidget.__init__(self)

@@ -46,7 +38,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
         self.setStyleSheet("QTreeWidget {background: " +
                            self.palette().midlight().color().name() + " ;}")

-        self.viewport().installEventFilter(_WheelFilter(self.viewport()))
+        self.viewport().installEventFilter(WheelFilter(self.viewport(), True))

         self._groups = dict()
         self._arg_to_widgets = dict()

@@ -378,9 +370,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):


 class LocalDatasetDB:
-    def __init__(self, datasets_sub):
-        self.datasets_sub = datasets_sub
-        datasets_sub.add_setmodel_callback(self.init)
+    def __init__(self, dataset_sub):
+        self.dataset_sub = dataset_sub
+        dataset_sub.add_setmodel_callback(self.init)

     def init(self, data):
         self._data = data

@@ -389,11 +381,11 @@ class LocalDatasetDB:
         return self._data.backing_store[key][1]

     def update(self, mod):
-        self.datasets_sub.update(mod)
+        self.dataset_sub.update(mod)


 class ExperimentsArea(QtWidgets.QMdiArea):
-    def __init__(self, root, datasets_sub):
+    def __init__(self, root, dataset_sub):
         QtWidgets.QMdiArea.__init__(self)
         self.pixmap = QtGui.QPixmap(os.path.join(
             artiq_dir, "gui", "logo_ver.svg"))

@@ -402,11 +394,11 @@ class ExperimentsArea(QtWidgets.QMdiArea):

         self.open_experiments = []

-        self._ddb = LocalDatasetDB(datasets_sub)
+        self._ddb = LocalDatasetDB(dataset_sub)

         self.worker_handlers = {
             "get_device_db": lambda: {},
-            "get_device": lambda k: {"type": "dummy"},
+            "get_device": lambda key, resolve_alias=False: {"type": "dummy"},
             "get_dataset": self._ddb.get,
             "update_dataset": self._ddb.update,
         }

@@ -516,5 +508,9 @@ class ExperimentsArea(QtWidgets.QMdiArea):
         self.open_experiments.append(dock)
         return dock

+    def set_argument_value(self, expurl, name, value):
+        logger.warning("Unable to set argument '%s', dropping change. "
+                       "'set_argument_value' not supported in browser.", name)

     def on_dock_closed(self, dock):
         self.open_experiments.remove(dock)

@@ -71,7 +71,7 @@ class ZoomIconView(QtWidgets.QListView):
         self._char_width = QtGui.QFontMetrics(self.font()).averageCharWidth()
         self.setViewMode(self.IconMode)
         w = self._char_width*self.default_size
-        self.setIconSize(QtCore.QSize(w, w*self.aspect))
+        self.setIconSize(QtCore.QSize(w, int(w*self.aspect)))
         self.setFlow(self.LeftToRight)
         self.setResizeMode(self.Adjust)
         self.setWrapping(True)

@@ -102,13 +102,14 @@ class Hdf5FileSystemModel(QtWidgets.QFileSystemModel):
             h5 = open_h5(info)
             if h5 is not None:
                 try:
-                    expid = pyon.decode(h5["expid"][()])
-                    start_time = datetime.fromtimestamp(h5["start_time"][()])
+                    expid = pyon.decode(h5["expid"][()]) if "expid" in h5 else dict()
+                    start_time = datetime.fromtimestamp(h5["start_time"][()]) if "start_time" in h5 else "<none>"
                     v = ("artiq_version: {}\nrepo_rev: {}\nfile: {}\n"
                          "class_name: {}\nrid: {}\nstart_time: {}").format(
-                             h5["artiq_version"][()], expid["repo_rev"],
-                             expid["file"], expid["class_name"],
-                             h5["rid"][()], start_time)
+                             h5["artiq_version"].asstr()[()] if "artiq_version" in h5 else "<none>",
+                             expid.get("repo_rev", "<none>"),
+                             expid.get("file", "<none>"), expid.get("class_name", "<none>"),
+                             h5["rid"][()] if "rid" in h5 else "<none>", start_time)
                     return v
                 except:
                     logger.warning("unable to read metadata from %s",

@@ -174,31 +175,45 @@ class FilesDock(QtWidgets.QDockWidget):
         logger.debug("loading datasets from %s", info.filePath())
         with f:
             try:
-                expid = pyon.decode(f["expid"][()])
-                start_time = datetime.fromtimestamp(f["start_time"][()])
+                expid = pyon.decode(f["expid"][()]) if "expid" in f else dict()
+                start_time = datetime.fromtimestamp(f["start_time"][()]) if "start_time" in f else "<none>"
                 v = {
-                    "artiq_version": f["artiq_version"][()],
-                    "repo_rev": expid["repo_rev"],
-                    "file": expid["file"],
-                    "class_name": expid["class_name"],
-                    "rid": f["rid"][()],
+                    "artiq_version": f["artiq_version"].asstr()[()] if "artiq_version" in f else "<none>",
+                    "repo_rev": expid.get("repo_rev", "<none>"),
+                    "file": expid.get("file", "<none>"),
+                    "class_name": expid.get("class_name", "<none>"),
+                    "rid": f["rid"][()] if "rid" in f else "<none>",
                     "start_time": start_time,
                 }
                 self.metadata_changed.emit(v)
             except:
                 logger.warning("unable to read metadata from %s",
                                info.filePath(), exc_info=True)
-            rd = dict()
+
+            rd = {}
             if "archive" in f:
-                rd = {k: (True, v[()]) for k, v in f["archive"].items()}
+                def visitor(k, v):
+                    if isinstance(v, h5py.Dataset):
+                        # v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
+                        # See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
+                        rd[k] = (True, v[()], dict(v.attrs))

+                f["archive"].visititems(visitor)

             if "datasets" in f:
-                for k, v in f["datasets"].items():
+                def visitor(k, v):
+                    if isinstance(v, h5py.Dataset):
                         if k in rd:
-                            logger.warning("dataset '%s' is both in archive and "
-                                           "outputs", k)
-                        rd[k] = (True, v[()])
-            if rd:
+                            logger.warning("dataset '%s' is both in archive "
+                                           "and outputs", k)
+                        # v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
+                        # See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
+                        rd[k] = (True, v[()], dict(v.attrs))

+                f["datasets"].visititems(visitor)

             self.datasets.init(rd)

         self.dataset_changed.emit(info.filePath())

     def list_activated(self, idx):

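Note (added commentary, not part of the diff): across the browser changes above, a dataset entry in the backing store is now a (persist, value, metadata) triple rather than the old (persist, value) pair, with the metadata recovered from the HDF5 attributes via dict(v.attrs). Code that indexes a backing store directly now unpacks three fields, roughly as follows (the key is illustrative):

    persist, value, metadata = backing_store["demo.trace"]
    print(value, metadata.get("unit", ""))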
@@ -59,7 +59,6 @@ def build_artiq_soc(soc, argdict):
     builder.software_packages = []
     builder.add_software_package("bootloader", os.path.join(firmware_dir, "bootloader"))
     is_kasli_v1 = isinstance(soc.platform, kasli.Platform) and soc.platform.hw_rev in ("v1.0", "v1.1")
-    if isinstance(soc, AMPSoC):
     kernel_cpu_type = "vexriscv" if is_kasli_v1 else "vexriscv-g"
     builder.add_software_package("libm", cpu_type=kernel_cpu_type)
     builder.add_software_package("libprintf", cpu_type=kernel_cpu_type)

@@ -69,9 +68,9 @@ def build_artiq_soc(soc, argdict):
     # If the kernel lacks FPU, then the runtime unwinder is already generated
     if not is_kasli_v1:
         builder.add_software_package("libunwind")
+    if not soc.config["DRTIO_ROLE"] == "satellite":
         builder.add_software_package("runtime", os.path.join(firmware_dir, "runtime"))
     else:
-        # Assume DRTIO satellite.
         builder.add_software_package("satman", os.path.join(firmware_dir, "satman"))
     try:
         builder.build()

@@ -21,13 +21,19 @@ class scoped(object):
         set of variables resolved as globals
     """

+class remote(object):
+    """
+    :ivar remote_fn: (bool) whether function is ran on a remote device,
+        meaning arguments are received remotely and return is sent remotely
+    """

 # Typed versions of untyped nodes
 class argT(ast.arg, commontyped):
     pass

 class ClassDefT(ast.ClassDef):
     _types = ("constructor_type",)
-class FunctionDefT(ast.FunctionDef, scoped):
+class FunctionDefT(ast.FunctionDef, scoped, remote):
     _types = ("signature_type",)
 class QuotedFunctionDefT(FunctionDefT):
     """

@@ -58,7 +64,7 @@ class BinOpT(ast.BinOp, commontyped):
     pass
 class BoolOpT(ast.BoolOp, commontyped):
     pass
-class CallT(ast.Call, commontyped):
+class CallT(ast.Call, commontyped, remote):
     """
     :ivar iodelay: (:class:`iodelay.Expr`)
     :ivar arg_exprs: (dict of str to :class:`iodelay.Expr`)

@@ -38,6 +38,9 @@ class TInt(types.TMono):
     def one():
         return 1

+def TInt8():
+    return TInt(types.TValue(8))

 def TInt32():
     return TInt(types.TValue(32))

@@ -123,18 +126,23 @@ class TException(types.TMono):
     # * File, line and column where it was raised (str, int, int).
     # * Message, which can contain substitutions {0}, {1} and {2} (str).
     # * Three 64-bit integers, parameterizing the message (numpy.int64).
+    # These attributes are prefixed with `#` so that users cannot access them,
+    # and we don't have to do string allocation in the runtime.
+    # #__name__ is now a string key in the host. TStr may not be an actual
+    # CSlice in the runtime, they might be a CSlice with length = i32::MAX and
+    # ptr = string key in the host.

     # Keep this in sync with the function ARTIQIRGenerator.alloc_exn.
     attributes = OrderedDict([
-        ("__name__", TStr()),
-        ("__file__", TStr()),
-        ("__line__", TInt32()),
-        ("__col__", TInt32()),
-        ("__func__", TStr()),
-        ("__message__", TStr()),
-        ("__param0__", TInt64()),
-        ("__param1__", TInt64()),
-        ("__param2__", TInt64()),
+        ("#__name__", TInt32()),
+        ("#__file__", TStr()),
+        ("#__line__", TInt32()),
+        ("#__col__", TInt32()),
+        ("#__func__", TStr()),
+        ("#__message__", TStr()),
+        ("#__param0__", TInt64()),
+        ("#__param1__", TInt64()),
+        ("#__param2__", TInt64()),
     ])

     def __init__(self, name="Exception", id=0):

@@ -169,7 +177,9 @@ def fn_list():
     return types.TConstructor(TList())

 def fn_array():
-    return types.TConstructor(TArray())
+    # numpy.array() is actually a "magic" macro that is expanded in-place, but
+    # just as for builtin functions, we do not want to quote it, etc.
+    return types.TBuiltinFunction("array")

 def fn_Exception():
     return types.TExceptionConstructor(TException("Exception"))

@@ -237,6 +247,12 @@ def fn_at_mu():
 def fn_rtio_log():
     return types.TBuiltinFunction("rtio_log")

+def fn_subkernel_await():
+    return types.TBuiltinFunction("subkernel_await")

+def fn_subkernel_preload():
+    return types.TBuiltinFunction("subkernel_preload")

 # Accessors

 def is_none(typ):

@@ -319,7 +335,7 @@ def get_iterable_elt(typ):
     # n-dimensional arrays, rather than the n-1 dimensional result of iterating over
     # the first axis, which makes the name a bit misleading.
     if is_str(typ) or is_bytes(typ) or is_bytearray(typ):
-        return TInt(types.TValue(8))
+        return TInt8()
     elif types._is_pointer(typ) or is_iterable(typ):
         return typ.find()["elt"].find()
     else:

@@ -335,5 +351,5 @@ def is_allocated(typ):
             is_float(typ) or is_range(typ) or
             types._is_pointer(typ) or types.is_function(typ) or
             types.is_external_function(typ) or types.is_rpc(typ) or
-            types.is_method(typ) or types.is_tuple(typ) or
-            types.is_value(typ))
+            types.is_subkernel(typ) or types.is_method(typ) or
+            types.is_tuple(typ) or types.is_value(typ))

@@ -5,6 +5,7 @@ the references to the host objects and translates the functions
 annotated as ``@kernel`` when they are referenced.
 """

+import typing
 import os, re, linecache, inspect, textwrap, types as pytypes, numpy
 from collections import OrderedDict, defaultdict

@@ -18,6 +19,13 @@ from . import types, builtins, asttyped, math_fns, prelude
 from .transforms import ASTTypedRewriter, Inferencer, IntMonomorphizer, TypedtreePrinter
 from .transforms.asttyped_rewriter import LocalExtractor

+try:
+    # From numpy=1.25.0 dispatching for `__array_function__` is done via
+    # a C wrapper: https://github.com/numpy/numpy/pull/23020
+    from numpy.core._multiarray_umath import _ArrayFunctionDispatcher
+except ImportError:
+    _ArrayFunctionDispatcher = None


 class SpecializedFunction:
     def __init__(self, instance_type, host_function):

@@ -45,8 +53,48 @@ class EmbeddingMap:
         self.object_forward_map = {}
         self.object_reverse_map = {}
         self.module_map = {}

+        # type_map connects the host Python `type` to the pair of associated
+        # `(TInstance, TConstructor)`s. The `used_…_names` sets cache the
+        # respective `.name`s for O(1) collision avoidance.
         self.type_map = {}
+        self.used_instance_type_names = set()
+        self.used_constructor_type_names = set()

         self.function_map = {}
+        self.str_forward_map = {}
+        self.str_reverse_map = {}

+        self.preallocate_runtime_exception_names(["RuntimeError",
+                                                  "RTIOUnderflow",
+                                                  "RTIOOverflow",
+                                                  "RTIODestinationUnreachable",
+                                                  "DMAError",
+                                                  "I2CError",
+                                                  "CacheError",
+                                                  "SPIError",
+                                                  "0:ZeroDivisionError",
+                                                  "0:IndexError",
+                                                  "UnwrapNoneError",
+                                                  "SubkernelError"])

+    def preallocate_runtime_exception_names(self, names):
+        for i, name in enumerate(names):
+            if ":" not in name:
+                name = "0:artiq.coredevice.exceptions." + name
+            exn_id = self.store_str(name)
+            assert exn_id == i

+    def store_str(self, s):
+        if s in self.str_forward_map:
+            return self.str_forward_map[s]
+        str_id = len(self.str_forward_map)
+        self.str_forward_map[s] = str_id
+        self.str_reverse_map[str_id] = s
+        return str_id

+    def retrieve_str(self, str_id):
+        return self.str_reverse_map[str_id]

     # Modules
     def store_module(self, module, module_type):

@@ -60,16 +108,6 @@ class EmbeddingMap:

     # Types
     def store_type(self, host_type, instance_type, constructor_type):
-        self._rename_type(instance_type)
-        self.type_map[host_type] = (instance_type, constructor_type)
-
-    def retrieve_type(self, host_type):
-        return self.type_map[host_type]
-
-    def has_type(self, host_type):
-        return host_type in self.type_map
-
-    def _rename_type(self, new_instance_type):
         # Generally, user-defined types that have exact same name (which is to say, classes
         # defined inside functions) do not pose a problem to the compiler. The two places which
         # cannot handle this are:

@@ -78,12 +116,29 @@ class EmbeddingMap:
         # Since handling #2 requires renaming on ARTIQ side anyway, it's more straightforward
         # to do it once when embedding (since non-embedded code cannot define classes in
         # functions). Also, easier to debug.
-        n = 0
-        for host_type in self.type_map:
-            instance_type, constructor_type = self.type_map[host_type]
-            if instance_type.name == new_instance_type.name:
-                n += 1
-                new_instance_type.name = "{}.{}".format(new_instance_type.name, n)
+        suffix = 0
+        new_instance_name = instance_type.name
+        new_constructor_name = constructor_type.name
+        while True:
+            if (new_instance_name not in self.used_instance_type_names
+                    and new_constructor_name not in self.used_constructor_type_names):
+                break
+            suffix += 1
+            new_instance_name = f"{instance_type.name}.{suffix}"
+            new_constructor_name = f"{constructor_type.name}.{suffix}"

+        self.used_instance_type_names.add(new_instance_name)
+        instance_type.name = new_instance_name
+        self.used_constructor_type_names.add(new_constructor_name)
+        constructor_type.name = new_constructor_name

+        self.type_map[host_type] = (instance_type, constructor_type)

+    def retrieve_type(self, host_type):
+        return self.type_map[host_type]

+    def has_type(self, host_type):
+        return host_type in self.type_map

     def attribute_count(self):
         count = 0

@@ -130,7 +185,22 @@ class EmbeddingMap:
             obj_typ, _ = self.type_map[type(obj_ref)]
             yield obj_id, obj_ref, obj_typ

+    def subkernels(self):
+        subkernels = {}
+        for k, v in self.object_forward_map.items():
+            if hasattr(v, "artiq_embedded"):
+                if v.artiq_embedded.destination is not None:
+                    subkernels[k] = v
+        return subkernels

     def has_rpc(self):
+        return any(filter(
+            lambda x: (inspect.isfunction(x) or inspect.ismethod(x)) and \
+                (not hasattr(x, "artiq_embedded") or x.artiq_embedded.destination is None),
+            self.object_forward_map.values()
+        ))

+    def has_rpc_or_subkernel(self):
         return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x),
                           self.object_forward_map.values()))

@@ -138,6 +208,7 @@
 class ASTSynthesizer:
     def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None):
         self.source = ""
+        self.source_last_new_line = 0
         self.source_buffer = source.Buffer(self.source, "<synthesized>")
         self.embedding_map = embedding_map
         self.value_map = value_map

@@ -156,6 +227,14 @@ class ASTSynthesizer:
         return source.Range(self.source_buffer, range_from, range_to,
                             expanded_from=self.expanded_from)

+    def _add_iterable(self, fragment):
+        # Since DILocation points on the beginning of the piece of source
+        # we don't care if the fragment's end will overflow LLVM's limit.
+        if len(self.source) - self.source_last_new_line >= 2**16:
+            fragment = "\\\n" + fragment
+            self.source_last_new_line = len(self.source) + 2
+        return self._add(fragment)

     def fast_quote_list(self, value):
         elts = [None] * len(value)
         is_T = False

@@ -214,7 +293,7 @@ class ASTSynthesizer:
             for index, elt in enumerate(value):
                 elts[index] = self.quote(elt)
                 if index < len(value) - 1:
-                    self._add(", ")
+                    self._add_iterable(", ")
         return elts

     def quote(self, value):

@@ -265,28 +344,28 @@ class ASTSynthesizer:
                                  loc=self._add(repr(value)))
         elif isinstance(value, str):
             return asttyped.StrT(s=value, ctx=None, type=builtins.TStr(),
-                                 loc=self._add(repr(value)))
+                                 loc=self._add_iterable(repr(value)))
         elif isinstance(value, bytes):
             return asttyped.StrT(s=value, ctx=None, type=builtins.TBytes(),
-                                 loc=self._add(repr(value)))
+                                 loc=self._add_iterable(repr(value)))
         elif isinstance(value, bytearray):
-            quote_loc = self._add('`')
-            repr_loc = self._add(repr(value))
-            unquote_loc = self._add('`')
+            quote_loc = self._add_iterable('`')
+            repr_loc = self._add_iterable(repr(value))
+            unquote_loc = self._add_iterable('`')
             loc = quote_loc.join(unquote_loc)

             return asttyped.QuoteT(value=value, type=builtins.TByteArray(), loc=loc)
         elif isinstance(value, list):
-            begin_loc = self._add("[")
+            begin_loc = self._add_iterable("[")
             elts = self.fast_quote_list(value)
-            end_loc = self._add("]")
+            end_loc = self._add_iterable("]")
             return asttyped.ListT(elts=elts, ctx=None, type=builtins.TList(),
                                   begin_loc=begin_loc, end_loc=end_loc,
                                   loc=begin_loc.join(end_loc))
         elif isinstance(value, tuple):
-            begin_loc = self._add("(")
+            begin_loc = self._add_iterable("(")
             elts = self.fast_quote_list(value)
-            end_loc = self._add(")")
+            end_loc = self._add_iterable(")")
             return asttyped.TupleT(elts=elts, ctx=None,
                                    type=types.TTuple([e.type for e in elts]),
                                    begin_loc=begin_loc, end_loc=end_loc,

@@ -296,7 +375,9 @@ class ASTSynthesizer:
         elif inspect.isfunction(value) or inspect.ismethod(value) or \
                 isinstance(value, pytypes.BuiltinFunctionType) or \
                 isinstance(value, SpecializedFunction) or \
-                isinstance(value, numpy.ufunc):
+                isinstance(value, numpy.ufunc) or \
+                (isinstance(value, _ArrayFunctionDispatcher) if
+                    _ArrayFunctionDispatcher is not None else False):
             if inspect.ismethod(value):
                 quoted_self = self.quote(value.__self__)
                 function_type = self.quote_function(value.__func__, self.expanded_from)

@@ -405,7 +486,7 @@ class ASTSynthesizer:
             return asttyped.QuoteT(value=value, type=instance_type,
                                    loc=loc)

-    def call(self, callee, args, kwargs, callback=None):
+    def call(self, callee, args, kwargs, callback=None, remote_fn=False):
         """
         Construct an AST fragment calling a function specified by
         an AST node `function_node`, with given arguments.

@@ -449,7 +530,7 @@ class ASTSynthesizer:
             starargs=None, kwargs=None,
             type=types.TVar(), iodelay=None, arg_exprs={},
             begin_loc=begin_loc, end_loc=end_loc, star_loc=None, dstar_loc=None,
-            loc=callee_node.loc.join(end_loc))
+            loc=callee_node.loc.join(end_loc), remote_fn=remote_fn)

         if callback is not None:
             node = asttyped.CallT(

@@ -484,7 +565,7 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
                            arg=node.arg, annotation=None,
                            arg_loc=node.arg_loc, colon_loc=node.colon_loc, loc=node.loc)

-    def visit_quoted_function(self, node, function):
+    def visit_quoted_function(self, node, function, remote_fn):
         extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
         extractor.visit(node)

@@ -501,11 +582,11 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
         node = asttyped.QuotedFunctionDefT(
             typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
             signature_type=types.TVar(), return_type=types.TVar(),
-            name=node.name, args=node.args, returns=node.returns,
+            name=node.name, args=node.args, returns=None,
             body=node.body, decorator_list=node.decorator_list,
             keyword_loc=node.keyword_loc, name_loc=node.name_loc,
             arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
-            loc=node.loc)
+            loc=node.loc, remote_fn=remote_fn)

         try:
             self.env_stack.append(node.typing_env)

@@ -713,7 +794,7 @@ class TypedtreeHasher(algorithm.Visitor):
         return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields))

class Stitcher:
|
class Stitcher:
|
||||||
def __init__(self, core, dmgr, engine=None, print_as_rpc=True):
|
def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]):
|
||||||
self.core = core
|
self.core = core
|
||||||
self.dmgr = dmgr
|
self.dmgr = dmgr
|
||||||
if engine is None:
|
if engine is None:
|
||||||
|
@ -739,11 +820,19 @@ class Stitcher:
|
||||||
self.value_map = defaultdict(lambda: [])
|
self.value_map = defaultdict(lambda: [])
|
||||||
self.definitely_changed = False
|
self.definitely_changed = False
|
||||||
|
|
||||||
|
self.destination = destination
|
||||||
|
self.first_call = True
|
||||||
|
# for non-annotated subkernels:
|
||||||
|
# main kernel inferencer output with types of arguments
|
||||||
|
self.subkernel_arg_types = subkernel_arg_types
|
||||||
|
|
||||||
def stitch_call(self, function, args, kwargs, callback=None):
|
def stitch_call(self, function, args, kwargs, callback=None):
|
||||||
# We synthesize source code for the initial call so that
|
# We synthesize source code for the initial call so that
|
||||||
# diagnostics would have something meaningful to display to the user.
|
# diagnostics would have something meaningful to display to the user.
|
||||||
synthesizer = self._synthesizer(self._function_loc(function.artiq_embedded.function))
|
synthesizer = self._synthesizer(self._function_loc(function.artiq_embedded.function))
|
||||||
call_node = synthesizer.call(function, args, kwargs, callback)
|
# first call of a subkernel will get its arguments from remote (DRTIO)
|
||||||
|
remote_fn = self.destination != 0
|
||||||
|
call_node = synthesizer.call(function, args, kwargs, callback, remote_fn=remote_fn)
|
||||||
synthesizer.finalize()
|
synthesizer.finalize()
|
||||||
self.typedtree.append(call_node)
|
self.typedtree.append(call_node)
|
||||||
|
|
||||||
|
@ -855,6 +944,10 @@ class Stitcher:
|
||||||
return [diagnostic.Diagnostic("note",
|
return [diagnostic.Diagnostic("note",
|
||||||
"in kernel function here", {},
|
"in kernel function here", {},
|
||||||
call_loc)]
|
call_loc)]
|
||||||
|
elif fn_kind == 'subkernel':
|
||||||
|
return [diagnostic.Diagnostic("note",
|
||||||
|
"in subkernel call here", {},
|
||||||
|
call_loc)]
|
||||||
else:
|
else:
|
||||||
assert False
|
assert False
|
||||||
else:
|
else:
|
||||||
|
@ -874,7 +967,7 @@ class Stitcher:
|
||||||
self._function_loc(function),
|
self._function_loc(function),
|
||||||
notes=self._call_site_note(loc, fn_kind))
|
notes=self._call_site_note(loc, fn_kind))
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
elif fn_kind == 'rpc' and param.default is not inspect.Parameter.empty:
|
elif fn_kind == 'rpc' or fn_kind == 'subkernel' and param.default is not inspect.Parameter.empty:
|
||||||
notes = []
|
notes = []
|
||||||
notes.append(diagnostic.Diagnostic("note",
|
notes.append(diagnostic.Diagnostic("note",
|
||||||
"expanded from here while trying to infer a type for an"
|
"expanded from here while trying to infer a type for an"
|
||||||
|
@ -893,11 +986,18 @@ class Stitcher:
|
||||||
Inferencer(engine=self.engine).visit(ast)
|
Inferencer(engine=self.engine).visit(ast)
|
||||||
IntMonomorphizer(engine=self.engine).visit(ast)
|
IntMonomorphizer(engine=self.engine).visit(ast)
|
||||||
return ast.type
|
return ast.type
|
||||||
else:
|
elif fn_kind == 'kernel' and self.first_call and self.destination != 0:
|
||||||
|
# subkernels do not have access to the main kernel code to infer
|
||||||
|
# arg types - so these are cached and passed onto subkernel
|
||||||
|
# compilation, to avoid having to annotate them fully
|
||||||
|
for name, typ in self.subkernel_arg_types:
|
||||||
|
if param.name == name:
|
||||||
|
return typ
|
||||||
|
|
||||||
# Let the rest of the program decide.
|
# Let the rest of the program decide.
|
||||||
return types.TVar()
|
return types.TVar()
|
||||||
|
|
||||||
def _quote_embedded_function(self, function, flags):
|
def _quote_embedded_function(self, function, flags, remote_fn=False):
|
||||||
# we are now parsing new functions... definitely changed the type
|
# we are now parsing new functions... definitely changed the type
|
||||||
self.definitely_changed = True
|
self.definitely_changed = True
|
||||||
|
|
||||||
|
@ -996,7 +1096,7 @@ class Stitcher:
|
||||||
engine=self.engine, prelude=self.prelude,
|
engine=self.engine, prelude=self.prelude,
|
||||||
globals=self.globals, host_environment=host_environment,
|
globals=self.globals, host_environment=host_environment,
|
||||||
quote=self._quote)
|
quote=self._quote)
|
||||||
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function)
|
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function, remote_fn)
|
||||||
function_node.flags = flags
|
function_node.flags = flags
|
||||||
|
|
||||||
# Add it into our typedtree so that it gets inferenced and codegen'd.
|
# Add it into our typedtree so that it gets inferenced and codegen'd.
|
||||||
|
@ -1008,9 +1108,6 @@ class Stitcher:
|
||||||
return function_node
|
return function_node
|
||||||
|
|
||||||
def _extract_annot(self, function, annot, kind, call_loc, fn_kind):
|
def _extract_annot(self, function, annot, kind, call_loc, fn_kind):
|
||||||
if annot is None:
|
|
||||||
annot = builtins.TNone()
|
|
||||||
|
|
||||||
if isinstance(function, SpecializedFunction):
|
if isinstance(function, SpecializedFunction):
|
||||||
host_function = function.host_function
|
host_function = function.host_function
|
||||||
else:
|
else:
|
||||||
|
@ -1024,9 +1121,20 @@ class Stitcher:
|
||||||
if isinstance(embedded_function, str):
|
if isinstance(embedded_function, str):
|
||||||
embedded_function = host_function
|
embedded_function = host_function
|
||||||
|
|
||||||
|
return self._to_artiq_type(
|
||||||
|
annot,
|
||||||
|
function=function,
|
||||||
|
kind=kind,
|
||||||
|
eval_in_scope=lambda x: eval(x, embedded_function.__globals__),
|
||||||
|
call_loc=call_loc,
|
||||||
|
fn_kind=fn_kind)
|
||||||
|
|
||||||
|
def _to_artiq_type(
|
||||||
|
self, annot, *, function, kind: str, eval_in_scope, call_loc: str, fn_kind: str
|
||||||
|
) -> types.Type:
|
||||||
if isinstance(annot, str):
|
if isinstance(annot, str):
|
||||||
try:
|
try:
|
||||||
annot = eval(annot, embedded_function.__globals__)
|
annot = eval_in_scope(annot)
|
||||||
except Exception:
|
except Exception:
|
||||||
diag = diagnostic.Diagnostic(
|
diag = diagnostic.Diagnostic(
|
||||||
"error",
|
"error",
|
||||||
|
@ -1036,23 +1144,72 @@ class Stitcher:
|
||||||
notes=self._call_site_note(call_loc, fn_kind))
|
notes=self._call_site_note(call_loc, fn_kind))
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
|
|
||||||
if not isinstance(annot, types.Type):
|
if isinstance(annot, types.Type):
|
||||||
|
return annot
|
||||||
|
|
||||||
|
# Convert built-in Python types to ARTIQ ones.
|
||||||
|
if annot is None:
|
||||||
|
return builtins.TNone()
|
||||||
|
elif annot is numpy.int64:
|
||||||
|
return builtins.TInt64()
|
||||||
|
elif annot is numpy.int32:
|
||||||
|
return builtins.TInt32()
|
||||||
|
elif annot is float:
|
||||||
|
return builtins.TFloat()
|
||||||
|
elif annot is bool:
|
||||||
|
return builtins.TBool()
|
||||||
|
elif annot is str:
|
||||||
|
return builtins.TStr()
|
||||||
|
elif annot is bytes:
|
||||||
|
return builtins.TBytes()
|
||||||
|
elif annot is bytearray:
|
||||||
|
return builtins.TByteArray()
|
||||||
|
|
||||||
|
# Convert generic Python types to ARTIQ ones.
|
||||||
|
generic_ty = typing.get_origin(annot)
|
||||||
|
if generic_ty is not None:
|
||||||
|
type_args = typing.get_args(annot)
|
||||||
|
artiq_args = [
|
||||||
|
self._to_artiq_type(
|
||||||
|
x,
|
||||||
|
function=function,
|
||||||
|
kind=kind,
|
||||||
|
eval_in_scope=eval_in_scope,
|
||||||
|
call_loc=call_loc,
|
||||||
|
fn_kind=fn_kind)
|
||||||
|
for x in type_args
|
||||||
|
]
|
||||||
|
|
||||||
|
if generic_ty is list and len(artiq_args) == 1:
|
||||||
|
return builtins.TList(artiq_args[0])
|
||||||
|
elif generic_ty is tuple:
|
||||||
|
return types.TTuple(artiq_args)
|
||||||
|
|
||||||
|
# Otherwise report an unknown type and just use a fresh tyvar.
|
||||||
|
|
||||||
|
if annot is int:
|
||||||
|
message = (
|
||||||
|
"type annotation for {kind}, 'int' cannot be used as an ARTIQ type. "
|
||||||
|
"Use numpy's int32 or int64 instead."
|
||||||
|
)
|
||||||
|
ty = builtins.TInt()
|
||||||
|
else:
|
||||||
|
message = "type annotation for {kind}, '{annot}', is not an ARTIQ type"
|
||||||
|
ty = types.TVar()
|
||||||
|
|
||||||
diag = diagnostic.Diagnostic("error",
|
diag = diagnostic.Diagnostic("error",
|
||||||
"type annotation for {kind}, '{annot}', is not an ARTIQ type",
|
message,
|
||||||
{"kind": kind, "annot": repr(annot)},
|
{"kind": kind, "annot": repr(annot)},
|
||||||
self._function_loc(function),
|
self._function_loc(function),
|
||||||
notes=self._call_site_note(call_loc, fn_kind))
|
notes=self._call_site_note(call_loc, fn_kind))
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
|
|
||||||
return types.TVar()
|
return ty
|
||||||
else:
|
|
||||||
return annot
|
|
||||||
|
|
||||||
def _quote_syscall(self, function, loc):
|
def _quote_syscall(self, function, loc):
|
||||||
signature = inspect.signature(function)
|
signature = inspect.signature(function)
|
||||||
|
|
||||||
arg_types = OrderedDict()
|
arg_types = OrderedDict()
|
||||||
optarg_types = OrderedDict()
|
|
||||||
for param in signature.parameters.values():
|
for param in signature.parameters.values():
|
||||||
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
||||||
diag = diagnostic.Diagnostic("error",
|
diag = diagnostic.Diagnostic("error",
|
||||||
|
@ -1090,6 +1247,40 @@ class Stitcher:
|
||||||
self.functions[function] = function_type
|
self.functions[function] = function_type
|
||||||
return function_type
|
return function_type
|
||||||
|
|
||||||
|
+    def _quote_subkernel(self, function, loc):
+        if isinstance(function, SpecializedFunction):
+            host_function = function.host_function
+        else:
+            host_function = function
+        ret_type = builtins.TNone()
+        signature = inspect.signature(host_function)
+
+        if signature.return_annotation is not inspect.Signature.empty:
+            ret_type = self._extract_annot(host_function, signature.return_annotation,
+                                           "return type", loc, fn_kind='subkernel')
+        arg_types = OrderedDict()
+        optarg_types = OrderedDict()
+        for param in signature.parameters.values():
+            if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
+                diag = diagnostic.Diagnostic("error",
+                    "subkernels must only use positional arguments; '{argument}' isn't",
+                    {"argument": param.name},
+                    self._function_loc(function),
+                    notes=self._call_site_note(loc, fn_kind='subkernel'))
+                self.engine.process(diag)
+
+            arg_type = self._type_of_param(function, loc, param, fn_kind='subkernel')
+            if param.default is inspect.Parameter.empty:
+                arg_types[param.name] = arg_type
+            else:
+                optarg_types[param.name] = arg_type
+
+        function_type = types.TSubkernel(arg_types, optarg_types, ret_type,
+                                         sid=self.embedding_map.store_object(host_function),
+                                         destination=host_function.artiq_embedded.destination)
+        self.functions[function] = function_type
+        return function_type
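A hedged sketch of what a subkernel definition could look like on the experiment side, matching the constraints enforced above (positional arguments only, a typed return annotation, a destination); the decorator and import names are assumptions based on the prelude and decorator changes elsewhere in this diff, not a documented API:

from artiq.experiment import *

class SubkernelDemo(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @subkernel(destination=1)
    def measure(self, gate_time_mu: TInt64) -> TInt32:
        # runs on the satellite core device selected by `destination`
        return 0

    @kernel
    def run(self):
        self.measure(1000)                  # starts the subkernel remotely
        n = subkernel_await(self.measure)   # later retrieves its return value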
     def _quote_rpc(self, function, loc):
         if isinstance(function, SpecializedFunction):
             host_function = function.host_function

@@ -1149,8 +1340,18 @@ class Stitcher:
             (host_function.artiq_embedded.core_name is None and
              host_function.artiq_embedded.portable is False and
              host_function.artiq_embedded.syscall is None and
+             host_function.artiq_embedded.destination is None and
              host_function.artiq_embedded.forbidden is False):
             self._quote_rpc(function, loc)
+        elif host_function.artiq_embedded.destination is not None and \
+                host_function.artiq_embedded.destination != self.destination:
+            # treat subkernels as kernels if running on the same device
+            if not 0 < host_function.artiq_embedded.destination <= 255:
+                diag = diagnostic.Diagnostic("error",
+                    "subkernel destination must be between 1 and 255 (inclusive)", {},
+                    self._function_loc(host_function))
+                self.engine.process(diag)
+            self._quote_subkernel(function, loc)
         elif host_function.artiq_embedded.function is not None:
             if host_function.__name__ == "<lambda>":
                 note = diagnostic.Diagnostic("note",

@@ -1174,8 +1375,13 @@ class Stitcher:
                     notes=[note])
                 self.engine.process(diag)

+            destination = host_function.artiq_embedded.destination
+            # remote_fn only for first call in subkernels
+            remote_fn = destination is not None and self.first_call
             self._quote_embedded_function(function,
-                flags=host_function.artiq_embedded.flags)
+                flags=host_function.artiq_embedded.flags,
+                remote_fn=remote_fn)
+            self.first_call = False
         elif host_function.artiq_embedded.syscall is not None:
             # Insert a storage-less global whose type instructs the compiler
             # to perform a system call instead of a regular call.
@@ -135,6 +135,7 @@ class NamedValue(Value):
     def __init__(self, typ, name):
         super().__init__(typ)
         self.name, self.function = name, None
+        self.is_removed = False

     def set_name(self, new_name):
         if self.function is not None:

@@ -235,7 +236,7 @@ class Instruction(User):
         self.drop_references()
         # Check this after drop_references in case this
         # is a self-referencing phi.
-        assert not any(self.uses)
+        assert all(use.is_removed for use in self.uses)

     def replace_with(self, value):
         self.replace_all_uses_with(value)

@@ -370,7 +371,7 @@ class BasicBlock(NamedValue):
         self.remove_from_parent()
         # Check this after erasing instructions in case the block
         # loops into itself.
-        assert not any(self.uses)
+        assert all(use.is_removed for use in self.uses)

     def prepend(self, insn):
         assert isinstance(insn, Instruction)
@@ -705,6 +706,81 @@ class SetLocal(Instruction):
     def value(self):
         return self.operands[1]

+class GetArgFromRemote(Instruction):
+    """
+    An instruction that receives function arguments from remote
+    (i.e. a subkernel in DRTIO context)
+
+    :ivar arg_name: (string) argument name
+    :ivar arg_type: argument type
+    """
+
+    """
+    :param arg_name: (string) argument name
+    :param arg_type: argument type
+    """
+    def __init__(self, arg_name, arg_type, name=""):
+        assert isinstance(arg_name, str)
+        super().__init__([], arg_type, name)
+        self.arg_name = arg_name
+        self.arg_type = arg_type
+
+    def copy(self, mapper):
+        self_copy = super().copy(mapper)
+        self_copy.arg_name = self.arg_name
+        self_copy.arg_type = self.arg_type
+        return self_copy
+
+    def opcode(self):
+        return "getargfromremote({})".format(repr(self.arg_name))
+
+class GetOptArgFromRemote(GetArgFromRemote):
+    """
+    An instruction that may or may not retrieve an optional function argument
+    from remote, depending on the number of values received by firmware.
+
+    :ivar rcv_count: number of received values, determined by firmware
+    :ivar index: (integer) index of the current argument, in reference to remote arguments
+    """
+
+    """
+    :param rcv_count: number of received values
+    :param index: (integer) index of the current argument, in reference to remote arguments
+    """
+    def __init__(self, arg_name, arg_type, rcv_count, index, name=""):
+        super().__init__(arg_name, arg_type, name)
+        self.rcv_count = rcv_count
+        self.index = index
+
+    def copy(self, mapper):
+        self_copy = super().copy(mapper)
+        self_copy.rcv_count = self.rcv_count
+        self_copy.index = self.index
+        return self_copy
+
+    def opcode(self):
+        return "getoptargfromremote({})".format(repr(self.arg_name))
+
+class SubkernelAwaitArgs(Instruction):
+    """
+    A builtin instruction that takes min and max received messages as operands,
+    and a list of received types.
+
+    :ivar arg_types: (list of types) types of passed arguments (including optional)
+    """
+
+    """
+    :param arg_types: (list of types) types of passed arguments (including optional)
+    """
+    def __init__(self, operands, arg_types, name=None):
+        assert isinstance(arg_types, list)
+        self.arg_types = arg_types
+        super().__init__(operands, builtins.TNone(), name)
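As a rough illustration of how these three instructions fit together, the sketch below builds (but does not append to any basic block) the nodes a lowering pass might emit for a subkernel entry point that receives one required argument `x` and one optional argument `y` over DRTIO. It mirrors the `_user_call` changes later in this diff; the module paths and `TInt8`/`TInt32` helpers are taken from this fork's compiler and are assumptions if used outside it:

from artiq.compiler import ir, builtins

min_args = ir.Constant(1, builtins.TInt8())   # `x` must still arrive
max_args = ir.Constant(2, builtins.TInt8())   # `y` may or may not arrive
rcvd_count = ir.SubkernelAwaitArgs([min_args, max_args],
                                   [builtins.TInt32(), builtins.TInt32()], name="")
x = ir.GetArgFromRemote("x", builtins.TInt32(), name="ARG.x")
y = ir.GetOptArgFromRemote("y", builtins.TInt32(), rcvd_count,
                           ir.Constant(1, builtins.TInt8()), name="ARG.y")
print(x.opcode(), y.opcode())  # getargfromremote('x') getoptargfromremote('y')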
 class GetAttr(Instruction):
     """
     An intruction that loads an attribute from an object,

@@ -727,7 +803,7 @@ class GetAttr(Instruction):
             typ = obj.type.attributes[attr]
         else:
             typ = obj.type.constructor.attributes[attr]
-        if types.is_function(typ) or types.is_rpc(typ):
+        if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
             typ = types.TMethod(obj.type, typ)
         super().__init__([obj], typ, name)
         self.attr = attr

@@ -1189,14 +1265,18 @@ class IndirectBranch(Terminator):
 class Return(Terminator):
     """
     A return instruction.
+
+    :param remote_return: (bool)
+        marks a return in subkernel context,
+        where the return value is sent back through DRTIO
     """

     """
     :param value: (:class:`Value`) return value
     """
-    def __init__(self, value, name=""):
+    def __init__(self, value, remote_return=False, name=""):
         assert isinstance(value, Value)
         super().__init__([value], builtins.TNone(), name)
+        self.remote_return = remote_return

     def opcode(self):
         return "return"

@@ -1245,9 +1325,9 @@ class Raise(Terminator):
         if len(self.operands) > 1:
             return self.operands[1]

-class Reraise(Terminator):
+class Resume(Terminator):
     """
-    A reraise instruction.
+    A resume instruction.
     """

     """

@@ -1261,7 +1341,7 @@ class Reraise(Terminator):
         super().__init__(operands, builtins.TNone(), name)

     def opcode(self):
-        return "reraise"
+        return "resume"

     def exception_target(self):
         if len(self.operands) > 0:

@@ -1360,14 +1440,6 @@ class LandingPad(Terminator):
     def cleanup(self):
         return self.operands[0]

-    def erase(self):
-        self.remove_from_parent()
-        # we should erase all clauses as well
-        for block in set(self.operands):
-            block.uses.remove(self)
-            block.erase()
-        assert not any(self.uses)
-
     def clauses(self):
         return zip(self.operands[1:], self.types)
@@ -33,9 +33,19 @@ SECTIONS
     KEEP(*(.eh_frame_hdr))
   } : text : eh_frame

+  .got :
+  {
+    *(.got)
+  } : text
+
+  .got.plt :
+  {
+    *(.got.plt)
+  } : text
+
   .data :
   {
-    *(.data)
+    *(.data .data.*)
   } : data

   .dynamic :

@@ -51,6 +61,10 @@ SECTIONS
     _end = .;
   }

+  /* Kernel stack grows downward from end of memory, so put guard page after
+   * all the program contents. Note: This requires all loaded sections (at
+   * least those accessed) to be explicitly listed in the above!
+   */
   . = ALIGN(0x1000);
   _sstack_guard = .;
 }
@@ -10,7 +10,7 @@ string and infers types for it using a trivial :module:`prelude`.

 import os
 from pythonparser import source, diagnostic, parse_buffer
-from . import prelude, types, transforms, analyses, validators
+from . import prelude, types, transforms, analyses, validators, embedding

 class Source:
     def __init__(self, source_buffer, engine=None):

@@ -18,7 +18,7 @@ class Source:
             self.engine = diagnostic.Engine(all_errors_are_fatal=True)
         else:
             self.engine = engine
-        self.embedding_map = None
+        self.embedding_map = embedding.EmbeddingMap()
         self.name, _ = os.path.splitext(os.path.basename(source_buffer.name))

         asttyped_rewriter = transforms.ASTTypedRewriter(engine=engine,

@@ -57,7 +57,8 @@ class Module:
         constness_validator = validators.ConstnessValidator(engine=self.engine)
         artiq_ir_generator = transforms.ARTIQIRGenerator(engine=self.engine,
                                                          module_name=src.name,
-                                                         ref_period=ref_period)
+                                                         ref_period=ref_period,
+                                                         embedding_map=self.embedding_map)
         dead_code_eliminator = transforms.DeadCodeEliminator(engine=self.engine)
         local_access_validator = validators.LocalAccessValidator(engine=self.engine)
         local_demoter = transforms.LocalDemoter()

@@ -83,6 +84,8 @@ class Module:
         constant_hoister.process(self.artiq_ir)
         if remarks:
             invariant_detection.process(self.artiq_ir)
+        # for subkernels: main kernel inferencer output, to be passed to further compilations
+        self.subkernel_arg_types = inferencer.subkernel_arg_types

     def build_llvm_ir(self, target):
         """Compile the module to LLVM IR for the specified target."""
@@ -37,6 +37,7 @@ def globals():

         # ARTIQ decorators
         "kernel": builtins.fn_kernel(),
+        "subkernel": builtins.fn_kernel(),
         "portable": builtins.fn_kernel(),
         "rpc": builtins.fn_kernel(),

@@ -54,4 +55,8 @@ def globals():
         # ARTIQ utility functions
         "rtio_log": builtins.fn_rtio_log(),
         "core_log": builtins.fn_print(),
+
+        # ARTIQ subkernel utility functions
+        "subkernel_await": builtins.fn_subkernel_await(),
+        "subkernel_preload": builtins.fn_subkernel_preload(),
     }
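A hedged usage sketch for the two new prelude entries; this is kernel code, so it only type-checks inside an ARTIQ experiment, and the device and method names are invented:

@kernel
def run(self):
    subkernel_preload(self.measure)   # load the subkernel on its satellite ahead of time
    self.measure(100)                 # kick it off
    # ... other RTIO work on the master while the satellite runs ...
    result = subkernel_await(self.measure)   # block until it finishes and fetch the result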
@@ -74,6 +74,8 @@ class Target:
        LLVM target data layout, e.g. ``"E-m:e-p:32:32-i64:32-f64:32-v64:32-v128:32-a:0:32-n32"``
    :var features: (list of string)
        LLVM target CPU features, e.g. ``["mul", "div", "ffl1"]``
+   :var additional_linker_options: (list of string)
+       Linker options for the target in addition to the target-independent ones, e.g. ``["--target2=rel"]``
    :var print_function: (string)
        Name of a formatted print functions (with the signature of ``printf``)
        provided by the target, e.g. ``"printf"``.

@@ -83,16 +85,18 @@ class Target:
    triple = "unknown"
    data_layout = ""
    features = []
+   additional_linker_options = []
    print_function = "printf"
    now_pinning = True

    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
-   tool_addr2line = "llvm-addr2line"
+   tool_symbolizer = "llvm-symbolizer"
    tool_cxxfilt = "llvm-cxxfilt"

-   def __init__(self):
+   def __init__(self, subkernel_id=None):
        self.llcontext = ll.Context()
+       self.subkernel_id = subkernel_id

    def target_machine(self):
        lltarget = llvm.Target.from_triple(self.triple)
@@ -145,7 +149,8 @@ class Target:
            ir.BasicBlock._dump_loc = False

        type_printer = types.TypePrinter()
-       _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt",
+       suffix = "_subkernel_{}".format(self.subkernel_id) if self.subkernel_id is not None else ""
+       _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", suffix + ".txt",
            lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir))

        llmod = module.build_llvm_ir(self)

@@ -157,12 +162,12 @@ class Target:
            _dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod))
            raise

-       _dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", "_unopt.ll",
+       _dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", suffix + "_unopt.ll",
            lambda: str(llparsedmod))

        self.optimize(llparsedmod)

-       _dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", ".ll",
+       _dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", suffix + ".ll",
            lambda: str(llparsedmod))

        return llparsedmod

@@ -181,6 +186,7 @@ class Target:
    def link(self, objects):
        """Link the relocatable objects into a shared library for this target."""
        with RunTool([self.tool_ld, "-shared", "--eh-frame-hdr"] +
+                    self.additional_linker_options +
                     ["-T" + os.path.join(os.path.dirname(__file__), "kernel.ld")] +
                     ["{{obj{}}}".format(index) for index in range(len(objects))] +
                     ["-x"] +
@@ -212,9 +218,10 @@ class Target:
        # just after the call. Offset them back to get an address somewhere
        # inside the call instruction (or its delay slot), since that's what
        # the backtrace entry should point at.
+       last_inlined = None
        offset_addresses = [hex(addr - 1) for addr in addresses]
-       with RunTool([self.tool_addr2line, "--addresses", "--functions", "--inlines",
-                     "--demangle", "--exe={library}"] + offset_addresses,
+       with RunTool([self.tool_symbolizer, "--addresses", "--functions", "--inlines",
+                     "--demangle", "--output-style=GNU", "--exe={library}"] + offset_addresses,
                     library=library) \
                as results:
            lines = iter(results["__stdout__"].read().rstrip().split("\n"))

@@ -227,9 +234,11 @@ class Target:
                if address_or_function[:2] == "0x":
                    address = int(address_or_function[2:], 16) + 1 # remove offset
                    function = next(lines)
+                   inlined = False
                else:
                    address = backtrace[-1][4] # inlined
                    function = address_or_function
+                   inlined = True
                location = next(lines)

                filename, line = location.rsplit(":", 1)

@@ -240,10 +249,17 @@ class Target:
                else:
                    line = int(line)
                # can't get column out of addr2line D:
-               backtrace.append((filename, line, -1, function, address))
+               if inlined:
+                   last_inlined.append((filename, line, -1, function, address))
+               else:
+                   last_inlined = []
+                   backtrace.append((filename, line, -1, function, address,
+                                     last_inlined))
            return backtrace

    def demangle(self, names):
+       if not any(names):
+           return names
        with RunTool([self.tool_cxxfilt] + names) as results:
            return results["__stdout__"].read().rstrip().split("\n")
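To make the new parsing path concrete, here is a self-contained sketch that runs the same loop over canned symbolizer output (the address, function names and file names are made up; the real code reads them from RunTool):

sample = "\n".join([
    "0x40001a2b",        # address echoed back because of --addresses
    "run",               # first record for that address
    "experiment.py:42",
    "helper",            # further record, treated as an inlined frame
    "experiment.py:17",
])

backtrace = []
last_inlined = None
lines = iter(sample.split("\n"))
for address_or_function in lines:
    if address_or_function[:2] == "0x":
        address = int(address_or_function[2:], 16) + 1  # undo the -1 offset
        function = next(lines)
        inlined = False
    else:
        address = backtrace[-1][4]   # inlined frames reuse the parent's address
        function = address_or_function
        inlined = True
    location = next(lines)
    filename, line = location.rsplit(":", 1)
    if inlined:
        last_inlined.append((filename, int(line), -1, function, address))
    else:
        last_inlined = []            # same list object is stored in the entry below,
        backtrace.append((filename, int(line), -1, function, address, last_inlined))
        # so later inlined frames attach to the most recent real frame in place

print(backtrace)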
@@ -251,40 +267,43 @@ class NativeTarget(Target):
    def __init__(self):
        super().__init__()
        self.triple = llvm.get_default_triple()
-       host_data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)
+       self.data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)

 class RV32IMATarget(Target):
    triple = "riscv32-unknown-linux"
    data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
    features = ["m", "a"]
+   additional_linker_options = ["-m", "elf32lriscv"]
    print_function = "core_log"
    now_pinning = True

    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
-   tool_addr2line = "llvm-addr2line"
+   tool_symbolizer = "llvm-symbolizer"
    tool_cxxfilt = "llvm-cxxfilt"

 class RV32GTarget(Target):
    triple = "riscv32-unknown-linux"
    data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
    features = ["m", "a", "f", "d"]
+   additional_linker_options = ["-m", "elf32lriscv"]
    print_function = "core_log"
    now_pinning = True

    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
-   tool_addr2line = "llvm-addr2line"
+   tool_symbolizer = "llvm-symbolizer"
    tool_cxxfilt = "llvm-cxxfilt"

 class CortexA9Target(Target):
    triple = "armv7-unknown-linux-gnueabihf"
    data_layout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
    features = ["dsp", "fp16", "neon", "vfp3"]
+   additional_linker_options = ["-m", "armelf_linux_eabi", "--target2=rel"]
    print_function = "core_log"
    now_pinning = False

    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
-   tool_addr2line = "llvm-addr2line"
+   tool_symbolizer = "llvm-symbolizer"
    tool_cxxfilt = "llvm-cxxfilt"
@@ -30,8 +30,9 @@ def main():
    device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))

-   dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
-   dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
+   dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.mdb")
+   dataset_db = DatasetDB(dataset_db_path)
+   dataset_mgr = DatasetManager()

    argument_mgr = ProcessArgumentManager({})

@@ -68,5 +69,7 @@ def main():
    benchmark(lambda: target.strip(elf_shlib),
              "Stripping debug information")

+   dataset_db.close_db()
+
 if __name__ == "__main__":
    main()
@@ -8,6 +8,7 @@ semantics explicitly.

 from collections import OrderedDict, defaultdict
 from functools import reduce
+from itertools import chain
 from pythonparser import algorithm, diagnostic, ast
 from .. import types, builtins, asttyped, ir, iodelay

@@ -61,6 +62,9 @@ class ARTIQIRGenerator(algorithm.Visitor):
        the basic block to which ``return`` will transfer control
    :ivar unwind_target: (:class:`ir.BasicBlock` or None)
        the basic block to which unwinding will transfer control
+   :ivar catch_clauses: (list of (:class:`ir.BasicBlock`, :class:`types.Type` or None))
+       a list of catch clauses that should be appended to inner try block
+       landingpad
    :ivar final_branch: (function (target: :class:`ir.BasicBlock`, block: :class:`ir.BasicBlock)
        or None)
        the function that appends to ``block`` a jump through the ``finally`` statement

@@ -88,8 +92,9 @@ class ARTIQIRGenerator(algorithm.Visitor):

    _size_type = builtins.TInt32()

-   def __init__(self, module_name, engine, ref_period):
+   def __init__(self, module_name, engine, ref_period, embedding_map):
        self.engine = engine
+       self.embedding_map = embedding_map
        self.functions = []
        self.name = [module_name] if module_name != "" else []
        self.ref_period = ir.Constant(ref_period, builtins.TFloat())

@@ -102,10 +107,13 @@ class ARTIQIRGenerator(algorithm.Visitor):
        self.current_private_env = None
        self.current_args = None
        self.current_assign = None
+       self.current_exception = None
+       self.current_remote_fn = False
        self.break_target = None
        self.continue_target = None
        self.return_target = None
        self.unwind_target = None
+       self.catch_clauses = []
        self.final_branch = None
        self.function_map = dict()
        self.variable_map = dict()
@@ -204,7 +212,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
                old_priv_env, self.current_private_env = self.current_private_env, priv_env

                self.generic_visit(node)
-               self.terminate(ir.Return(ir.Constant(None, builtins.TNone())))
+               self.terminate(ir.Return(ir.Constant(None, builtins.TNone()),
+                                        remote_return=self.current_remote_fn))

                return self.functions
            finally:

@@ -287,6 +296,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
        old_block, self.current_block = self.current_block, entry

        old_globals, self.current_globals = self.current_globals, node.globals_in_scope
+       old_remote_fn = self.current_remote_fn
+       self.current_remote_fn = getattr(node, "remote_fn", False)

        env_without_globals = \
            {var: node.typing_env[var]

@@ -319,7 +330,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
            self.terminate(ir.Return(result))
        elif builtins.is_none(typ.ret):
            if not self.current_block.is_terminated():
-               self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone())))
+               self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone()),
+                                                   remote_return=self.current_remote_fn))
        else:
            if not self.current_block.is_terminated():
                if len(self.current_block.predecessors()) != 0:

@@ -338,6 +350,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
        self.current_block = old_block
        self.current_globals = old_globals
        self.current_env = old_env
+       self.current_remote_fn = old_remote_fn
        if not is_lambda:
            self.current_private_env = old_priv_env

@@ -360,7 +373,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
            return_value = self.visit(node.value)

        if self.return_target is None:
-           self.append(ir.Return(return_value))
+           self.append(ir.Return(return_value,
+                                 remote_return=self.current_remote_fn))
        else:
            self.append(ir.SetLocal(self.current_private_env, "$return", return_value))
            self.append(ir.Branch(self.return_target))
@@ -638,10 +652,10 @@ class ARTIQIRGenerator(algorithm.Visitor):
            loc_column = ir.Constant(loc.column(), builtins.TInt32())
            loc_function = ir.Constant(".".join(self.name), builtins.TStr())

-           self.append(ir.SetAttr(exn, "__file__", loc_file))
-           self.append(ir.SetAttr(exn, "__line__", loc_line))
-           self.append(ir.SetAttr(exn, "__col__", loc_column))
-           self.append(ir.SetAttr(exn, "__func__", loc_function))
+           self.append(ir.SetAttr(exn, "#__file__", loc_file))
+           self.append(ir.SetAttr(exn, "#__line__", loc_line))
+           self.append(ir.SetAttr(exn, "#__col__", loc_column))
+           self.append(ir.SetAttr(exn, "#__func__", loc_function))

            if self.unwind_target is not None:
                self.append(ir.Raise(exn, self.unwind_target))

@@ -649,9 +663,9 @@ class ARTIQIRGenerator(algorithm.Visitor):
                self.append(ir.Raise(exn))
        else:
            if self.unwind_target is not None:
-               self.append(ir.Reraise(self.unwind_target))
+               self.append(ir.Resume(self.unwind_target))
            else:
-               self.append(ir.Reraise())
+               self.append(ir.Resume())

    def visit_Raise(self, node):
        if node.exc is not None and types.is_exn_constructor(node.exc.type):

@@ -661,6 +675,9 @@ class ARTIQIRGenerator(algorithm.Visitor):

    def visit_Try(self, node):
        dispatcher = self.add_block("try.dispatch")
+       cleanup = self.add_block('handler.cleanup')
+       landingpad = ir.LandingPad(cleanup)
+       dispatcher.append(landingpad)

        if any(node.finalbody):
            # k for continuation

@@ -676,15 +693,6 @@ class ARTIQIRGenerator(algorithm.Visitor):
                final_targets.append(target)
                final_paths.append(block)

-           final_exn_targets = []
-           final_exn_paths = []
-           # raise has to be treated differently
-           # we cannot follow indirectbr for local access validation, so we
-           # have to construct the control flow explicitly
-           def exception_final_branch(target, block):
-               final_exn_targets.append(target)
-               final_exn_paths.append(block)
-
        if self.break_target is not None:
            break_proxy = self.add_block("try.break")
            old_break, self.break_target = self.break_target, break_proxy
@@ -704,16 +712,51 @@ class ARTIQIRGenerator(algorithm.Visitor):
                value = return_action.append(ir.GetLocal(self.current_private_env, "$return"))
                return_action.append(ir.Return(value))
                final_branch(return_action, return_proxy)
+       else:
+           landingpad.has_cleanup = False
+
+       # we should propagate the clauses to nested try catch blocks
+       # so nested try catch will jump to our clause if the inner one does not
+       # match
+       # note that the phi instruction here requires some hack, see
+       # llvm_ir_generator process_function for details
+       clauses = []
+       found_catch_all = False
+       for handler_node in node.handlers:
+           if found_catch_all:
+               self.warn_unreachable(handler_node)
+               continue
+           exn_type = handler_node.name_type.find()
+           if handler_node.filter is not None and \
+                   not builtins.is_exception(exn_type, 'Exception'):
+               handler = self.add_block("handler." + exn_type.name)
+               phi = ir.Phi(builtins.TException(), 'exn')
+               handler.append(phi)
+               clauses.append((handler, exn_type, phi))
+           else:
+               handler = self.add_block("handler.catchall")
+               phi = ir.Phi(builtins.TException(), 'exn')
+               handler.append(phi)
+               clauses.append((handler, None, phi))
+               found_catch_all = True
+
+       all_clauses = clauses[:]
+       for clause in self.catch_clauses:
+           # if the last clause is accept all, do not add further clauses
+           if len(all_clauses) == 0 or all_clauses[-1][1] is not None:
+               all_clauses.append(clause)

        body = self.add_block("try.body")
        self.append(ir.Branch(body))
        self.current_block = body

-       try:
            old_unwind, self.unwind_target = self.unwind_target, dispatcher
+       old_clauses, self.catch_clauses = self.catch_clauses, all_clauses
+       try:
            self.visit(node.body)
        finally:
            self.unwind_target = old_unwind
+           self.catch_clauses = old_clauses

        if not self.current_block.is_terminated():
            self.visit(node.orelse)
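The clause propagation above preserves ordinary Python semantics for nested try statements. A plain host-Python illustration of the behaviour that must survive lowering (nothing ARTIQ-specific here):

def nested():
    try:
        try:
            raise ValueError("inner")
        except IndexError:
            return "inner handler"    # does not match, must be skipped
    except ValueError:
        return "outer handler"        # the propagated clause lands here

assert nested() == "outer handler"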
@@ -722,95 +765,149 @@ class ARTIQIRGenerator(algorithm.Visitor):
        body = self.current_block

        if any(node.finalbody):
+           # if we have a final block, we should not append clauses to our
+           # landingpad or we will skip the finally block.
+           # when the finally block calls resume, it will unwind to the outer
+           # try catch block automatically
+           all_clauses = clauses
+           # reset targets
            if self.break_target:
                self.break_target = old_break
            if self.continue_target:
                self.continue_target = old_continue
            self.return_target = old_return

-           old_final_branch, self.final_branch = self.final_branch, exception_final_branch
-
-       cleanup = self.add_block('handler.cleanup')
-       landingpad = dispatcher.append(ir.LandingPad(cleanup))
-       if not any(node.finalbody):
-           landingpad.has_cleanup = False
+       if any(node.finalbody):
+           # create new unwind target for cleanup
+           final_dispatcher = self.add_block("try.final.dispatch")
+           final_landingpad = ir.LandingPad(cleanup)
+           final_dispatcher.append(final_landingpad)
+
+           # make sure that exception clauses are unwinded to the finally block
+           old_unwind, self.unwind_target = self.unwind_target, final_dispatcher
+
+       if any(node.finalbody):
+           # if we have a while:try/finally continue must execute finally
+           # before continuing the while
+           redirect = final_branch
+       else:
+           redirect = lambda dest, proxy: proxy.append(ir.Branch(dest))
+
+       # we need to set break/continue/return to execute end_catch
+       if self.break_target is not None:
+           break_proxy = self.add_block("try.break")
+           break_proxy.append(ir.Builtin("end_catch", [], builtins.TNone()))
+           old_break, self.break_target = self.break_target, break_proxy
+           redirect(old_break, break_proxy)
+
+       if self.continue_target is not None:
+           continue_proxy = self.add_block("try.continue")
+           continue_proxy.append(ir.Builtin("end_catch", [],
+                                            builtins.TNone()))
+           old_continue, self.continue_target = self.continue_target, continue_proxy
+           redirect(old_continue, continue_proxy)
+
+       return_proxy = self.add_block("try.return")
+       return_proxy.append(ir.Builtin("end_catch", [], builtins.TNone()))
+       old_return, self.return_target = self.return_target, return_proxy
+       old_return_target = old_return
+       if old_return_target is None:
+           old_return_target = self.add_block("try.doreturn")
+           value = old_return_target.append(ir.GetLocal(self.current_private_env, "$return"))
+           old_return_target.append(ir.Return(value))
+       redirect(old_return_target, return_proxy)

        handlers = []
-       for handler_node in node.handlers:
-           exn_type = handler_node.name_type.find()
-           if handler_node.filter is not None and \
-                   not builtins.is_exception(exn_type, 'Exception'):
-               handler = self.add_block("handler." + exn_type.name)
-               landingpad.add_clause(handler, exn_type)
-           else:
-               handler = self.add_block("handler.catchall")
-               landingpad.add_clause(handler, None)

+       for (handler_node, (handler, exn_type, phi)) in zip(node.handlers, clauses):
            self.current_block = handler
            if handler_node.name is not None:
-               exn = self.append(ir.Builtin("exncast", [landingpad], handler_node.name_type))
+               exn = self.append(ir.Builtin("exncast", [phi], handler_node.name_type))
                self._set_local(handler_node.name, exn)
            self.visit(handler_node.body)
+           # only need to call end_catch if the current block is not terminated
+           # other possible paths: break/continue/return/raise
+           # we will call end_catch in the first 3 cases, and we should not
+           # end_catch in the last case for nested exception
+           if not self.current_block.is_terminated():
+               self.append(ir.Builtin("end_catch", [], builtins.TNone()))
            post_handler = self.current_block
+           handlers.append(post_handler)

-           handlers.append((handler, post_handler))
+       # branch to all possible clauses, including those from outer try catch
+       # block
+       # if we have a finally block, all_clauses will not include those from
+       # the outer block
+       for (handler, clause, phi) in all_clauses:
+           phi.add_incoming(landingpad, dispatcher)
+           landingpad.add_clause(handler, clause)
+
+       if self.break_target:
+           self.break_target = old_break
+       if self.continue_target:
+           self.continue_target = old_continue
+       self.return_target = old_return
|
if any(node.finalbody):
|
||||||
# Finalize and continue after try statement.
|
# Finalize and continue after try statement.
|
||||||
self.final_branch = old_final_branch
|
self.unwind_target = old_unwind
|
||||||
|
# Exception path
|
||||||
for (i, (target, block)) in enumerate(zip(final_exn_targets, final_exn_paths)):
|
finalizer_reraise = self.add_block("finally.resume")
|
||||||
finalizer = self.add_block(f"finally{i}")
|
|
||||||
self.current_block = block
|
|
||||||
self.terminate(ir.Branch(finalizer))
|
|
||||||
self.current_block = finalizer
|
|
||||||
self.visit(node.finalbody)
|
|
||||||
self.terminate(ir.Branch(target))
|
|
||||||
|
|
||||||
finalizer = self.add_block("finally")
|
|
||||||
self.current_block = finalizer
|
|
||||||
|
|
||||||
self.visit(node.finalbody)
|
|
||||||
post_finalizer = self.current_block
|
|
||||||
|
|
||||||
# Finalize and reraise. Separate from previous case to expose flow
|
|
||||||
# to LocalAccessValidator.
|
|
||||||
finalizer_reraise = self.add_block("finally.reraise")
|
|
||||||
self.current_block = finalizer_reraise
|
self.current_block = finalizer_reraise
|
||||||
|
|
||||||
self.visit(node.finalbody)
|
self.visit(node.finalbody)
|
||||||
self.terminate(ir.Reraise(self.unwind_target))
|
self.terminate(ir.Resume(self.unwind_target))
|
||||||
|
|
||||||
self.current_block = tail = self.add_block("try.tail")
|
|
||||||
if any(node.finalbody):
|
|
||||||
final_targets.append(tail)
|
|
||||||
|
|
||||||
for block in final_paths:
|
|
||||||
block.append(ir.Branch(finalizer))
|
|
||||||
|
|
||||||
if not body.is_terminated():
|
|
||||||
body.append(ir.SetLocal(final_state, "$cont", tail))
|
|
||||||
body.append(ir.Branch(finalizer))
|
|
||||||
|
|
||||||
cleanup.append(ir.Branch(finalizer_reraise))
|
cleanup.append(ir.Branch(finalizer_reraise))
|
||||||
|
|
||||||
for handler, post_handler in handlers:
|
# Normal path
|
||||||
if not post_handler.is_terminated():
|
finalizer = self.add_block("finally")
|
||||||
post_handler.append(ir.SetLocal(final_state, "$cont", tail))
|
self.current_block = finalizer
|
||||||
post_handler.append(ir.Branch(finalizer))
|
self.visit(node.finalbody)
|
||||||
|
post_finalizer = self.current_block
|
||||||
|
self.current_block = tail = self.add_block("try.tail")
|
||||||
|
final_targets.append(tail)
|
||||||
|
|
||||||
|
# if final block is not terminated, branch to tail
|
||||||
if not post_finalizer.is_terminated():
|
if not post_finalizer.is_terminated():
|
||||||
dest = post_finalizer.append(ir.GetLocal(final_state, "$cont"))
|
dest = post_finalizer.append(ir.GetLocal(final_state, "$cont"))
|
||||||
post_finalizer.append(ir.IndirectBranch(dest, final_targets))
|
post_finalizer.append(ir.IndirectBranch(dest, final_targets))
|
||||||
|
# make sure proxies will branch to finalizer
|
||||||
|
for block in final_paths:
|
||||||
|
if finalizer in block.predecessors():
|
||||||
|
# avoid producing irreducible graphs
|
||||||
|
# generate a new finalizer
|
||||||
|
self.current_block = tmp_finalizer = self.add_block("finally.tmp")
|
||||||
|
self.visit(node.finalbody)
|
||||||
|
if not self.current_block.is_terminated():
|
||||||
|
assert isinstance(block.instructions[-1], ir.SetLocal)
|
||||||
|
self.current_block.append(ir.Branch(block.instructions[-1].operands[-1]))
|
||||||
|
block.instructions[-1].erase()
|
||||||
|
block.append(ir.Branch(tmp_finalizer))
|
||||||
|
self.current_block = tail
|
||||||
else:
|
else:
|
||||||
|
block.append(ir.Branch(finalizer))
|
||||||
|
# if no raise in body/handlers, branch to finalizer
|
||||||
|
for block in chain([body], handlers):
|
||||||
|
if not block.is_terminated():
|
||||||
|
if finalizer in block.predecessors():
|
||||||
|
# similar to the above case
|
||||||
|
self.current_block = tmp_finalizer = self.add_block("finally.tmp")
|
||||||
|
self.visit(node.finalbody)
|
||||||
|
self.terminate(ir.Branch(tail))
|
||||||
|
block.append(ir.Branch(tmp_finalizer))
|
||||||
|
self.current_block = tail
|
||||||
|
else:
|
||||||
|
block.append(ir.SetLocal(final_state, "$cont", tail))
|
||||||
|
block.append(ir.Branch(finalizer))
|
||||||
|
else:
|
||||||
|
self.current_block = tail = self.add_block("try.tail")
|
||||||
if not body.is_terminated():
|
if not body.is_terminated():
|
||||||
body.append(ir.Branch(tail))
|
body.append(ir.Branch(tail))
|
||||||
|
|
||||||
cleanup.append(ir.Reraise(self.unwind_target))
|
cleanup.append(ir.Resume(self.unwind_target))
|
||||||
|
|
||||||
for handler, post_handler in handlers:
|
for handler in handlers:
|
||||||
if not post_handler.is_terminated():
|
if not handler.is_terminated():
|
||||||
post_handler.append(ir.Branch(tail))
|
handler.append(ir.Branch(tail))
|
||||||
|
|
||||||
def _try_finally(self, body_gen, finally_gen, name):
|
def _try_finally(self, body_gen, finally_gen, name):
|
||||||
dispatcher = self.add_block("{}.dispatch".format(name))
|
dispatcher = self.add_block("{}.dispatch".format(name))
|
||||||
|
@@ -829,7 +926,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
        self.current_block = self.add_block("{}.cleanup".format(name))
        dispatcher.append(ir.LandingPad(self.current_block))
        finally_gen()
-       self.raise_exn()
+       self.terminate(ir.Resume(self.unwind_target))

        self.current_block = self.post_body
@@ -1108,7 +1205,27 @@ class ARTIQIRGenerator(algorithm.Visitor):
            finally:
                self.current_assign = old_assign

-       if isinstance(node.slice, ast.Index):
+       if types.is_tuple(node.value.type):
+           assert isinstance(node.slice, ast.Index), \
+               "Internal compiler error: tuple index should be an Index"
+           assert isinstance(node.slice.value, ast.Num), \
+               "Internal compiler error: tuple index should be a constant"
+
+           if self.current_assign is not None:
+               diag = diagnostic.Diagnostic("error",
+                   "cannot assign to a tuple element",
+                   {}, node.loc)
+               self.engine.process(diag)
+
+           index = node.slice.value.n
+           indexed = self.append(
+               ir.GetAttr(value, index, name="{}.e{}".format(value.name, index)),
+               loc=node.loc
+           )
+
+           return indexed
+
+       elif isinstance(node.slice, ast.Index):
            try:
                old_assign, self.current_assign = self.current_assign, None
                index = self.visit(node.slice.value)
@@ -2102,11 +2219,13 @@ class ARTIQIRGenerator(algorithm.Visitor):
        return phi

    # Keep this function with builtins.TException.attributes.
-   def alloc_exn(self, typ, message=None, param0=None, param1=None, param2=None):
+   def alloc_exn(self, typ, message=None, param0=None, param1=None,
+                 param2=None, nomsgcheck=False):
        typ = typ.find()
        name = "{}:{}".format(typ.id, typ.name)
+       name_id = self.embedding_map.store_str(name)
        attributes = [
-           ir.Constant(name, builtins.TStr()),            # typeinfo
+           ir.Constant(name_id, builtins.TInt32()),       # typeinfo
            ir.Constant("<not thrown>", builtins.TStr()),  # file
            ir.Constant(0, builtins.TInt32()),             # line
            ir.Constant(0, builtins.TInt32()),             # column

@@ -2115,8 +2234,16 @@ class ARTIQIRGenerator(algorithm.Visitor):

        if message is None:
            attributes.append(ir.Constant(typ.name, builtins.TStr()))
-       else:
+       elif isinstance(message, ir.Constant) or nomsgcheck:
            attributes.append(message)  # message
+       else:
+           diag = diagnostic.Diagnostic(
+               "error",
+               "only constant exception messages are supported",
+               {},
+               self.current_loc if message.loc is None else message.loc
+           )
+           self.engine.process(diag)

        param_type = builtins.TInt64()
        for param in [param0, param1, param2]:
@@ -2404,6 +2531,33 @@ class ARTIQIRGenerator(algorithm.Visitor):
                or types.is_builtin(typ, "at_mu"):
            return self.append(ir.Builtin(typ.name,
                [self.visit(arg) for arg in node.args], node.type))
+       elif types.is_builtin(typ, "subkernel_await"):
+           if len(node.args) == 2 and len(node.keywords) == 0:
+               fn = node.args[0].type
+               timeout = self.visit(node.args[1])
+           elif len(node.args) == 1 and len(node.keywords) == 0:
+               fn = node.args[0].type
+               timeout = ir.Constant(10_000, builtins.TInt64())
+           else:
+               assert False
+           if types.is_method(fn):
+               fn = types.get_method_function(fn)
+           sid = ir.Constant(fn.sid, builtins.TInt32())
+           if not builtins.is_none(fn.ret):
+               ret = self.append(ir.Builtin("subkernel_retrieve_return", [sid, timeout], fn.ret))
+           else:
+               ret = ir.Constant(None, builtins.TNone())
+           self.append(ir.Builtin("subkernel_await_finish", [sid, timeout], builtins.TNone()))
+           return ret
+       elif types.is_builtin(typ, "subkernel_preload"):
+           if len(node.args) == 1 and len(node.keywords) == 0:
+               fn = node.args[0].type
+           else:
+               assert False
+           if types.is_method(fn):
+               fn = types.get_method_function(fn)
+           sid = ir.Constant(fn.sid, builtins.TInt32())
+           return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone()))
        elif types.is_exn_constructor(typ):
            return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args])
        elif types.is_constructor(typ):
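Hedged call-site sketch matching the lowering above: with two arguments the second is the timeout, and with one argument the compiler falls back to the constant 10_000 (TInt64) emitted here; the time unit is not stated in this diff, so treat it as an assumption:

# inside a @kernel method; `self.measure` is a subkernel as sketched earlier
res = subkernel_await(self.measure)          # implicit timeout of 10_000
res = subkernel_await(self.measure, 20_000)  # explicit timeout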
@ -2415,8 +2569,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
||||||
node.loc)
|
node.loc)
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
|
|
||||||
def _user_call(self, callee, positional, keywords, arg_exprs={}):
|
def _user_call(self, callee, positional, keywords, arg_exprs={}, remote_fn=False):
|
||||||
if types.is_function(callee.type) or types.is_rpc(callee.type):
|
if types.is_function(callee.type) or types.is_rpc(callee.type) or types.is_subkernel(callee.type):
|
||||||
func = callee
|
func = callee
|
||||||
self_arg = None
|
self_arg = None
|
||||||
fn_typ = callee.type
|
fn_typ = callee.type
|
||||||
|
@@ -2431,16 +2585,51 @@ class ARTIQIRGenerator(algorithm.Visitor):
         else:
             assert False
 
-        if types.is_rpc(fn_typ):
-            if self_arg is None:
+        if types.is_rpc(fn_typ) or types.is_subkernel(fn_typ):
+            if self_arg is None or types.is_subkernel(fn_typ):
+                # self is not passed to subkernels by remote
                 args = positional
-            else:
+            elif self_arg is not None:
                 args = [self_arg] + positional
 
             for keyword in keywords:
                 arg = keywords[keyword]
                 args.append(self.append(ir.Alloc([ir.Constant(keyword, builtins.TStr()), arg],
                                                  ir.TKeyword(arg.type))))
+        elif remote_fn:
+            assert self_arg is None
+            assert len(fn_typ.args) >= len(positional)
+            assert len(keywords) == 0  # no keyword support
+            args = [None] * fn_typ.arity()
+            index = 0
+            # fill in first available args
+            for arg in positional:
+                args[index] = arg
+                index += 1
+
+            # remaining args are received through DRTIO
+            if index < len(args):
+                # min/max args received remotely (minus already filled)
+                offset = index
+                min_args = ir.Constant(len(fn_typ.args)-offset, builtins.TInt8())
+                max_args = ir.Constant(fn_typ.arity()-offset, builtins.TInt8())
+
+                arg_types = list(fn_typ.args.items())[offset:]
+                arg_type_list = [a[1] for a in arg_types] + [a[1] for a in fn_typ.optargs.items()]
+                rcvd_count = self.append(ir.SubkernelAwaitArgs([min_args, max_args], arg_type_list))
+                # obligatory arguments
+                for arg_name, arg_type in arg_types:
+                    args[index] = self.append(ir.GetArgFromRemote(arg_name, arg_type,
+                                                                  name="ARG.{}".format(arg_name)))
+                    index += 1
+
+                # optional arguments
+                for optarg_name, optarg_type in fn_typ.optargs.items():
+                    idx = ir.Constant(index-offset, builtins.TInt8())
+                    args[index] = \
+                        self.append(ir.GetOptArgFromRemote(optarg_name, optarg_type, rcvd_count, idx))
+                    index += 1
         else:
             args = [None] * (len(fn_typ.args) + len(fn_typ.optargs))
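A small sketch of the min_args/max_args bookkeeping above: given the number of mandatory and optional parameters and how many were already filled positionally, the window of arguments still expected over DRTIO is (mandatory - filled, arity - filled). The function and its names are illustrative only.

def remote_arg_window(num_mandatory, num_optional, filled_positionally):
    # sketch of the min/max computation fed to SubkernelAwaitArgs
    arity = num_mandatory + num_optional
    offset = filled_positionally
    min_args = num_mandatory - offset   # still-missing mandatory arguments
    max_args = arity - offset           # mandatory + optional that may still arrive
    return min_args, max_args

# e.g. def f(a, b, c=0) called remotely with one argument already known locally:
assert remote_arg_window(2, 1, 1) == (1, 2)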
@@ -2526,7 +2715,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
             else:
                 assert False, "Broadcasting for {} arguments not implemented".format(len)
         else:
-            insn = self._user_call(callee, args, keywords, node.arg_exprs)
+            remote_fn = getattr(node, "remote_fn", False)
+            insn = self._user_call(callee, args, keywords, node.arg_exprs, remote_fn)
             if isinstance(node.func, asttyped.AttributeT):
                 attr_node = node.func
                 self.method_map[(attr_node.value.type.find(),
@@ -2577,11 +2767,12 @@ class ARTIQIRGenerator(algorithm.Visitor):
             old_final_branch, self.final_branch = self.final_branch, None
             old_unwind, self.unwind_target = self.unwind_target, None
 
-            exn = self.alloc_exn(builtins.TException("AssertionError"), message=msg)
-            self.append(ir.SetAttr(exn, "__file__", file))
-            self.append(ir.SetAttr(exn, "__line__", line))
-            self.append(ir.SetAttr(exn, "__col__", col))
-            self.append(ir.SetAttr(exn, "__func__", function))
+            exn = self.alloc_exn(builtins.TException("AssertionError"),
+                                 message=msg, nomsgcheck=True)
+            self.append(ir.SetAttr(exn, "#__file__", file))
+            self.append(ir.SetAttr(exn, "#__line__", line))
+            self.append(ir.SetAttr(exn, "#__col__", col))
+            self.append(ir.SetAttr(exn, "#__func__", function))
             self.append(ir.Raise(exn))
         finally:
             self.current_function = old_func
@@ -2717,14 +2908,15 @@ class ARTIQIRGenerator(algorithm.Visitor):
 
                 format_string += ")"
             elif builtins.is_exception(value.type):
-                name = self.append(ir.GetAttr(value, "__name__"))
-                message = self.append(ir.GetAttr(value, "__message__"))
-                param1 = self.append(ir.GetAttr(value, "__param0__"))
-                param2 = self.append(ir.GetAttr(value, "__param1__"))
-                param3 = self.append(ir.GetAttr(value, "__param2__"))
-
-                format_string += "%.*s(%.*s, %lld, %lld, %lld)"
-                args += [name, message, param1, param2, param3]
+                # message may not be an actual string...
+                # so we cannot really print it
+                name = self.append(ir.GetAttr(value, "#__name__"))
+                param1 = self.append(ir.GetAttr(value, "#__param0__"))
+                param2 = self.append(ir.GetAttr(value, "#__param1__"))
+                param3 = self.append(ir.GetAttr(value, "#__param2__"))
+
+                format_string += "%ld(%lld, %lld, %lld)"
+                args += [name, param1, param2, param3]
             else:
                 assert False
@@ -238,7 +238,7 @@ class ASTTypedRewriter(algorithm.Transformer):
                               body=node.body, decorator_list=node.decorator_list,
                               keyword_loc=node.keyword_loc, name_loc=node.name_loc,
                               arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
-                              loc=node.loc)
+                              loc=node.loc, remote_fn=False)
 
         try:
             self.env_stack.append(node.typing_env)
@@ -440,7 +440,8 @@ class ASTTypedRewriter(algorithm.Transformer):
     def visit_Call(self, node):
         node = self.generic_visit(node)
         node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={},
-                              func=node.func, args=node.args, keywords=node.keywords,
+                              remote_fn=False, func=node.func,
+                              args=node.args, keywords=node.keywords,
                               starargs=node.starargs, kwargs=node.kwargs,
                               star_loc=node.star_loc, dstar_loc=node.dstar_loc,
                               begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
@@ -15,13 +15,26 @@ class DeadCodeEliminator:
             self.process_function(func)
 
     def process_function(self, func):
-        modified = True
-        while modified:
-            modified = False
-            for block in list(func.basic_blocks):
-                if not any(block.predecessors()) and block != func.entry():
-                    self.remove_block(block)
-                    modified = True
+        # defer removing those blocks, so our use checks will ignore deleted blocks
+        preserve = [func.entry()]
+        work_list = [func.entry()]
+        while any(work_list):
+            block = work_list.pop()
+            for succ in block.successors():
+                if succ not in preserve:
+                    preserve.append(succ)
+                    work_list.append(succ)
+
+        to_be_removed = []
+        for block in func.basic_blocks:
+            if block not in preserve:
+                block.is_removed = True
+                to_be_removed.append(block)
+                for insn in block.instructions:
+                    insn.is_removed = True
+
+        for block in to_be_removed:
+            self.remove_block(block)
 
         modified = True
         while modified:
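The rewritten process_function marks every block not reachable from the entry block before deleting anything, instead of repeatedly pruning predecessor-less blocks. A self-contained sketch of that reachability pass on a toy CFG (plain dicts, not ARTIQ IR objects):

def reachable_blocks(entry, successors):
    # worklist reachability, mirroring the preserve/work_list loop above
    preserve = [entry]
    work_list = [entry]
    while work_list:
        block = work_list.pop()
        for succ in successors.get(block, []):
            if succ not in preserve:
                preserve.append(succ)
                work_list.append(succ)
    return preserve

cfg = {"entry": ["a", "b"], "a": ["c"], "b": ["c"], "dead": ["a"]}
print(reachable_blocks("entry", cfg))  # 'dead' is not preserved and would be removed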
@@ -42,6 +55,8 @@ class DeadCodeEliminator:
     def remove_block(self, block):
         # block.uses are updated while iterating
         for use in set(block.uses):
+            if use.is_removed:
+                continue
             if isinstance(use, ir.Phi):
                 use.remove_incoming_block(block)
                 if not any(use.operands):
@@ -56,6 +71,8 @@ class DeadCodeEliminator:
 
     def remove_instruction(self, insn):
         for use in set(insn.uses):
+            if use.is_removed:
+                continue
             if isinstance(use, ir.Phi):
                 use.remove_incoming_value(insn)
                 if not any(use.operands):
@@ -46,6 +46,7 @@ class Inferencer(algorithm.Visitor):
         self.function = None  # currently visited function, for Return inference
         self.in_loop = False
         self.has_return = False
+        self.subkernel_arg_types = dict()
 
     def _unify(self, typea, typeb, loca, locb, makenotes=None, when=""):
         try:
@@ -178,7 +179,7 @@ class Inferencer(algorithm.Visitor):
             # Convert to a method.
             attr_type = types.TMethod(object_type, attr_type)
             self._unify_method_self(attr_type, attr_name, attr_loc, loc, value_node.loc)
-        elif types.is_rpc(attr_type):
+        elif types.is_rpc(attr_type) or types.is_subkernel(attr_type):
             # Convert to a method. We don't have to bother typechecking
             # the self argument, since for RPCs anything goes.
             attr_type = types.TMethod(object_type, attr_type)
@@ -259,7 +260,31 @@ class Inferencer(algorithm.Visitor):
 
     def visit_SubscriptT(self, node):
         self.generic_visit(node)
-        if isinstance(node.slice, ast.Index):
+        if types.is_tuple(node.value.type):
+            if (not isinstance(node.slice, ast.Index) or
+                    not isinstance(node.slice.value, ast.Num)):
+                diag = diagnostic.Diagnostic(
+                    "error", "tuples can only be indexed by a constant", {},
+                    node.slice.loc, []
+                )
+                self.engine.process(diag)
+                return
+
+            tuple_type = node.value.type.find()
+            index = node.slice.value.n
+            if index < 0 or index >= len(tuple_type.elts):
+                diag = diagnostic.Diagnostic(
+                    "error",
+                    "index {index} is out of range for tuple of size {size}",
+                    {"index": index, "size": len(tuple_type.elts)},
+                    node.slice.loc, []
+                )
+                self.engine.process(diag)
+                return
+
+            self._unify(node.type, tuple_type.elts[index], node.loc, node.value.loc)
+        elif isinstance(node.slice, ast.Index):
             if types.is_tuple(node.slice.value.type):
                 if types.is_var(node.value.type):
                     return
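The new visit_SubscriptT branch only accepts literal, in-range indices on tuples. A tiny stand-alone analogue of that check, with plain strings standing in for the compiler diagnostics:

def check_tuple_index(index, size):
    # mirrors the two diagnostics added above for tuple subscripts
    if not isinstance(index, int):
        return "error: tuples can only be indexed by a constant"
    if index < 0 or index >= size:
        return "error: index {} is out of range for tuple of size {}".format(index, size)
    return "ok"

print(check_tuple_index(1, 3))    # ok
print(check_tuple_index("n", 3))  # constant required
print(check_tuple_index(5, 3))    # out of range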
@@ -1269,6 +1294,55 @@ class Inferencer(algorithm.Visitor):
             # Ignored.
             self._unify(node.type, builtins.TNone(),
                         node.loc, None)
+        elif types.is_builtin(typ, "subkernel_await"):
+            valid_forms = lambda: [
+                valid_form("subkernel_await(f: subkernel) -> f return type"),
+                valid_form("subkernel_await(f: subkernel, timeout: numpy.int64) -> f return type")
+            ]
+            if 1 <= len(node.args) <= 2:
+                arg0 = node.args[0].type
+                if types.is_var(arg0):
+                    pass  # undetermined yet
+                else:
+                    if types.is_method(arg0):
+                        fn = types.get_method_function(arg0)
+                    elif types.is_function(arg0) or types.is_subkernel(arg0):
+                        fn = arg0
+                    else:
+                        diagnose(valid_forms())
+                    self._unify(node.type, fn.ret,
+                                node.loc, None)
+                if len(node.args) == 2:
+                    arg1 = node.args[1]
+                    if types.is_var(arg1.type):
+                        pass
+                    elif builtins.is_int(arg1.type):
+                        # promote to TInt64
+                        self._unify(arg1.type, builtins.TInt64(),
+                                    arg1.loc, None)
+                    else:
+                        diagnose(valid_forms())
+            else:
+                diagnose(valid_forms())
+        elif types.is_builtin(typ, "subkernel_preload"):
+            valid_forms = lambda: [
+                valid_form("subkernel_preload(f: subkernel) -> None")
+            ]
+            if len(node.args) == 1:
+                arg0 = node.args[0].type
+                if types.is_var(arg0):
+                    pass  # undetermined yet
+                else:
+                    if types.is_method(arg0):
+                        fn = types.get_method_function(arg0)
+                    elif types.is_function(arg0) or types.is_subkernel(arg0):
+                        fn = arg0
+                    else:
+                        diagnose(valid_forms())
+                    self._unify(node.type, fn.ret,
+                                node.loc, None)
+            else:
+                diagnose(valid_forms())
         else:
             assert False
@@ -1307,6 +1381,7 @@ class Inferencer(algorithm.Visitor):
             typ_args = typ.args
             typ_optargs = typ.optargs
             typ_ret = typ.ret
+            typ_func = typ
         else:
             typ_self = types.get_method_self(typ)
             typ_func = types.get_method_function(typ)
@@ -1364,12 +1439,23 @@ class Inferencer(algorithm.Visitor):
                             other_node=node.args[0])
             self._unify(node.type, ret, node.loc, None)
             return
+        if types.is_subkernel(typ_func) and typ_func.sid not in self.subkernel_arg_types:
+            self.subkernel_arg_types[typ_func.sid] = []
 
         for actualarg, (formalname, formaltyp) in \
                 zip(node.args, list(typ_args.items()) + list(typ_optargs.items())):
             self._unify(actualarg.type, formaltyp,
                         actualarg.loc, None)
             passed_args[formalname] = actualarg.loc
+            if types.is_subkernel(typ_func):
+                if types.is_instance(actualarg.type):
+                    # objects cannot be passed to subkernels, as rpc code doesn't support them
+                    diag = diagnostic.Diagnostic("error",
+                        "argument '{name}' of type: {typ} is not supported in subkernels",
+                        {"name": formalname, "typ": actualarg.type},
+                        actualarg.loc, [])
+                    self.engine.process(diag)
+                self.subkernel_arg_types[typ_func.sid].append((formalname, formaltyp))
 
         for keyword in node.keywords:
             if keyword.arg in passed_args:
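The argument check added above rejects object (instance) types because the RPC-style serialisation used for subkernel messages cannot encode them. A minimal illustration of that rule, using a made-up type-name tagging scheme rather than the real type objects:

def subkernel_arg_ok(type_name):
    # sketch: instance (object) types are rejected, everything else passes here
    unsupported = {"instance"}  # mirrors the types.is_instance() test in the diff
    return type_name not in unsupported

for t in ("int32", "list(int64)", "instance"):
    print(t, "->", "ok" if subkernel_arg_ok(t) else "not supported in subkernels")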
@@ -1400,7 +1486,7 @@ class Inferencer(algorithm.Visitor):
             passed_args[keyword.arg] = keyword.arg_loc
 
         for formalname in typ_args:
-            if formalname not in passed_args:
+            if formalname not in passed_args and not node.remote_fn:
                 note = diagnostic.Diagnostic("note",
                     "the called function is of type {type}",
                     {"type": types.TypePrinter().name(node.func.type)},
@@ -280,7 +280,7 @@ class IODelayEstimator(algorithm.Visitor):
                                   context="as an argument for delay_mu()")
             call_delay = value
         elif not types.is_builtin(typ):
-            if types.is_function(typ) or types.is_rpc(typ):
+            if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
                 offset = 0
             elif types.is_method(typ):
                 offset = 1
@@ -288,7 +288,7 @@ class IODelayEstimator(algorithm.Visitor):
             else:
                 assert False
 
-            if types.is_rpc(typ):
+            if types.is_rpc(typ) or types.is_subkernel(typ):
                 call_delay = iodelay.Const(0)
             else:
                 delay = typ.find().delay.find()
@@ -311,6 +311,7 @@ class IODelayEstimator(algorithm.Visitor):
                     args[arg_name] = arg_node
 
                 free_vars = delay.duration.free_vars()
+                try:
                     node.arg_exprs = {
                         arg: self.evaluate(args[arg], abort=abort,
                                            context="in the expression for argument '{}' "
@@ -318,6 +319,12 @@ class IODelayEstimator(algorithm.Visitor):
                         for arg in free_vars
                     }
                     call_delay = delay.duration.fold(node.arg_exprs)
+                except KeyError as e:
+                    if getattr(node, "remote_fn", False):
+                        note = diagnostic.Diagnostic("note",
+                            "function called here", {},
+                            node.loc)
+                        self.abort("due to arguments passed remotely", node.loc, note)
             else:
                 assert False
         else:
@@ -171,11 +171,26 @@ class LLVMIRGenerator:
         self.llfunction = None
         self.llmap = {}
         self.llobject_map = {}
+        self.llpred_map = {}
         self.phis = []
         self.debug_info_emitter = DebugInfoEmitter(self.llmodule)
         self.empty_metadata = self.llmodule.add_metadata([])
         self.quote_fail_msg = None
 
+        # Maximum alignment required according to the target platform ABI. As this is
+        # not directly exposed by LLVM, just take the maximum across all the "big"
+        # elementary types we use. (Vector types, should we ever support them, are
+        # likely contenders for even larger alignment requirements.)
+        self.max_target_alignment = max(map(
+            lambda t: self.abi_layout_info.get_size_align(t)[1],
+            [lli64, lldouble, llptr]
+        ))
+
+    def add_pred(self, pred, block):
+        if block not in self.llpred_map:
+            self.llpred_map[block] = set()
+        self.llpred_map[block].add(pred)
+
     def needs_sret(self, lltyp, may_be_large=True):
         if isinstance(lltyp, ll.VoidType):
             return False
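add_pred simply maintains a block-to-set-of-predecessors map; the branch/cbranch overrides later in this diff call it every time an edge is emitted. A plain-dictionary sketch of the same bookkeeping:

def add_pred(llpred_map, pred, block):
    # same bookkeeping as LLVMIRGenerator.add_pred, on plain dicts
    llpred_map.setdefault(block, set()).add(pred)

preds = {}
add_pred(preds, "bb0", "bb1")
add_pred(preds, "bb0", "bb2")
add_pred(preds, "bb1", "bb2")
print(preds)  # {'bb1': {'bb0'}, 'bb2': {'bb0', 'bb1'}}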
@@ -200,7 +215,7 @@ class LLVMIRGenerator:
         typ = typ.find()
         if types.is_tuple(typ):
             return ll.LiteralStructType([self.llty_of_type(eltty) for eltty in typ.elts])
-        elif types.is_rpc(typ) or types.is_external_function(typ):
+        elif types.is_rpc(typ) or types.is_external_function(typ) or types.is_subkernel(typ):
             if for_return:
                 return llvoid
             else:
@@ -247,7 +262,10 @@ class LLVMIRGenerator:
             return ll.LiteralStructType([llbufferty, llshapety])
         elif builtins.is_listish(typ):
             lleltty = self.llty_of_type(builtins.get_iterable_elt(typ))
-            return ll.LiteralStructType([lleltty.as_pointer(), lli32])
+            lltyp = ll.LiteralStructType([lleltty.as_pointer(), lli32])
+            if builtins.is_list(typ):
+                lltyp = lltyp.as_pointer()
+            return lltyp
         elif builtins.is_range(typ):
             lleltty = self.llty_of_type(builtins.get_iterable_elt(typ))
             return ll.LiteralStructType([lleltty, lleltty, lleltty])
@@ -326,8 +344,8 @@ class LLVMIRGenerator:
             else:
                 value = const.value
 
-            llptr = self.llstr_of_str(const.value, linkage="private", unnamed_addr=True)
-            lllen = ll.Constant(lli32, len(const.value))
+            llptr = self.llstr_of_str(value, linkage="private", unnamed_addr=True)
+            lllen = ll.Constant(lli32, len(value))
             return ll.Constant(llty, (llptr, lllen))
         else:
             assert False
@@ -367,7 +385,9 @@ class LLVMIRGenerator:
             llty = ll.FunctionType(lli32, [], var_arg=True)
         elif name == "__artiq_raise":
             llty = ll.FunctionType(llvoid, [self.llty_of_type(builtins.TException())])
-        elif name == "__artiq_reraise":
+        elif name == "__artiq_resume":
+            llty = ll.FunctionType(llvoid, [])
+        elif name == "__artiq_end_catch":
             llty = ll.FunctionType(llvoid, [])
         elif name == "memcmp":
             llty = ll.FunctionType(lli32, [llptr, llptr, lli32])
@@ -378,6 +398,15 @@ class LLVMIRGenerator:
         elif name == "rpc_recv":
             llty = ll.FunctionType(lli32, [llptr])
 
+        elif name == "subkernel_send_message":
+            llty = ll.FunctionType(llvoid, [lli32, lli8, llsliceptr, llptrptr])
+        elif name == "subkernel_load_run":
+            llty = ll.FunctionType(llvoid, [lli32, lli1])
+        elif name == "subkernel_await_finish":
+            llty = ll.FunctionType(llvoid, [lli32, lli64])
+        elif name == "subkernel_await_message":
+            llty = ll.FunctionType(lli8, [lli32, lli64, llsliceptr, lli8, lli8])
+
         # with now-pinning
         elif name == "now":
             llty = lli64
@@ -395,7 +424,7 @@ class LLVMIRGenerator:
 
         if isinstance(llty, ll.FunctionType):
             llglobal = ll.Function(self.llmodule, llty, name)
-            if name in ("__artiq_raise", "__artiq_reraise", "llvm.trap"):
+            if name in ("__artiq_raise", "__artiq_resume", "llvm.trap"):
                 llglobal.attributes.add("noreturn")
             if name in ("rtio_log", "rpc_send", "rpc_send_async",
                         self.target.print_function):
@@ -653,6 +682,28 @@ class LLVMIRGenerator:
             self.llbuilder = ll.IRBuilder()
             llblock_map = {}
 
+            # this is the predecessor map, from basic block to the set of its
+            # predecessors
+            # handling for branch and cbranch is here, and the handling of
+            # indirectbr and landingpad are in their respective process_*
+            # function
+            self.llpred_map = llpred_map = {}
+            branch_fn = self.llbuilder.branch
+            cbranch_fn = self.llbuilder.cbranch
+            def override_branch(block):
+                nonlocal self, branch_fn
+                self.add_pred(self.llbuilder.basic_block, block)
+                return branch_fn(block)
+
+            def override_cbranch(pred, bbif, bbelse):
+                nonlocal self, cbranch_fn
+                self.add_pred(self.llbuilder.basic_block, bbif)
+                self.add_pred(self.llbuilder.basic_block, bbelse)
+                return cbranch_fn(pred, bbif, bbelse)
+
+            self.llbuilder.branch = override_branch
+            self.llbuilder.cbranch = override_cbranch
+
             if not func.is_generated:
                 lldisubprogram = self.debug_info_emitter.emit_subprogram(func, self.llfunction)
                 self.llfunction.set_metadata('dbg', lldisubprogram)
@@ -675,6 +726,10 @@ class LLVMIRGenerator:
             # Third, translate all instructions.
             for block in func.basic_blocks:
                 self.llbuilder.position_at_end(self.llmap[block])
+                old_block = None
+                if len(block.instructions) == 1 and \
+                        isinstance(block.instructions[0], ir.LandingPad):
+                    old_block = self.llbuilder.basic_block
                 for insn in block.instructions:
                     if insn.loc is not None and not func.is_generated:
                         self.llbuilder.debug_metadata = \
@@ -689,11 +744,27 @@ class LLVMIRGenerator:
                 # instruction so that the result spans several LLVM basic
                 # blocks. This only really matters for phis, which are thus
                 # using a different map (the following one).
+                if old_block is None:
                     llblock_map[block] = self.llbuilder.basic_block
+                else:
+                    llblock_map[block] = old_block
 
             # Fourth, add incoming values to phis.
             for phi, llphi in self.phis:
                 for value, block in phi.incoming():
+                    if isinstance(phi.type, builtins.TException):
+                        # a hack to patch phi from landingpad
+                        # because landingpad is a single bb in artiq IR, but
+                        # generates multiple bb, we need to find out the
+                        # predecessor to figure out the actual bb
+                        landingpad = llblock_map[block]
+                        for pred in llpred_map[llphi.parent]:
+                            if pred in llpred_map and landingpad in llpred_map[pred]:
+                                llphi.add_incoming(self.map(value), pred)
+                                break
+                        else:
+                            llphi.add_incoming(self.map(value), landingpad)
+                    else:
                         llphi.add_incoming(self.map(value), llblock_map[block])
         finally:
             self.function_flags = None
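The phi patch above picks, among the predecessors of the phi's own block, one that itself has the landing pad's mapped block as a predecessor, falling back to the mapped block otherwise. A toy version of that selection over the predecessor map (names are illustrative):

def pick_incoming_block(phi_block, landingpad_block, pred_map):
    # prefer a predecessor of the phi's block that is downstream of the landing pad
    for pred in pred_map.get(phi_block, ()):
        if landingpad_block in pred_map.get(pred, ()):
            return pred
    return landingpad_block  # fall back to the mapped block, as the diff does

pred_map = {"phi_bb": {"cleanup", "other"}, "cleanup": {"lpad.tail"}}
print(pick_incoming_block("phi_bb", "lpad.tail", pred_map))  # cleanup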
@@ -732,8 +803,19 @@ class LLVMIRGenerator:
             llalloc = self.llbuilder.alloca(lleltty, size=llsize)
             if types._is_pointer(insn.type):
                 return llalloc
+            if builtins.is_list(insn.type):
+                llvalue = self.llbuilder.alloca(self.llty_of_type(insn.type).pointee, size=1)
+                self.llbuilder.store(llalloc, self.llbuilder.gep(llvalue,
+                                                                 [self.llindex(0),
+                                                                  self.llindex(0)],
+                                                                 inbounds=True))
+                self.llbuilder.store(llsize, self.llbuilder.gep(llvalue,
+                                                                [self.llindex(0),
+                                                                 self.llindex(1)],
+                                                                inbounds=True))
+            else:
                 llvalue = ll.Constant(self.llty_of_type(insn.type), ll.Undefined)
-            llvalue = self.llbuilder.insert_value(llvalue, llalloc, 0, name=insn.name)
+                llvalue = self.llbuilder.insert_value(llvalue, llalloc, 0)
                 llvalue = self.llbuilder.insert_value(llvalue, llsize, 1)
             return llvalue
         elif (not builtins.is_allocated(insn.type) or ir.is_keyword(insn.type)
@@ -801,6 +883,53 @@ class LLVMIRGenerator:
             llvalue = self.llbuilder.bitcast(llvalue, llptr.type.pointee)
         return self.llbuilder.store(llvalue, llptr)
 
+    def process_GetArgFromRemote(self, insn):
+        llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
+                                         name="subkernel.arg.stack")
+        llval = self._build_rpc_recv(insn.arg_type, llstackptr)
+        return llval
+
+    def process_GetOptArgFromRemote(self, insn):
+        # optarg = index < rcv_count ? Some(rcv_recv()) : None
+        llhead = self.llbuilder.basic_block
+        llrcv = self.llbuilder.append_basic_block(name="optarg.get.{}".format(insn.arg_name))
+
+        # argument received
+        self.llbuilder.position_at_end(llrcv)
+        llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
+                                         name="subkernel.arg.stack")
+        llval = self._build_rpc_recv(insn.arg_type, llstackptr)
+        llrpcretblock = self.llbuilder.basic_block  # 'return' from rpc_recv, will be needed later
+
+        # create the tail block, needs to be after the rpc recv tail block
+        lltail = self.llbuilder.append_basic_block(name="optarg.tail.{}".format(insn.arg_name))
+        self.llbuilder.branch(lltail)
+
+        # go back to head to add a branch to the tail
+        self.llbuilder.position_at_end(llhead)
+        llargrcvd = self.llbuilder.icmp_unsigned("<", self.map(insn.index), self.map(insn.rcv_count))
+        self.llbuilder.cbranch(llargrcvd, llrcv, lltail)
+
+        # argument not received/after arg recvd
+        self.llbuilder.position_at_end(lltail)
+
+        llargtype = self.llty_of_type(insn.arg_type)
+
+        llphi_arg_present = self.llbuilder.phi(lli1, name="optarg.phi.present.{}".format(insn.arg_name))
+        llphi_arg = self.llbuilder.phi(llargtype, name="optarg.phi.{}".format(insn.arg_name))
+
+        llphi_arg_present.add_incoming(ll.Constant(lli1, 0), llhead)
+        llphi_arg.add_incoming(ll.Constant(llargtype, ll.Undefined), llhead)
+
+        llphi_arg_present.add_incoming(ll.Constant(lli1, 1), llrpcretblock)
+        llphi_arg.add_incoming(llval, llrpcretblock)
+
+        lloptarg = ll.Constant(ll.LiteralStructType([lli1, llargtype]), ll.Undefined)
+        lloptarg = self.llbuilder.insert_value(lloptarg, llphi_arg_present, 0)
+        lloptarg = self.llbuilder.insert_value(lloptarg, llphi_arg, 1)
+
+        return lloptarg
+
     def attr_index(self, typ, attr):
         return list(typ.attributes.keys()).index(attr)
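Functionally, GetOptArgFromRemote builds the select "receive the argument only if its index is below the received count, otherwise mark it absent". A plain-Python rendering of the (present, value) pair produced by the phi nodes above; the recv callable is a hypothetical stand-in for the generated rpc_recv sequence:

def get_opt_arg_from_remote(index, rcvd_count, recv):
    # sketch of the optional-argument select built above
    if index < rcvd_count:
        return (True, recv())   # argument was sent: receive it
    return (False, None)        # argument missing: caller falls back to the default

print(get_opt_arg_from_remote(0, 1, lambda: 42))  # (True, 42)
print(get_opt_arg_from_remote(1, 1, lambda: 42))  # (False, None)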
@@ -825,8 +954,8 @@ class LLVMIRGenerator:
     def get_global_closure_ptr(self, typ, attr):
         closure_type = typ.attributes[attr]
         assert types.is_constructor(typ)
-        assert types.is_function(closure_type) or types.is_rpc(closure_type)
-        if types.is_external_function(closure_type) or types.is_rpc(closure_type):
+        assert types.is_function(closure_type) or types.is_rpc(closure_type) or types.is_subkernel(closure_type)
+        if types.is_external_function(closure_type) or types.is_rpc(closure_type) or types.is_subkernel(closure_type):
             return None
 
         llty = self.llty_of_type(typ.attributes[attr])
@@ -938,8 +1067,14 @@ class LLVMIRGenerator:
     def process_Offset(self, insn):
         base, idx = insn.base(), insn.index()
         llelts, llidx = map(self.map, (base, idx))
-        if not types._is_pointer(base.type):
+        if builtins.is_listish(base.type):
             # This is list-ish.
+            if builtins.is_list(base.type):
+                llelts = self.llbuilder.load(self.llbuilder.gep(llelts,
+                                                                [self.llindex(0),
+                                                                 self.llindex(0)],
+                                                                inbounds=True))
+            else:
                 llelts = self.llbuilder.extract_value(llelts, 0)
         llelt = self.llbuilder.gep(llelts, [llidx], inbounds=True)
         return llelt
@@ -954,8 +1089,14 @@ class LLVMIRGenerator:
     def process_SetElem(self, insn):
         base, idx = insn.base(), insn.index()
         llelts, llidx = map(self.map, (base, idx))
-        if not types._is_pointer(base.type):
+        if builtins.is_listish(base.type):
             # This is list-ish.
+            if builtins.is_list(base.type):
+                llelts = self.llbuilder.load(self.llbuilder.gep(llelts,
+                                                                [self.llindex(0),
+                                                                 self.llindex(0)],
+                                                                inbounds=True))
+            else:
                 llelts = self.llbuilder.extract_value(llelts, 0)
         llelt = self.llbuilder.gep(llelts, [llidx], inbounds=True)
         return self.llbuilder.store(self.map(insn.value()), llelt)
@@ -1102,6 +1243,11 @@ class LLVMIRGenerator:
         lllhs, llrhs = map(self.map, (insn.lhs(), insn.rhs()))
         assert lllhs.type == llrhs.type
 
+        if isinstance(lllhs.type, ll.PointerType) and \
+                isinstance(lllhs.type.pointee, ll.LiteralStructType):
+            lllhs = self.llbuilder.load(lllhs)
+            llrhs = self.llbuilder.load(llrhs)
+
         if isinstance(lllhs.type, ll.IntType):
             return self.llbuilder.icmp_signed(op, lllhs, llrhs,
                                               name=insn.name)
@@ -1172,6 +1318,11 @@ class LLVMIRGenerator:
                 shape = self.llbuilder.extract_value(self.map(collection),
                                                      self.attr_index(collection.type, "shape"))
                 return self.llbuilder.extract_value(shape, 0)
+            elif builtins.is_list(collection.type):
+                return self.llbuilder.load(self.llbuilder.gep(self.map(collection),
+                                                              [self.llindex(0),
+                                                               self.llindex(1)]))
             else:
                 return self.llbuilder.extract_value(self.map(collection), 1)
         elif insn.op in ("printf", "rtio_log"):
             # We only get integers, floats, pointers and strings here.
@@ -1247,9 +1398,38 @@ class LLVMIRGenerator:
                 return llstore_lo
             else:
                 return self.llbuilder.call(self.llbuiltin("delay_mu"), [llinterval])
+        elif insn.op == "end_catch":
+            return self.llbuilder.call(self.llbuiltin("__artiq_end_catch"), [])
+        elif insn.op == "subkernel_await_finish":
+            llsid = self.map(insn.operands[0])
+            lltimeout = self.map(insn.operands[1])
+            return self.llbuilder.call(self.llbuiltin("subkernel_await_finish"), [llsid, lltimeout],
+                                       name="subkernel.await.finish")
+        elif insn.op == "subkernel_retrieve_return":
+            llsid = self.map(insn.operands[0])
+            lltimeout = self.map(insn.operands[1])
+            lltagptr = self._build_subkernel_tags([insn.type])
+            self.llbuilder.call(self.llbuiltin("subkernel_await_message"),
+                                [llsid, lltimeout, lltagptr, ll.Constant(lli8, 1), ll.Constant(lli8, 1)],
+                                name="subkernel.await.message")
+            llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
+                                             name="subkernel.arg.stack")
+            return self._build_rpc_recv(insn.type, llstackptr)
+        elif insn.op == "subkernel_preload":
+            llsid = self.map(insn.operands[0])
+            return self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 0)],
+                                       name="subkernel.preload")
         else:
             assert False
 
+    def process_SubkernelAwaitArgs(self, insn):
+        llmin = self.map(insn.operands[0])
+        llmax = self.map(insn.operands[1])
+        lltagptr = self._build_subkernel_tags(insn.arg_types)
+        return self.llbuilder.call(self.llbuiltin("subkernel_await_message"),
+                                   [ll.Constant(lli32, 0), ll.Constant(lli64, 10_000), lltagptr, llmin, llmax],
+                                   name="subkernel.await.args")
+
     def process_Closure(self, insn):
         llenv = self.map(insn.environment())
         llenv = self.llbuilder.bitcast(llenv, llptr)
@@ -1271,11 +1451,24 @@ class LLVMIRGenerator:
         else:
             llfun = self.map(insn.static_target_function)
         llenv = self.llbuilder.extract_value(llclosure, 0, name="env.fun")
-        return llfun, [llenv] + list(llargs), {}
+        return llfun, [llenv] + list(llargs), {}, None
 
     def _prepare_ffi_call(self, insn):
         llargs = []
         llarg_attrs = {}
+
+        stack_save_needed = False
+        for i, arg in enumerate(insn.arguments()):
+            llarg = self.map(arg)
+            if isinstance(llarg.type, (ll.LiteralStructType, ll.IdentifiedStructType)):
+                stack_save_needed = True
+                break
+
+        if stack_save_needed:
+            llcallstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [])
+        else:
+            llcallstackptr = None
+
         for i, arg in enumerate(insn.arguments()):
             llarg = self.map(arg)
             if isinstance(llarg.type, (ll.LiteralStructType, ll.IdentifiedStructType)):
@@ -1308,10 +1501,83 @@ class LLVMIRGenerator:
                 llfun.args[idx].add_attribute(attr)
         if 'nounwind' in insn.target_function().type.flags:
             llfun.attributes.add('nounwind')
-        if 'nowrite' in insn.target_function().type.flags:
+        if 'nowrite' in insn.target_function().type.flags and not is_sret:
+            # Even if "nowrite" is correct from the user's perspective (doesn't
+            # access any other memory observable to ARTIQ Python), this isn't
+            # true on the LLVM IR level for sret return values.
             llfun.attributes.add('inaccessiblememonly')
 
-        return llfun, list(llargs), llarg_attrs
+        return llfun, list(llargs), llarg_attrs, llcallstackptr
+
+    def _build_subkernel_tags(self, tag_list):
+        def ret_error_handler(typ):
+            printer = types.TypePrinter()
+            note = diagnostic.Diagnostic("note",
+                "value of type {type}",
+                {"type": printer.name(typ)},
+                fun_loc)
+            diag = diagnostic.Diagnostic("error",
+                "type {type} is not supported in subkernels",
+                {"type": printer.name(fun_type.ret)},
+                fun_loc, notes=[note])
+            self.engine.process(diag)
+        tag = b"".join([ir.rpc_tag(arg_type, ret_error_handler) for arg_type in tag_list])
+        lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr()))
+        lltagptr = self.llbuilder.alloca(lltag.type)
+        self.llbuilder.store(lltag, lltagptr)
+        return lltagptr
+
+    def _build_rpc_recv(self, ret, llstackptr, llnormalblock=None, llunwindblock=None):
+        # T result = {
+        #   void *ret_ptr = alloca(sizeof(T));
+        #   void *ptr = ret_ptr;
+        #   loop: int size = rpc_recv(ptr);
+        #   // Non-zero: Provide `size` bytes of extra storage for variable-length data.
+        #   if(size) { ptr = alloca(size); goto loop; }
+        #   else *(T*)ret_ptr
+        # }
+        llprehead = self.llbuilder.basic_block
+        llhead = self.llbuilder.append_basic_block(name="rpc.head")
+        if llunwindblock:
+            llheadu = self.llbuilder.append_basic_block(name="rpc.head.unwind")
+        llalloc = self.llbuilder.append_basic_block(name="rpc.continue")
+        lltail = self.llbuilder.append_basic_block(name="rpc.tail")
+
+        llretty = self.llty_of_type(ret)
+        llslot = self.llbuilder.alloca(llretty, name="rpc.ret.alloc")
+        llslotgen = self.llbuilder.bitcast(llslot, llptr, name="rpc.ret.ptr")
+        self.llbuilder.branch(llhead)
+
+        self.llbuilder.position_at_end(llhead)
+        llphi = self.llbuilder.phi(llslotgen.type, name="rpc.ptr")
+        llphi.add_incoming(llslotgen, llprehead)
+        if llunwindblock:
+            llsize = self.llbuilder.invoke(self.llbuiltin("rpc_recv"), [llphi],
+                                           llheadu, llunwindblock,
+                                           name="rpc.size.next")
+            self.llbuilder.position_at_end(llheadu)
+        else:
+            llsize = self.llbuilder.call(self.llbuiltin("rpc_recv"), [llphi],
+                                         name="rpc.size.next")
+        lldone = self.llbuilder.icmp_unsigned('==', llsize, ll.Constant(llsize.type, 0),
+                                              name="rpc.done")
+        self.llbuilder.cbranch(lldone, lltail, llalloc)
+
+        self.llbuilder.position_at_end(llalloc)
+        llalloca = self.llbuilder.alloca(lli8, llsize, name="rpc.alloc")
+        llalloca.align = self.max_target_alignment
+        llphi.add_incoming(llalloca, llalloc)
+        self.llbuilder.branch(llhead)
+
+        self.llbuilder.position_at_end(lltail)
+        llret = self.llbuilder.load(llslot, name="rpc.ret")
+        if not ret.fold(False, lambda r, t: r or builtins.is_allocated(t)):
+            # We didn't allocate anything except the slot for the value itself.
+            # Don't waste stack space.
+            self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr])
+        if llnormalblock:
+            self.llbuilder.branch(llnormalblock)
+        return llret
+
     def _build_rpc(self, fun_loc, fun_type, args, llnormalblock, llunwindblock):
         llservice = ll.Constant(lli32, fun_type.service)
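The receive helper above keeps calling rpc_recv, growing a scratch allocation until the runtime reports that no more storage is needed. A self-contained sketch of that grow-and-retry loop, with a fake sequence of size requests standing in for the runtime:

def rpc_recv_loop(size_requests):
    # sketch of _build_rpc_recv: allocate the extra storage the runtime asks for,
    # stop when it asks for 0 bytes
    requests = iter(size_requests)
    allocations = []
    while True:
        size = next(requests)                 # stands in for the rpc_recv() call
        if size == 0:
            return allocations                # value is now complete in the return slot
        allocations.append(bytearray(size))   # stands in for alloca(size)

print([len(b) for b in rpc_recv_loop([16, 8, 0])])  # [16, 8]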
@@ -1388,57 +1654,103 @@ class LLVMIRGenerator:
 
             return ll.Undefined
 
-        # T result = {
-        #   void *ret_ptr = alloca(sizeof(T));
-        #   void *ptr = ret_ptr;
-        #   loop: int size = rpc_recv(ptr);
-        #   // Non-zero: Provide `size` bytes of extra storage for variable-length data.
-        #   if(size) { ptr = alloca(size); goto loop; }
-        #   else *(T*)ret_ptr
-        # }
-        llprehead = self.llbuilder.basic_block
-        llhead = self.llbuilder.append_basic_block(name="rpc.head")
-        if llunwindblock:
-            llheadu = self.llbuilder.append_basic_block(name="rpc.head.unwind")
-        llalloc = self.llbuilder.append_basic_block(name="rpc.continue")
-        lltail = self.llbuilder.append_basic_block(name="rpc.tail")
-
-        llretty = self.llty_of_type(fun_type.ret)
-        llslot = self.llbuilder.alloca(llretty, name="rpc.ret.alloc")
-        llslotgen = self.llbuilder.bitcast(llslot, llptr, name="rpc.ret.ptr")
-        self.llbuilder.branch(llhead)
-
-        self.llbuilder.position_at_end(llhead)
-        llphi = self.llbuilder.phi(llslotgen.type, name="rpc.ptr")
-        llphi.add_incoming(llslotgen, llprehead)
-        if llunwindblock:
-            llsize = self.llbuilder.invoke(self.llbuiltin("rpc_recv"), [llphi],
-                                           llheadu, llunwindblock,
-                                           name="rpc.size.next")
-            self.llbuilder.position_at_end(llheadu)
-        else:
-            llsize = self.llbuilder.call(self.llbuiltin("rpc_recv"), [llphi],
-                                         name="rpc.size.next")
-        lldone = self.llbuilder.icmp_unsigned('==', llsize, ll.Constant(llsize.type, 0),
-                                              name="rpc.done")
-        self.llbuilder.cbranch(lldone, lltail, llalloc)
-
-        self.llbuilder.position_at_end(llalloc)
-        llalloca = self.llbuilder.alloca(lli8, llsize, name="rpc.alloc")
-        llalloca.align = 4 # maximum alignment required by OR1K ABI
-        llphi.add_incoming(llalloca, llalloc)
-        self.llbuilder.branch(llhead)
-
-        self.llbuilder.position_at_end(lltail)
-        llret = self.llbuilder.load(llslot, name="rpc.ret")
-        if not fun_type.ret.fold(False, lambda r, t: r or builtins.is_allocated(t)):
-            # We didn't allocate anything except the slot for the value itself.
-            # Don't waste stack space.
-            self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr])
-        if llnormalblock:
-            self.llbuilder.branch(llnormalblock)
+        llret = self._build_rpc_recv(fun_type.ret, llstackptr, llnormalblock, llunwindblock)
         return llret
 
+    def _build_subkernel_call(self, fun_loc, fun_type, args):
+        llsid = ll.Constant(lli32, fun_type.sid)
+        tag = b""
+
+        for arg in args:
+            def arg_error_handler(typ):
+                printer = types.TypePrinter()
+                note = diagnostic.Diagnostic("note",
+                    "value of type {type}",
+                    {"type": printer.name(typ)},
+                    arg.loc)
+                diag = diagnostic.Diagnostic("error",
+                    "type {type} is not supported in subkernel calls",
+                    {"type": printer.name(arg.type)},
+                    arg.loc, notes=[note])
+                self.engine.process(diag)
+            tag += ir.rpc_tag(arg.type, arg_error_handler)
+        tag += b":"
+
+        # run the kernel first
+        self.llbuilder.call(self.llbuiltin("subkernel_load_run"), [llsid, ll.Constant(lli1, 1)])
+
+        # arg sent in the same vein as RPC
+        llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [],
+                                         name="subkernel.stack")
+
+        lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr()))
+        lltagptr = self.llbuilder.alloca(lltag.type)
+        self.llbuilder.store(lltag, lltagptr)
+
+        if args:
+            # only send args if there's anything to send, 'self' is excluded
+            llargs = self.llbuilder.alloca(llptr, ll.Constant(lli32, len(args)),
+                                           name="subkernel.args")
+            for index, arg in enumerate(args):
+                if builtins.is_none(arg.type):
+                    llargslot = self.llbuilder.alloca(llunit,
+                                                      name="subkernel.arg{}".format(index))
+                else:
+                    llarg = self.map(arg)
+                    llargslot = self.llbuilder.alloca(llarg.type,
+                                                      name="subkernel.arg{}".format(index))
+                    self.llbuilder.store(llarg, llargslot)
+                llargslot = self.llbuilder.bitcast(llargslot, llptr)
+
+                llargptr = self.llbuilder.gep(llargs, [ll.Constant(lli32, index)])
+                self.llbuilder.store(llargslot, llargptr)
+
+            llargcount = ll.Constant(lli8, len(args))
+
+            self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
+                                [llsid, llargcount, lltagptr, llargs])
+            self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llstackptr])
+
+        return llsid
+
+    def _build_subkernel_return(self, insn):
+        # builds a remote return.
+        # unlike args, return only sends one thing.
+        if builtins.is_none(insn.value().type):
+            # do not waste time and bandwidth on Nones
+            return
+
+        def ret_error_handler(typ):
+            printer = types.TypePrinter()
+            note = diagnostic.Diagnostic("note",
+                "value of type {type}",
+                {"type": printer.name(typ)},
+                fun_loc)
+            diag = diagnostic.Diagnostic("error",
+                "return type {type} is not supported in subkernel returns",
+                {"type": printer.name(fun_type.ret)},
+                fun_loc, notes=[note])
+            self.engine.process(diag)
+        tag = ir.rpc_tag(insn.value().type, ret_error_handler)
+        tag += b":"
+        lltag = self.llconst_of_const(ir.Constant(tag, builtins.TStr()))
+        lltagptr = self.llbuilder.alloca(lltag.type)
+        self.llbuilder.store(lltag, lltagptr)
+
+        llrets = self.llbuilder.alloca(llptr, ll.Constant(lli32, 1),
+                                       name="subkernel.return")
+        llret = self.map(insn.value())
+        llretslot = self.llbuilder.alloca(llret.type, name="subkernel.retval")
+        self.llbuilder.store(llret, llretslot)
+        llretslot = self.llbuilder.bitcast(llretslot, llptr)
+        self.llbuilder.store(llretslot, llrets)
+
+        llsid = ll.Constant(lli32, 0)  # return goes back to master, sid is ignored
+        lltagcount = ll.Constant(lli8, 1)  # only one thing is returned
+        self.llbuilder.call(self.llbuiltin("subkernel_send_message"),
+                            [llsid, lltagcount, lltagptr, llrets])
+
     def process_Call(self, insn):
         functiontyp = insn.target_function().type
         if types.is_rpc(functiontyp):
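The call helper above always follows the same order: start the subkernel, then serialise and send the arguments; a Return marked as remote later sends a single tagged value back. A stub-based sketch of that message sequence (TraceRuntime and its methods are illustrative, not the ARTIQ runtime API):

def build_subkernel_call(sid, args, runtime):
    # sketch of _build_subkernel_call's interaction order with the runtime
    runtime.load_run(sid, run=True)        # run the kernel first
    if args:                               # only send args if there is anything to send
        runtime.send_message(sid, count=len(args), payload=list(args))
    return sid

class TraceRuntime:
    def __init__(self):
        self.trace = []
    def load_run(self, sid, run):
        self.trace.append(("load_run", sid, run))
    def send_message(self, sid, count, payload):
        self.trace.append(("send", sid, count))

rt = TraceRuntime()
build_subkernel_call(5, [1, 2, 3], rt)
print(rt.trace)  # [('load_run', 5, True), ('send', 5, 3)]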
@@ -1446,10 +1758,14 @@ class LLVMIRGenerator:
                                    functiontyp,
                                    insn.arguments(),
                                    llnormalblock=None, llunwindblock=None)
+        elif types.is_subkernel(functiontyp):
+            return self._build_subkernel_call(insn.target_function().loc,
+                                              functiontyp,
+                                              insn.arguments())
         elif types.is_external_function(functiontyp):
-            llfun, llargs, llarg_attrs = self._prepare_ffi_call(insn)
+            llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_ffi_call(insn)
         else:
-            llfun, llargs, llarg_attrs = self._prepare_closure_call(insn)
+            llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_closure_call(insn)
 
         if self.has_sret(functiontyp):
             llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [])
@@ -1468,6 +1784,9 @@ class LLVMIRGenerator:
             # {} elsewhere.
             llresult = ll.Constant(llunit, [])
 
+        if llcallstackptr != None:
+            self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llcallstackptr])
+
         return llresult
 
     def process_Invoke(self, insn):
@@ -1479,10 +1798,15 @@ class LLVMIRGenerator:
                                    functiontyp,
                                    insn.arguments(),
                                    llnormalblock, llunwindblock)
+        elif types.is_subkernel(functiontyp):
+            return self._build_subkernel_call(insn.target_function().loc,
+                                              functiontyp,
+                                              insn.arguments(),
+                                              llnormalblock, llunwindblock)
         elif types.is_external_function(functiontyp):
-            llfun, llargs, llarg_attrs = self._prepare_ffi_call(insn)
+            llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_ffi_call(insn)
         else:
-            llfun, llargs, llarg_attrs = self._prepare_closure_call(insn)
+            llfun, llargs, llarg_attrs, llcallstackptr = self._prepare_closure_call(insn)
 
         if self.has_sret(functiontyp):
             llstackptr = self.llbuilder.call(self.llbuiltin("llvm.stacksave"), [])
@@ -1504,6 +1828,9 @@ class LLVMIRGenerator:
         # The !tbaa metadata is not legal to use with the invoke instruction,
         # so unlike process_Call, we do not set it here.
 
+        if llcallstackptr != None:
+            self.llbuilder.call(self.llbuiltin("llvm.stackrestore"), [llcallstackptr])
+
         return llresult
 
     def _quote_listish_to_llglobal(self, value, elt_type, path, kind_name):
|
@ -1554,7 +1881,8 @@ class LLVMIRGenerator:
|
||||||
attrvalue = getattr(value, attr)
|
attrvalue = getattr(value, attr)
|
||||||
is_class_function = (types.is_constructor(typ) and
|
is_class_function = (types.is_constructor(typ) and
|
||||||
types.is_function(typ.attributes[attr]) and
|
types.is_function(typ.attributes[attr]) and
|
||||||
not types.is_external_function(typ.attributes[attr]))
|
not types.is_external_function(typ.attributes[attr]) and
|
||||||
|
not types.is_subkernel(typ.attributes[attr]))
|
||||||
if is_class_function:
|
if is_class_function:
|
||||||
attrvalue = self.embedding_map.specialize_function(typ.instance, attrvalue)
|
attrvalue = self.embedding_map.specialize_function(typ.instance, attrvalue)
|
||||||
if not (types.is_instance(typ) and attr in typ.constant_attributes):
|
if not (types.is_instance(typ) and attr in typ.constant_attributes):
|
||||||
|
@@ -1625,6 +1953,13 @@ class LLVMIRGenerator:
             assert isinstance(value, (list, numpy.ndarray)), fail_msg
             elt_type = builtins.get_iterable_elt(typ)
             lleltsptr = self._quote_listish_to_llglobal(value, elt_type, path, typ.find().name)
+            if builtins.is_list(typ):
+                llconst = ll.Constant(llty.pointee, [lleltsptr, ll.Constant(lli32, len(value))])
+                name = self.llmodule.scope.deduplicate("quoted.{}".format(typ.find().name))
+                llglobal = ll.GlobalVariable(self.llmodule, llconst.type, name)
+                llglobal.initializer = llconst
+                llglobal.linkage = "private"
+                return llglobal
             llconst = ll.Constant(llty, [lleltsptr, ll.Constant(lli32, len(value))])
             return llconst
         elif types.is_tuple(typ):
@@ -1632,7 +1967,8 @@ class LLVMIRGenerator:
             llelts = [self._quote(v, t, lambda: path() + [str(i)])
                       for i, (v, t) in enumerate(zip(value, typ.elts))]
             return ll.Constant(llty, llelts)
-        elif types.is_rpc(typ) or types.is_external_function(typ) or types.is_builtin_function(typ):
+        elif types.is_rpc(typ) or types.is_external_function(typ) or \
+                types.is_builtin_function(typ) or types.is_subkernel(typ):
             # RPC, C and builtin functions have no runtime representation.
             return ll.Constant(llty, ll.Undefined)
         elif types.is_function(typ):
@@ -1678,10 +2014,17 @@ class LLVMIRGenerator:
     def process_IndirectBranch(self, insn):
         llinsn = self.llbuilder.branch_indirect(self.map(insn.target()))
         for dest in insn.destinations():
-            llinsn.add_destination(self.map(dest))
+            dest = self.map(dest)
+            self.add_pred(self.llbuilder.basic_block, dest)
+            if dest not in self.llpred_map:
+                self.llpred_map[dest] = set()
+            self.llpred_map[dest].add(self.llbuilder.basic_block)
+            llinsn.add_destination(dest)
         return llinsn
 
     def process_Return(self, insn):
+        if insn.remote_return:
+            self._build_subkernel_return(insn)
         if builtins.is_none(insn.value().type):
             return self.llbuilder.ret_void()
         else:
@@ -1716,8 +2059,8 @@ class LLVMIRGenerator:
         llexn = self.map(insn.value())
         return self._gen_raise(insn, self.llbuiltin("__artiq_raise"), [llexn])
 
-    def process_Reraise(self, insn):
-        return self._gen_raise(insn, self.llbuiltin("__artiq_reraise"), [])
+    def process_Resume(self, insn):
+        return self._gen_raise(insn, self.llbuiltin("__artiq_resume"), [])
 
     def process_LandingPad(self, insn):
         # Layout on return from landing pad: {%_Unwind_Exception*, %Exception*}
@@ -1726,10 +2069,11 @@
                                                      cleanup=insn.has_cleanup)
         llrawexn = self.llbuilder.extract_value(lllandingpad, 1)
         llexn = self.llbuilder.bitcast(llrawexn, self.llty_of_type(insn.type))
-        llexnnameptr = self.llbuilder.gep(llexn, [self.llindex(0), self.llindex(0)],
-                                          inbounds=True)
-        llexnname = self.llbuilder.load(llexnnameptr)
+        llexnidptr = self.llbuilder.gep(llexn, [self.llindex(0), self.llindex(0)],
+                                        inbounds=True)
+        llexnid = self.llbuilder.load(llexnidptr)
 
+        landingpadbb = self.llbuilder.basic_block
         for target, typ in insn.clauses():
             if typ is None:
                 # we use a null pointer here, similar to how cpp does it
@@ -1742,42 +2086,40 @@
                         ll.Constant(lli32, 0).inttoptr(llptr)
                     )
                 )
-            else:
-                exnname = "{}:{}".format(typ.id, typ.name)
-
-                llclauseexnname = self.llconst_of_const(
-                    ir.Constant(exnname, builtins.TStr()))
-                llclauseexnnameptr = self.llmodule.globals.get("exn.{}".format(exnname))
-                if llclauseexnnameptr is None:
-                    llclauseexnnameptr = ll.GlobalVariable(self.llmodule, llclauseexnname.type,
-                                                           name="exn.{}".format(exnname))
-                    llclauseexnnameptr.global_constant = True
-                    llclauseexnnameptr.initializer = llclauseexnname
-                    llclauseexnnameptr.linkage = "private"
-                    llclauseexnnameptr.unnamed_addr = True
-                lllandingpad.add_clause(ll.CatchClause(llclauseexnnameptr))
-
             if typ is None:
                 # typ is None means that we match all exceptions, so no need to
                 # compare
-                self.llbuilder.branch(self.map(target))
+                target = self.map(target)
+                self.add_pred(landingpadbb, target)
+                self.add_pred(landingpadbb, self.llbuilder.basic_block)
+                self.llbuilder.branch(target)
             else:
-                llexnlen = self.llbuilder.extract_value(llexnname, 1)
-                llclauseexnlen = self.llbuilder.extract_value(llclauseexnname, 1)
-                llmatchinglen = self.llbuilder.icmp_unsigned('==', llexnlen, llclauseexnlen)
-                with self.llbuilder.if_then(llmatchinglen):
-                    llexnptr = self.llbuilder.extract_value(llexnname, 0)
-                    llclauseexnptr = self.llbuilder.extract_value(llclauseexnname, 0)
-                    llcomparedata = self.llbuilder.call(self.llbuiltin("memcmp"),
-                                                        [llexnptr, llclauseexnptr, llexnlen])
-                    llmatchingdata = self.llbuilder.icmp_unsigned('==', llcomparedata,
-                                                                  ll.Constant(lli32, 0))
+                exnname = "{}:{}".format(typ.id, typ.name)
+                llclauseexnidptr = self.llmodule.globals.get("exn.{}".format(exnname))
+                exnid = ll.Constant(lli32, self.embedding_map.store_str(exnname))
+                if llclauseexnidptr is None:
+                    llclauseexnidptr = ll.GlobalVariable(self.llmodule, lli32,
+                                                         name="exn.{}".format(exnname))
+                    llclauseexnidptr.global_constant = True
+                    llclauseexnidptr.initializer = exnid
+                    llclauseexnidptr.linkage = "private"
+                    llclauseexnidptr.unnamed_addr = True
+                lllandingpad.add_clause(ll.CatchClause(llclauseexnidptr))
+                llmatchingdata = self.llbuilder.icmp_unsigned("==", llexnid,
+                                                              exnid)
                 with self.llbuilder.if_then(llmatchingdata):
-                    self.llbuilder.branch(self.map(target))
+                    target = self.map(target)
+                    self.add_pred(landingpadbb, target)
+                    self.add_pred(landingpadbb, self.llbuilder.basic_block)
+                    self.llbuilder.branch(target)
+                self.add_pred(landingpadbb, self.llbuilder.basic_block)
 
         if self.llbuilder.basic_block.terminator is None:
             if insn.has_cleanup:
-                self.llbuilder.branch(self.map(insn.cleanup()))
+                target = self.map(insn.cleanup())
+                self.add_pred(landingpadbb, target)
+                self.add_pred(landingpadbb, self.llbuilder.basic_block)
+                self.llbuilder.branch(target)
             else:
                 self.llbuilder.resume(lllandingpad)

@@ -385,6 +385,50 @@ class TRPC(Type):
     def __hash__(self):
         return hash(self.service)
 
+
+class TSubkernel(TFunction):
+    """
+    A kernel to be run on a satellite.
+
+    :ivar args: (:class:`collections.OrderedDict` of string to :class:`Type`)
+        function arguments
+    :ivar ret: (:class:`Type`)
+        return type
+    :ivar sid: (int) subkernel ID number
+    :ivar destination: (int) satellite destination number
+    """
+
+    attributes = OrderedDict()
+
+    def __init__(self, args, optargs, ret, sid, destination):
+        assert isinstance(ret, Type)
+        super().__init__(args, optargs, ret)
+        self.sid, self.destination = sid, destination
+        self.delay = TFixedDelay(iodelay.Const(0))
+
+    def unify(self, other):
+        if other is self:
+            return
+        if isinstance(other, TSubkernel) and \
+                self.sid == other.sid and \
+                self.destination == other.destination:
+            self.ret.unify(other.ret)
+        elif isinstance(other, TVar):
+            other.unify(self)
+        else:
+            raise UnificationError(self, other)
+
+    def __repr__(self):
+        if getattr(builtins, "__in_sphinx__", False):
+            return str(self)
+        return "artiq.compiler.types.TSubkernel({})".format(repr(self.ret))
+
+    def __eq__(self, other):
+        return isinstance(other, TSubkernel) and \
+                self.sid == other.sid
+
+    def __hash__(self):
+        return hash(self.sid)
+
+
 class TBuiltin(Type):
     """
     An instance of builtin type. Every instance of a builtin
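
Since TSubkernel reuses the TFunction machinery but keys unification on (sid, destination), here is a minimal illustrative sketch of the behaviour documented above; the IDs, destination number and the printed form are made up for illustration and are not taken from the ARTIQ test suite:

    from collections import OrderedDict
    from artiq.compiler import types

    # Two declarations of the same subkernel (same sid/destination), one with an
    # unresolved return type: unify() only reconciles the return types.
    a = types.TSubkernel(OrderedDict(), OrderedDict(), types.TVar(), sid=1, destination=4)
    b = types.TSubkernel(OrderedDict(), OrderedDict(), types.TInt32(), sid=1, destination=4)
    a.unify(b)
    assert types.is_subkernel(a)
    print(types.TypePrinter().name(a))   # e.g. "<subkernel1 dest#4>->numpy.int32"
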
@@ -644,6 +688,9 @@ def is_function(typ):
 def is_rpc(typ):
     return isinstance(typ.find(), TRPC)
 
+def is_subkernel(typ):
+    return isinstance(typ.find(), TSubkernel)
+
 def is_external_function(typ, name=None):
     typ = typ.find()
     if name is None:
@@ -810,6 +857,10 @@ class TypePrinter(object):
             return "[rpc{} #{}](...)->{}".format(typ.service,
                                                  " async" if typ.is_async else "",
                                                  self.name(typ.ret, depth + 1))
+        elif isinstance(typ, TSubkernel):
+            return "<subkernel{} dest#{}>->{}".format(typ.sid,
+                                                      typ.destination,
+                                                      self.name(typ.ret, depth + 1))
         elif isinstance(typ, TBuiltinFunction):
             return "<function {}>".format(typ.name)
         elif isinstance(typ, (TConstructor, TExceptionConstructor)):

@@ -102,7 +102,19 @@ class RegionOf(algorithm.Visitor):
         if types.is_external_function(node.func.type, "cache_get"):
             # The cache is borrow checked dynamically
             return Global()
-        else:
+
+        if (types.is_builtin_function(node.func.type, "array")
+                or types.is_builtin_function(node.func.type, "make_array")
+                or types.is_builtin_function(node.func.type, "numpy.transpose")):
+            # While lifetime tracking across function calls in general is currently
+            # broken (see below), these special builtins that allocate an array on
+            # the stack of the caller _always_ allocate regardless of the parameters,
+            # and we can thus handle them without running into the precision issue
+            # mentioned in commit ae999db.
+            return self.visit_allocating(node)
+
+        # FIXME: Return statement missing here, but see m-labs/artiq#1497 and
+        # commit ae999db.
         self.visit_sometimes_allocating(node)
 
         # Value lives as long as the object/container, if it's mutable,

@@ -233,7 +233,7 @@ class AD53xx:
     def write_gain_mu(self, channel, gain=0xffff):
         """Program the gain register for a DAC channel.
 
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
 
         :param gain: 16-bit gain register value (default: 0xffff)
@@ -245,7 +245,7 @@ class AD53xx:
     def write_offset_mu(self, channel, offset=0x8000):
         """Program the offset register for a DAC channel.
 
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
 
        :param offset: 16-bit offset register value (default: 0x8000)
@@ -258,7 +258,7 @@ class AD53xx:
         """Program the DAC offset voltage for a channel.
 
         An offset of +V can be used to trim out a DAC offset error of -V.
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
 
         :param voltage: the offset voltage
@@ -270,7 +270,7 @@ class AD53xx:
     def write_dac_mu(self, channel, value):
         """Program the DAC input register for a channel.
 
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
         """
         self.bus.write(
@@ -280,7 +280,7 @@ class AD53xx:
     def write_dac(self, channel, voltage):
         """Program the DAC output voltage for a channel.
 
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
         """
         self.write_dac_mu(channel, voltage_to_mu(voltage, self.offset_dacs,
@@ -313,7 +313,7 @@ class AD53xx:
 
         If no LDAC device was defined, the LDAC pulse is skipped.
 
-        See :meth load:.
+        See :meth:`load`.
 
         :param values: list of DAC values to program
         :param channels: list of DAC channels to program. If not specified,
@@ -355,7 +355,7 @@ class AD53xx:
         """ Two-point calibration of a DAC channel.
 
         Programs the offset and gain register to trim out DAC errors. Does not
-        take effect until LDAC is pulsed (see :meth load:).
+        take effect until LDAC is pulsed (see :meth:`load`).
 
         Calibration consists of measuring the DAC output voltage for a channel
         with the DAC set to zero-scale (0x0000) and full-scale (0xffff).
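
All of these docstrings defer to :meth:`load`, so a short usage sketch of the intended staged-update flow may help; the device database entry name ("ad53xx") and channel numbers are illustrative only:

    from artiq.experiment import EnvExperiment, kernel, delay, ms

    class SetDACVoltages(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("ad53xx")   # hypothetical AD53xx device entry

        @kernel
        def run(self):
            self.core.reset()
            self.ad53xx.init()
            delay(1*ms)
            # Stage values in the DAC input registers; the outputs do not move yet.
            self.ad53xx.write_dac(0, 1.5)
            self.ad53xx.write_dac(3, -2.0)
            # Pulse LDAC: both staged channels update simultaneously.
            self.ad53xx.load()
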
File diff suppressed because it is too large
@@ -1,23 +0,0 @@
-from artiq.language.core import kernel
-
-
-class AD9154:
-    """Kernel interface to AD9154 registers, using non-realtime SPI."""
-
-    def __init__(self, dmgr, spi_device, chip_select):
-        self.core = dmgr.get("core")
-        self.bus = dmgr.get(spi_device)
-        self.chip_select = chip_select
-
-    @kernel
-    def setup_bus(self, div=16):
-        self.bus.set_config_mu(0, 24, div, self.chip_select)
-
-    @kernel
-    def write(self, addr, data):
-        self.bus.write((addr << 16) | (data << 8))
-
-    @kernel
-    def read(self, addr):
-        self.write((1 << 15) | addr, 0)
-        return self.bus.read()

@@ -25,12 +25,14 @@ class AD9912:
     f_ref/clk_div*pll_n where f_ref is the reference frequency and clk_div
     is the reference clock divider (both set in the parent Urukul CPLD
     instance).
+    :param pll_en: PLL enable bit, set to 0 to bypass PLL (default: 1).
+        Note that when bypassing the PLL the red front panel LED may remain on.
     """
 
     def __init__(self, dmgr, chip_select, cpld_device, sw_device=None,
-                 pll_n=10):
+                 pll_n=10, pll_en=1):
         self.kernel_invariants = {"cpld", "core", "bus", "chip_select",
-                                  "pll_n", "ftw_per_hz"}
+                                  "pll_n", "pll_en", "ftw_per_hz"}
         self.cpld = dmgr.get(cpld_device)
         self.core = self.cpld.core
         self.bus = self.cpld.bus
@@ -39,8 +41,12 @@ class AD9912:
         if sw_device:
             self.sw = dmgr.get(sw_device)
             self.kernel_invariants.add("sw")
+        self.pll_en = pll_en
         self.pll_n = pll_n
-        sysclk = self.cpld.refclk / [1, 1, 2, 4][self.cpld.clk_div] * pll_n
+        if pll_en:
+            sysclk = self.cpld.refclk / [1, 1, 2, 4][self.cpld.clk_div] * pll_n
+        else:
+            sysclk = self.cpld.refclk
         assert sysclk <= 1e9
         self.ftw_per_hz = 1 / sysclk * (int64(1) << 48)
 
@@ -102,8 +108,10 @@ class AD9912:
             raise ValueError("Urukul AD9912 product id mismatch")
         delay(50 * us)
         # HSTL power down, CMOS power down
-        self.write(AD9912_PWRCNTRL1, 0x80, length=1)
+        pwrcntrl1 = 0x80 | ((~self.pll_en & 1) << 4)
+        self.write(AD9912_PWRCNTRL1, pwrcntrl1, length=1)
         self.cpld.io_update.pulse(2 * us)
+        if self.pll_en:
             self.write(AD9912_N_DIV, self.pll_n // 2 - 2, length=1)
             self.cpld.io_update.pulse(2 * us)
             # I_cp = 375 µA, VCO high range
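
As a concrete check of the sysclk/FTW arithmetic above, a worked example with illustrative numbers (a 125 MHz Urukul reference, clk_div index 0; these are not defaults taken from any particular device database):

    refclk = 125e6            # Urukul reference frequency
    clk_div = 0               # divider index -> [1, 1, 2, 4][0] == 1
    pll_n = 10
    sysclk = refclk / [1, 1, 2, 4][clk_div] * pll_n   # 1.25 GHz: would trip `assert sysclk <= 1e9`

    pll_n = 8
    sysclk = refclk / [1, 1, 2, 4][clk_div] * pll_n   # 1.0 GHz, accepted
    ftw_per_hz = 1 / sysclk * (1 << 48)               # ~281474.98 FTW units per Hz
    print(round(100e6 * ftw_per_hz))                  # frequency tuning word for a 100 MHz output

With pll_en=0 the same formula is used with sysclk = refclk, so the tuning resolution degrades accordingly.
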
@@ -80,6 +80,13 @@ class AD9914:
         self.set_x_duration_mu = 7 * self.write_duration_mu
         self.exit_x_duration_mu = 3 * self.write_duration_mu
 
+    @staticmethod
+    def get_rtio_channels(bus_channel, channel, **kwargs):
+        # return only first entry, as there are several devices with the same RTIO channel
+        if channel == 0:
+            return [(bus_channel, None)]
+        return []
+
     @kernel
     def write(self, addr, data):
         rtio_output((self.bus_channel << 8) | addr, data)

@@ -73,6 +73,10 @@ class ADF5356:
 
         self._init_registers()
 
+    @staticmethod
+    def get_rtio_channels(**kwargs):
+        return []
+
     @kernel
     def init(self, blind=False):
         """
@@ -102,6 +106,18 @@ class ADF5356:
         else:
             self.sync()
 
+    @kernel
+    def set_att(self, att):
+        """Set digital step attenuator in SI units.
+
+        This method will write the attenuator settings of the channel.
+
+        .. seealso:: :meth:`artiq.coredevice.mirny.Mirny.set_att`
+
+        :param att: Attenuation in dB.
+        """
+        self.cpld.set_att(self.channel, att)
+
     @kernel
     def set_att_mu(self, att):
         """Set digital step attenuator in machine units.

@ -0,0 +1,185 @@
|
||||||
|
from artiq.language.core import kernel, portable
|
||||||
|
|
||||||
|
from numpy import int32
|
||||||
|
|
||||||
|
|
||||||
|
# almazny-specific data
|
||||||
|
ALMAZNY_LEGACY_REG_BASE = 0x0C
|
||||||
|
ALMAZNY_LEGACY_OE_SHIFT = 12
|
||||||
|
|
||||||
|
# higher SPI write divider to match almazny shift register timing
|
||||||
|
# min SER time before SRCLK rise = 125ns
|
||||||
|
# -> div=32 gives 125ns for data before clock rise
|
||||||
|
# works at faster dividers too but could be less reliable
|
||||||
|
ALMAZNY_LEGACY_SPIT_WR = 32
|
||||||
|
|
||||||
|
|
||||||
|
class AlmaznyLegacy:
|
||||||
|
"""
|
||||||
|
Almazny (High frequency mezzanine board for Mirny)
|
||||||
|
|
||||||
|
This applies to Almazny hardware v1.1 and earlier.
|
||||||
|
Use :class:`artiq.coredevice.almazny.AlmaznyChannel` for Almazny v1.2 and later.
|
||||||
|
|
||||||
|
:param host_mirny: Mirny device Almazny is connected to
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, dmgr, host_mirny):
|
||||||
|
self.mirny_cpld = dmgr.get(host_mirny)
|
||||||
|
self.att_mu = [0x3f] * 4
|
||||||
|
self.channel_sw = [0] * 4
|
||||||
|
self.output_enable = False
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def init(self):
|
||||||
|
self.output_toggle(self.output_enable)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def att_to_mu(self, att):
|
||||||
|
"""
|
||||||
|
Convert an attenuator setting in dB to machine units.
|
||||||
|
|
||||||
|
:param att: attenuator setting in dB [0-31.5]
|
||||||
|
:return: attenuator setting in machine units
|
||||||
|
"""
|
||||||
|
mu = round(att * 2.0)
|
||||||
|
if mu > 63 or mu < 0:
|
||||||
|
raise ValueError("Invalid Almazny attenuator settings!")
|
||||||
|
return mu
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def mu_to_att(self, att_mu):
|
||||||
|
"""
|
||||||
|
Convert a digital attenuator setting to dB.
|
||||||
|
|
||||||
|
:param att_mu: attenuator setting in machine units
|
||||||
|
:return: attenuator setting in dB
|
||||||
|
"""
|
||||||
|
return att_mu / 2
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_att(self, channel, att, rf_switch=True):
|
||||||
|
"""
|
||||||
|
Sets attenuators on chosen shift register (channel).
|
||||||
|
|
||||||
|
:param channel: index of the register [0-3]
|
||||||
|
:param att: attenuation setting in dBm [0-31.5]
|
||||||
|
:param rf_switch: rf switch (bool)
|
||||||
|
"""
|
||||||
|
self.set_att_mu(channel, self.att_to_mu(att), rf_switch)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_att_mu(self, channel, att_mu, rf_switch=True):
|
||||||
|
"""
|
||||||
|
Sets attenuators on chosen shift register (channel).
|
||||||
|
|
||||||
|
:param channel: index of the register [0-3]
|
||||||
|
:param att_mu: attenuation setting in machine units [0-63]
|
||||||
|
:param rf_switch: rf switch (bool)
|
||||||
|
"""
|
||||||
|
self.channel_sw[channel] = 1 if rf_switch else 0
|
||||||
|
self.att_mu[channel] = att_mu
|
||||||
|
self._update_register(channel)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def output_toggle(self, oe):
|
||||||
|
"""
|
||||||
|
Toggles output on all shift registers on or off.
|
||||||
|
|
||||||
|
:param oe: toggle output enable (bool)
|
||||||
|
"""
|
||||||
|
self.output_enable = oe
|
||||||
|
cfg_reg = self.mirny_cpld.read_reg(1)
|
||||||
|
en = 1 if self.output_enable else 0
|
||||||
|
delay(100 * us)
|
||||||
|
new_reg = (en << ALMAZNY_LEGACY_OE_SHIFT) | (cfg_reg & 0x3FF)
|
||||||
|
self.mirny_cpld.write_reg(1, new_reg)
|
||||||
|
delay(100 * us)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def _flip_mu_bits(self, mu):
|
||||||
|
# in this form MSB is actually 0.5dB attenuator
|
||||||
|
# unnatural for users, so we flip the six bits
|
||||||
|
return (((mu & 0x01) << 5)
|
||||||
|
| ((mu & 0x02) << 3)
|
||||||
|
| ((mu & 0x04) << 1)
|
||||||
|
| ((mu & 0x08) >> 1)
|
||||||
|
| ((mu & 0x10) >> 3)
|
||||||
|
| ((mu & 0x20) >> 5))
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def _update_register(self, ch):
|
||||||
|
self.mirny_cpld.write_ext(
|
||||||
|
ALMAZNY_LEGACY_REG_BASE + ch,
|
||||||
|
8,
|
||||||
|
self._flip_mu_bits(self.att_mu[ch]) | (self.channel_sw[ch] << 6),
|
||||||
|
ALMAZNY_LEGACY_SPIT_WR
|
||||||
|
)
|
||||||
|
delay(100 * us)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def _update_all_registers(self):
|
||||||
|
for i in range(4):
|
||||||
|
self._update_register(i)
|
||||||
|
|
||||||
|
|
||||||
|
class AlmaznyChannel:
|
||||||
|
"""
|
||||||
|
One Almazny channel
|
||||||
|
Almazny is a mezzanine for the Quad PLL RF source Mirny that exposes and
|
||||||
|
controls the frequency-doubled outputs.
|
||||||
|
This driver requires Almazny hardware revision v1.2 or later
|
||||||
|
and Mirny CPLD gateware v0.3 or later.
|
||||||
|
Use :class:`artiq.coredevice.almazny.AlmaznyLegacy` for Almazny hardware v1.1 and earlier.
|
||||||
|
|
||||||
|
:param host_mirny: Mirny CPLD device name
|
||||||
|
:param channel: channel index (0-3)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, dmgr, host_mirny, channel):
|
||||||
|
self.channel = channel
|
||||||
|
self.mirny_cpld = dmgr.get(host_mirny)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def to_mu(self, att, enable, led):
|
||||||
|
"""
|
||||||
|
Convert an attenuation in dB, RF switch state and LED state to machine
|
||||||
|
units.
|
||||||
|
|
||||||
|
:param att: attenuator setting in dB (0-31.5)
|
||||||
|
:param enable: RF switch state (bool)
|
||||||
|
:param led: LED state (bool)
|
||||||
|
:return: channel setting in machine units
|
||||||
|
"""
|
||||||
|
mu = int32(round(att * 2.))
|
||||||
|
if mu >= 64 or mu < 0:
|
||||||
|
raise ValueError("Attenuation out of range")
|
||||||
|
# unfortunate hardware design: bit reverse
|
||||||
|
mu = ((mu & 0x15) << 1) | ((mu >> 1) & 0x15)
|
||||||
|
mu = ((mu & 0x03) << 4) | (mu & 0x0c) | ((mu >> 4) & 0x03)
|
||||||
|
if enable:
|
||||||
|
mu |= 1 << 6
|
||||||
|
if led:
|
||||||
|
mu |= 1 << 7
|
||||||
|
return mu
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_mu(self, mu):
|
||||||
|
"""
|
||||||
|
Set channel state (machine units).
|
||||||
|
|
||||||
|
:param mu: channel state in machine units.
|
||||||
|
"""
|
||||||
|
self.mirny_cpld.write_ext(
|
||||||
|
addr=0xc + self.channel, length=8, data=mu, ext_div=32)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set(self, att, enable, led=False):
|
||||||
|
"""
|
||||||
|
Set attenuation, RF switch, and LED state (SI units).
|
||||||
|
|
||||||
|
:param att: attenuator setting in dB (0-31.5)
|
||||||
|
:param enable: RF switch state (bool)
|
||||||
|
:param led: LED state (bool)
|
||||||
|
"""
|
||||||
|
self.set_mu(self.to_mu(att, enable, led))
|
|
@ -1,79 +0,0 @@
|
||||||
from artiq.language.core import kernel, portable, delay
|
|
||||||
from artiq.language.units import us, ms
|
|
||||||
from artiq.coredevice.shiftreg import ShiftReg
|
|
||||||
|
|
||||||
|
|
||||||
@portable
|
|
||||||
def to_mu(att):
|
|
||||||
return round(att*2.0) ^ 0x3f
|
|
||||||
|
|
||||||
@portable
|
|
||||||
def from_mu(att_mu):
|
|
||||||
return 0.5*(att_mu ^ 0x3f)
|
|
||||||
|
|
||||||
|
|
||||||
class BaseModAtt:
|
|
||||||
def __init__(self, dmgr, rst_n, clk, le, mosi, miso):
|
|
||||||
self.rst_n = dmgr.get(rst_n)
|
|
||||||
self.shift_reg = ShiftReg(dmgr,
|
|
||||||
clk=clk, ser=mosi, latch=le, ser_in=miso, n=8*4)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def reset(self):
|
|
||||||
# HMC's incompetence in digital design and interfaces means that
|
|
||||||
# the HMC542 needs a level low on RST_N and then a rising edge
|
|
||||||
# on Latch Enable. Their "latch" isn't a latch but a DFF.
|
|
||||||
# Of course, it also powers up with a random attenuation, and
|
|
||||||
# that cannot be fixed with simple pull-ups/pull-downs.
|
|
||||||
self.rst_n.off()
|
|
||||||
self.shift_reg.latch.off()
|
|
||||||
delay(1*us)
|
|
||||||
self.shift_reg.latch.on()
|
|
||||||
delay(1*us)
|
|
||||||
self.shift_reg.latch.off()
|
|
||||||
self.rst_n.on()
|
|
||||||
delay(1*us)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_mu(self, att0, att1, att2, att3):
|
|
||||||
"""
|
|
||||||
Sets the four attenuators on BaseMod.
|
|
||||||
The values are in half decibels, between 0 (no attenuation)
|
|
||||||
and 63 (31.5dB attenuation).
|
|
||||||
"""
|
|
||||||
word = (
|
|
||||||
(att0 << 2) |
|
|
||||||
(att1 << 10) |
|
|
||||||
(att2 << 18) |
|
|
||||||
(att3 << 26)
|
|
||||||
)
|
|
||||||
self.shift_reg.set(word)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def get_mu(self):
|
|
||||||
"""
|
|
||||||
Retrieves the current settings of the four attenuators on BaseMod.
|
|
||||||
"""
|
|
||||||
word = self.shift_reg.get()
|
|
||||||
att0 = (word >> 2) & 0x3f
|
|
||||||
att1 = (word >> 10) & 0x3f
|
|
||||||
att2 = (word >> 18) & 0x3f
|
|
||||||
att3 = (word >> 26) & 0x3f
|
|
||||||
return att0, att1, att2, att3
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set(self, att0, att1, att2, att3):
|
|
||||||
"""
|
|
||||||
Sets the four attenuators on BaseMod.
|
|
||||||
The values are in decibels.
|
|
||||||
"""
|
|
||||||
self.set_mu(to_mu(att0), to_mu(att1), to_mu(att2), to_mu(att3))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def get(self):
|
|
||||||
"""
|
|
||||||
Retrieves the current settings of the four attenuators on BaseMod.
|
|
||||||
The values are in decibels.
|
|
||||||
"""
|
|
||||||
att0, att1, att2, att3 = self.get_mu()
|
|
||||||
return from_mu(att0), from_mu(att1), from_mu(att2), from_mu(att3)
|
|
|
@@ -1,28 +0,0 @@
-import sys
-import socket
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-def set_keepalive(sock, after_idle, interval, max_fails):
-    if sys.platform.startswith("linux"):
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
-        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle)
-        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
-        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
-    elif sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
-        # setting max_fails is not supported, typically ends up being 5 or 10
-        # depending on Windows version
-        sock.ioctl(socket.SIO_KEEPALIVE_VALS,
-                   (1, after_idle * 1000, interval * 1000))
-    else:
-        logger.warning("TCP keepalive not supported on platform '%s', ignored",
-                       sys.platform)
-
-
-def initialize_connection(host, port):
-    sock = socket.create_connection((host, port))
-    set_keepalive(sock, 10, 10, 3)
-    logger.debug("connected to %s:%d", host, port)
-    return sock

@@ -102,15 +102,15 @@ def decode_dump(data):
     # messages are big endian
     parts = struct.unpack(endian + "IQbbb", data[:15])
     (sent_bytes, total_byte_count,
-     error_occured, log_channel, dds_onehot_sel) = parts
+     error_occurred, log_channel, dds_onehot_sel) = parts
 
     expected_len = sent_bytes + 15
     if expected_len != len(data):
         raise ValueError("analyzer dump has incorrect length "
                          "(got {}, expected {})".format(
                             len(data), expected_len))
-    if error_occured:
-        logger.warning("error occured within the analyzer, "
+    if error_occurred:
+        logger.warning("error occurred within the analyzer, "
                        "data may be corrupted")
     if total_byte_count > sent_bytes:
         logger.info("analyzer ring buffer has wrapped %d times",

@@ -8,9 +8,8 @@ from fractions import Fraction
 from collections import namedtuple
 
 from artiq.coredevice import exceptions
-from artiq.coredevice.comm import initialize_connection
 from artiq import __version__ as software_version
+from sipyco.keepalive import create_connection
 
 logger = logging.getLogger(__name__)
 
@@ -24,6 +23,8 @@ class Request(Enum):
     RPCReply = 7
     RPCException = 8
 
+    SubkernelUpload = 9
+
 
 class Reply(Enum):
     SystemInfo = 2
@@ -171,6 +172,16 @@ class CommKernelDummy:
         pass
 
 
+def incompatible_versions(v1, v2):
+    if v1.endswith(".beta") or v2.endswith(".beta"):
+        # Beta branches may introduce breaking changes. Check version strictly.
+        return v1 != v2
+    else:
+        # On stable branches, runtime/software protocol backward compatibility is kept.
+        # Runtime and software with the same major version number are compatible.
+        return v1.split(".", maxsplit=1)[0] != v2.split(".", maxsplit=1)[0]
+
+
 class CommKernel:
     warned_of_mismatch = False
 
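
Illustrative checks of the compatibility rule added above (the version strings are made up):

    assert not incompatible_versions("8.0+d0e1acb", "8.1")    # same major on stable branch: compatible
    assert incompatible_versions("8.1", "9.0")                # major version bump: incompatible
    assert incompatible_versions("9.0.beta", "9.0.beta+g1")   # any .beta involved: exact match required
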
@@ -185,7 +196,7 @@ class CommKernel:
     def open(self):
         if hasattr(self, "socket"):
             return
-        self.socket = initialize_connection(self.host, self.port)
+        self.socket = create_connection(self.host, self.port)
         self.socket.sendall(b"ARTIQ coredev\n")
         endian = self._read(1)
         if endian == b"e":
@@ -199,6 +210,7 @@ class CommKernel:
         self.unpack_float64 = struct.Struct(self.endian + "d").unpack
 
         self.pack_header = struct.Struct(self.endian + "lB").pack
+        self.pack_int8 = struct.Struct(self.endian + "B").pack
         self.pack_int32 = struct.Struct(self.endian + "l").pack
         self.pack_int64 = struct.Struct(self.endian + "q").pack
         self.pack_float64 = struct.Struct(self.endian + "d").pack
@@ -313,7 +325,7 @@ class CommKernel:
             self._write(chunk)
 
     def _write_int8(self, value):
-        self._write(value)
+        self._write(self.pack_int8(value))
 
     def _write_int32(self, value):
         self._write(self.pack_int32(value))
@@ -347,7 +359,7 @@ class CommKernel:
         runtime_id = self._read(4)
         if runtime_id == b"AROR":
             gateware_version = self._read_string().split(";")[0]
-            if gateware_version != software_version and not self.warned_of_mismatch:
+            if not self.warned_of_mismatch and incompatible_versions(gateware_version, software_version):
                 logger.warning("Mismatch between gateware (%s) "
                                "and software (%s) versions",
                                gateware_version, software_version)
@@ -373,6 +385,19 @@ class CommKernel:
         else:
             self._read_expect(Reply.LoadCompleted)
 
+    def upload_subkernel(self, kernel_library, id, destination):
+        self._write_header(Request.SubkernelUpload)
+        self._write_int32(id)
+        self._write_int8(destination)
+        self._write_bytes(kernel_library)
+        self._flush()
+
+        self._read_header()
+        if self._read_type == Reply.LoadFailed:
+            raise LoadError(self._read_string())
+        else:
+            self._read_expect(Reply.LoadCompleted)
+
     def run(self):
         self._write_empty(Request.RunKernel)
         self._flush()
@@ -409,6 +434,9 @@ class CommKernel:
             self._skip_rpc_value(tags)
         elif tag == "r":
             self._skip_rpc_value(tags)
+        elif tag == "a":
+            _ndims = tags.pop(0)
+            self._skip_rpc_value(tags)
         else:
             pass
 
@@ -571,29 +599,33 @@ class CommKernel:
 
             self._write_header(Request.RPCException)
 
+            # Note: instead of sending strings, we send object IDs.
+            # This is to avoid the need for allocation on the device side.
+            # This is a special case: this only applies to exceptions.
             if hasattr(exn, "artiq_core_exception"):
                 exn = exn.artiq_core_exception
-                self._write_string(exn.name)
-                self._write_string(self._truncate_message(exn.message))
+                self._write_int32(embedding_map.store_str(exn.name))
+                self._write_int32(embedding_map.store_str(self._truncate_message(exn.message)))
                 for index in range(3):
                     self._write_int64(exn.param[index])
 
                 filename, line, column, function = exn.traceback[-1]
-                self._write_string(filename)
+                self._write_int32(embedding_map.store_str(filename))
                 self._write_int32(line)
                 self._write_int32(column)
-                self._write_string(function)
+                self._write_int32(embedding_map.store_str(function))
             else:
                 exn_type = type(exn)
                 if exn_type in (ZeroDivisionError, ValueError, IndexError, RuntimeError) or \
                         hasattr(exn, "artiq_builtin"):
-                    self._write_string("0:{}".format(exn_type.__name__))
+                    name = "0:{}".format(exn_type.__name__)
                 else:
                     exn_id = embedding_map.store_object(exn_type)
-                    self._write_string("{}:{}.{}".format(exn_id,
-                                                         exn_type.__module__,
-                                                         exn_type.__qualname__))
-                self._write_string(self._truncate_message(str(exn)))
+                    name = "{}:{}.{}".format(exn_id,
+                                             exn_type.__module__,
+                                             exn_type.__qualname__)
+                self._write_int32(embedding_map.store_str(name))
+                self._write_int32(embedding_map.store_str(self._truncate_message(str(exn))))
                 for index in range(3):
                     self._write_int64(0)
 
@@ -604,10 +636,10 @@ class CommKernel:
                 ((filename, line, function, _), ) = tb
             else:
                 assert False
-            self._write_string(filename)
+            self._write_int32(embedding_map.store_str(filename))
             self._write_int32(line)
             self._write_int32(-1)  # column not known
-            self._write_string(function)
+            self._write_int32(embedding_map.store_str(function))
             self._flush()
         else:
             logger.debug("rpc service: %d %r %r = %r",
@@ -619,28 +651,65 @@ class CommKernel:
             self._flush()
 
     def _serve_exception(self, embedding_map, symbolizer, demangler):
-        name = self._read_string()
-        message = self._read_string()
-        params = [self._read_int64() for _ in range(3)]
+        exception_count = self._read_int32()
+        nested_exceptions = []
 
-        filename = self._read_string()
-        line = self._read_int32()
-        column = self._read_int32()
-        function = self._read_string()
+        def read_exception_string():
+            # note: if length == -1, the following int32 is the object key
+            length = self._read_int32()
+            if length == -1:
+                return embedding_map.retrieve_str(self._read_int32())
+            else:
+                return self._read(length).decode("utf-8")
+
+        for _ in range(exception_count):
+            name = embedding_map.retrieve_str(self._read_int32())
+            message = read_exception_string()
+            params = [self._read_int64() for _ in range(3)]
+
+            filename = read_exception_string()
+            line = self._read_int32()
+            column = self._read_int32()
+            function = read_exception_string()
+            nested_exceptions.append([name, message, params,
+                                      filename, line, column, function])
+
+        demangled_names = demangler([ex[6] for ex in nested_exceptions])
+        for i in range(exception_count):
+            nested_exceptions[i][6] = demangled_names[i]
+
+        exception_info = []
+        for _ in range(exception_count):
+            sp = self._read_int32()
+            initial_backtrace = self._read_int32()
+            current_backtrace = self._read_int32()
+            exception_info.append((sp, initial_backtrace, current_backtrace))
+
+        backtrace = []
+        stack_pointers = []
+        for _ in range(self._read_int32()):
+            backtrace.append(self._read_int32())
+            stack_pointers.append(self._read_int32())
 
-        backtrace = [self._read_int32() for _ in range(self._read_int32())]
         self._process_async_error()
 
-        traceback = list(reversed(symbolizer(backtrace))) + \
-            [(filename, line, column, *demangler([function]), None)]
-        core_exn = exceptions.CoreException(name, message, params, traceback)
+        traceback = list(symbolizer(backtrace))
+        core_exn = exceptions.CoreException(nested_exceptions, exception_info,
+                                            traceback, stack_pointers)
 
         if core_exn.id == 0:
             python_exn_type = getattr(exceptions, core_exn.name.split('.')[-1])
         else:
             python_exn_type = embedding_map.retrieve_object(core_exn.id)
 
-        python_exn = python_exn_type(message.format(*params))
+        try:
+            python_exn = python_exn_type(
+                nested_exceptions[-1][1].format(*nested_exceptions[0][2]))
+        except Exception as ex:
+            python_exn = RuntimeError(
+                f"Exception type={python_exn_type}, which couldn't be "
+                f"reconstructed ({ex})"
+            )
        python_exn.artiq_core_exception = core_exn
        raise python_exn

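
The string encoding used by read_exception_string (a length prefix, with -1 meaning "look up an interned string by key") can be mimicked host-side; a minimal self-contained sketch with a stand-in embedding map, little-endian framing assumed for illustration:

    import io
    import struct

    class FakeEmbeddingMap:
        # stand-in for the real embedding map's string store
        def __init__(self, strings):
            self._strings = strings
        def retrieve_str(self, key):
            return self._strings[key]

    def read_exception_string(stream, embedding_map):
        # mirrors the protocol above: length == -1 -> interned string key follows
        (length,) = struct.unpack("<i", stream.read(4))
        if length == -1:
            (key,) = struct.unpack("<i", stream.read(4))
            return embedding_map.retrieve_str(key)
        return stream.read(length).decode("utf-8")

    emap = FakeEmbeddingMap({7: "RTIOUnderflow"})
    wire = io.BytesIO(struct.pack("<ii", -1, 7) + struct.pack("<i", 2) + b"hi")
    assert read_exception_string(wire, emap) == "RTIOUnderflow"   # interned key
    assert read_exception_string(wire, emap) == "hi"              # inline string
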
@@ -2,8 +2,7 @@ from enum import Enum
 import logging
 import struct
 
-from artiq.coredevice.comm import initialize_connection
+from sipyco.keepalive import create_connection
 
 
 logger = logging.getLogger(__name__)
 
@@ -54,7 +53,7 @@ class CommMgmt:
     def open(self):
         if hasattr(self, "socket"):
             return
-        self.socket = initialize_connection(self.host, self.port)
+        self.socket = create_connection(self.host, self.port)
         self.socket.sendall(b"ARTIQ management\n")
         endian = self._read(1)
         if endian == b"e":
@@ -111,9 +110,10 @@ class CommMgmt:
         return ty
 
     def _read_expect(self, ty):
-        if self._read_header() != ty:
+        header = self._read_header()
+        if header != ty:
             raise IOError("Incorrect reply from device: {} (expected {})".
-                          format(self._read_type, ty))
+                          format(header, ty))
 
     def _read_int32(self):
         (value, ) = struct.unpack(self.endian + "l", self._read(4))
@@ -160,7 +160,12 @@ class CommMgmt:
     def config_read(self, key):
         self._write_header(Request.ConfigRead)
         self._write_string(key)
-        self._read_expect(Reply.ConfigData)
+        ty = self._read_header()
+        if ty == Reply.Error:
+            raise IOError("Device failed to read config. The key may not exist.")
+        elif ty != Reply.ConfigData:
+            raise IOError("Incorrect reply from device: {} (expected {})".
+                          format(ty, Reply.ConfigData))
         return self._read_string()
 
     def config_write(self, key, value):
@@ -169,7 +174,7 @@ class CommMgmt:
         self._write_bytes(value)
         ty = self._read_header()
         if ty == Reply.Error:
-            raise IOError("Flash storage is full")
+            raise IOError("Device failed to write config. More information may be available in the log.")
         elif ty != Reply.Success:
             raise IOError("Incorrect reply from device: {} (expected {})".
                           format(ty, Reply.Success))

@@ -3,6 +3,7 @@ import logging
 import struct
 from enum import Enum
 
+from sipyco.keepalive import async_open_connection
 
 __all__ = ["TTLProbe", "TTLOverride", "CommMonInj"]
 
@@ -28,17 +29,16 @@ class CommMonInj:
         self.disconnect_cb = disconnect_cb
 
     async def connect(self, host, port=1383):
-        self._reader, self._writer = await asyncio.open_connection(host, port)
+        self._reader, self._writer = await async_open_connection(
+            host,
+            port,
+            after_idle=1,
+            interval=1,
+            max_fails=3,
+        )
 
         try:
             self._writer.write(b"ARTIQ moninj\n")
-            # get device endian
-            endian = await self._reader.read(1)
-            if endian == b"e":
-                self.endian = "<"
-            elif endian == b"E":
-                self.endian = ">"
-            else:
-                raise IOError("Incorrect reply from device: expected e/E.")
             self._receive_task = asyncio.ensure_future(self._receive_cr())
         except:
             self._writer.close()
@@ -46,6 +46,9 @@ class CommMonInj:
             del self._writer
             raise
 
+    def wait_terminate(self):
+        return self._receive_task
+
     async def close(self):
         self.disconnect_cb = None
         try:
@@ -60,19 +63,19 @@ class CommMonInj:
             del self._writer
 
     def monitor_probe(self, enable, channel, probe):
-        packet = struct.pack(self.endian + "bblb", 0, enable, channel, probe)
+        packet = struct.pack("<bblb", 0, enable, channel, probe)
         self._writer.write(packet)
 
     def monitor_injection(self, enable, channel, overrd):
-        packet = struct.pack(self.endian + "bblb", 3, enable, channel, overrd)
+        packet = struct.pack("<bblb", 3, enable, channel, overrd)
         self._writer.write(packet)
 
     def inject(self, channel, override, value):
-        packet = struct.pack(self.endian + "blbb", 1, channel, override, value)
+        packet = struct.pack("<blbb", 1, channel, override, value)
         self._writer.write(packet)
 
     def get_injection_status(self, channel, override):
-        packet = struct.pack(self.endian + "blb", 2, channel, override)
+        packet = struct.pack("<blb", 2, channel, override)
         self._writer.write(packet)
 
     async def _receive_cr(self):
@@ -82,17 +85,19 @@ class CommMonInj:
             if not ty:
                 return
             if ty == b"\x00":
-                payload = await self._reader.readexactly(9)
-                channel, probe, value = struct.unpack(
-                    self.endian + "lbl", payload)
+                payload = await self._reader.readexactly(13)
+                channel, probe, value = struct.unpack("<lbq", payload)
                 self.monitor_cb(channel, probe, value)
             elif ty == b"\x01":
                 payload = await self._reader.readexactly(6)
-                channel, override, value = struct.unpack(
-                    self.endian + "lbb", payload)
+                channel, override, value = struct.unpack("<lbb", payload)
                 self.injection_status_cb(channel, override, value)
             else:
                 raise ValueError("Unknown packet type", ty)
+        except asyncio.CancelledError:
+            raise
+        except:
+            logger.error("Moninj connection terminating with exception", exc_info=True)
         finally:
             if self.disconnect_cb is not None:
                 self.disconnect_cb()

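
A quick sanity check of the new fixed little-endian monitor payload: the 13-byte read follows from "<lbq" packing an int32, an int8 and an int64 without padding (the channel, probe and value below are made-up numbers):

    import struct

    payload = struct.pack("<lbq", 5, 0, 123456789)
    assert len(payload) == 13
    channel, probe, value = struct.unpack("<lbq", payload)
    assert (channel, probe, value) == (5, 0, 123456789)
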
@@ -1,5 +1,7 @@
 import os, sys
 import numpy
+from inspect import getfullargspec
+from functools import wraps
 
 from pythonparser import diagnostic
 
@@ -52,6 +54,17 @@ def rtio_get_counter() -> TInt64:
     raise NotImplementedError("syscall not simulated")
 
 
+def get_target_cls(target):
+    if target == "rv32g":
+        return RV32GTarget
+    elif target == "rv32ima":
+        return RV32IMATarget
+    elif target == "cortexa9":
+        return CortexA9Target
+    else:
+        raise ValueError("Unsupported target")
+
+
 class Core:
     """Core device driver.
 
@@ -65,81 +78,164 @@ class Core:
     :param ref_multiplier: ratio between the RTIO fine timestamp frequency
         and the RTIO coarse timestamp frequency (e.g. SERDES multiplication
        factor).
+    :param analyzer_proxy: name of the core device analyzer proxy to trigger
+        (optional).
+    :param analyze_at_run_end: automatically trigger the core device analyzer
+        proxy after the Experiment's run stage finishes.
     """
 
     kernel_invariants = {
         "core", "ref_period", "coarse_ref_period", "ref_multiplier",
     }
 
-    def __init__(self, dmgr, host, ref_period, ref_multiplier=8, target="rv32g"):
+    def __init__(self, dmgr,
+                 host, ref_period,
+                 analyzer_proxy=None, analyze_at_run_end=False,
+                 ref_multiplier=8,
+                 target="rv32g", satellite_cpu_targets={}):
         self.ref_period = ref_period
         self.ref_multiplier = ref_multiplier
-        if target == "rv32g":
-            self.target_cls = RV32GTarget
-        elif target == "rv32ima":
-            self.target_cls = RV32IMATarget
-        elif target == "cortexa9":
-            self.target_cls = CortexA9Target
-        else:
-            raise ValueError("Unsupported target")
+        self.satellite_cpu_targets = satellite_cpu_targets
+        self.target_cls = get_target_cls(target)
         self.coarse_ref_period = ref_period*ref_multiplier
         if host is None:
             self.comm = CommKernelDummy()
         else:
             self.comm = CommKernel(host)
+        self.analyzer_proxy_name = analyzer_proxy
+        self.analyze_at_run_end = analyze_at_run_end
+
         self.first_run = True
         self.dmgr = dmgr
         self.core = self
         self.comm.core = self
+        self.analyzer_proxy = None
+
+    def notify_run_end(self):
+        if self.analyze_at_run_end:
+            self.trigger_analyzer_proxy()
 
     def close(self):
         self.comm.close()
 
     def compile(self, function, args, kwargs, set_result=None,
-                attribute_writeback=True, print_as_rpc=True):
+                attribute_writeback=True, print_as_rpc=True,
+                target=None, destination=0, subkernel_arg_types=[]):
         try:
             engine = _DiagnosticEngine(all_errors_are_fatal=True)
 
             stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr,
-                                print_as_rpc=print_as_rpc)
+                                print_as_rpc=print_as_rpc,
+                                destination=destination, subkernel_arg_types=subkernel_arg_types)
             stitcher.stitch_call(function, args, kwargs, set_result)
             stitcher.finalize()
 
             module = Module(stitcher,
                             ref_period=self.ref_period,
                             attribute_writeback=attribute_writeback)
-            target = self.target_cls()
+            target = target if target is not None else self.target_cls()
 
             library = target.compile_and_link([module])
             stripped_library = target.strip(library)
 
             return stitcher.embedding_map, stripped_library, \
                    lambda addresses: target.symbolize(library, addresses), \
-                   lambda symbols: target.demangle(symbols)
+                   lambda symbols: target.demangle(symbols), \
+                   module.subkernel_arg_types
         except diagnostic.Error as error:
             raise CompileError(error.diagnostic) from error
 
+    def _run_compiled(self, kernel_library, embedding_map, symbolizer, demangler):
+        if self.first_run:
+            self.comm.check_system_info()
+            self.first_run = False
+        self.comm.load(kernel_library)
+        self.comm.run()
+        self.comm.serve(embedding_map, symbolizer, demangler)
+
     def run(self, function, args, kwargs):
         result = None
         @rpc(flags={"async"})
         def set_result(new_result):
             nonlocal result
             result = new_result
-        embedding_map, kernel_library, symbolizer, demangler = \
+        embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
             self.compile(function, args, kwargs, set_result)
-        if self.first_run:
-            self.comm.check_system_info()
-            self.first_run = False
-        self.comm.load(kernel_library)
-        self.comm.run()
-        self.comm.serve(embedding_map, symbolizer, demangler)
+        self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
+        self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
 
         return result
 
+    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types):
+        # pass self to subkernels (if applicable)
+        # assuming the first argument is self
|
||||||
|
subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function)
|
||||||
|
self_arg = []
|
||||||
|
if len(subkernel_args[0]) > 0:
|
||||||
|
if subkernel_args[0][0] == 'self':
|
||||||
|
self_arg = args[:1]
|
||||||
|
destination = subkernel_fn.artiq_embedded.destination
|
||||||
|
destination_tgt = self.satellite_cpu_targets[destination]
|
||||||
|
target = get_target_cls(destination_tgt)(subkernel_id=sid)
|
||||||
|
object_map, kernel_library, _, _, _ = \
|
||||||
|
self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False,
|
||||||
|
print_as_rpc=False, target=target, destination=destination,
|
||||||
|
subkernel_arg_types=subkernel_arg_types.get(sid, []))
|
||||||
|
if object_map.has_rpc_or_subkernel():
|
||||||
|
raise ValueError("Subkernel must not use RPC or subkernels in other destinations")
|
||||||
|
return destination, kernel_library
|
||||||
|
|
||||||
|
def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types):
|
||||||
|
for sid, subkernel_fn in embedding_map.subkernels().items():
|
||||||
|
destination, kernel_library = \
|
||||||
|
self.compile_subkernel(sid, subkernel_fn, embedding_map,
|
||||||
|
args, subkernel_arg_types)
|
||||||
|
self.comm.upload_subkernel(kernel_library, sid, destination)
|
||||||
|
|
||||||
|
def precompile(self, function, *args, **kwargs):
|
||||||
|
"""Precompile a kernel and return a callable that executes it on the core device
|
||||||
|
at a later time.
|
||||||
|
|
||||||
|
Arguments to the kernel are set at compilation time and passed to this function,
|
||||||
|
as additional positional and keyword arguments.
|
||||||
|
The returned callable accepts no arguments.
|
||||||
|
|
||||||
|
Precompiled kernels may use RPCs and subkernels.
|
||||||
|
|
||||||
|
Object attributes at the beginning of a precompiled kernel execution have the
|
||||||
|
values they had at precompilation time. If up-to-date values are required,
|
||||||
|
use RPC to read them.
|
||||||
|
Similarly, modified values are not written back, and explicit RPC should be used
|
||||||
|
to modify host objects.
|
||||||
|
Carefully review the source code of driver calls used in precompiled kernels, as
|
||||||
|
they may rely on host object attributes being transferred between kernel calls.
|
||||||
|
Examples include code used to control DDS phase, and Urukul RF switch control
|
||||||
|
via the CPLD register.
|
||||||
|
|
||||||
|
The return value of the callable is the return value of the kernel, if any.
|
||||||
|
|
||||||
|
The callable may be called several times.
|
||||||
|
"""
|
||||||
|
if not hasattr(function, "artiq_embedded"):
|
||||||
|
raise ValueError("Argument is not a kernel")
|
||||||
|
|
||||||
|
result = None
|
||||||
|
@rpc(flags={"async"})
|
||||||
|
def set_result(new_result):
|
||||||
|
nonlocal result
|
||||||
|
result = new_result
|
||||||
|
|
||||||
|
embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
|
||||||
|
self.compile(function, args, kwargs, set_result, attribute_writeback=False)
|
||||||
|
self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
|
||||||
|
|
||||||
|
@wraps(function)
|
||||||
|
def run_precompiled():
|
||||||
|
nonlocal result
|
||||||
|
self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
|
||||||
|
return result
|
||||||
|
|
||||||
|
return run_precompiled
|
||||||
|
|
||||||
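A usage sketch for the precompile() method added above; the experiment class, device names and pulse duration are illustrative, not taken from this changeset:

from artiq.experiment import EnvExperiment, kernel, us

class PrecompileDemo(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("ttl0")

    @kernel
    def pulse(self, duration):
        self.ttl0.pulse(duration)

    def run(self):
        # arguments are captured at precompilation time
        precompiled = self.core.precompile(self.pulse, 100*us)
        for _ in range(10):
            precompiled()   # runs the already-compiled kernel each time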
@portable
|
@portable
|
||||||
def seconds_to_mu(self, seconds):
|
def seconds_to_mu(self, seconds):
|
||||||
"""Convert seconds to the corresponding number of machine units
|
"""Convert seconds to the corresponding number of machine units
|
||||||
|
@ -206,3 +302,23 @@ class Core:
|
||||||
min_now = rtio_get_counter() + 125000
|
min_now = rtio_get_counter() + 125000
|
||||||
if now_mu() < min_now:
|
if now_mu() < min_now:
|
||||||
at_mu(min_now)
|
at_mu(min_now)
|
||||||
|
|
||||||
|
def trigger_analyzer_proxy(self):
|
||||||
|
"""Causes the core analyzer proxy to retrieve a dump from the device,
|
||||||
|
and distribute it to all connected clients (typically dashboards).
|
||||||
|
|
||||||
|
Returns only after the dump has been retrieved from the device.
|
||||||
|
|
||||||
|
Raises IOError if no analyzer proxy has been configured, or if the
|
||||||
|
analyzer proxy fails. In the latter case, more details would be
|
||||||
|
available in the proxy log.
|
||||||
|
"""
|
||||||
|
if self.analyzer_proxy is None:
|
||||||
|
if self.analyzer_proxy_name is not None:
|
||||||
|
self.analyzer_proxy = self.dmgr.get(self.analyzer_proxy_name)
|
||||||
|
if self.analyzer_proxy is None:
|
||||||
|
raise IOError("No analyzer proxy configured")
|
||||||
|
else:
|
||||||
|
success = self.analyzer_proxy.trigger()
|
||||||
|
if not success:
|
||||||
|
raise IOError("Analyzer proxy reported failure")
|
||||||
|
|
|
@ -19,16 +19,24 @@
|
||||||
},
|
},
|
||||||
"min_artiq_version": {
|
"min_artiq_version": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Minimum required ARTIQ version"
|
"description": "Minimum required ARTIQ version",
|
||||||
|
"default": "0"
|
||||||
},
|
},
|
||||||
"hw_rev": {
|
"hw_rev": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "Hardware revision"
|
"description": "Hardware revision"
|
||||||
},
|
},
|
||||||
"base": {
|
"base": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["use_drtio_role", "standalone", "master", "satellite"],
|
||||||
|
"description": "Deprecated, use drtio_role instead",
|
||||||
|
"default": "use_drtio_role"
|
||||||
|
},
|
||||||
|
"drtio_role": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"enum": ["standalone", "master", "satellite"],
|
"enum": ["standalone", "master", "satellite"],
|
||||||
"description": "SoC base; value depends on intended system topology"
|
"description": "Role that this device takes in a DRTIO network; 'standalone' means no DRTIO",
|
||||||
|
"default": "standalone"
|
||||||
},
|
},
|
||||||
"ext_ref_frequency": {
|
"ext_ref_frequency": {
|
||||||
"type": "number",
|
"type": "number",
|
||||||
|
@ -122,7 +130,7 @@
|
||||||
},
|
},
|
||||||
"hw_rev": {
|
"hw_rev": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"enum": ["v1.0"]
|
"enum": ["v1.0", "v1.1"]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -134,7 +142,7 @@
|
||||||
"properties": {
|
"properties": {
|
||||||
"type": {
|
"type": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"enum": ["dio", "urukul", "novogorny", "sampler", "suservo", "zotino", "grabber", "mirny", "fastino", "phaser", "hvamp"]
|
"enum": ["dio", "dio_spi", "urukul", "novogorny", "sampler", "suservo", "zotino", "grabber", "mirny", "fastino", "phaser", "hvamp", "shuttler"]
|
||||||
},
|
},
|
||||||
"board": {
|
"board": {
|
||||||
"type": "string"
|
"type": "string"
|
||||||
|
@ -170,15 +178,101 @@
|
||||||
},
|
},
|
||||||
"bank_direction_low": {
|
"bank_direction_low": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"enum": ["input", "output"]
|
"enum": ["input", "output", "clkgen"]
|
||||||
},
|
},
|
||||||
"bank_direction_high": {
|
"bank_direction_high": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"enum": ["input", "output"]
|
"enum": ["input", "output", "clkgen"]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"required": ["ports", "bank_direction_low", "bank_direction_high"]
|
"required": ["ports", "bank_direction_low", "bank_direction_high"]
|
||||||
}
|
}
|
||||||
|
}, {
|
||||||
|
"title": "DIO_SPI",
|
||||||
|
"if": {
|
||||||
|
"properties": {
|
||||||
|
"type": {
|
||||||
|
"const": "dio_spi"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"then": {
|
||||||
|
"properties": {
|
||||||
|
"ports": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"minItems": 1,
|
||||||
|
"maxItems": 1
|
||||||
|
},
|
||||||
|
"spi": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "dio_spi"
|
||||||
|
},
|
||||||
|
"clk": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 7
|
||||||
|
},
|
||||||
|
"mosi": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 7
|
||||||
|
},
|
||||||
|
"miso": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 7
|
||||||
|
},
|
||||||
|
"cs": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 7
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["clk"]
|
||||||
|
},
|
||||||
|
"minItems": 1
|
||||||
|
},
|
||||||
|
"ttl": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {
|
||||||
|
"type": "string",
|
||||||
|
"default": "ttl"
|
||||||
|
},
|
||||||
|
"pin": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 7
|
||||||
|
},
|
||||||
|
"direction": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["input", "output"]
|
||||||
|
},
|
||||||
|
"edge_counter": {
|
||||||
|
"type": "boolean",
|
||||||
|
"default": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["pin", "direction"]
|
||||||
|
},
|
||||||
|
"default": []
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["ports", "spi"]
|
||||||
|
}
|
||||||
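For reference, a hypothetical peripheral entry satisfying the DIO_SPI schema above, written as a Python literal for illustration (the actual system description is JSON); pin assignments and names are invented:

dio_spi_example = {
    "type": "dio_spi",
    "ports": [2],                  # exactly one EEM port
    "spi": [{
        "name": "dio_spi0",
        "clk": 0,                  # "clk" is the only required SPI pin
        "mosi": 1,
        "miso": 2,
        "cs": [3, 4],
    }],
    "ttl": [
        {"name": "sw0", "pin": 5, "direction": "output"},
        {"name": "pd0", "pin": 6, "direction": "input", "edge_counter": True},
    ],
}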
}, {
|
}, {
|
||||||
"title": "Urukul",
|
"title": "Urukul",
|
||||||
"if": {
|
"if": {
|
||||||
|
@ -214,11 +308,18 @@
|
||||||
"clk_div": {
|
"clk_div": {
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
"minimum": 0,
|
"minimum": 0,
|
||||||
"maximum": 3
|
"maximum": 3,
|
||||||
|
"default": 0
|
||||||
},
|
},
|
||||||
"pll_n": {
|
"pll_n": {
|
||||||
"type": "integer"
|
"type": "integer"
|
||||||
},
|
},
|
||||||
|
"pll_en": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 1,
|
||||||
|
"default": 1
|
||||||
|
},
|
||||||
"pll_vco": {
|
"pll_vco": {
|
||||||
"type": "integer"
|
"type": "integer"
|
||||||
},
|
},
|
||||||
|
@ -293,6 +394,11 @@
|
||||||
"minItems": 2,
|
"minItems": 2,
|
||||||
"maxItems": 2
|
"maxItems": 2
|
||||||
},
|
},
|
||||||
|
"sampler_hw_rev": {
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "^v[0-9]+\\.[0-9]+",
|
||||||
|
"default": "v2.2"
|
||||||
|
},
|
||||||
"urukul0_ports": {
|
"urukul0_ports": {
|
||||||
"type": "array",
|
"type": "array",
|
||||||
"items": {
|
"items": {
|
||||||
|
@ -322,6 +428,12 @@
|
||||||
"type": "integer",
|
"type": "integer",
|
||||||
"default": 32
|
"default": 32
|
||||||
},
|
},
|
||||||
|
"pll_en": {
|
||||||
|
"type": "integer",
|
||||||
|
"minimum": 0,
|
||||||
|
"maximum": 1,
|
||||||
|
"default": 1
|
||||||
|
},
|
||||||
"pll_vco": {
|
"pll_vco": {
|
||||||
"type": "integer"
|
"type": "integer"
|
||||||
}
|
}
|
||||||
|
@ -413,6 +525,11 @@
|
||||||
"almazny": {
|
"almazny": {
|
||||||
"type": "boolean",
|
"type": "boolean",
|
||||||
"default": false
|
"default": false
|
||||||
|
},
|
||||||
|
"almazny_hw_rev": {
|
||||||
|
"type": "string",
|
||||||
|
"pattern": "^v[0-9]+\\.[0-9]+",
|
||||||
|
"default": "v1.2"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"required": ["ports"]
|
"required": ["ports"]
|
||||||
|
@ -462,6 +579,11 @@
|
||||||
},
|
},
|
||||||
"minItems": 1,
|
"minItems": 1,
|
||||||
"maxItems": 1
|
"maxItems": 1
|
||||||
|
},
|
||||||
|
"mode": {
|
||||||
|
"type": "string",
|
||||||
|
"enum": ["base", "miqro"],
|
||||||
|
"default": "base"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"required": ["ports"]
|
"required": ["ports"]
|
||||||
|
@ -488,6 +610,31 @@
|
||||||
},
|
},
|
||||||
"required": ["ports"]
|
"required": ["ports"]
|
||||||
}
|
}
|
||||||
|
},{
|
||||||
|
"title": "Shuttler",
|
||||||
|
"if": {
|
||||||
|
"properties": {
|
||||||
|
"type": {
|
||||||
|
"const": "shuttler"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"then": {
|
||||||
|
"properties": {
|
||||||
|
"ports": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"minItems": 1,
|
||||||
|
"maxItems": 2
|
||||||
|
},
|
||||||
|
"drtio_destination": {
|
||||||
|
"type": "integer"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"required": ["ports"]
|
||||||
|
}
|
||||||
}]
|
}]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,7 +6,7 @@ alone could achieve.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from artiq.language.core import syscall, kernel
|
from artiq.language.core import syscall, kernel
|
||||||
from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple
|
from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple, TBool
|
||||||
from artiq.coredevice.exceptions import DMAError
|
from artiq.coredevice.exceptions import DMAError
|
||||||
|
|
||||||
from numpy import int64
|
from numpy import int64
|
||||||
|
@ -17,7 +17,7 @@ def dma_record_start(name: TStr) -> TNone:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
@syscall
|
@syscall
|
||||||
def dma_record_stop(duration: TInt64) -> TNone:
|
def dma_record_stop(duration: TInt64, enable_ddma: TBool) -> TNone:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
@syscall
|
@syscall
|
||||||
|
@ -25,11 +25,11 @@ def dma_erase(name: TStr) -> TNone:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
@syscall
|
@syscall
|
||||||
def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32]):
|
def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32, TBool]):
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
@syscall
|
@syscall
|
||||||
def dma_playback(timestamp: TInt64, ptr: TInt32) -> TNone:
|
def dma_playback(timestamp: TInt64, ptr: TInt32, enable_ddma: TBool) -> TNone:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
|
@ -47,6 +47,7 @@ class DMARecordContextManager:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.name = ""
|
self.name = ""
|
||||||
self.saved_now_mu = int64(0)
|
self.saved_now_mu = int64(0)
|
||||||
|
self.enable_ddma = False
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
|
@ -56,7 +57,7 @@ class DMARecordContextManager:
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def __exit__(self, type, value, traceback):
|
def __exit__(self, type, value, traceback):
|
||||||
dma_record_stop(now_mu()) # see above
|
dma_record_stop(now_mu(), self.enable_ddma) # see above
|
||||||
at_mu(self.saved_now_mu)
|
at_mu(self.saved_now_mu)
|
||||||
|
|
||||||
|
|
||||||
|
@ -74,12 +75,20 @@ class CoreDMA:
|
||||||
self.epoch = 0
|
self.epoch = 0
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def record(self, name):
|
def record(self, name, enable_ddma=False):
|
||||||
"""Returns a context manager that will record a DMA trace called ``name``.
|
"""Returns a context manager that will record a DMA trace called ``name``.
|
||||||
Any previously recorded trace with the same name is overwritten.
|
Any previously recorded trace with the same name is overwritten.
|
||||||
The trace will persist across kernel switches."""
|
The trace will persist across kernel switches.
|
||||||
|
|
||||||
|
In DRTIO context, distributed DMA can be toggled with ``enable_ddma``.
|
||||||
|
Enabling it allows running DMA on satellites, rather than sending all
|
||||||
|
events from the master.
|
||||||
|
|
||||||
|
Keeping it disabled may improve performance in some scenarios,
|
||||||
|
e.g. when there are many small satellite buffers."""
|
||||||
self.epoch += 1
|
self.epoch += 1
|
||||||
self.recorder.name = name
|
self.recorder.name = name
|
||||||
|
self.recorder.enable_ddma = enable_ddma
|
||||||
return self.recorder
|
return self.recorder
|
||||||
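A usage sketch of the distributed-DMA option documented above, as it might appear inside an experiment kernel; device names, the trace name and the timings are illustrative:

@kernel
def run_pulse_train(self):
    # record the trace once, letting satellites store their own events
    with self.core_dma.record("pulses", enable_ddma=True):
        for _ in range(100):
            self.ttl0.pulse(100*ns)
            delay(100*ns)
    # playback() now also retrieves and honours the recorded ddma flag
    self.core_dma.playback("pulses")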
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -92,24 +101,24 @@ class CoreDMA:
|
||||||
def playback(self, name):
|
def playback(self, name):
|
||||||
"""Replays a previously recorded DMA trace. This function blocks until
|
"""Replays a previously recorded DMA trace. This function blocks until
|
||||||
the entire trace is submitted to the RTIO FIFOs."""
|
the entire trace is submitted to the RTIO FIFOs."""
|
||||||
(advance_mu, ptr) = dma_retrieve(name)
|
(advance_mu, ptr, uses_ddma) = dma_retrieve(name)
|
||||||
dma_playback(now_mu(), ptr)
|
dma_playback(now_mu(), ptr, uses_ddma)
|
||||||
delay_mu(advance_mu)
|
delay_mu(advance_mu)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def get_handle(self, name):
|
def get_handle(self, name):
|
||||||
"""Returns a handle to a previously recorded DMA trace. The returned handle
|
"""Returns a handle to a previously recorded DMA trace. The returned handle
|
||||||
is only valid until the next call to :meth:`record` or :meth:`erase`."""
|
is only valid until the next call to :meth:`record` or :meth:`erase`."""
|
||||||
(advance_mu, ptr) = dma_retrieve(name)
|
(advance_mu, ptr, uses_ddma) = dma_retrieve(name)
|
||||||
return (self.epoch, advance_mu, ptr)
|
return (self.epoch, advance_mu, ptr, uses_ddma)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def playback_handle(self, handle):
|
def playback_handle(self, handle):
|
||||||
"""Replays a handle obtained with :meth:`get_handle`. Using this function
|
"""Replays a handle obtained with :meth:`get_handle`. Using this function
|
||||||
is much faster than :meth:`playback` for replaying a set of traces repeatedly,
|
is much faster than :meth:`playback` for replaying a set of traces repeatedly,
|
||||||
but places the overhead of managing the handles on the programmer.
|
but places the overhead of managing the handles on the programmer.
|
||||||
(epoch, advance_mu, ptr) = handle
|
(epoch, advance_mu, ptr, uses_ddma) = handle
|
||||||
if self.epoch != epoch:
|
if self.epoch != epoch:
|
||||||
raise DMAError("Invalid handle")
|
raise DMAError("Invalid handle")
|
||||||
dma_playback(now_mu(), ptr)
|
dma_playback(now_mu(), ptr, uses_ddma)
|
||||||
delay_mu(advance_mu)
|
delay_mu(advance_mu)
|
||||||
|
|
|
@ -91,6 +91,10 @@ class EdgeCounter:
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
self.counter_max = (1 << (gateware_width - 1)) - 1
|
self.counter_max = (1 << (gateware_width - 1)) - 1
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel, **kwargs):
|
||||||
|
return [(channel, None)]
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def gate_rising(self, duration):
|
def gate_rising(self, duration):
|
||||||
"""Count rising edges for the given duration and request the total at
|
"""Count rising edges for the given duration and request the total at
|
||||||
|
|
|
@ -16,50 +16,94 @@ AssertionError = builtins.AssertionError
|
||||||
|
|
||||||
class CoreException:
|
class CoreException:
|
||||||
"""Information about an exception raised or passed through the core device."""
|
"""Information about an exception raised or passed through the core device."""
|
||||||
|
def __init__(self, exceptions, exception_info, traceback, stack_pointers):
|
||||||
|
self.exceptions = exceptions
|
||||||
|
self.exception_info = exception_info
|
||||||
|
self.traceback = list(traceback)
|
||||||
|
self.stack_pointers = stack_pointers
|
||||||
|
|
||||||
def __init__(self, name, message, params, traceback):
|
first_exception = exceptions[0]
|
||||||
|
name = first_exception[0]
|
||||||
if ':' in name:
|
if ':' in name:
|
||||||
exn_id, self.name = name.split(':', 2)
|
exn_id, self.name = name.split(':', 2)
|
||||||
self.id = int(exn_id)
|
self.id = int(exn_id)
|
||||||
else:
|
else:
|
||||||
self.id, self.name = 0, name
|
self.id, self.name = 0, name
|
||||||
self.message, self.params = message, params
|
self.message = first_exception[1]
|
||||||
self.traceback = list(traceback)
|
self.params = first_exception[2]
|
||||||
|
|
||||||
def __str__(self):
|
def append_backtrace(self, record, inlined=False):
|
||||||
lines = []
|
filename, line, column, function, address = record
|
||||||
lines.append("Core Device Traceback (most recent call last):")
|
|
||||||
last_address = 0
|
|
||||||
for (filename, line, column, function, address) in self.traceback:
|
|
||||||
stub_globals = {"__name__": filename, "__loader__": source_loader}
|
stub_globals = {"__name__": filename, "__loader__": source_loader}
|
||||||
source_line = linecache.getline(filename, line, stub_globals)
|
source_line = linecache.getline(filename, line, stub_globals)
|
||||||
indentation = re.search(r"^\s*", source_line).end()
|
indentation = re.search(r"^\s*", source_line).end()
|
||||||
|
|
||||||
if address is None:
|
if address is None:
|
||||||
formatted_address = ""
|
formatted_address = ""
|
||||||
elif address == last_address:
|
elif inlined:
|
||||||
formatted_address = " (inlined)"
|
formatted_address = " (inlined)"
|
||||||
else:
|
else:
|
||||||
formatted_address = " (RA=+0x{:x})".format(address)
|
formatted_address = " (RA=+0x{:x})".format(address)
|
||||||
last_address = address
|
|
||||||
|
|
||||||
filename = filename.replace(artiq_dir, "<artiq>")
|
filename = filename.replace(artiq_dir, "<artiq>")
|
||||||
|
lines = []
|
||||||
if column == -1:
|
if column == -1:
|
||||||
|
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||||
lines.append(" File \"{file}\", line {line}, in {function}{address}".
|
lines.append(" File \"{file}\", line {line}, in {function}{address}".
|
||||||
format(file=filename, line=line, function=function,
|
format(file=filename, line=line, function=function,
|
||||||
address=formatted_address))
|
address=formatted_address))
|
||||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
|
||||||
else:
|
else:
|
||||||
|
lines.append(" {}^".format(" " * (column - indentation)))
|
||||||
|
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||||
lines.append(" File \"{file}\", line {line}, column {column},"
|
lines.append(" File \"{file}\", line {line}, column {column},"
|
||||||
" in {function}{address}".
|
" in {function}{address}".
|
||||||
format(file=filename, line=line, column=column + 1,
|
format(file=filename, line=line, column=column + 1,
|
||||||
function=function, address=formatted_address))
|
function=function, address=formatted_address))
|
||||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
return lines
|
||||||
lines.append(" {}^".format(" " * (column - indentation)))
|
|
||||||
|
|
||||||
lines.append("{}({}): {}".format(self.name, self.id,
|
def single_traceback(self, exception_index):
|
||||||
self.message.format(*self.params)))
|
# note that we insert in reversed order
|
||||||
return "\n".join(lines)
|
lines = []
|
||||||
|
last_sp = 0
|
||||||
|
start_backtrace_index = self.exception_info[exception_index][1]
|
||||||
|
zipped = list(zip(self.traceback[start_backtrace_index:],
|
||||||
|
self.stack_pointers[start_backtrace_index:]))
|
||||||
|
exception = self.exceptions[exception_index]
|
||||||
|
name = exception[0]
|
||||||
|
message = exception[1]
|
||||||
|
params = exception[2]
|
||||||
|
if ':' in name:
|
||||||
|
exn_id, name = name.split(':', 2)
|
||||||
|
exn_id = int(exn_id)
|
||||||
|
else:
|
||||||
|
exn_id = 0
|
||||||
|
lines.append("{}({}): {}".format(name, exn_id, message.format(*params)))
|
||||||
|
zipped.append(((exception[3], exception[4], exception[5], exception[6],
|
||||||
|
None, []), None))
|
||||||
|
|
||||||
|
for ((filename, line, column, function, address, inlined), sp) in zipped:
|
||||||
|
# backtrace of nested exceptions may be discontinuous
|
||||||
|
# but the stack pointer must increase monotonically
|
||||||
|
if sp is not None and sp <= last_sp:
|
||||||
|
continue
|
||||||
|
last_sp = sp
|
||||||
|
|
||||||
|
for record in reversed(inlined):
|
||||||
|
lines += self.append_backtrace(record, True)
|
||||||
|
lines += self.append_backtrace((filename, line, column, function,
|
||||||
|
address))
|
||||||
|
|
||||||
|
lines.append("Traceback (most recent call first):")
|
||||||
|
|
||||||
|
return "\n".join(reversed(lines))
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
tracebacks = [self.single_traceback(i) for i in range(len(self.exceptions))]
|
||||||
|
traceback_str = ('\n\nDuring handling of the above exception, ' +
|
||||||
|
'another exception occurred:\n\n').join(tracebacks)
|
||||||
|
return 'Core Device Traceback:\n' +\
|
||||||
|
traceback_str +\
|
||||||
|
'\n\nEnd of Core Device Traceback\n'
|
||||||
|
|
||||||
|
|
||||||
class InternalError(Exception):
|
class InternalError(Exception):
|
||||||
|
@ -104,6 +148,13 @@ class DMAError(Exception):
|
||||||
artiq_builtin = True
|
artiq_builtin = True
|
||||||
|
|
||||||
|
|
||||||
|
class SubkernelError(Exception):
|
||||||
|
"""Raised when an operation regarding a subkernel is invalid
|
||||||
|
or cannot be completed.
|
||||||
|
"""
|
||||||
|
artiq_builtin = True
|
||||||
|
|
||||||
|
|
||||||
class ClockFailure(Exception):
|
class ClockFailure(Exception):
|
||||||
"""Raised when RTIO PLL has lost lock."""
|
"""Raised when RTIO PLL has lost lock."""
|
||||||
|
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
"""RTIO driver for the Fastino 32channel, 16 bit, 2.5 MS/s per channel,
|
"""RTIO driver for the Fastino 32channel, 16 bit, 2.5 MS/s per channel,
|
||||||
streaming DAC.
|
streaming DAC.
|
||||||
"""
|
"""
|
||||||
from numpy import int32
|
from numpy import int32, int64
|
||||||
|
|
||||||
from artiq.language.core import kernel, portable, delay, delay_mu
|
from artiq.language.core import kernel, portable, delay, delay_mu
|
||||||
from artiq.coredevice.rtio import (rtio_output, rtio_output_wide,
|
from artiq.coredevice.rtio import (rtio_output, rtio_output_wide,
|
||||||
rtio_input_data)
|
rtio_input_data)
|
||||||
from artiq.language.units import us
|
from artiq.language.units import ns
|
||||||
from artiq.language.types import TInt32, TList
|
from artiq.language.types import TInt32, TList
|
||||||
|
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ class Fastino:
|
||||||
DAC updates synchronized to a frame edge.
|
DAC updates synchronized to a frame edge.
|
||||||
|
|
||||||
The `log2_width=0` RTIO layout uses one DAC channel per RTIO address and a
|
The `log2_width=0` RTIO layout uses one DAC channel per RTIO address and a
|
||||||
dense RTIO address space. The RTIO words are narrow. (32 bit) and
|
dense RTIO address space. The RTIO words are narrow (32 bit) and
|
||||||
few-channel updates are efficient. There is the least amount of DAC state
|
few-channel updates are efficient. There is the least amount of DAC state
|
||||||
tracking in kernels, at the cost of more DMA and RTIO data.
|
tracking in kernels, at the cost of more DMA and RTIO data.
|
||||||
The setting here and in the RTIO PHY (gateware) must match.
|
The setting here and in the RTIO PHY (gateware) must match.
|
||||||
|
@ -41,24 +41,52 @@ class Fastino:
|
||||||
:param log2_width: Width of DAC channel group (logarithm base 2).
|
:param log2_width: Width of DAC channel group (logarithm base 2).
|
||||||
Value must match the corresponding value in the RTIO PHY (gateware).
|
Value must match the corresponding value in the RTIO PHY (gateware).
|
||||||
"""
|
"""
|
||||||
kernel_invariants = {"core", "channel", "width"}
|
kernel_invariants = {"core", "channel", "width", "t_frame"}
|
||||||
|
|
||||||
def __init__(self, dmgr, channel, core_device="core", log2_width=0):
|
def __init__(self, dmgr, channel, core_device="core", log2_width=0):
|
||||||
self.channel = channel << 8
|
self.channel = channel << 8
|
||||||
self.core = dmgr.get(core_device)
|
self.core = dmgr.get(core_device)
|
||||||
self.width = 1 << log2_width
|
self.width = 1 << log2_width
|
||||||
|
# frame duration in mu (14 words each 7 clock cycles each 4 ns)
|
||||||
|
# self.core.seconds_to_mu(14*7*4*ns) # unfortunately this may round wrong
|
||||||
|
assert self.core.ref_period == 1*ns
|
||||||
|
self.t_frame = int64(14*7*4)
|
||||||
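The comment above pins the frame duration as a constant rather than using seconds_to_mu(); a quick arithmetic check of that constant, assuming the asserted 1 ns reference period (so 1 mu = 1 ns):

words_per_frame = 14
cycles_per_word = 7
ns_per_cycle = 4
t_frame_ns = words_per_frame * cycles_per_word * ns_per_cycle
assert t_frame_ns == 392        # hence t_frame = int64(14*7*4) machine units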
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel, **kwargs):
|
||||||
|
return [(channel, None)]
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def init(self):
|
def init(self):
|
||||||
"""Initialize the device.
|
"""Initialize the device.
|
||||||
|
|
||||||
This clears reset, unsets DAC_CLR, enables AFE_PWR,
|
* disables RESET, DAC_CLR, enables AFE_PWR
|
||||||
clears error counters, then enables error counting
|
* clears error counters, enables error counting
|
||||||
|
* turns LEDs off
|
||||||
|
* clears `hold` and `continuous` on all channels
|
||||||
|
* clears and resets interpolators to unit rate change on all
|
||||||
|
channels
|
||||||
|
|
||||||
|
It does not change the set channel voltages and does not reset the PLLs or clock
|
||||||
|
domains.
|
||||||
|
|
||||||
|
Note: On Fastino gateware before v0.2 this may lead to 0 voltage being emitted
|
||||||
|
transiently.
|
||||||
"""
|
"""
|
||||||
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=1)
|
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=1)
|
||||||
delay(1*us)
|
delay_mu(self.t_frame)
|
||||||
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=0)
|
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=0)
|
||||||
delay(1*us)
|
delay_mu(self.t_frame)
|
||||||
|
self.set_continuous(0)
|
||||||
|
delay_mu(self.t_frame)
|
||||||
|
self.stage_cic(1)
|
||||||
|
delay_mu(self.t_frame)
|
||||||
|
self.apply_cic(0xffffffff)
|
||||||
|
delay_mu(self.t_frame)
|
||||||
|
self.set_leds(0)
|
||||||
|
delay_mu(self.t_frame)
|
||||||
|
self.set_hold(0)
|
||||||
|
delay_mu(self.t_frame)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def write(self, addr, data):
|
def write(self, addr, data):
|
||||||
|
@ -78,8 +106,9 @@ class Fastino:
|
||||||
:param addr: Address to read from.
|
:param addr: Address to read from.
|
||||||
:return: The data read.
|
:return: The data read.
|
||||||
"""
|
"""
|
||||||
rtio_output(self.channel | addr | 0x80)
|
raise NotImplementedError
|
||||||
return rtio_input_data(self.channel >> 8)
|
# rtio_output(self.channel | addr | 0x80)
|
||||||
|
# return rtio_input_data(self.channel >> 8)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def set_dac_mu(self, dac, data):
|
def set_dac_mu(self, dac, data):
|
||||||
|
@ -252,9 +281,12 @@ class Fastino:
|
||||||
def apply_cic(self, channel_mask):
|
def apply_cic(self, channel_mask):
|
||||||
"""Apply the staged interpolator configuration on the specified channels.
|
"""Apply the staged interpolator configuration on the specified channels.
|
||||||
|
|
||||||
Each Fastino channel includes a fourth order (cubic) CIC interpolator with
|
Each Fastino channel starting with gateware v0.2 includes a fourth order
|
||||||
variable rate change and variable output gain compensation (see
|
(cubic) CIC interpolator with variable rate change and variable output
|
||||||
:meth:`stage_cic`).
|
gain compensation (see :meth:`stage_cic`).
|
||||||
|
|
||||||
|
Fastino gateware before v0.2 does not include the interpolators and the
|
||||||
|
methods affecting the CICs should not be used.
|
||||||
|
|
||||||
Channels using non-unity interpolation rate should have
|
Channels using non-unity interpolation rate should have
|
||||||
continuous DAC updates enabled (see :meth:`set_continuous`) unless
|
continuous DAC updates enabled (see :meth:`set_continuous`) unless
|
||||||
|
|
|
@ -25,6 +25,10 @@ class Grabber:
|
||||||
# ROI engine outputs for one video frame.
|
# ROI engine outputs for one video frame.
|
||||||
self.sentinel = int32(int64(2**count_width))
|
self.sentinel = int32(int64(2**count_width))
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel_base, **kwargs):
|
||||||
|
return [(channel_base, "ROI coordinates"), (channel_base + 1, "ROI mask")]
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def setup_roi(self, n, x0, y0, x1, y1):
|
def setup_roi(self, n, x0, y0, x1, y1):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -33,6 +33,11 @@ def i2c_read(busno: TInt32, ack: TBool) -> TInt32:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
|
@syscall(flags={"nounwind", "nowrite"})
|
||||||
|
def i2c_switch_select(busno: TInt32, address: TInt32, mask: TInt32) -> TNone:
|
||||||
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def i2c_poll(busno, busaddr):
|
def i2c_poll(busno, busaddr):
|
||||||
"""Poll I2C device at address.
|
"""Poll I2C device at address.
|
||||||
|
@ -137,8 +142,10 @@ def i2c_read_many(busno, busaddr, addr, data):
|
||||||
i2c_stop(busno)
|
i2c_stop(busno)
|
||||||
|
|
||||||
|
|
||||||
class PCA9548:
|
class I2CSwitch:
|
||||||
"""Driver for the PCA9548 I2C bus switch.
|
"""Driver for the I2C bus switch.
|
||||||
|
|
||||||
|
PCA954X (or other) type detection is done by the CPU during I2C init.
|
||||||
|
|
||||||
I2C transactions not real-time, and are performed by the CPU without
|
I2C transactions not real-time, and are performed by the CPU without
|
||||||
involving RTIO.
|
involving RTIO.
|
||||||
|
@ -151,25 +158,19 @@ class PCA9548:
|
||||||
self.busno = busno
|
self.busno = busno
|
||||||
self.address = address
|
self.address = address
|
||||||
|
|
||||||
@kernel
|
|
||||||
def select(self, mask):
|
|
||||||
"""Enable/disable channels.
|
|
||||||
|
|
||||||
:param mask: Bit mask of enabled channels
|
|
||||||
"""
|
|
||||||
i2c_write_byte(self.busno, self.address, mask)
|
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def set(self, channel):
|
def set(self, channel):
|
||||||
"""Enable one channel.
|
"""Enable one channel.
|
||||||
|
|
||||||
:param channel: channel number (0-7)
|
:param channel: channel number (0-7)
|
||||||
"""
|
"""
|
||||||
self.select(1 << channel)
|
i2c_switch_select(self.busno, self.address >> 1, 1 << channel)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def readback(self):
|
def unset(self):
|
||||||
return i2c_read_byte(self.busno, self.address)
|
"""Disable output of the I2C switch.
|
||||||
|
"""
|
||||||
|
i2c_switch_select(self.busno, self.address >> 1, 0)
|
||||||
|
|
||||||
|
|
||||||
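Migration sketch for callers of the former PCA9548 driver, replaced by the I2CSwitch class above; the device name and channel number are illustrative:

@kernel
def reselect_switch(self):
    # old driver: self.pca9548.select(1 << 3)   (bit mask of channels)
    self.i2c_switch0.set(3)     # new driver: enable channel 3 only
    self.i2c_switch0.unset()    # and disable all outputs again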
class TCA6424A:
|
class TCA6424A:
|
||||||
|
@ -207,3 +208,46 @@ class TCA6424A:
|
||||||
|
|
||||||
self._write24(0x8c, 0) # set all directions to output
|
self._write24(0x8c, 0) # set all directions to output
|
||||||
self._write24(0x84, outputs_le) # set levels
|
self._write24(0x84, outputs_le) # set levels
|
||||||
|
|
||||||
|
class PCF8574A:
|
||||||
|
"""Driver for the PCF8574 I2C remote 8-bit I/O expander.
|
||||||
|
|
||||||
|
I2C transactions not real-time, and are performed by the CPU without
|
||||||
|
involving RTIO.
|
||||||
|
"""
|
||||||
|
def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.busno = busno
|
||||||
|
self.address = address
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set(self, data):
|
||||||
|
"""Drive data on the quasi-bidirectional pins.
|
||||||
|
|
||||||
|
:param data: Pin data. High bits are weakly driven high
|
||||||
|
(and thus inputs), low bits are strongly driven low.
|
||||||
|
"""
|
||||||
|
i2c_start(self.busno)
|
||||||
|
try:
|
||||||
|
if not i2c_write(self.busno, self.address):
|
||||||
|
raise I2CError("PCF8574A failed to ack address")
|
||||||
|
if not i2c_write(self.busno, data):
|
||||||
|
raise I2CError("PCF8574A failed to ack data")
|
||||||
|
finally:
|
||||||
|
i2c_stop(self.busno)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def get(self):
|
||||||
|
"""Retrieve quasi-bidirectional pin input data.
|
||||||
|
|
||||||
|
:return: Pin data
|
||||||
|
"""
|
||||||
|
i2c_start(self.busno)
|
||||||
|
ret = 0
|
||||||
|
try:
|
||||||
|
if not i2c_write(self.busno, self.address | 1):
|
||||||
|
raise I2CError("PCF8574A failed to ack address")
|
||||||
|
ret = i2c_read(self.busno, False)
|
||||||
|
finally:
|
||||||
|
i2c_stop(self.busno)
|
||||||
|
return ret
|
||||||
|
|
|
@ -32,4 +32,7 @@ def load(description_path):
     global validator
     validator.validate(result)

+    if result["base"] != "use_drtio_role":
+        result["drtio_role"] = result["base"]
+
     return result
|
|
|
@ -37,13 +37,17 @@ class KasliEEPROM:
     @kernel
     def select(self):
         mask = 1 << self.port
-        self.sw0.select(mask)
-        self.sw1.select(mask >> 8)
+        if self.port < 8:
+            self.sw0.set(self.port)
+            self.sw1.unset()
+        else:
+            self.sw0.unset()
+            self.sw1.set(self.port - 8)

     @kernel
     def deselect(self):
-        self.sw0.select(0)
-        self.sw1.select(0)
+        self.sw0.unset()
+        self.sw1.unset()

     @kernel
     def write_i32(self, addr, value):
|
|
|
@ -1,7 +1,7 @@
|
||||||
"""RTIO driver for Mirny (4 channel GHz PLLs)
|
"""RTIO driver for Mirny (4 channel GHz PLLs)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from artiq.language.core import kernel, delay
|
from artiq.language.core import kernel, delay, portable
|
||||||
from artiq.language.units import us
|
from artiq.language.units import us
|
||||||
|
|
||||||
from numpy import int32
|
from numpy import int32
|
||||||
|
@ -31,16 +31,6 @@ WE = 1 << 24
|
||||||
# supported CPLD code version
|
# supported CPLD code version
|
||||||
PROTO_REV_MATCH = 0x0
|
PROTO_REV_MATCH = 0x0
|
||||||
|
|
||||||
# almazny-specific data
|
|
||||||
ALMAZNY_REG_BASE = 0x0C
|
|
||||||
ALMAZNY_OE_SHIFT = 12
|
|
||||||
|
|
||||||
# higher SPI write divider to match almazny shift register timing
|
|
||||||
# min SER time before SRCLK rise = 125ns
|
|
||||||
# -> div=32 gives 125ns for data before clock rise
|
|
||||||
# works at faster dividers too but could be less reliable
|
|
||||||
ALMAZNY_SPIT_WR = 32
|
|
||||||
|
|
||||||
|
|
||||||
class Mirny:
|
class Mirny:
|
||||||
"""
|
"""
|
||||||
|
@ -132,6 +122,18 @@ class Mirny:
|
||||||
self.write_reg(1, (self.clk_sel << 4))
|
self.write_reg(1, (self.clk_sel << 4))
|
||||||
delay(1000 * us)
|
delay(1000 * us)
|
||||||
|
|
||||||
|
@portable(flags={"fast-math"})
|
||||||
|
def att_to_mu(self, att):
|
||||||
|
"""Convert an attenuation setting in dB to machine units.
|
||||||
|
|
||||||
|
:param att: Attenuation setting in dB.
|
||||||
|
:return: Digital attenuation setting.
|
||||||
|
"""
|
||||||
|
code = int32(255) - int32(round(att * 8))
|
||||||
|
if code < 0 or code > 255:
|
||||||
|
raise ValueError("Invalid Mirny attenuation!")
|
||||||
|
return code
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def set_att_mu(self, channel, att):
|
def set_att_mu(self, channel, att):
|
||||||
"""Set digital step attenuator in machine units.
|
"""Set digital step attenuator in machine units.
|
||||||
|
@ -141,6 +143,21 @@ class Mirny:
|
||||||
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 16, SPIT_WR, SPI_CS)
|
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 16, SPIT_WR, SPI_CS)
|
||||||
self.bus.write(((channel | 8) << 25) | (att << 16))
|
self.bus.write(((channel | 8) << 25) | (att << 16))
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_att(self, channel, att):
|
||||||
|
"""Set digital step attenuator in SI units.
|
||||||
|
|
||||||
|
This method will write the attenuator settings of the selected channel.
|
||||||
|
|
||||||
|
.. seealso:: :meth:`set_att_mu`
|
||||||
|
|
||||||
|
:param channel: Attenuator channel (0-3).
|
||||||
|
:param att: Attenuation setting in dB. Higher value is more
|
||||||
|
attenuation. Minimum attenuation is 0*dB, maximum attenuation is
|
||||||
|
31.5*dB.
|
||||||
|
"""
|
||||||
|
self.set_att_mu(channel, self.att_to_mu(att))
|
||||||
|
|
||||||
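A quick check of the att_to_mu() conversion added above (0.125 dB per step, with the code inverted with respect to attenuation), followed by how it is typically called; the channel number is illustrative:

att = 10.5                       # dB
code = 255 - round(att * 8)      # 255 - 84 = 171
assert code == 171

# in a kernel, equivalently:
#     self.mirny0.set_att(2, 10.5*dB)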
@kernel
|
@kernel
|
||||||
def write_ext(self, addr, length, data, ext_div=SPIT_WR):
|
def write_ext(self, addr, length, data, ext_div=SPIT_WR):
|
||||||
"""Perform SPI write to a prefixed address"""
|
"""Perform SPI write to a prefixed address"""
|
||||||
|
@ -150,106 +167,3 @@ class Mirny:
|
||||||
if length < 32:
|
if length < 32:
|
||||||
data <<= 32 - length
|
data <<= 32 - length
|
||||||
self.bus.write(data)
|
self.bus.write(data)
|
||||||
|
|
||||||
|
|
||||||
class Almazny:
|
|
||||||
"""
|
|
||||||
Almazny (High frequency mezzanine board for Mirny)
|
|
||||||
|
|
||||||
:param host_mirny - Mirny device Almazny is connected to
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, dmgr, host_mirny):
|
|
||||||
self.mirny_cpld = dmgr.get(host_mirny)
|
|
||||||
self.att_mu = [0x3f] * 4
|
|
||||||
self.channel_sw = [0] * 4
|
|
||||||
self.output_enable = False
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def init(self):
|
|
||||||
self.output_toggle(self.output_enable)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def att_to_mu(self, att):
|
|
||||||
"""
|
|
||||||
Convert an attenuator setting in dB to machine units.
|
|
||||||
|
|
||||||
:param att: attenuator setting in dB [0-31.5]
|
|
||||||
:return: attenuator setting in machine units
|
|
||||||
"""
|
|
||||||
mu = round(att * 2.0)
|
|
||||||
if mu > 63 or mu < 0:
|
|
||||||
raise ValueError("Invalid Almazny attenuator settings!")
|
|
||||||
return mu
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def mu_to_att(self, att_mu):
|
|
||||||
"""
|
|
||||||
Convert a digital attenuator setting to dB.
|
|
||||||
|
|
||||||
:param att_mu: attenuator setting in machine units
|
|
||||||
:return: attenuator setting in dB
|
|
||||||
"""
|
|
||||||
return att_mu / 2
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_att(self, channel, att, rf_switch=True):
|
|
||||||
"""
|
|
||||||
Sets attenuators on chosen shift register (channel).
|
|
||||||
:param channel - index of the register [0-3]
|
|
||||||
:param att_mu - attenuation setting in dBm [0-31.5]
|
|
||||||
:param rf_switch - rf switch (bool)
|
|
||||||
"""
|
|
||||||
self.set_att_mu(channel, self.att_to_mu(att), rf_switch)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_att_mu(self, channel, att_mu, rf_switch=True):
|
|
||||||
"""
|
|
||||||
Sets attenuators on chosen shift register (channel).
|
|
||||||
:param channel - index of the register [0-3]
|
|
||||||
:param att_mu - attenuation setting in machine units [0-63]
|
|
||||||
:param rf_switch - rf switch (bool)
|
|
||||||
"""
|
|
||||||
self.channel_sw[channel] = 1 if rf_switch else 0
|
|
||||||
self.att_mu[channel] = att_mu
|
|
||||||
self._update_register(channel)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def output_toggle(self, oe):
|
|
||||||
"""
|
|
||||||
Toggles output on all shift registers on or off.
|
|
||||||
:param oe - toggle output enable (bool)
|
|
||||||
"""
|
|
||||||
self.output_enable = oe
|
|
||||||
cfg_reg = self.mirny_cpld.read_reg(1)
|
|
||||||
en = 1 if self.output_enable else 0
|
|
||||||
delay(100 * us)
|
|
||||||
new_reg = (en << ALMAZNY_OE_SHIFT) | (cfg_reg & 0x3FF)
|
|
||||||
self.mirny_cpld.write_reg(1, new_reg)
|
|
||||||
delay(100 * us)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def _flip_mu_bits(self, mu):
|
|
||||||
# in this form MSB is actually 0.5dB attenuator
|
|
||||||
# unnatural for users, so we flip the six bits
|
|
||||||
return (((mu & 0x01) << 5)
|
|
||||||
| ((mu & 0x02) << 3)
|
|
||||||
| ((mu & 0x04) << 1)
|
|
||||||
| ((mu & 0x08) >> 1)
|
|
||||||
| ((mu & 0x10) >> 3)
|
|
||||||
| ((mu & 0x20) >> 5))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def _update_register(self, ch):
|
|
||||||
self.mirny_cpld.write_ext(
|
|
||||||
ALMAZNY_REG_BASE + ch,
|
|
||||||
8,
|
|
||||||
self._flip_mu_bits(self.att_mu[ch]) | (self.channel_sw[ch] << 6),
|
|
||||||
ALMAZNY_SPIT_WR
|
|
||||||
)
|
|
||||||
delay(100 * us)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def _update_all_registers(self):
|
|
||||||
for i in range(4):
|
|
||||||
self._update_register(i)
|
|
|
@ -1,47 +0,0 @@
|
||||||
from artiq.experiment import kernel
|
|
||||||
from artiq.coredevice.i2c import (
|
|
||||||
i2c_start, i2c_write, i2c_read, i2c_stop, I2CError)
|
|
||||||
|
|
||||||
|
|
||||||
class PCF8574A:
|
|
||||||
"""Driver for the PCF8574 I2C remote 8-bit I/O expander.
|
|
||||||
|
|
||||||
I2C transactions not real-time, and are performed by the CPU without
|
|
||||||
involving RTIO.
|
|
||||||
"""
|
|
||||||
def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"):
|
|
||||||
self.core = dmgr.get(core_device)
|
|
||||||
self.busno = busno
|
|
||||||
self.address = address
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set(self, data):
|
|
||||||
"""Drive data on the quasi-bidirectional pins.
|
|
||||||
|
|
||||||
:param data: Pin data. High bits are weakly driven high
|
|
||||||
(and thus inputs), low bits are strongly driven low.
|
|
||||||
"""
|
|
||||||
i2c_start(self.busno)
|
|
||||||
try:
|
|
||||||
if not i2c_write(self.busno, self.address):
|
|
||||||
raise I2CError("PCF8574A failed to ack address")
|
|
||||||
if not i2c_write(self.busno, data):
|
|
||||||
raise I2CError("PCF8574A failed to ack data")
|
|
||||||
finally:
|
|
||||||
i2c_stop(self.busno)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def get(self):
|
|
||||||
"""Retrieve quasi-bidirectional pin input data.
|
|
||||||
|
|
||||||
:return: Pin data
|
|
||||||
"""
|
|
||||||
i2c_start(self.busno)
|
|
||||||
ret = 0
|
|
||||||
try:
|
|
||||||
if not i2c_write(self.busno, self.address | 1):
|
|
||||||
raise I2CError("PCF8574A failed to ack address")
|
|
||||||
ret = i2c_read(self.busno, False)
|
|
||||||
finally:
|
|
||||||
i2c_stop(self.busno)
|
|
||||||
return ret
|
|
|
@ -9,6 +9,10 @@ from artiq.coredevice.trf372017 import TRF372017
|
||||||
|
|
||||||
|
|
||||||
PHASER_BOARD_ID = 19
|
PHASER_BOARD_ID = 19
|
||||||
|
|
||||||
|
PHASER_GW_BASE = 1
|
||||||
|
PHASER_GW_MIQRO = 2
|
||||||
|
|
||||||
PHASER_ADDR_BOARD_ID = 0x00
|
PHASER_ADDR_BOARD_ID = 0x00
|
||||||
PHASER_ADDR_HW_REV = 0x01
|
PHASER_ADDR_HW_REV = 0x01
|
||||||
PHASER_ADDR_GW_REV = 0x02
|
PHASER_ADDR_GW_REV = 0x02
|
||||||
|
@ -40,6 +44,20 @@ PHASER_ADDR_DUC1_P = 0x26
|
||||||
PHASER_ADDR_DAC1_DATA = 0x28
|
PHASER_ADDR_DAC1_DATA = 0x28
|
||||||
PHASER_ADDR_DAC1_TEST = 0x2c
|
PHASER_ADDR_DAC1_TEST = 0x2c
|
||||||
|
|
||||||
|
# servo registers
|
||||||
|
PHASER_ADDR_SERVO_CFG0 = 0x30
|
||||||
|
PHASER_ADDR_SERVO_CFG1 = 0x31
|
||||||
|
|
||||||
|
# 0x32 - 0x71 servo coefficients + offset data
|
||||||
|
PHASER_ADDR_SERVO_DATA_BASE = 0x32
|
||||||
|
|
||||||
|
# 0x72 - 0x78 Miqro channel profile/window memories
|
||||||
|
PHASER_ADDR_MIQRO_MEM_ADDR = 0x72
|
||||||
|
PHASER_ADDR_MIQRO_MEM_DATA = 0x74
|
||||||
|
|
||||||
|
# Miqro profile memory select
|
||||||
|
PHASER_MIQRO_SEL_PROFILE = 1 << 14
|
||||||
|
|
||||||
PHASER_SEL_DAC = 1 << 0
|
PHASER_SEL_DAC = 1 << 0
|
||||||
PHASER_SEL_TRF0 = 1 << 1
|
PHASER_SEL_TRF0 = 1 << 1
|
||||||
PHASER_SEL_TRF1 = 1 << 2
|
PHASER_SEL_TRF1 = 1 << 2
|
||||||
|
@ -58,6 +76,11 @@ PHASER_DAC_SEL_TEST = 1
|
||||||
|
|
||||||
PHASER_HW_REV_VARIANT = 1 << 4
|
PHASER_HW_REV_VARIANT = 1 << 4
|
||||||
|
|
||||||
|
SERVO_COEFF_WIDTH = 16
|
||||||
|
SERVO_DATA_WIDTH = 16
|
||||||
|
SERVO_COEFF_SHIFT = 14
|
||||||
|
SERVO_T_CYCLE = (32+12+192+24+4)*ns # Must match gateware ADC parameters
|
||||||
|
|
||||||
|
|
||||||
class Phaser:
|
class Phaser:
|
||||||
"""Phaser 4-channel, 16-bit, 1 GS/s DAC coredevice driver.
|
"""Phaser 4-channel, 16-bit, 1 GS/s DAC coredevice driver.
|
||||||
|
@ -65,6 +88,26 @@ class Phaser:
|
||||||
Phaser contains a 4 channel, 1 GS/s DAC chip with integrated upconversion,
|
Phaser contains a 4 channel, 1 GS/s DAC chip with integrated upconversion,
|
||||||
quadrature modulation compensation and interpolation features.
|
quadrature modulation compensation and interpolation features.
|
||||||
|
|
||||||
|
The coredevice RTIO PHY and the Phaser gateware come in different modes
|
||||||
|
that have different features. Phaser mode and coredevice PHY mode are both
|
||||||
|
selected at their respective gateware compile-time and need to match.
|
||||||
|
|
||||||
|
=============== ============== ===================================
|
||||||
|
Phaser gateware Coredevice PHY Features per :class:`PhaserChannel`
|
||||||
|
=============== ============== ===================================
|
||||||
|
Base <= v0.5 Base Base (5 :class:`PhaserOscillator`)
|
||||||
|
Base >= v0.6 Base Base + Servo
|
||||||
|
Miqro >= v0.6 Miqro :class:`Miqro`
|
||||||
|
=============== ============== ===================================
|
||||||
|
|
||||||
|
The coredevice driver (this class and :class:`PhaserChannel`) exposes
|
||||||
|
the superset of all functionality regardless of the Coredevice RTIO PHY
|
||||||
|
or Phaser gateware modes. This is to evade type unification limitations.
|
||||||
|
Features absent in Coredevice PHY/Phaser gateware will not work and
|
||||||
|
should not be accessed.
|
||||||
|
|
||||||
|
**Base mode**
|
||||||
|
|
||||||
The coredevice produces 2 IQ (in-phase and quadrature) data streams with 25
|
The coredevice produces 2 IQ (in-phase and quadrature) data streams with 25
|
||||||
MS/s and 14 bit per quadrature. Each data stream supports 5 independent
|
MS/s and 14 bit per quadrature. Each data stream supports 5 independent
|
||||||
numerically controlled IQ oscillators (NCOs, DDSs with 32 bit frequency, 16
|
numerically controlled IQ oscillators (NCOs, DDSs with 32 bit frequency, 16
|
||||||
|
@ -95,6 +138,14 @@ class Phaser:
|
||||||
absolute phase with respect to other RTIO input and output events
|
absolute phase with respect to other RTIO input and output events
|
||||||
(see `get_next_frame_mu()`).
|
(see `get_next_frame_mu()`).
|
||||||
|
|
||||||
|
**Miqro mode**
|
||||||
|
|
||||||
|
See :class:`Miqro`
|
||||||
|
|
||||||
|
Here the DAC operates in 4x interpolation.
|
||||||
|
|
||||||
|
**Analog flow**
|
||||||
|
|
||||||
The four analog DAC outputs are passed through anti-aliasing filters.
|
The four analog DAC outputs are passed through anti-aliasing filters.
|
||||||
|
|
||||||
In the baseband variant, the even/in-phase DAC channels feed 31.5 dB range
|
In the baseband variant, the even/in-phase DAC channels feed 31.5 dB range
|
||||||
|
@ -112,6 +163,33 @@ class Phaser:
|
||||||
configured through a shared SPI bus that is accessed and controlled via
|
configured through a shared SPI bus that is accessed and controlled via
|
||||||
FPGA registers.
|
FPGA registers.
|
||||||
|
|
||||||
|
**Servo**
|
||||||
|
|
||||||
|
Each phaser output channel features a servo to control the RF output amplitude
|
||||||
|
using feedback from an ADC. The servo consists of a first order IIR (infinite
|
||||||
|
impulse response) filter fed by the ADC and a multiplier that scales the I
|
||||||
|
and Q datastreams from the DUC by the IIR output. The IIR state is updated at
|
||||||
|
the 3.788 MHz ADC sampling rate.
|
||||||
|
|
||||||
|
Each channel IIR features 4 profiles, each consisting of the [b0, b1, a1] filter
|
||||||
|
coefficients as well as an output offset. The coefficients and offset can be
|
||||||
|
set for each profile individually and the profiles each have their own ``y0``,
|
||||||
|
``y1`` output registers (the ``x0``, ``x1`` inputs are shared). To avoid
|
||||||
|
transient effects, care should be taken to not update the coefficients in the
|
||||||
|
currently selected profile.
|
||||||
|
|
||||||
|
The servo can be en- or disabled for each channel. When disabled, the servo
|
||||||
|
output multiplier is simply bypassed and the datastream reaches the DAC unscaled.
|
||||||
|
|
||||||
|
The IIR output can be put on hold for each channel. In hold mode, the filter
|
||||||
|
still ingests samples and updates its input ``x0`` and ``x1`` registers, but
|
||||||
|
does not update the ``y0``, ``y1`` output registers.
|
||||||
|
|
||||||
|
After power-up the servo is disabled, in profile 0, with coefficients [0, 0, 0]
|
||||||
|
and hold is enabled. If older gateware without the servo is loaded onto the
|
||||||
|
Phaser FPGA, the device simply behaves as if the servo is disabled and none of
|
||||||
|
the servo functions have any effect.
|
||||||
|
|
||||||
.. note:: Various register settings of the DAC and the quadrature
|
.. note:: Various register settings of the DAC and the quadrature
|
||||||
upconverters are available to be modified through the `dac`, `trf0`,
|
upconverters are available to be modified through the `dac`, `trf0`,
|
||||||
`trf1` dictionaries. These can be set through the device database
|
`trf1` dictionaries. These can be set through the device database
|
||||||
|
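A hedged sketch of the fixed-point format implied by the servo constants above (16-bit coefficients with a 14-bit fractional shift); this illustrates the number format only and is not necessarily the exact conversion the driver performs:

SERVO_COEFF_WIDTH = 16
SERVO_COEFF_SHIFT = 14

def servo_coeff_to_mu(coeff):
    # quantize a float coefficient such as b0, b1 or a1
    mu = int(round(coeff * (1 << SERVO_COEFF_SHIFT)))
    lim = 1 << (SERVO_COEFF_WIDTH - 1)
    if not -lim <= mu < lim:
        raise ValueError("coefficient out of range")
    return mu

assert servo_coeff_to_mu(0.5) == 8192   # 0.5 * 2**14

The init() changes below show the corresponding safe power-up state, channel.set_servo(profile=0, enable=0, hold=1).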
@ -151,7 +229,7 @@ class Phaser:
|
||||||
"dac_mmap"}
|
"dac_mmap"}
|
||||||
|
|
||||||
def __init__(self, dmgr, channel_base, miso_delay=1, tune_fifo_offset=True,
|
def __init__(self, dmgr, channel_base, miso_delay=1, tune_fifo_offset=True,
|
||||||
clk_sel=0, sync_dly=0, dac=None, trf0=None, trf1=None,
|
clk_sel=0, sync_dly=0, dac=None, trf0=None, trf1=None, gw_rev=PHASER_GW_BASE,
|
||||||
core_device="core"):
|
core_device="core"):
|
||||||
self.channel_base = channel_base
|
self.channel_base = channel_base
|
||||||
self.core = dmgr.get(core_device)
|
self.core = dmgr.get(core_device)
|
||||||
|
@ -165,12 +243,25 @@ class Phaser:
|
||||||
self.clk_sel = clk_sel
|
self.clk_sel = clk_sel
|
||||||
self.tune_fifo_offset = tune_fifo_offset
|
self.tune_fifo_offset = tune_fifo_offset
|
||||||
self.sync_dly = sync_dly
|
self.sync_dly = sync_dly
|
||||||
|
self.gw_rev = gw_rev # verified in init()
|
||||||
|
|
||||||
self.dac_mmap = DAC34H84(dac).get_mmap()
|
self.dac_mmap = DAC34H84(dac).get_mmap()
|
||||||
|
|
||||||
self.channel = [PhaserChannel(self, ch, trf)
|
self.channel = [PhaserChannel(self, ch, trf)
|
||||||
for ch, trf in enumerate([trf0, trf1])]
|
for ch, trf in enumerate([trf0, trf1])]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel_base, gw_rev=PHASER_GW_BASE, **kwargs):
|
||||||
|
if gw_rev == PHASER_GW_MIQRO:
|
||||||
|
return [(channel_base, "base"), (channel_base + 1, "ch0"), (channel_base + 2, "ch1")]
|
||||||
|
elif gw_rev == PHASER_GW_BASE:
|
||||||
|
return [(channel_base, "base"),
|
||||||
|
(channel_base + 1, "ch0 frequency"),
|
||||||
|
(channel_base + 2, "ch0 phase amplitude"),
|
||||||
|
(channel_base + 3, "ch1 frequency"),
|
||||||
|
(channel_base + 4, "ch1 phase amplitude")]
|
||||||
|
raise ValueError("invalid gw_rev `{}`".format(gw_rev))
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def init(self, debug=False):
|
def init(self, debug=False):
|
||||||
"""Initialize the board.
|
"""Initialize the board.
|
||||||
|
@ -190,8 +281,9 @@ class Phaser:
|
||||||
|
|
||||||
gw_rev = self.read8(PHASER_ADDR_GW_REV)
|
gw_rev = self.read8(PHASER_ADDR_GW_REV)
|
||||||
if debug:
|
if debug:
|
||||||
print("gw_rev:", gw_rev)
|
print("gw_rev:", self.gw_rev)
|
||||||
self.core.break_realtime()
|
self.core.break_realtime()
|
||||||
|
assert gw_rev == self.gw_rev
|
||||||
delay(.1*ms) # slack
|
delay(.1*ms) # slack
|
||||||
|
|
||||||
# allow a few errors during startup and alignment since boot
|
# allow a few errors during startup and alignment since boot
|
||||||
|
@ -237,7 +329,7 @@ class Phaser:
|
||||||
|
|
||||||
for data in self.dac_mmap:
|
for data in self.dac_mmap:
|
||||||
self.dac_write(data >> 16, data)
|
self.dac_write(data >> 16, data)
|
||||||
delay(40*us)
|
delay(120*us)
|
||||||
self.dac_sync()
|
self.dac_sync()
|
||||||
delay(40*us)
|
delay(40*us)
|
||||||
|
|
||||||
|
@ -308,8 +400,11 @@ class Phaser:
|
||||||
if channel.get_att_mu() != 0x5a:
|
if channel.get_att_mu() != 0x5a:
|
||||||
raise ValueError("attenuator test failed")
|
raise ValueError("attenuator test failed")
|
||||||
delay(.1*ms)
|
delay(.1*ms)
|
||||||
channel.set_att_mu(0x00) # minimum attenuation
|
channel.set_att_mu(0x00) # maximum attenuation
|
||||||
|
|
||||||
|
channel.set_servo(profile=0, enable=0, hold=1)
|
||||||
|
|
||||||
|
if self.gw_rev == PHASER_GW_BASE:
|
||||||
# test oscillators and DUC
|
# test oscillators and DUC
|
||||||
for i in range(len(channel.oscillator)):
|
for i in range(len(channel.oscillator)):
|
||||||
oscillator = channel.oscillator[i]
|
oscillator = channel.oscillator[i]
|
||||||
|
@ -337,6 +432,9 @@ class Phaser:
|
||||||
abs(data_i - data_q) > 2):
|
abs(data_i - data_q) > 2):
|
||||||
raise ValueError("DUC+oscillator phase/amplitude test failed")
|
raise ValueError("DUC+oscillator phase/amplitude test failed")
|
||||||
|
|
||||||
|
if self.gw_rev == PHASER_GW_MIQRO:
|
||||||
|
channel.miqro.reset()
|
||||||
|
|
||||||
if is_baseband:
|
if is_baseband:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -382,6 +480,12 @@ class Phaser:
|
||||||
response = rtio_input_data(self.channel_base)
|
response = rtio_input_data(self.channel_base)
|
||||||
return response >> self.miso_delay
|
return response >> self.miso_delay
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def write16(self, addr, data: TInt32):
|
||||||
|
"""Write 16 bit to a sequence of FPGA registers."""
|
||||||
|
self.write8(addr, data >> 8)
|
||||||
|
self.write8(addr + 1, data)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def write32(self, addr, data: TInt32):
|
def write32(self, addr, data: TInt32):
|
||||||
"""Write 32 bit to a sequence of FPGA registers."""
|
"""Write 32 bit to a sequence of FPGA registers."""
|
||||||
|
@ -460,8 +564,7 @@ class Phaser:
|
||||||
* :const:`PHASER_STA_TRF1_LD`: Quadrature upconverter 1 lock detect
|
* :const:`PHASER_STA_TRF1_LD`: Quadrature upconverter 1 lock detect
|
||||||
* :const:`PHASER_STA_TERM0`: ADC channel 0 termination indicator
|
* :const:`PHASER_STA_TERM0`: ADC channel 0 termination indicator
|
||||||
* :const:`PHASER_STA_TERM1`: ADC channel 1 termination indicator
|
* :const:`PHASER_STA_TERM1`: ADC channel 1 termination indicator
|
||||||
* :const:`PHASER_STA_SPI_IDLE`: SPI machine is idle and data registers
|
* :const:`PHASER_STA_SPI_IDLE`: SPI machine is idle and data registers can be read/written
|
||||||
can be read/written
|
|
||||||
|
|
||||||
:return: Status register
|
:return: Status register
|
||||||
"""
|
"""
|
||||||
|
@ -617,7 +720,7 @@ class Phaser:
|
||||||
.. note:: Synchronising the NCO clears the phase-accumulator
|
.. note:: Synchronising the NCO clears the phase-accumulator
|
||||||
"""
|
"""
|
||||||
config1f = self.dac_read(0x1f)
|
config1f = self.dac_read(0x1f)
|
||||||
delay(.1*ms)
|
delay(.4*ms)
|
||||||
self.dac_write(0x1f, config1f & ~int32(1 << 1))
|
self.dac_write(0x1f, config1f & ~int32(1 << 1))
|
||||||
self.dac_write(0x1f, config1f | (1 << 1))
|
self.dac_write(0x1f, config1f | (1 << 1))
|
||||||
|
|
||||||
|
@ -737,6 +840,8 @@ class Phaser:
|
||||||
if good & (1 << o):
|
if good & (1 << o):
|
||||||
sum += o
|
sum += o
|
||||||
count += 1
|
count += 1
|
||||||
|
if count == 0:
|
||||||
|
raise ValueError("no good fifo offset")
|
||||||
best = ((sum // count) + offset) % 8
|
best = ((sum // count) + offset) % 8
|
||||||
self.dac_write(0x09, (config9 & 0x1fff) | (best << 13))
|
self.dac_write(0x09, (config9 & 0x1fff) | (best << 13))
|
||||||
return best
|
return best
|
||||||
|
@ -747,18 +852,21 @@ class PhaserChannel:
|
||||||
|
|
||||||
A Phaser channel contains:
|
A Phaser channel contains:
|
||||||
|
|
||||||
* multiple oscillators (in the coredevice phy),
|
* multiple :class:`PhaserOscillator` (in the coredevice phy),
|
||||||
* an interpolation chain and digital upconverter (DUC) on Phaser,
|
* an interpolation chain and digital upconverter (DUC) on Phaser,
|
||||||
|
* a :class:`Miqro` instance on Phaser,
|
||||||
* several channel-specific settings in the DAC:
|
* several channel-specific settings in the DAC:
|
||||||
|
|
||||||
* quadrature modulation compensation QMC
|
* quadrature modulation compensation QMC
|
||||||
* numerically controlled oscillator NCO or coarse mixer CMIX,
|
* numerically controlled oscillator NCO or coarse mixer CMIX,
|
||||||
* the analog quadrature upconverter (in the Phaser-Upconverter hardware
|
|
||||||
variant), and
|
* the analog quadrature upconverter (in the Phaser-Upconverter hardware variant), and
|
||||||
* a digitally controlled step attenuator.
|
* a digitally controlled step attenuator.
|
||||||
|
|
||||||
Attributes:
|
Attributes:
|
||||||
|
|
||||||
* :attr:`oscillator`: List of five :class:`PhaserOscillator`.
|
* :attr:`oscillator`: List of five :class:`PhaserOscillator`.
|
||||||
|
* :attr:`miqro`: A :class:`Miqro`.
|
||||||
|
|
||||||
.. note:: The amplitude sum of the oscillators must be less than one to
|
.. note:: The amplitude sum of the oscillators must be less than one to
|
||||||
avoid clipping or overflow. If any of the DDS or DUC frequencies are
|
avoid clipping or overflow. If any of the DDS or DUC frequencies are
|
||||||
|
@ -771,6 +879,8 @@ class PhaserChannel:
|
||||||
changes in oscillator parameters, the overshoot can lead to clipping
|
changes in oscillator parameters, the overshoot can lead to clipping
|
||||||
or overflow after the interpolation. Either band-limit any changes
|
or overflow after the interpolation. Either band-limit any changes
|
||||||
in the oscillator parameters or back off the amplitude sufficiently.
|
in the oscillator parameters or back off the amplitude sufficiently.
|
||||||
|
Miqro is not affected by this. But both the oscillators and Miqro can
|
||||||
|
be affected by intrinsic overshoot of the interpolator on the DAC.
|
||||||
"""
|
"""
|
||||||
kernel_invariants = {"index", "phaser", "trf_mmap"}
|
kernel_invariants = {"index", "phaser", "trf_mmap"}
|
||||||
|
|
||||||
|
@ -780,6 +890,7 @@ class PhaserChannel:
|
||||||
self.trf_mmap = TRF372017(trf).get_mmap()
|
self.trf_mmap = TRF372017(trf).get_mmap()
|
||||||
|
|
||||||
self.oscillator = [PhaserOscillator(self, osc) for osc in range(5)]
|
self.oscillator = [PhaserOscillator(self, osc) for osc in range(5)]
|
||||||
|
self.miqro = Miqro(self)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def get_dac_data(self) -> TInt32:
|
def get_dac_data(self) -> TInt32:
|
||||||
|
@ -1039,6 +1150,133 @@ class PhaserChannel:
|
||||||
data = data ^ ((1 << 12) | (1 << 13))
|
data = data ^ ((1 << 12) | (1 << 13))
|
||||||
self.trf_write(data)
|
self.trf_write(data)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_servo(self, profile=0, enable=0, hold=0):
|
||||||
|
"""Set the servo configuration.
|
||||||
|
|
||||||
|
:param enable: 1 to enable servo, 0 to disable servo (default). If disabled,
|
||||||
|
the servo is bypassed and hold is enforced since the control loop is broken.
|
||||||
|
:param hold: 1 to hold the servo IIR filter output constant, 0 for normal operation.
|
||||||
|
:param profile: Profile index to select for channel. (0 to 3)
|
||||||
|
"""
|
||||||
|
if (profile < 0) or (profile > 3):
|
||||||
|
raise ValueError("invalid profile index")
|
||||||
|
addr = PHASER_ADDR_SERVO_CFG0 + self.index
|
||||||
|
# enforce hold if the servo is disabled
|
||||||
|
data = (profile << 2) | (((hold | ~enable) & 1) << 1) | (enable & 1)
|
||||||
|
self.phaser.write8(addr, data)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_iir_mu(self, profile, b0, b1, a1, offset):
|
||||||
|
"""Load a servo profile consiting of the three filter coefficients and an output offset.
|
||||||
|
|
||||||
|
Avoid setting the IIR parameters of the currently active profile.
|
||||||
|
|
||||||
|
The recurrence relation is (all data signed and MSB aligned):
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
a_0 y_n = a_1 y_{n - 1} + b_0 x_n + b_1 x_{n - 1} + o
|
||||||
|
|
||||||
|
Where:
|
||||||
|
|
||||||
|
* :math:`y_n` and :math:`y_{n-1}` are the current and previous
|
||||||
|
filter outputs, clipped to :math:`[0, 1[`.
|
||||||
|
* :math:`x_n` and :math:`x_{n-1}` are the current and previous
|
||||||
|
filter inputs in :math:`[-1, 1[`.
|
||||||
|
* :math:`o` is the offset
|
||||||
|
* :math:`a_0` is the normalization factor :math:`2^{14}`
|
||||||
|
* :math:`a_1` is the feedback gain
|
||||||
|
* :math:`b_0` and :math:`b_1` are the feedforward gains for the two
|
||||||
|
delays
|
||||||
|
|
||||||
|
.. seealso:: :meth:`set_iir`
|
||||||
|
|
||||||
|
:param profile: Profile to set (0 to 3)
|
||||||
|
:param b0: b0 filter coefficient (16 bit signed)
|
||||||
|
:param b1: b1 filter coefficient (16 bit signed)
|
||||||
|
:param a1: a1 filter coefficient (16 bit signed)
|
||||||
|
:param offset: Output offset (16 bit signed)
|
||||||
|
"""
|
||||||
|
if (profile < 0) or (profile > 3):
|
||||||
|
raise ValueError("invalid profile index")
|
||||||
|
# 32 byte-sized data registers per channel and 8 (2 bytes * (3 coefficients + 1 offset)) registers per profile
|
||||||
|
addr = PHASER_ADDR_SERVO_DATA_BASE + (8 * profile) + (self.index * 32)
|
||||||
|
for data in [b0, b1, a1, offset]:
|
||||||
|
self.phaser.write16(addr, data)
|
||||||
|
addr += 2
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_iir(self, profile, kp, ki=0., g=0., x_offset=0., y_offset=0.):
|
||||||
|
"""Set servo profile IIR coefficients.
|
||||||
|
|
||||||
|
Avoid setting the IIR parameters of the currently active profile.
|
||||||
|
|
||||||
|
Gains are given in units of output full scale per input full scale.
|
||||||
|
|
||||||
|
.. note:: Due to inherent constraints of the fixed point datatypes and IIR
|
||||||
|
filters, the ``x_offset`` (setpoint) resolution depends on the selected
|
||||||
|
gains. Low ``ki`` gains will lead to a low ``x_offset`` resolution.
|
||||||
|
|
||||||
|
The transfer function is (up to time discretization and
|
||||||
|
coefficient quantization errors):
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
H(s) = k_p + \\frac{k_i}{s + \\frac{k_i}{g}}
|
||||||
|
|
||||||
|
Where:
|
||||||
|
* :math:`s = \\sigma + i\\omega` is the complex frequency
|
||||||
|
* :math:`k_p` is the proportional gain
|
||||||
|
* :math:`k_i` is the integrator gain
|
||||||
|
* :math:`g` is the integrator gain limit
|
||||||
|
|
||||||
|
:param profile: Profile number (0-3)
|
||||||
|
:param kp: Proportional gain. This is usually negative (closed
|
||||||
|
loop, positive ADC voltage, positive setpoint). When 0, this
|
||||||
|
implements a pure I controller.
|
||||||
|
:param ki: Integrator gain (rad/s). Equivalent to the gain at 1 Hz.
|
||||||
|
When 0 (the default) this implements a pure P controller.
|
||||||
|
Same sign as ``kp``.
|
||||||
|
:param g: Integrator gain limit (1). When 0 (the default) the
|
||||||
|
integrator gain limit is infinite. Same sign as ``ki``.
|
||||||
|
:param x_offset: IIR input offset. Used as the negative
|
||||||
|
setpoint when stabilizing to a desired input setpoint. Will
|
||||||
|
be converted to an equivalent output offset and added to y_offset.
|
||||||
|
:param y_offset: IIR output offset.
|
||||||
|
"""
|
||||||
|
NORM = 1 << SERVO_COEFF_SHIFT
|
||||||
|
COEFF_MAX = 1 << SERVO_COEFF_WIDTH - 1
|
||||||
|
DATA_MAX = 1 << SERVO_DATA_WIDTH - 1
|
||||||
|
|
||||||
|
kp *= NORM
|
||||||
|
if ki == 0.:
|
||||||
|
# pure P
|
||||||
|
a1 = 0
|
||||||
|
b1 = 0
|
||||||
|
b0 = int(round(kp))
|
||||||
|
else:
|
||||||
|
# I or PI
|
||||||
|
ki *= NORM*SERVO_T_CYCLE/2.
|
||||||
|
if g == 0.:
|
||||||
|
c = 1.
|
||||||
|
a1 = NORM
|
||||||
|
else:
|
||||||
|
c = 1./(1. + ki/(g*NORM))
|
||||||
|
a1 = int(round((2.*c - 1.)*NORM))
|
||||||
|
b0 = int(round(kp + ki*c))
|
||||||
|
b1 = int(round(kp + (ki - 2.*kp)*c))
|
||||||
|
if b1 == -b0:
|
||||||
|
raise ValueError("low integrator gain and/or gain limit")
|
||||||
|
|
||||||
|
if (b0 >= COEFF_MAX or b0 < -COEFF_MAX or
|
||||||
|
b1 >= COEFF_MAX or b1 < -COEFF_MAX):
|
||||||
|
raise ValueError("high gains")
|
||||||
|
|
||||||
|
forward_gain = (b0 + b1) * (1 << SERVO_DATA_WIDTH - 1 - SERVO_COEFF_SHIFT)
|
||||||
|
effective_offset = int(round(DATA_MAX * y_offset + forward_gain * x_offset))
|
||||||
|
|
||||||
|
self.set_iir_mu(profile, b0, b1, a1, effective_offset)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
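For reference, the gain-to-coefficient quantization implemented above can be reproduced on the host. A minimal sketch, assuming the :math:`a_0 = 2^{14}` normalization stated earlier and the 3.788 MHz servo cycle; range and validity checks are omitted, so verify the actual driver constants before relying on exact numbers:

    # Host-side sketch of the PI-to-IIR coefficient mapping used by set_iir().
    # Assumes norm = 1 << 14 and a 3.788 MHz servo cycle; checks omitted.
    def pi_to_iir(kp, ki=0., g=0., norm=1 << 14, t_cycle=1/3.788e6):
        kp *= norm
        if ki == 0.:
            return int(round(kp)), 0, 0      # b0, b1, a1 (pure P)
        ki *= norm*t_cycle/2.
        c = 1. if g == 0. else 1./(1. + ki/(g*norm))
        a1 = norm if g == 0. else int(round((2.*c - 1.)*norm))
        b0 = int(round(kp + ki*c))
        b1 = int(round(kp + (ki - 2.*kp)*c))
        return b0, b1, a1

    print(pi_to_iir(kp=-0.1, ki=-1e5))    # (b0, b1, a1)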
class PhaserOscillator:
|
class PhaserOscillator:
|
||||||
"""Phaser IQ channel oscillator (NCO/DDS).
|
"""Phaser IQ channel oscillator (NCO/DDS).
|
||||||
|
@ -1096,3 +1334,305 @@ class PhaserOscillator:
|
||||||
raise ValueError("amplitude out of bounds")
|
raise ValueError("amplitude out of bounds")
|
||||||
pow = int32(round(phase*(1 << 16)))
|
pow = int32(round(phase*(1 << 16)))
|
||||||
self.set_amplitude_phase_mu(asf, pow, clr)
|
self.set_amplitude_phase_mu(asf, pow, clr)
|
||||||
|
|
||||||
|
|
||||||
|
class Miqro:
|
||||||
|
"""
|
||||||
|
Miqro pulse generator.
|
||||||
|
|
||||||
|
A Miqro instance represents one RF output. The DSP components are fully
|
||||||
|
contained in the Phaser gateware. The output is generated with
|
||||||
|
the following data flow:
|
||||||
|
|
||||||
|
**Oscillators**
|
||||||
|
|
||||||
|
* There are n_osc = 16 oscillators with oscillator IDs 0..n_osc-1.
|
||||||
|
* Each oscillator outputs one tone at any given time
|
||||||
|
|
||||||
|
* I/Q (quadrature, a.k.a. complex) 2x16 bit signed data
|
||||||
|
at tau = 4 ns sample intervals, 250 MS/s, Nyquist 125 MHz, bandwidth 200 MHz
|
||||||
|
(from f = -100..+100 MHz, taking into account the interpolation anti-aliasing
|
||||||
|
filters in subsequent interpolators),
|
||||||
|
* 32 bit frequency (f) resolution (~ 1/16 Hz),
|
||||||
|
* 16 bit unsigned amplitude (a) resolution
|
||||||
|
* 16 bit phase offset (p) resolution
|
||||||
|
|
||||||
|
* The output phase p' of each oscillator at time t (boot/reset/initialization of the
|
||||||
|
device at t=0) is then p' = f*t + p (mod 1 turn) where f and p are the (currently
|
||||||
|
active) profile frequency and phase offset.
|
||||||
|
* Note: The terms "phase coherent" and "phase tracking" are defined to refer to this
|
||||||
|
choice of oscillator output phase p'. Note that the phase offset p is not relative to
|
||||||
|
previous phase/profile/oscillator history, i.e. it is not accumulated on top of it.
|
||||||
|
It is "absolute" in the sense that frequency f and phase offset p fully determine
|
||||||
|
oscillator output phase p' at time t. This is unlike typical DDS behavior.
|
||||||
|
* Frequency, phase, and amplitude of each oscillator are configurable by selecting one of
|
||||||
|
n_profile = 32 profiles 0..n_profile-1. This selection is fast and can be done for
|
||||||
|
each pulse. The phase coherence defined above is guaranteed for each
|
||||||
|
profile individually.
|
||||||
|
* Note: one profile per oscillator (usually profile index 0) should be reserved
|
||||||
|
for the NOP (no operation, identity) profile, usually with zero amplitude.
|
||||||
|
* Data for each profile for each oscillator can be configured
|
||||||
|
individually. Storing profile data should be considered "expensive".
|
||||||
|
* Note: The annotation that some operation is "expensive" does not mean it is
|
||||||
|
impossible, just that it may take a significant amount of time and
|
||||||
|
resources to execute such that it may be impractical when used often or
|
||||||
|
during fast pulse sequences. Such operations are intended for use in calibration and
|
||||||
|
initialization.
|
||||||
|
|
||||||
|
**Summation**
|
||||||
|
|
||||||
|
* The oscillator outputs are added together (wrapping addition).
|
||||||
|
* The user must ensure that the sum of oscillators outputs does not exceed the
|
||||||
|
data range. In general that means that the sum of the amplitudes must not
|
||||||
|
exceed one.
|
||||||
|
|
||||||
|
**Shaper**
|
||||||
|
|
||||||
|
* The summed complex output stream is then multiplied with the complex-valued
|
||||||
|
output of a triggerable shaper.
|
||||||
|
* Triggering the shaper corresponds to passing a pulse from all oscillators to
|
||||||
|
the RF output.
|
||||||
|
* Selected profiles become active simultaneously (on the same output sample) when
|
||||||
|
triggering the shaper with the first shaper output sample.
|
||||||
|
* The shaper reads (replays) window samples from a memory of size n_window = 1 << 10.
|
||||||
|
* The window memory can be segmented by choosing different start indices
|
||||||
|
to support different windows.
|
||||||
|
* Each window memory segment starts with a header determining segment
|
||||||
|
length and interpolation parameters.
|
||||||
|
* The window samples are interpolated by a factor (rate change) between 1 and
|
||||||
|
r = 1 << 12.
|
||||||
|
* The interpolation order is constant, linear, quadratic, or cubic. This
|
||||||
|
corresponds to interpolation modes from rectangular window (1st order CIC
|
||||||
|
or zero order hold) to Parzen window (4th order CIC or cubic spline).
|
||||||
|
* This results in support for single shot pulse lengths (envelope support) between
|
||||||
|
tau and a bit more than r * n_window * tau = (1 << 12 + 10) tau ~ 17 ms.
|
||||||
|
* Windows can be configured to be head-less and/or tail-less, meaning they
|
||||||
|
do not feed zero-amplitude samples into the shaper before and after
|
||||||
|
each window respectively. This is used to implement pulses with arbitrary
|
||||||
|
length or CW output.
|
||||||
|
|
||||||
|
**Overall properties**
|
||||||
|
|
||||||
|
* The DAC may upconvert the signal by applying a frequency offset f1 with
|
||||||
|
phase p1.
|
||||||
|
* In the Upconverter Phaser variant, the analog quadrature upconverter
|
||||||
|
applies another frequency of f2 and phase p2.
|
||||||
|
* The resulting phase of the signal from one oscillator at the SMA output is
|
||||||
|
(f + f1 + f2)*t + p + s(t - t0) + p1 + p2 (mod 1 turn)
|
||||||
|
where s(t - t0) is the phase of the interpolated
|
||||||
|
shaper output, and t0 is the trigger time (fiducial of the shaper).
|
||||||
|
Unsurprisingly the frequency is the derivative of the phase.
|
||||||
|
* Group delays between pulse parameter updates are matched across oscillators,
|
||||||
|
shapers, and channels.
|
||||||
|
* The minimum time to change profiles and phase offsets is ~128 ns (estimate, TBC).
|
||||||
|
This is the minimum pulse interval.
|
||||||
|
The sustained pulse rate of the RTIO PHY/Fastlink is one pulse per Fastlink frame
|
||||||
|
(may be increased, TBC).
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, channel):
|
||||||
|
self.channel = channel
|
||||||
|
self.base_addr = (self.channel.phaser.channel_base + 1 +
|
||||||
|
self.channel.index) << 8
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def reset(self):
|
||||||
|
"""Establish no-output profiles and no-output window and execute them.
|
||||||
|
|
||||||
|
This establishes the first profile (index 0) on all oscillators as zero
|
||||||
|
amplitude, creates a trivial window (one sample with zero amplitude,
|
||||||
|
minimal interpolation), and executes a corresponding pulse.
|
||||||
|
"""
|
||||||
|
for osc in range(16):
|
||||||
|
self.set_profile_mu(osc, profile=0, ftw=0, asf=0)
|
||||||
|
delay(20*us)
|
||||||
|
self.set_window_mu(start=0, iq=[0], order=0)
|
||||||
|
self.pulse(window=0, profiles=[0])
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_profile_mu(self, oscillator, profile, ftw, asf, pow_=0):
|
||||||
|
"""Store an oscillator profile (machine units).
|
||||||
|
|
||||||
|
:param oscillator: Oscillator index (0 to 15)
|
||||||
|
:param profile: Profile index (0 to 31)
|
||||||
|
:param ftw: Frequency tuning word (32 bit signed integer on a 250 MHz clock)
|
||||||
|
:param asf: Amplitude scale factor (16 bit unsigned integer)
|
||||||
|
:param pow_: Phase offset word (16 bit integer)
|
||||||
|
"""
|
||||||
|
if oscillator >= 16:
|
||||||
|
raise ValueError("invalid oscillator index")
|
||||||
|
if profile >= 32:
|
||||||
|
raise ValueError("invalid profile index")
|
||||||
|
self.channel.phaser.write16(PHASER_ADDR_MIQRO_MEM_ADDR,
|
||||||
|
(self.channel.index << 15) | PHASER_MIQRO_SEL_PROFILE |
|
||||||
|
(oscillator << 6) | (profile << 1))
|
||||||
|
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA, ftw)
|
||||||
|
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA,
|
||||||
|
(asf & 0xffff) | (pow_ << 16))
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_profile(self, oscillator, profile, frequency, amplitude, phase=0.):
|
||||||
|
"""Store an oscillator profile.
|
||||||
|
|
||||||
|
:param oscillator: Oscillator index (0 to 15)
|
||||||
|
:param profile: Profile index (0 to 31)
|
||||||
|
:param frequency: Frequency in Hz (passband -100 to 100 MHz).
|
||||||
|
Interpreted in the Nyquist sense, i.e. aliased.
|
||||||
|
:param amplitude: Amplitude in units of full scale (0. to 1.)
|
||||||
|
:param phase: Phase in turns. See :class:`Miqro` for a definition of
|
||||||
|
phase in this context.
|
||||||
|
:return: The quantized 32 bit frequency tuning word
|
||||||
|
"""
|
||||||
|
ftw = int32(round(frequency*((1 << 30)/(62.5*MHz))))
|
||||||
|
asf = int32(round(amplitude*0xffff))
|
||||||
|
if asf < 0 or asf > 0xffff:
|
||||||
|
raise ValueError("amplitude out of bounds")
|
||||||
|
pow_ = int32(round(phase*(1 << 16)))
|
||||||
|
self.set_profile_mu(oscillator, profile, ftw, asf, pow_)
|
||||||
|
return ftw
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_window_mu(self, start, iq, rate=1, shift=0, order=3, head=1, tail=1):
|
||||||
|
"""Store a window segment (machine units)
|
||||||
|
|
||||||
|
:param start: Window start address (0 to 0x3ff)
|
||||||
|
:param iq: List of IQ window samples. Each window sample is an integer
|
||||||
|
containing the signed I part in the 16 LSB and the signed Q part in
|
||||||
|
the 16 MSB. The maximum window length is 0x3fe. The user must
|
||||||
|
ensure that this window does not overlap with other windows in the
|
||||||
|
memory.
|
||||||
|
:param rate: Interpolation rate change (1 to 1 << 12)
|
||||||
|
:param shift: Interpolator amplitude gain compensation in powers of 2 (0 to 63)
|
||||||
|
:param order: Interpolation order from 0 (corresponding to
|
||||||
|
constant/rectangular window/zero-order-hold/1st order CIC interpolation)
|
||||||
|
to 3 (corresponding to cubic/Parzen window/4th order CIC interpolation)
|
||||||
|
:param head: Update the interpolator settings and clear its state at the start
|
||||||
|
of the window. This also implies starting the envelope from zero.
|
||||||
|
:param tail: Feed zeros into the interpolator after the window samples.
|
||||||
|
In the absence of further pulses this will return the output envelope
|
||||||
|
to zero with the chosen interpolation.
|
||||||
|
:return: Next available window memory address after this segment.
|
||||||
|
"""
|
||||||
|
if start >= 1 << 10:
|
||||||
|
raise ValueError("start out of bounds")
|
||||||
|
if len(iq) >= 1 << 10:
|
||||||
|
raise ValueError("window length out of bounds")
|
||||||
|
if rate < 1 or rate > 1 << 12:
|
||||||
|
raise ValueError("rate out of bounds")
|
||||||
|
if shift > 0x3f:
|
||||||
|
raise ValueError("shift out of bounds")
|
||||||
|
if order > 3:
|
||||||
|
raise ValueError("order out of bounds")
|
||||||
|
self.channel.phaser.write16(PHASER_ADDR_MIQRO_MEM_ADDR,
|
||||||
|
(self.channel.index << 15) | start)
|
||||||
|
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA,
|
||||||
|
(len(iq) & 0x3ff) |
|
||||||
|
((rate - 1) << 10) |
|
||||||
|
(shift << 22) |
|
||||||
|
(order << 28) |
|
||||||
|
((head & 1) << 30) |
|
||||||
|
((tail & 1) << 31)
|
||||||
|
)
|
||||||
|
for iqi in iq:
|
||||||
|
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA, iqi)
|
||||||
|
delay(20*us) # slack for long windows
|
||||||
|
return (start + 1 + len(iq)) & 0x3ff
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_window(self, start, iq, period=4*ns, order=3, head=1, tail=1):
|
||||||
|
"""Store a window segment
|
||||||
|
|
||||||
|
:param start: Window start address (0 to 0x3ff)
|
||||||
|
:param iq: List of IQ window samples. Each window sample is a pair of
|
||||||
|
two float numbers -1 to 1, one for each I and Q in units of full scale.
|
||||||
|
The maximum window length is 0x3fe. The user must ensure that this window
|
||||||
|
does not overlap with other windows in the memory.
|
||||||
|
:param period: Desired window sample period in SI units (4*ns to (4 << 12)*ns).
|
||||||
|
:param order: Interpolation order from 0 (corresponding to
|
||||||
|
constant/zero-order-hold/1st order CIC interpolation) to 3 (corresponding
|
||||||
|
to cubic/Parzen/4th order CIC interpolation)
|
||||||
|
:param head: Update the interpolator settings and clear its state at the start
|
||||||
|
of the window. This also implies starting the envelope from zero.
|
||||||
|
:param tail: Feed zeros into the interpolator after the window samples.
|
||||||
|
In the absence of further pulses this will return the output envelope
|
||||||
|
to zero with the chosen interpolation.
|
||||||
|
:return: Actual sample period in SI units
|
||||||
|
"""
|
||||||
|
rate = int32(round(period/(4*ns)))
|
||||||
|
gain = 1.
|
||||||
|
for _ in range(order):
|
||||||
|
gain *= rate
|
||||||
|
shift = 0
|
||||||
|
while gain >= 2.:
|
||||||
|
shift += 1
|
||||||
|
gain *= .5
|
||||||
|
scale = ((1 << 15) - 1)/gain
|
||||||
|
iq_mu = [
|
||||||
|
(int32(round(iqi[0]*scale)) & 0xffff) |
|
||||||
|
(int32(round(iqi[1]*scale)) << 16)
|
||||||
|
for iqi in iq
|
||||||
|
]
|
||||||
|
self.set_window_mu(start, iq_mu, rate, shift, order, head, tail)
|
||||||
|
return (len(iq) + order)*rate*4*ns
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def encode(self, window, profiles, data):
|
||||||
|
"""Encode window and profile selection
|
||||||
|
|
||||||
|
:param window: Window start address (0 to 0x3ff)
|
||||||
|
:param profiles: List of profile indices for the oscillators. Maximum
|
||||||
|
length 16. Unused oscillators will be set to profile 0.
|
||||||
|
:param data: List of integers to store the encoded data words into.
|
||||||
|
Unused entries will remain untouched. Must contain at least three
|
||||||
|
elements if all oscillators are used and should be initialized to
|
||||||
|
zeros.
|
||||||
|
:return: Number of words from `data` used.
|
||||||
|
"""
|
||||||
|
if len(profiles) > 16:
|
||||||
|
raise ValueError("too many oscillators")
|
||||||
|
if window > 0x3ff:
|
||||||
|
raise ValueError("window start out of bounds")
|
||||||
|
data[0] = window
|
||||||
|
word = 0
|
||||||
|
idx = 10
|
||||||
|
for profile in profiles:
|
||||||
|
if profile > 0x1f:
|
||||||
|
raise ValueError("profile out of bounds")
|
||||||
|
if idx > 32 - 5:
|
||||||
|
word += 1
|
||||||
|
idx = 0
|
||||||
|
data[word] |= profile << idx
|
||||||
|
idx += 5
|
||||||
|
return word + 1
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def pulse_mu(self, data):
|
||||||
|
"""Emit a pulse (encoded)
|
||||||
|
|
||||||
|
The pulse fiducial timing resolution is 4 ns.
|
||||||
|
|
||||||
|
:param data: List of up to 3 words containing an encoded MIQRO pulse as
|
||||||
|
returned by :meth:`encode`.
|
||||||
|
"""
|
||||||
|
word = len(data)
|
||||||
|
delay_mu(-8*word) # back shift to align
|
||||||
|
while word > 0:
|
||||||
|
word -= 1
|
||||||
|
delay_mu(8)
|
||||||
|
# final write sets pulse stb
|
||||||
|
rtio_output(self.base_addr + word, data[word])
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def pulse(self, window, profiles):
|
||||||
|
"""Emit a pulse
|
||||||
|
|
||||||
|
This encodes the window and profiles (see :meth:`encode`) and emits them
|
||||||
|
(see :meth:`pulse_mu`).
|
||||||
|
|
||||||
|
:param window: Window start address (0 to 0x3ff)
|
||||||
|
:param profiles: List of profile indices for the oscillators. Maximum
|
||||||
|
length 16. Unused oscillators will select profile 0.
|
||||||
|
"""
|
||||||
|
data = [0, 0, 0]
|
||||||
|
words = self.encode(window, profiles, data)
|
||||||
|
self.pulse_mu(data[:words])
|
||||||
|
|
|
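As a usage illustration for the new Miqro API (a sketch, not from this diff; it assumes a Miqro-gateware Phaser named ``phaser0`` and the usual ``artiq.experiment`` imports):

    @kernel
    def run(self):
        # sketch only: "phaser0" is an assumed device name
        self.core.reset()
        self.phaser0.init()
        mq = self.phaser0.channel[0].miqro
        # one tone at 10 MHz, half full scale, stored in profile 1
        mq.set_profile(0, profile=1, frequency=10*MHz, amplitude=0.5)
        delay(0.1*ms)
        # a short symmetric envelope, interpolated to ~1 us per sample
        win = [(0.1, 0.), (0.5, 0.), (1.0, 0.), (0.5, 0.), (0.1, 0.)]
        mq.set_window(start=0, iq=win, period=1*us, order=3)
        delay(0.1*ms)
        # trigger the pulse: oscillator 0 plays profile 1, others profile 0
        mq.pulse(window=0, profiles=[1])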
@ -15,24 +15,26 @@ SPI_CS_PGIA = 1 # separate SPI bus, CS used as RCLK
|
||||||
|
|
||||||
|
|
||||||
@portable
|
@portable
|
||||||
def adc_mu_to_volt(data, gain=0):
|
def adc_mu_to_volt(data, gain=0, corrected_fs=True):
|
||||||
"""Convert ADC data in machine units to Volts.
|
"""Convert ADC data in machine units to Volts.
|
||||||
|
|
||||||
:param data: 16 bit signed ADC word
|
:param data: 16 bit signed ADC word
|
||||||
:param gain: PGIA gain setting (0: 1, ..., 3: 1000)
|
:param gain: PGIA gain setting (0: 1, ..., 3: 1000)
|
||||||
|
:param corrected_fs: use corrected ADC FS reference.
|
||||||
|
Should be True for Samplers' revisions after v2.1. False for v2.1 and earlier.
|
||||||
:return: Voltage in Volts
|
:return: Voltage in Volts
|
||||||
"""
|
"""
|
||||||
if gain == 0:
|
if gain == 0:
|
||||||
volt_per_lsb = 20./(1 << 16)
|
volt_per_lsb = 20.48 / (1 << 16) if corrected_fs else 20. / (1 << 16)
|
||||||
elif gain == 1:
|
elif gain == 1:
|
||||||
volt_per_lsb = 2./(1 << 16)
|
volt_per_lsb = 2.048 / (1 << 16) if corrected_fs else 2. / (1 << 16)
|
||||||
elif gain == 2:
|
elif gain == 2:
|
||||||
volt_per_lsb = .2/(1 << 16)
|
volt_per_lsb = .2048 / (1 << 16) if corrected_fs else .2 / (1 << 16)
|
||||||
elif gain == 3:
|
elif gain == 3:
|
||||||
volt_per_lsb = .02/(1 << 16)
|
volt_per_lsb = 0.02048 / (1 << 16) if corrected_fs else .02 / (1 << 16)
|
||||||
else:
|
else:
|
||||||
raise ValueError("invalid gain")
|
raise ValueError("invalid gain")
|
||||||
return data*volt_per_lsb
|
return data * volt_per_lsb
|
||||||
|
|
||||||
|
|
||||||
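A quick host-side check of the changed LSB weights (a sketch; ``adc_mu_to_volt`` is ``@portable`` and can also run on the host):

    from artiq.coredevice.sampler import adc_mu_to_volt

    # with the corrected full scale, one LSB at gain 0 weighs 20.48 V / 65536
    print(adc_mu_to_volt(1 << 14, gain=0, corrected_fs=True))   # ~5.12 V
    print(adc_mu_to_volt(1 << 14, gain=0, corrected_fs=False))  # ~5.00 V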
class Sampler:
|
class Sampler:
|
||||||
|
@ -48,12 +50,13 @@ class Sampler:
|
||||||
:param gains: Initial value for PGIA gains shift register
|
:param gains: Initial value for PGIA gains shift register
|
||||||
(default: 0x0000). Knowledge of this state is not transferred
|
(default: 0x0000). Knowledge of this state is not transferred
|
||||||
between experiments.
|
between experiments.
|
||||||
|
:param hw_rev: Sampler's hardware revision string (default 'v2.2')
|
||||||
:param core_device: Core device name
|
:param core_device: Core device name
|
||||||
"""
|
"""
|
||||||
kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div"}
|
kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div", "corrected_fs"}
|
||||||
|
|
||||||
def __init__(self, dmgr, spi_adc_device, spi_pgia_device, cnv_device,
|
def __init__(self, dmgr, spi_adc_device, spi_pgia_device, cnv_device,
|
||||||
div=8, gains=0x0000, core_device="core"):
|
div=8, gains=0x0000, hw_rev="v2.2", core_device="core"):
|
||||||
self.bus_adc = dmgr.get(spi_adc_device)
|
self.bus_adc = dmgr.get(spi_adc_device)
|
||||||
self.bus_adc.update_xfer_duration_mu(div, 32)
|
self.bus_adc.update_xfer_duration_mu(div, 32)
|
||||||
self.bus_pgia = dmgr.get(spi_pgia_device)
|
self.bus_pgia = dmgr.get(spi_pgia_device)
|
||||||
|
@ -62,6 +65,11 @@ class Sampler:
|
||||||
self.cnv = dmgr.get(cnv_device)
|
self.cnv = dmgr.get(cnv_device)
|
||||||
self.div = div
|
self.div = div
|
||||||
self.gains = gains
|
self.gains = gains
|
||||||
|
self.corrected_fs = self.use_corrected_fs(hw_rev)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def use_corrected_fs(hw_rev):
|
||||||
|
return hw_rev != "v2.1"
|
||||||
|
|
||||||
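The pre-v2.2 behaviour can be requested per device through the new ``hw_rev`` argument; a hypothetical ``device_db.py`` fragment (device and bus names below are placeholders, not taken from this diff):

    device_db["sampler0"] = {
        "type": "local",
        "module": "artiq.coredevice.sampler",
        "class": "Sampler",
        "arguments": {
            "spi_adc_device": "spi_sampler0_adc",
            "spi_pgia_device": "spi_sampler0_pgia",
            "cnv_device": "ttl_sampler0_cnv",
            "hw_rev": "v2.1",  # selects the uncorrected FS reference
        }
    }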
@kernel
|
@kernel
|
||||||
def init(self):
|
def init(self):
|
||||||
|
@ -144,4 +152,4 @@ class Sampler:
|
||||||
for i in range(n):
|
for i in range(n):
|
||||||
channel = i + 8 - len(data)
|
channel = i + 8 - len(data)
|
||||||
gain = (self.gains >> (channel*2)) & 0b11
|
gain = (self.gains >> (channel*2)) & 0b11
|
||||||
data[i] = adc_mu_to_volt(adc_data[i], gain)
|
data[i] = adc_mu_to_volt(adc_data[i], gain, self.corrected_fs)
|
||||||
|
|
|
@ -1,372 +0,0 @@
|
||||||
"""
|
|
||||||
Driver for the Smart Arbitrary Waveform Generator (SAWG) on RTIO.
|
|
||||||
|
|
||||||
The SAWG is an "improved DDS" built in gateware and interfacing to
|
|
||||||
high-speed DACs.
|
|
||||||
|
|
||||||
Output event replacement is supported except on the configuration channel.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
from artiq.language.types import TInt32, TFloat
|
|
||||||
from numpy import int32, int64
|
|
||||||
from artiq.language.core import kernel
|
|
||||||
from artiq.coredevice.spline import Spline
|
|
||||||
from artiq.coredevice.rtio import rtio_output
|
|
||||||
|
|
||||||
|
|
||||||
# sawg.Config addresses
|
|
||||||
_SAWG_DIV = 0
|
|
||||||
_SAWG_CLR = 1
|
|
||||||
_SAWG_IQ_EN = 2
|
|
||||||
# _SAWF_PAD = 3 # reserved
|
|
||||||
_SAWG_OUT_MIN = 4
|
|
||||||
_SAWG_OUT_MAX = 5
|
|
||||||
_SAWG_DUC_MIN = 6
|
|
||||||
_SAWG_DUC_MAX = 7
|
|
||||||
|
|
||||||
|
|
||||||
class Config:
|
|
||||||
"""SAWG configuration.
|
|
||||||
|
|
||||||
Exposes the configurable quantities of a single SAWG channel.
|
|
||||||
|
|
||||||
Access to the configuration registers for a SAWG channel can not
|
|
||||||
be concurrent. There must be at least :attr:`_rtio_interval` machine
|
|
||||||
units of delay between accesses. Replacement is not supported and will
|
|
||||||
lead to an ``RTIOCollision`` as this is likely a programming error.
|
|
||||||
All methods therefore advance the timeline by the duration of one
|
|
||||||
configuration register transfer.
|
|
||||||
|
|
||||||
:param channel: RTIO channel number of the channel.
|
|
||||||
:param core: Core device.
|
|
||||||
"""
|
|
||||||
kernel_invariants = {"channel", "core", "_out_scale", "_duc_scale",
|
|
||||||
"_rtio_interval"}
|
|
||||||
|
|
||||||
def __init__(self, channel, core, cordic_gain=1.):
|
|
||||||
self.channel = channel
|
|
||||||
self.core = core
|
|
||||||
# normalized DAC output
|
|
||||||
self._out_scale = (1 << 15) - 1.
|
|
||||||
# normalized DAC output including DUC cordic gain
|
|
||||||
self._duc_scale = self._out_scale/cordic_gain
|
|
||||||
# configuration channel access interval
|
|
||||||
self._rtio_interval = int64(3*self.core.ref_multiplier)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_div(self, div: TInt32, n: TInt32=0):
|
|
||||||
"""Set the spline evolution divider and current counter value.
|
|
||||||
|
|
||||||
The divider and the spline evolution are synchronized across all
|
|
||||||
spline channels within a SAWG channel. The DDS/DUC phase accumulators
|
|
||||||
always evolve at full speed.
|
|
||||||
|
|
||||||
.. note:: The spline evolution divider has not been tested extensively
|
|
||||||
and is currently considered a technological preview only.
|
|
||||||
|
|
||||||
:param div: Spline evolution divider, such that
|
|
||||||
``t_sawg_spline/t_rtio_coarse = div + 1``. Default: ``0``.
|
|
||||||
:param n: Current value of the counter. Default: ``0``.
|
|
||||||
"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_DIV, div | (n << 16))
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_clr(self, clr0: TInt32, clr1: TInt32, clr2: TInt32):
|
|
||||||
"""Set the accumulator clear mode for the three phase accumulators.
|
|
||||||
|
|
||||||
When the ``clr`` bit for a given DDS/DUC phase accumulator is
|
|
||||||
set, that phase accumulator will be cleared with every phase offset
|
|
||||||
RTIO command and the output phase of the DDS/DUC will be
|
|
||||||
exactly the phase RTIO value ("absolute phase update mode").
|
|
||||||
|
|
||||||
.. math::
|
|
||||||
q^\prime(t) = p^\prime + (t - t^\prime) f^\prime
|
|
||||||
|
|
||||||
In turn, when the bit is cleared, the phase RTIO channels
|
|
||||||
determine a phase offset to the current (carrier-) value of the
|
|
||||||
DDS/DUC phase accumulator. This "relative phase update mode" is
|
|
||||||
sometimes also called “continuous phase mode”.
|
|
||||||
|
|
||||||
.. math::
|
|
||||||
q^\prime(t) = q(t^\prime) + (p^\prime - p) +
|
|
||||||
(t - t^\prime) f^\prime
|
|
||||||
|
|
||||||
Where:
|
|
||||||
|
|
||||||
* :math:`q`, :math:`q^\prime`: old/new phase accumulator
|
|
||||||
* :math:`p`, :math:`p^\prime`: old/new phase offset
|
|
||||||
* :math:`f^\prime`: new frequency
|
|
||||||
* :math:`t^\prime`: timestamp of setting new :math:`p`, :math:`f`
|
|
||||||
* :math:`t`: running time
|
|
||||||
|
|
||||||
:param clr0: Auto-clear phase accumulator of the ``phase0``/
|
|
||||||
``frequency0`` DUC. Default: ``True``
|
|
||||||
:param clr1: Auto-clear phase accumulator of the ``phase1``/
|
|
||||||
``frequency1`` DDS. Default: ``True``
|
|
||||||
:param clr2: Auto-clear phase accumulator of the ``phase2``/
|
|
||||||
``frequency2`` DDS. Default: ``True``
|
|
||||||
"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_CLR, clr0 |
|
|
||||||
(clr1 << 1) | (clr2 << 2))
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_iq_en(self, i_enable: TInt32, q_enable: TInt32):
|
|
||||||
"""Enable I/Q data on this DAC channel.
|
|
||||||
|
|
||||||
Every pair of SAWG channels forms a buddy pair.
|
|
||||||
The ``iq_en`` configuration controls which DDS data is emitted to the
|
|
||||||
DACs.
|
|
||||||
|
|
||||||
Refer to the documentation of :class:`SAWG` for a mathematical
|
|
||||||
description of ``i_enable`` and ``q_enable``.
|
|
||||||
|
|
||||||
.. note:: Quadrature data from the buddy channel is currently
|
|
||||||
a technological preview only. The data is ignored in the SAWG
|
|
||||||
gateware and not added to the DAC output.
|
|
||||||
This is equivalent to the ``q_enable`` switch always being ``0``.
|
|
||||||
|
|
||||||
:param i_enable: Controls adding the in-phase
|
|
||||||
DUC-DDS data of *this* SAWG channel to *this* DAC channel.
|
|
||||||
Default: ``1``.
|
|
||||||
:param q_enable: controls adding the quadrature
|
|
||||||
DUC-DDS data of this SAWG's *buddy* channel to *this* DAC
|
|
||||||
channel. Default: ``0``.
|
|
||||||
"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_IQ_EN, i_enable |
|
|
||||||
(q_enable << 1))
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_duc_max_mu(self, limit: TInt32):
|
|
||||||
"""Set the digital up-converter (DUC) I and Q data summing junctions
|
|
||||||
upper limit. In machine units.
|
|
||||||
|
|
||||||
The default limits are chosen to reach maximum and minimum DAC output
|
|
||||||
amplitude.
|
|
||||||
|
|
||||||
For a description of the limiter functions in normalized units see:
|
|
||||||
|
|
||||||
.. seealso:: :meth:`set_duc_max`
|
|
||||||
"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_DUC_MAX, limit)
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_duc_min_mu(self, limit: TInt32):
|
|
||||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_DUC_MIN, limit)
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_out_max_mu(self, limit: TInt32):
|
|
||||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_OUT_MAX, limit)
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_out_min_mu(self, limit: TInt32):
|
|
||||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
|
||||||
rtio_output((self.channel << 8) | _SAWG_OUT_MIN, limit)
|
|
||||||
delay_mu(self._rtio_interval)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_duc_max(self, limit: TFloat):
|
|
||||||
"""Set the digital up-converter (DUC) I and Q data summing junctions
|
|
||||||
upper limit.
|
|
||||||
|
|
||||||
Each of the three summing junctions has a saturating adder with
|
|
||||||
configurable upper and lower limits. The three summing junctions are:
|
|
||||||
|
|
||||||
* At the in-phase input to the ``phase0``/``frequency0`` fast DUC,
|
|
||||||
after the anti-aliasing FIR filter.
|
|
||||||
* At the quadrature input to the ``phase0``/``frequency0``
|
|
||||||
fast DUC, after the anti-aliasing FIR filter. The in-phase and
|
|
||||||
quadrature data paths both use the same limits.
|
|
||||||
* Before the DAC, where the following three data streams
|
|
||||||
are added together:
|
|
||||||
|
|
||||||
* the output of the ``offset`` spline,
|
|
||||||
* (optionally, depending on ``i_enable``) the in-phase output
|
|
||||||
of the ``phase0``/``frequency0`` fast DUC, and
|
|
||||||
* (optionally, depending on ``q_enable``) the quadrature
|
|
||||||
output of the ``phase0``/``frequency0`` fast DUC of the
|
|
||||||
buddy channel.
|
|
||||||
|
|
||||||
Refer to the documentation of :class:`SAWG` for a mathematical
|
|
||||||
description of the summing junctions.
|
|
||||||
|
|
||||||
:param limit: Limit value ``[-1, 1]``. The output of the limiter will
|
|
||||||
never exceed this limit. The default limits are the full range
|
|
||||||
``[-1, 1]``.
|
|
||||||
|
|
||||||
.. seealso::
|
|
||||||
* :meth:`set_duc_max`: Upper limit of the in-phase and quadrature
|
|
||||||
inputs to the DUC.
|
|
||||||
* :meth:`set_duc_min`: Lower limit of the in-phase and quadrature
|
|
||||||
inputs to the DUC.
|
|
||||||
* :meth:`set_out_max`: Upper limit of the DAC output.
|
|
||||||
* :meth:`set_out_min`: Lower limit of the DAC output.
|
|
||||||
"""
|
|
||||||
self.set_duc_max_mu(int32(round(limit*self._duc_scale)))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_duc_min(self, limit: TFloat):
|
|
||||||
""".. seealso:: :meth:`set_duc_max`"""
|
|
||||||
self.set_duc_min_mu(int32(round(limit*self._duc_scale)))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_out_max(self, limit: TFloat):
|
|
||||||
""".. seealso:: :meth:`set_duc_max`"""
|
|
||||||
self.set_out_max_mu(int32(round(limit*self._out_scale)))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_out_min(self, limit: TFloat):
|
|
||||||
""".. seealso:: :meth:`set_duc_max`"""
|
|
||||||
self.set_out_min_mu(int32(round(limit*self._out_scale)))
|
|
||||||
|
|
||||||
|
|
||||||
class SAWG:
|
|
||||||
"""Smart arbitrary waveform generator channel.
|
|
||||||
The channel is parametrized as: ::
|
|
||||||
|
|
||||||
oscillators = exp(2j*pi*(frequency0*t + phase0))*(
|
|
||||||
amplitude1*exp(2j*pi*(frequency1*t + phase1)) +
|
|
||||||
amplitude2*exp(2j*pi*(frequency2*t + phase2)))
|
|
||||||
|
|
||||||
output = (offset +
|
|
||||||
i_enable*Re(oscillators) +
|
|
||||||
q_enable*Im(buddy_oscillators))
|
|
||||||
|
|
||||||
This parametrization can be viewed as two complex (quadrature) oscillators
|
|
||||||
(``frequency1``/``phase1`` and ``frequency2``/``phase2``) that are
|
|
||||||
executing and sampling at the coarse RTIO frequency. They can represent
|
|
||||||
frequencies within the first Nyquist zone from ``-f_rtio_coarse/2`` to
|
|
||||||
``f_rtio_coarse/2``.
|
|
||||||
|
|
||||||
.. note:: The coarse RTIO frequency ``f_rtio_coarse`` is the inverse of
|
|
||||||
``ref_period*multiplier``. Both are arguments of the ``Core`` device,
|
|
||||||
specified in the device database ``device_db.py``.
|
|
||||||
|
|
||||||
The sum of their outputs is then interpolated by a factor of
|
|
||||||
:attr:`parallelism` (2, 4, 8 depending on the bitstream) using a
|
|
||||||
finite-impulse-response (FIR) anti-aliasing filter (more accurately
|
|
||||||
a half-band filter).
|
|
||||||
|
|
||||||
The filter is followed by a configurable saturating limiter.
|
|
||||||
|
|
||||||
After the limiter, the data is shifted in frequency using a complex
|
|
||||||
digital up-converter (DUC, ``frequency0``/``phase0``) running at
|
|
||||||
:attr:`parallelism` times the coarse RTIO frequency. The first Nyquist
|
|
||||||
zone of the DUC extends from ``-f_rtio_coarse*parallelism/2`` to
|
|
||||||
``f_rtio_coarse*parallelism/2``. Other Nyquist zones are usable depending
|
|
||||||
on the interpolation/modulation options configured in the DAC.
|
|
||||||
|
|
||||||
The real/in-phase data after digital up-conversion can be offset using
|
|
||||||
another spline interpolator ``offset``.
|
|
||||||
|
|
||||||
The ``i_enable``/``q_enable`` switches enable emission of quadrature
|
|
||||||
signals for later analog quadrature mixing distinguishing upper and lower
|
|
||||||
sidebands and thus doubling the bandwidth. They can also be used to emit
|
|
||||||
four-tone signals.
|
|
||||||
|
|
||||||
.. note:: Quadrature data from the buddy channel is currently
|
|
||||||
ignored in the SAWG gateware and not added to the DAC output.
|
|
||||||
This is equivalent to the ``q_enable`` switch always being ``0``.
|
|
||||||
|
|
||||||
The configuration channel and the nine
|
|
||||||
:class:`artiq.coredevice.spline.Spline` interpolators are accessible as
|
|
||||||
attributes:
|
|
||||||
|
|
||||||
* :attr:`config`: :class:`Config`
|
|
||||||
* :attr:`offset`, :attr:`amplitude1`, :attr:`amplitude2`: in units
|
|
||||||
of full scale
|
|
||||||
* :attr:`phase0`, :attr:`phase1`, :attr:`phase2`: in units of turns
|
|
||||||
* :attr:`frequency0`, :attr:`frequency1`, :attr:`frequency2`: in units
|
|
||||||
of Hz
|
|
||||||
|
|
||||||
.. note:: The latencies (pipeline depths) of the nine data channels (i.e.
|
|
||||||
all except :attr:`config`) are matched. Equivalent channels (e.g.
|
|
||||||
:attr:`phase1` and :attr:`phase2`) are exactly matched. Channels of
|
|
||||||
different type or functionality (e.g. :attr:`offset` vs
|
|
||||||
:attr:`amplitude1`, DDS vs DUC, :attr:`phase0` vs :attr:`phase1`) are
|
|
||||||
only matched to within one coarse RTIO cycle.
|
|
||||||
|
|
||||||
:param channel_base: RTIO channel number of the first channel (amplitude).
|
|
||||||
The configuration channel and frequency/phase/amplitude channels are
|
|
||||||
then assumed to be successive channels.
|
|
||||||
:param parallelism: Number of output samples per coarse RTIO clock cycle.
|
|
||||||
:param core_device: Name of the core device that this SAWG is on.
|
|
||||||
"""
|
|
||||||
kernel_invariants = {"channel_base", "core", "parallelism",
|
|
||||||
"amplitude1", "frequency1", "phase1",
|
|
||||||
"amplitude2", "frequency2", "phase2",
|
|
||||||
"frequency0", "phase0", "offset"}
|
|
||||||
|
|
||||||
def __init__(self, dmgr, channel_base, parallelism, core_device="core"):
|
|
||||||
self.core = dmgr.get(core_device)
|
|
||||||
self.channel_base = channel_base
|
|
||||||
self.parallelism = parallelism
|
|
||||||
width = 16
|
|
||||||
time_width = 16
|
|
||||||
cordic_gain = 1.646760258057163 # Cordic(width=16, guard=None).gain
|
|
||||||
head_room = 1.001
|
|
||||||
self.config = Config(channel_base, self.core, cordic_gain)
|
|
||||||
self.offset = Spline(width, time_width, channel_base + 1,
|
|
||||||
self.core, 2.*head_room)
|
|
||||||
self.amplitude1 = Spline(width, time_width, channel_base + 2,
|
|
||||||
self.core, 2*head_room*cordic_gain**2)
|
|
||||||
self.frequency1 = Spline(3*width, time_width, channel_base + 3,
|
|
||||||
self.core, 1/self.core.coarse_ref_period)
|
|
||||||
self.phase1 = Spline(width, time_width, channel_base + 4,
|
|
||||||
self.core, 1.)
|
|
||||||
self.amplitude2 = Spline(width, time_width, channel_base + 5,
|
|
||||||
self.core, 2*head_room*cordic_gain**2)
|
|
||||||
self.frequency2 = Spline(3*width, time_width, channel_base + 6,
|
|
||||||
self.core, 1/self.core.coarse_ref_period)
|
|
||||||
self.phase2 = Spline(width, time_width, channel_base + 7,
|
|
||||||
self.core, 1.)
|
|
||||||
self.frequency0 = Spline(2*width, time_width, channel_base + 8,
|
|
||||||
self.core,
|
|
||||||
parallelism/self.core.coarse_ref_period)
|
|
||||||
self.phase0 = Spline(width, time_width, channel_base + 9,
|
|
||||||
self.core, 1.)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def reset(self):
|
|
||||||
"""Re-establish initial conditions.
|
|
||||||
|
|
||||||
This clears all spline interpolators, accumulators and configuration
|
|
||||||
settings.
|
|
||||||
|
|
||||||
This method advances the timeline by the time required to perform all
|
|
||||||
7 writes to the configuration channel, plus 9 coarse RTIO cycles.
|
|
||||||
"""
|
|
||||||
self.config.set_div(0, 0)
|
|
||||||
self.config.set_clr(1, 1, 1)
|
|
||||||
self.config.set_iq_en(1, 0)
|
|
||||||
self.config.set_duc_min(-1.)
|
|
||||||
self.config.set_duc_max(1.)
|
|
||||||
self.config.set_out_min(-1.)
|
|
||||||
self.config.set_out_max(1.)
|
|
||||||
self.frequency0.set_mu(0)
|
|
||||||
coarse_cycle = int64(self.core.ref_multiplier)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.frequency1.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.frequency2.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.phase0.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.phase1.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.phase2.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.amplitude1.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.amplitude2.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
||||||
self.offset.set_mu(0)
|
|
||||||
delay_mu(coarse_cycle)
|
|
|
@ -1,54 +0,0 @@
|
||||||
from artiq.language.core import kernel, delay
|
|
||||||
from artiq.language.units import us
|
|
||||||
|
|
||||||
|
|
||||||
class ShiftReg:
|
|
||||||
"""Driver for shift registers/latch combos connected to TTLs"""
|
|
||||||
kernel_invariants = {"dt", "n"}
|
|
||||||
|
|
||||||
def __init__(self, dmgr, clk, ser, latch, n=32, dt=10*us, ser_in=None):
|
|
||||||
self.core = dmgr.get("core")
|
|
||||||
self.clk = dmgr.get(clk)
|
|
||||||
self.ser = dmgr.get(ser)
|
|
||||||
self.latch = dmgr.get(latch)
|
|
||||||
self.n = n
|
|
||||||
self.dt = dt
|
|
||||||
if ser_in is not None:
|
|
||||||
self.ser_in = dmgr.get(ser_in)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set(self, data):
|
|
||||||
"""Sets the values of the latch outputs. This does not
|
|
||||||
advance the timeline and the waveform is generated before
|
|
||||||
`now`."""
|
|
||||||
delay(-2*(self.n + 1)*self.dt)
|
|
||||||
for i in range(self.n):
|
|
||||||
if (data >> (self.n-i-1)) & 1 == 0:
|
|
||||||
self.ser.off()
|
|
||||||
else:
|
|
||||||
self.ser.on()
|
|
||||||
self.clk.off()
|
|
||||||
delay(self.dt)
|
|
||||||
self.clk.on()
|
|
||||||
delay(self.dt)
|
|
||||||
self.clk.off()
|
|
||||||
self.latch.on()
|
|
||||||
delay(self.dt)
|
|
||||||
self.latch.off()
|
|
||||||
delay(self.dt)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def get(self):
|
|
||||||
delay(-2*(self.n + 1)*self.dt)
|
|
||||||
data = 0
|
|
||||||
for i in range(self.n):
|
|
||||||
data <<= 1
|
|
||||||
self.ser_in.sample_input()
|
|
||||||
if self.ser_in.sample_get():
|
|
||||||
data |= 1
|
|
||||||
delay(self.dt)
|
|
||||||
self.clk.on()
|
|
||||||
delay(self.dt)
|
|
||||||
self.clk.off()
|
|
||||||
delay(self.dt)
|
|
||||||
return data
|
|
|
@ -0,0 +1,623 @@
|
||||||
|
from artiq.language.core import *
|
||||||
|
from artiq.language.types import *
|
||||||
|
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||||
|
from artiq.coredevice import spi2 as spi
|
||||||
|
from artiq.language.units import us
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def shuttler_volt_to_mu(volt):
|
||||||
|
"""Return the equivalent DAC code. Valid input range is from -10 to
|
||||||
|
10 - LSB.
|
||||||
|
"""
|
||||||
|
return round((1 << 16) * (volt / 20.0)) & 0xffff
|
||||||
|
|
||||||
|
|
||||||
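A quick host-side check of the conversion (a sketch; it assumes the new module is importable as ``artiq.coredevice.shuttler``):

    from artiq.coredevice.shuttler import shuttler_volt_to_mu

    # 16 bit two's complement code over the ±10 V range
    print(hex(shuttler_volt_to_mu(5.0)))    # 0x4000
    print(hex(shuttler_volt_to_mu(-5.0)))   # 0xc000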
|
class Config:
|
||||||
|
"""Shuttler configuration registers interface.
|
||||||
|
|
||||||
|
The configuration registers control waveform phase auto-clear, and pre-DAC
|
||||||
|
gain & offset values for calibration with ADC on the Shuttler AFE card.
|
||||||
|
|
||||||
|
To find the calibrated DAC code, the Shuttler Core first multiplies the
|
||||||
|
output data with pre-DAC gain, then adds the offset.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The DAC code is capped at 0x7fff and 0x8000.
|
||||||
|
|
||||||
|
:param channel: RTIO channel number of this interface.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariants = {
|
||||||
|
"core", "channel", "target_base", "target_read",
|
||||||
|
"target_gain", "target_offset", "target_clr"
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, channel, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.channel = channel
|
||||||
|
self.target_base = channel << 8
|
||||||
|
self.target_read = 1 << 6
|
||||||
|
self.target_gain = 0 * (1 << 4)
|
||||||
|
self.target_offset = 1 * (1 << 4)
|
||||||
|
self.target_clr = 1 * (1 << 5)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_clr(self, clr):
|
||||||
|
"""Set/Unset waveform phase clear bits.
|
||||||
|
|
||||||
|
Each bit corresponds to a Shuttler waveform generator core. Setting a
|
||||||
|
clear bit forces the Shuttler Core to clear the phase accumulator on
|
||||||
|
waveform trigger (See :class:`Trigger` for the trigger method).
|
||||||
|
Otherwise, the phase accumulator increments from its original value.
|
||||||
|
|
||||||
|
:param clr: Waveform phase clear bits. The MSB corresponds to Channel
|
||||||
|
15, LSB corresponds to Channel 0.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_clr, clr)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_gain(self, channel, gain):
|
||||||
|
"""Set the 16-bits pre-DAC gain register of a Shuttler Core channel.
|
||||||
|
|
||||||
|
The `gain` parameter represents the decimal portion of the gain
|
||||||
|
factor. The MSB represents 0.5 and the sign bit. Hence, the valid
|
||||||
|
total gain value (1 +/- 0.gain) ranges from 0.5 to 1.5 - LSB.
|
||||||
|
|
||||||
|
:param channel: Shuttler Core channel to be configured.
|
||||||
|
:param gain: Shuttler Core channel gain.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_gain | channel, gain)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def get_gain(self, channel):
|
||||||
|
"""Return the pre-DAC gain value of a Shuttler Core channel.
|
||||||
|
|
||||||
|
:param channel: The Shuttler Core channel.
|
||||||
|
:return: Pre-DAC gain value. See :meth:`set_gain`.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_gain |
|
||||||
|
self.target_read | channel, 0)
|
||||||
|
return rtio_input_data(self.channel)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_offset(self, channel, offset):
|
||||||
|
"""Set the 16-bits pre-DAC offset register of a Shuttler Core channel.
|
||||||
|
|
||||||
|
.. seealso::
|
||||||
|
:meth:`shuttler_volt_to_mu`
|
||||||
|
|
||||||
|
:param channel: Shuttler Core channel to be configured.
|
||||||
|
:param offset: Shuttler Core channel offset.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_offset | channel, offset)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def get_offset(self, channel):
|
||||||
|
"""Return the pre-DAC offset value of a Shuttler Core channel.
|
||||||
|
|
||||||
|
:param channel: The Shuttler Core channel.
|
||||||
|
:return: Pre-DAC offset value. See :meth:`set_offset`.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_offset |
|
||||||
|
self.target_read | channel, 0)
|
||||||
|
return rtio_input_data(self.channel)
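
# --- Illustrative sketch, not part of the driver ---
# The pre-DAC gain register holds the total gain as a 16-bit fraction of full
# scale, consistent with how ADC.calibrate() computes its gain code further
# below. A hypothetical host-side helper for choosing a register value:

def total_gain_to_gain_code(total_gain):
    # Valid total gain is roughly [0.5, 1.5 - 2**-16); 1.0 wraps to 0x0000.
    assert 0.5 <= total_gain < 1.5
    return int(round(total_gain * (1 << 16))) & 0xffff

# Example: a channel whose measured slope is 0.985 V/V would be compensated
# with total_gain_to_gain_code(1 / 0.985) == 0x03e6, written via
# Config.set_gain(channel, 0x03e6).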


class DCBias:
    """Shuttler Core cubic DC-bias spline.

    A Shuttler channel can generate a waveform `w(t)` that is the sum of a
    cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
    spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where

    .. math::
        w(t) = a(t) + b(t) * cos(c(t))

    and `t` corresponds to time in seconds.
    This class controls the cubic spline `a(t)`, in which

    .. math::
        a(t) = p_0 + p_1t + \\frac{p_2t^2}{2} + \\frac{p_3t^3}{6}

    and `a(t)` is in Volt.

    :param channel: RTIO channel number of this DC-bias spline interface.
    :param core_device: Core device name.
    """
    kernel_invariants = {"core", "channel", "target_o"}

    def __init__(self, dmgr, channel, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel = channel
        self.target_o = channel << 8

    @kernel
    def set_waveform(self, a0: TInt32, a1: TInt32, a2: TInt64, a3: TInt64):
        """Set the DC-bias spline waveform.

        Given `a(t)` as defined in :class:`DCBias`, the coefficients should be
        configured by the following formulae.

        .. math::
            T &= 8*10^{-9}

            a_0 &= p_0

            a_1 &= p_1T + \\frac{p_2T^2}{2} + \\frac{p_3T^3}{6}

            a_2 &= p_2T^2 + p_3T^3

            a_3 &= p_3T^3

        :math:`a_0`, :math:`a_1`, :math:`a_2` and :math:`a_3` are 16, 32, 48
        and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
        machine unit conversion.

        Note: The waveform is not updated to the Shuttler Core until
        triggered. See :class:`Trigger` for the update triggering mechanism.

        :param a0: The :math:`a_0` coefficient in machine units.
        :param a1: The :math:`a_1` coefficient in machine units.
        :param a2: The :math:`a_2` coefficient in machine units.
        :param a3: The :math:`a_3` coefficient in machine units.
        """
        coef_words = [
            a0,
            a1,
            a1 >> 16,
            a2 & 0xFFFF,
            (a2 >> 16) & 0xFFFF,
            (a2 >> 32) & 0xFFFF,
            a3 & 0xFFFF,
            (a3 >> 16) & 0xFFFF,
            (a3 >> 32) & 0xFFFF,
        ]

        for i in range(len(coef_words)):
            rtio_output(self.target_o | i, coef_words[i])
            delay_mu(int64(self.core.ref_multiplier))
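
# --- Illustrative sketch, not part of the driver ---
# Worked example of the coefficient formulae above, assuming the stated
# T = 8 ns update period. For a pure linear ramp a(t) = p_0 + p_1*t with
# p_0 = 0 V and p_1 = 1000 V/s (1 V per millisecond):
#
#   a_0 = p_0                             = 0 V
#   a_1 = p_1*T + p_2*T^2/2 + p_3*T^3/6   = 1000 * 8e-9 = 8e-6 V
#   a_2 = p_2*T^2 + p_3*T^3               = 0
#   a_3 = p_3*T^3                         = 0
#
# i.e. the output gains 8 uV of DC bias per 8 ns step. Each a_n still has to
# be converted to machine units (see shuttler_volt_to_mu()) before being
# passed to DCBias.set_waveform(); the widths quoted in the docstring (16, 32,
# 48, 48 bits) determine how many fractional bits each word carries.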


class DDS:
    """Shuttler Core DDS spline.

    A Shuttler channel can generate a waveform `w(t)` that is the sum of a
    cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
    spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where

    .. math::
        w(t) = a(t) + b(t) * cos(c(t))

    and `t` corresponds to time in seconds.
    This class controls the cubic spline `b(t)` and the quadratic spline
    `c(t)`, in which

    .. math::
        b(t) &= g * (q_0 + q_1t + \\frac{q_2t^2}{2} + \\frac{q_3t^3}{6})

        c(t) &= r_0 + r_1t + \\frac{r_2t^2}{2}

    Here `b(t)` is in Volt and `c(t)` is in number of turns. Note that `b(t)`
    carries a constant gain factor of :math:`g=1.64676`.

    :param channel: RTIO channel number of this DDS spline interface.
    :param core_device: Core device name.
    """
    kernel_invariants = {"core", "channel", "target_o"}

    def __init__(self, dmgr, channel, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel = channel
        self.target_o = channel << 8

    @kernel
    def set_waveform(self, b0: TInt32, b1: TInt32, b2: TInt64, b3: TInt64,
                     c0: TInt32, c1: TInt32, c2: TInt32):
        """Set the DDS spline waveform.

        Given `b(t)` and `c(t)` as defined in :class:`DDS`, the coefficients
        should be configured by the following formulae.

        .. math::
            T &= 8*10^{-9}

            b_0 &= q_0

            b_1 &= q_1T + \\frac{q_2T^2}{2} + \\frac{q_3T^3}{6}

            b_2 &= q_2T^2 + q_3T^3

            b_3 &= q_3T^3

            c_0 &= r_0

            c_1 &= r_1T + \\frac{r_2T^2}{2}

            c_2 &= r_2T^2

        :math:`b_0`, :math:`b_1`, :math:`b_2` and :math:`b_3` are 16, 32, 48
        and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
        machine unit conversion. :math:`c_0`, :math:`c_1` and :math:`c_2` are
        16, 32 and 32 bits in width respectively.

        Note: The waveform is not updated to the Shuttler Core until
        triggered. See :class:`Trigger` for the update triggering mechanism.

        :param b0: The :math:`b_0` coefficient in machine units.
        :param b1: The :math:`b_1` coefficient in machine units.
        :param b2: The :math:`b_2` coefficient in machine units.
        :param b3: The :math:`b_3` coefficient in machine units.
        :param c0: The :math:`c_0` coefficient in machine units.
        :param c1: The :math:`c_1` coefficient in machine units.
        :param c2: The :math:`c_2` coefficient in machine units.
        """
        coef_words = [
            b0,
            b1,
            b1 >> 16,
            b2 & 0xFFFF,
            (b2 >> 16) & 0xFFFF,
            (b2 >> 32) & 0xFFFF,
            b3 & 0xFFFF,
            (b3 >> 16) & 0xFFFF,
            (b3 >> 32) & 0xFFFF,
            c0,
            c1,
            c1 >> 16,
            c2,
            c2 >> 16,
        ]

        for i in range(len(coef_words)):
            rtio_output(self.target_o | i, coef_words[i])
            delay_mu(int64(self.core.ref_multiplier))
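
# --- Illustrative sketch, not part of the driver ---
# Worked example of the DDS coefficient formulae above, assuming the stated
# T = 8 ns update period. For a fixed-amplitude, fixed-frequency tone
# b(t) = A, c(t) = f*t with A = 1 V and f = 1 MHz (c is in turns, so r_1 = f):
#
#   b_0 = q_0 = A = 1 V              (b_1 = b_2 = b_3 = 0)
#   c_0 = r_0 = 0 turns
#   c_1 = r_1*T = 1e6 * 8e-9 = 8e-3 turns per step
#   c_2 = 0
#
# Each value still has to be converted to machine units with the widths
# quoted in the docstring before being passed to DDS.set_waveform().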


class Trigger:
    """Shuttler Core spline coefficients update trigger.

    :param channel: RTIO channel number of the trigger interface.
    :param core_device: Core device name.
    """
    kernel_invariants = {"core", "channel", "target_o"}

    def __init__(self, dmgr, channel, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel = channel
        self.target_o = channel << 8

    @kernel
    def trigger(self, trig_out):
        """Trigger coefficient updates of one or more Shuttler Core channels.

        Each bit corresponds to a Shuttler waveform generator core. Setting
        ``trig_out`` bits commits the pending coefficient updates (from
        ``set_waveform`` in :class:`DCBias` and :class:`DDS`) to the Shuttler
        Core synchronously.

        :param trig_out: Coefficient update trigger bits. The MSB corresponds
            to Channel 15, the LSB corresponds to Channel 0.
        """
        rtio_output(self.target_o, trig_out)
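
# --- Illustrative sketch, not part of the driver ---
# Minimal kernel showing the intended update sequence: load the spline words
# for a channel with set_waveform(), then commit them with the trigger. The
# device names ("shuttler0_dcbias0", "shuttler0_trigger") and the import path
# of shuttler_volt_to_mu (defined earlier in this module) are assumptions for
# the example and depend on the device database.

from artiq.experiment import EnvExperiment, kernel
from artiq.coredevice.shuttler import shuttler_volt_to_mu


class ShuttlerStep(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_dcbias0")
        self.setattr_device("shuttler0_trigger")

    @kernel
    def run(self):
        self.core.reset()
        # 1 V DC level on waveform generator 0: only a0 is non-zero
        self.shuttler0_dcbias0.set_waveform(shuttler_volt_to_mu(1.0), 0, 0, 0)
        # commit the pending coefficients of channel 0 only
        self.shuttler0_trigger.trigger(0b1)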


RELAY_SPI_CONFIG = (0*spi.SPI_OFFLINE | 1*spi.SPI_END |
                    0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
                    0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE |
                    0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)

ADC_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END |
                  0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
                  1*spi.SPI_CLK_POLARITY | 1*spi.SPI_CLK_PHASE |
                  0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)

# SPI clock write and read dividers
# CS should assert at least 9.5 ns after clk pulse
SPIT_RELAY_WR = 4
# 25 ns high/low pulse hold (limiting for write)
SPIT_ADC_WR = 4
SPIT_ADC_RD = 16

# SPI CS line
CS_RELAY = 1 << 0
CS_LED = 1 << 1
CS_ADC = 1 << 0

# Referenced AD4115 registers
_AD4115_REG_STATUS = 0x00
_AD4115_REG_ADCMODE = 0x01
_AD4115_REG_DATA = 0x04
_AD4115_REG_ID = 0x07
_AD4115_REG_CH0 = 0x10
_AD4115_REG_SETUPCON0 = 0x20

class Relay:
    """Shuttler AFE relay switches.

    This class controls the AFE relay switches and the LEDs. Switch the relay
    on to enable the AFE output, and off to disable it. The LEDs indicate the
    relay status.

    .. note::
        The relay does not disable ADC measurements. The voltage of any
        channel can still be read by the ADC even after switching off the
        relays.

    :param spi_device: SPI bus device name.
    :param core_device: Core device name.
    """
    kernel_invariants = {"core", "bus"}

    def __init__(self, dmgr, spi_device, core_device="core"):
        self.core = dmgr.get(core_device)
        self.bus = dmgr.get(spi_device)

    @kernel
    def init(self):
        """Initialize the SPI device.

        Configures the SPI bus to 16-bit, write-only, simultaneous relay
        switch and LED control.
        """
        self.bus.set_config_mu(
            RELAY_SPI_CONFIG, 16, SPIT_RELAY_WR, CS_RELAY | CS_LED)

    @kernel
    def enable(self, en: TInt32):
        """Enable/disable the relay switches of the corresponding channels.

        Each bit corresponds to the relay switch of a channel. Asserting a bit
        turns on the corresponding relay switch; deasserting the same bit
        turns it off.

        :param en: Switch enable bits. The MSB corresponds to Channel 15, the
            LSB corresponds to Channel 0.
        """
        self.bus.write(en << 16)
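
    # Illustrative note: the enable mask is one bit per channel, LSB =
    # channel 0. For example, turning on only the AFE outputs (and LEDs) of
    # channels 0 and 3 of a Relay instance `relay` obtained from the device
    # database:
    #
    #     relay.init()
    #     relay.enable(0b1001)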


class ADC:
    """Shuttler AFE ADC (AD4115) driver.

    :param spi_device: SPI bus device name.
    :param core_device: Core device name.
    """
    kernel_invariants = {"core", "bus"}

    def __init__(self, dmgr, spi_device, core_device="core"):
        self.core = dmgr.get(core_device)
        self.bus = dmgr.get(spi_device)

    @kernel
    def read_id(self) -> TInt32:
        """Read the product ID of the ADC.

        The expected return value is 0x38DX; the 4 LSBs are don't-cares.

        :return: The read-back product ID.
        """
        return self.read16(_AD4115_REG_ID)

    @kernel
    def reset(self):
        """AD4115 reset procedure.

        This performs a write operation of 96 serial clock cycles with DIN
        held high. It resets the entire device, including the register
        contents.

        .. note::
            The datasheet only requires 64 cycles, but reasserting ``CS_n``
            right after the transfer appears to interrupt the start-up
            sequence.
        """
        self.bus.set_config_mu(ADC_SPI_CONFIG, 32, SPIT_ADC_WR, CS_ADC)
        self.bus.write(0xffffffff)
        self.bus.write(0xffffffff)
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
        self.bus.write(0xffffffff)

    @kernel
    def read8(self, addr: TInt32) -> TInt32:
        """Read from an 8-bit register.

        :param addr: Register address.
        :return: Read-back register content.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
            16, SPIT_ADC_RD, CS_ADC)
        self.bus.write((addr | 0x40) << 24)
        return self.bus.read() & 0xff

    @kernel
    def read16(self, addr: TInt32) -> TInt32:
        """Read from a 16-bit register.

        :param addr: Register address.
        :return: Read-back register content.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
            24, SPIT_ADC_RD, CS_ADC)
        self.bus.write((addr | 0x40) << 24)
        return self.bus.read() & 0xffff

    @kernel
    def read24(self, addr: TInt32) -> TInt32:
        """Read from a 24-bit register.

        :param addr: Register address.
        :return: Read-back register content.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
            32, SPIT_ADC_RD, CS_ADC)
        self.bus.write((addr | 0x40) << 24)
        return self.bus.read() & 0xffffff

    @kernel
    def write8(self, addr: TInt32, data: TInt32):
        """Write to an 8-bit register.

        :param addr: Register address.
        :param data: Data to be written.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END, 16, SPIT_ADC_WR, CS_ADC)
        self.bus.write(addr << 24 | (data & 0xff) << 16)

    @kernel
    def write16(self, addr: TInt32, data: TInt32):
        """Write to a 16-bit register.

        :param addr: Register address.
        :param data: Data to be written.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END, 24, SPIT_ADC_WR, CS_ADC)
        self.bus.write(addr << 24 | (data & 0xffff) << 8)

    @kernel
    def write24(self, addr: TInt32, data: TInt32):
        """Write to a 24-bit register.

        :param addr: Register address.
        :param data: Data to be written.
        """
        self.bus.set_config_mu(
            ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
        self.bus.write(addr << 24 | (data & 0xffffff))

    @kernel
    def read_ch(self, channel: TInt32) -> TFloat:
        """Sample a Shuttler channel on the AFE.

        It performs a single conversion using profile 0 and setup 0 on the
        selected channel. The sample is then retrieved and converted to volts.

        :param channel: Shuttler channel to be sampled.
        :return: Voltage sample in volts.
        """
        # Always configure Profile 0 for single conversion
        self.write16(_AD4115_REG_CH0, 0x8000 | ((channel * 2 + 1) << 4))
        self.write16(_AD4115_REG_SETUPCON0, 0x1300)
        self.single_conversion()

        delay(100*us)
        adc_code = self.read24(_AD4115_REG_DATA)
        return ((adc_code / (1 << 23)) - 1) * 2.5 / 0.1
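
    # Illustrative worked example of the conversion above: the returned
    # 24-bit code is offset-binary, so mid-scale 0x800000 corresponds to 0 V.
    # With the 2.5 reference factor and the 0.1 front-end scaling used in the
    # formula,
    #     code 0x900000 -> (0x900000 / 2**23 - 1) * 2.5 / 0.1 = +3.125 V
    #     code 0x800000 -> 0 V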

    @kernel
    def single_conversion(self):
        """Place the ADC in single conversion mode.

        The ADC returns to standby mode after the conversion is complete.
        """
        self.write16(_AD4115_REG_ADCMODE, 0x8010)

    @kernel
    def standby(self):
        """Place the ADC in standby mode and power down the clock.

        The ADC can be returned to single conversion mode by calling
        :meth:`single_conversion`.
        """
        # Selecting internal XO (0b00) also disables the clock during standby
        self.write16(_AD4115_REG_ADCMODE, 0x8020)

    @kernel
    def power_down(self):
        """Place the ADC in power-down mode.

        The ADC must be reset before returning to other modes.

        .. note::
            The AD4115 datasheet suggests placing the ADC in standby mode
            before power-down. This is to prevent accidental entry into
            power-down mode.

        .. seealso::
            :meth:`standby`

            :meth:`power_up`
        """
        self.write16(_AD4115_REG_ADCMODE, 0x8030)

    @kernel
    def power_up(self):
        """Exit the ADC power-down mode.

        The ADC should be in power-down mode before calling this method.

        .. seealso::
            :meth:`power_down`
        """
        self.reset()
        # Although the datasheet claims a 500 us reset wait time, waiting for
        # only ~500 us can leave the DOUT pin stuck high
        delay(2500*us)

    @kernel
    def calibrate(self, volts, trigger, config, samples=[-5.0, 0.0, 5.0]):
        """Calibrate the Shuttler waveform generator using the ADC on the AFE.

        It finds the average slope and average offset from the sample
        voltages, and compensates for them by writing the pre-DAC gain and
        offset registers in the configuration registers.

        .. note::
            If the pre-calibration slope is less than 1, the calibration
            procedure will introduce a pre-DAC gain compensation. However,
            this may saturate the pre-DAC voltage code (see :class:`Config`
            notes). Shuttler cannot cover the entire +/- 10 V range in this
            case.

        .. seealso::
            :meth:`Config.set_gain`

            :meth:`Config.set_offset`

        :param volts: A list of all 16 cubic DC-bias splines.
            (See :class:`DCBias`)
        :param trigger: The Shuttler spline coefficient update trigger.
        :param config: The Shuttler Core configuration registers.
        :param samples: A list of sample voltages for calibration. There must
            be at least 2 samples to perform the slope calculation.
        """
        assert len(volts) == 16
        assert len(samples) > 1

        measurements = [0.0] * len(samples)

        for ch in range(16):
            # Find the average slope rate and offset
            for i in range(len(samples)):
                self.core.break_realtime()
                volts[ch].set_waveform(
                    shuttler_volt_to_mu(samples[i]), 0, 0, 0)
                trigger.trigger(1 << ch)
                measurements[i] = self.read_ch(ch)

            # Find the average output slope
            slope_sum = 0.0
            for i in range(len(samples) - 1):
                slope_sum += (measurements[i+1] - measurements[i])/(samples[i+1] - samples[i])
            slope_avg = slope_sum / (len(samples) - 1)

            gain_code = int32(1 / slope_avg * (2 ** 16)) & 0xffff

            # Scale the measurements by 1/slope, find the average offset
            offset_sum = 0.0
            for i in range(len(samples)):
                offset_sum += (measurements[i] / slope_avg) - samples[i]
            offset_avg = offset_sum / len(samples)

            offset_code = shuttler_volt_to_mu(-offset_avg)

            self.core.break_realtime()
            config.set_gain(ch, gain_code)

            delay_mu(int64(self.core.ref_multiplier))
            config.set_offset(ch, offset_code)
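

# --- Illustrative sketch, not part of the driver ---
# Typical use of ADC.calibrate(): gather the 16 DC-bias splines, the trigger
# and the Config interface, then let the AFE ADC measure and compensate each
# channel. The device names are assumptions for the example, and any AFE/ADC
# initialization required beforehand is assumed to have been done already.

from artiq.experiment import EnvExperiment, kernel


class ShuttlerCalibrate(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_config")
        self.setattr_device("shuttler0_trigger")
        self.setattr_device("shuttler0_adc")
        self.dcbias = [self.get_device("shuttler0_dcbias{}".format(i))
                       for i in range(16)]

    @kernel
    def run(self):
        self.core.reset()
        self.shuttler0_adc.calibrate(
            self.dcbias, self.shuttler0_trigger, self.shuttler0_config)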

@@ -72,6 +72,10 @@ class SPIMaster:
         self.channel = channel
         self.update_xfer_duration_mu(div, length)
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @portable
     def frequency_to_div(self, f):
         """Convert a SPI clock frequency to the closest SPI clock divider."""

@@ -273,9 +277,8 @@ class NRTSPIMaster:
     def set_config_mu(self, flags=0, length=8, div=6, cs=1):
         """Set the ``config`` register.
 
-        Note that the non-realtime SPI cores are usually clocked by the system
-        clock and not the RTIO clock. In many cases, the SPI configuration is
-        already set by the firmware and you do not need to call this method.
+        In many cases, the SPI configuration is already set by the firmware
+        and you do not need to call this method.
         """
         spi_set_config(self.busno, flags, length, div, cs)
@@ -1,228 +0,0 @@
from numpy import int32, int64

from artiq.language.core import kernel, portable, delay
from artiq.coredevice.rtio import rtio_output, rtio_output_wide
from artiq.language.types import TInt32, TInt64, TFloat


class Spline:
    r"""Spline interpolating RTIO channel.

    One knot of a polynomial basis spline (B-spline) :math:`u(t)`
    is defined by the coefficients :math:`u_n` up to order :math:`n = k`.
    If the coefficients are evaluated starting at time :math:`t_0`,
    the output :math:`u(t)` for :math:`t > t_0, t_0` is:

    .. math::
        u(t) &= \sum_{n=0}^k \frac{u_n}{n!} (t - t_0)^n \\
             &= u_0 + u_1 (t - t_0) + \frac{u_2}{2} (t - t_0)^2 + \dots

    This class contains multiple methods to convert spline knot data from SI
    to machine units and multiple methods that set the current spline
    coefficient data. None of these advance the timeline. The :meth:`smooth`
    method is the only method that advances the timeline.

    :param width: Width in bits of the quantity that this spline controls
    :param time_width: Width in bits of the time counter of this spline
    :param channel: RTIO channel number
    :param core_device: Core device that this spline is attached to
    :param scale: Scale for conversion between machine units and physical
        units; to be given as the "full scale physical value".
    """

    kernel_invariants = {"channel", "core", "scale", "width",
                         "time_width", "time_scale"}

    def __init__(self, width, time_width, channel, core_device, scale=1.):
        self.core = core_device
        self.channel = channel
        self.width = width
        self.scale = float((int64(1) << width) / scale)
        self.time_width = time_width
        self.time_scale = float((1 << time_width) *
                                core_device.coarse_ref_period)

    @portable(flags={"fast-math"})
    def to_mu(self, value: TFloat) -> TInt32:
        """Convert floating point ``value`` from physical units to 32 bit
        integer machine units."""
        return int32(round(value*self.scale))

    @portable(flags={"fast-math"})
    def from_mu(self, value: TInt32) -> TFloat:
        """Convert 32 bit integer ``value`` from machine units to floating
        point physical units."""
        return value/self.scale

    @portable(flags={"fast-math"})
    def to_mu64(self, value: TFloat) -> TInt64:
        """Convert floating point ``value`` from physical units to 64 bit
        integer machine units."""
        return int64(round(value*self.scale))

    @kernel
    def set_mu(self, value: TInt32):
        """Set spline value (machine units).

        :param value: Spline value in integer machine units.
        """
        rtio_output(self.channel << 8, value)

    @kernel(flags={"fast-math"})
    def set(self, value: TFloat):
        """Set spline value.

        :param value: Spline value relative to full-scale.
        """
        if self.width > 32:
            l = [int32(0)] * 2
            self.pack_coeff_mu([self.to_mu64(value)], l)
            rtio_output_wide(self.channel << 8, l)
        else:
            rtio_output(self.channel << 8, self.to_mu(value))

    @kernel
    def set_coeff_mu(self, value):  # TList(TInt32)
        """Set spline raw values.

        :param value: Spline packed raw values.
        """
        rtio_output_wide(self.channel << 8, value)

    @portable(flags={"fast-math"})
    def pack_coeff_mu(self, coeff, packed):  # TList(TInt64), TList(TInt32)
        """Pack coefficients into RTIO data

        :param coeff: TList(TInt64) list of machine units spline coefficients.
            Lowest (zeroth) order first. The coefficient list is zero-extended
            by the RTIO gateware.
        :param packed: TList(TInt32) list for packed RTIO data. Must be
            pre-allocated. Length in bits is
            ``n*width + (n - 1)*n//2*time_width``
        """
        pos = 0
        for i in range(len(coeff)):
            wi = self.width + i*self.time_width
            ci = coeff[i]
            while wi != 0:
                j = pos//32
                used = pos - 32*j
                avail = 32 - used
                if avail > wi:
                    avail = wi
                cij = int32(ci)
                if avail != 32:
                    cij &= (1 << avail) - 1
                packed[j] |= cij << used
                ci >>= avail
                wi -= avail
                pos += avail

    @portable(flags={"fast-math"})
    def coeff_to_mu(self, coeff, coeff64):  # TList(TFloat), TList(TInt64)
        """Convert a floating point list of coefficients into a 64 bit
        integer (preallocated).

        :param coeff: TList(TFloat) list of coefficients in physical units.
        :param coeff64: TList(TInt64) preallocated list of coefficients in
            machine units.
        """
        for i in range(len(coeff)):
            vi = coeff[i] * self.scale
            for j in range(i):
                vi *= self.time_scale
            ci = int64(round(vi))
            coeff64[i] = ci
            # artiq.wavesynth.coefficients.discrete_compensate:
            if i == 2:
                coeff64[1] += ci >> self.time_width + 1
            elif i == 3:
                coeff64[2] += ci >> self.time_width
                coeff64[1] += ci // 6 >> 2*self.time_width

    def coeff_as_packed_mu(self, coeff64):
        """Pack 64 bit integer machine units coefficients into 32 bit integer
        RTIO data list.

        This is a host-only method that can be used to generate packed
        spline coefficient data to be frozen into kernels at compile time.
        """
        n = len(coeff64)
        width = n*self.width + (n - 1)*n//2*self.time_width
        packed = [int32(0)] * ((width + 31)//32)
        self.pack_coeff_mu(coeff64, packed)
        return packed

    def coeff_as_packed(self, coeff):
        """Convert floating point spline coefficients into 32 bit integer
        packed data.

        This is a host-only method that can be used to generate packed
        spline coefficient data to be frozen into kernels at compile time.
        """
        coeff64 = [int64(0)] * len(coeff)
        self.coeff_to_mu(coeff, coeff64)
        return self.coeff_as_packed_mu(coeff64)

    @kernel(flags={"fast-math"})
    def set_coeff(self, coeff):  # TList(TFloat)
        """Set spline coefficients.

        Missing coefficients (high order) are zero-extended by the RTIO
        gateware.

        If more coefficients are supplied than the gateware supports, the
        extra coefficients are ignored.

        :param value: List of floating point spline coefficients,
            lowest order (constant) coefficient first. Units are the
            unit of this spline's value times increasing powers of 1/s.
        """
        n = len(coeff)
        coeff64 = [int64(0)] * n
        self.coeff_to_mu(coeff, coeff64)
        width = n*self.width + (n - 1)*n//2*self.time_width
        packed = [int32(0)] * ((width + 31)//32)
        self.pack_coeff_mu(coeff64, packed)
        self.set_coeff_mu(packed)

    @kernel(flags={"fast-math"})
    def smooth(self, start: TFloat, stop: TFloat, duration: TFloat,
               order: TInt32):
        """Initiate an interpolated value change.

        For zeroth order (step) interpolation, the step is at
        ``start + duration/2``.

        First order interpolation corresponds to a linear value ramp from
        ``start`` to ``stop`` over ``duration``.

        The third order interpolation is constrained to have zero first
        order derivative at both `start` and `stop`.

        For first order and third order interpolation (linear and cubic)
        the interpolator needs to be stopped explicitly at the stop time
        (e.g. by setting spline coefficient data or starting a new
        :meth:`smooth` interpolation).

        This method advances the timeline by ``duration``.

        :param start: Initial value of the change. In physical units.
        :param stop: Final value of the change. In physical units.
        :param duration: Duration of the interpolation. In physical units.
        :param order: Order of the interpolation. Only 0, 1,
            and 3 are valid: step, linear, cubic.
        """
        if order == 0:
            delay(duration/2.)
            self.set_coeff([stop])
            delay(duration/2.)
        elif order == 1:
            self.set_coeff([start, (stop - start)/duration])
            delay(duration)
        elif order == 3:
            v2 = 6.*(stop - start)/(duration*duration)
            self.set_coeff([start, 0., v2, -2.*v2/duration])
            delay(duration)
        else:
            raise ValueError("Invalid interpolation order. "
                             "Supported orders are: 0, 1, 3.")

@@ -23,12 +23,12 @@ def y_mu_to_full_scale(y):
 
 
 @portable
-def adc_mu_to_volts(x, gain):
+def adc_mu_to_volts(x, gain, corrected_fs=True):
     """Convert servo ADC data from machine units to Volt."""
     val = (x >> 1) & 0xffff
     mask = 1 << 15
     val = -(val & mask) + (val & ~mask)
-    return sampler.adc_mu_to_volt(val, gain)
+    return sampler.adc_mu_to_volt(val, gain, corrected_fs)
 
 
 class SUServo:

@@ -62,14 +62,15 @@ class SUServo:
     :param gains: Initial value for PGIA gains shift register
         (default: 0x0000). Knowledge of this state is not transferred
         between experiments.
+    :param sampler_hw_rev: Sampler's revision string
     :param core_device: Core device name
     """
     kernel_invariants = {"channel", "core", "pgia", "cplds", "ddses",
-                         "ref_period_mu"}
+                         "ref_period_mu", "corrected_fs"}
 
     def __init__(self, dmgr, channel, pgia_device,
                  cpld_devices, dds_devices,
-                 gains=0x0000, core_device="core"):
+                 gains=0x0000, sampler_hw_rev="v2.2", core_device="core"):
 
         self.core = dmgr.get(core_device)
         self.pgia = dmgr.get(pgia_device)

@@ -81,8 +82,13 @@ class SUServo:
         self.gains = gains
         self.ref_period_mu = self.core.seconds_to_mu(
             self.core.coarse_ref_period)
+        self.corrected_fs = sampler.Sampler.use_corrected_fs(sampler_hw_rev)
         assert self.ref_period_mu == self.core.ref_multiplier
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @kernel
     def init(self):
         """Initialize the servo, Sampler and both Urukuls.

@@ -234,7 +240,7 @@ class SUServo:
         """
         val = self.get_adc_mu(channel)
         gain = (self.gains >> (channel*2)) & 0b11
-        return adc_mu_to_volts(val, gain)
+        return adc_mu_to_volts(val, gain, self.corrected_fs)
 
 
 class Channel:

@@ -255,6 +261,10 @@ class Channel:
                                              self.servo.channel)
         self.dds = self.servo.ddses[self.servo_channel // 4]
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @kernel
     def set(self, en_out, en_iir=0, profile=0):
         """Operate channel.
@@ -36,6 +36,10 @@ class TTLOut:
         self.channel = channel
         self.target_o = channel << 8
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @kernel
     def output(self):
         pass

@@ -128,6 +132,10 @@ class TTLInOut:
         self.target_sens = (channel << 8) + 2
         self.target_sample = (channel << 8) + 3
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @kernel
     def set_oe(self, oe):
         rtio_output(self.target_oe, 1 if oe else 0)

@@ -465,6 +473,10 @@ class TTLClockGen:
 
         self.acc_width = numpy.int64(acc_width)
 
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @portable
     def frequency_to_ftw(self, frequency):
         """Returns the frequency tuning word corresponding to the given
@@ -5,7 +5,7 @@ import numpy as np
 from PyQt5 import QtCore, QtWidgets
 from sipyco import pyon
 
-from artiq.tools import short_format, exc_to_warning
+from artiq.tools import scale_from_metadata, short_format, exc_to_warning
 from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel
 from artiq.gui.models import DictSyncTreeSepModel
 from artiq.gui.scientific_spinbox import ScientificSpinBox

@@ -14,81 +14,18 @@ from artiq.gui.scientific_spinbox import ScientificSpinBox
 logger = logging.getLogger(__name__)
 
 
-class Editor(QtWidgets.QDialog):
-    def __init__(self, parent, dataset_ctl, key, value):
-        QtWidgets.QDialog.__init__(self, parent=parent)
-        self.dataset_ctl = dataset_ctl
-        self.key = key
-        self.initial_type = type(value)
-
-        self.setWindowTitle("Edit dataset")
-        grid = QtWidgets.QGridLayout()
-        self.setLayout(grid)
-
-        grid.addWidget(QtWidgets.QLabel("Name:"), 0, 0)
-        grid.addWidget(QtWidgets.QLabel(key), 0, 1)
-
-        grid.addWidget(QtWidgets.QLabel("Value:"), 1, 0)
-        grid.addWidget(self.get_edit_widget(value), 1, 1)
-
-        buttons = QtWidgets.QDialogButtonBox(
-            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
-        grid.setRowStretch(2, 1)
-        grid.addWidget(buttons, 3, 0, 1, 2)
-        buttons.accepted.connect(self.accept)
-        buttons.rejected.connect(self.reject)
-
-    def accept(self):
-        value = self.initial_type(self.get_edit_widget_value())
-        asyncio.ensure_future(self.dataset_ctl.set(self.key, value))
-        QtWidgets.QDialog.accept(self)
-
-    def get_edit_widget(self, initial_value):
-        raise NotImplementedError
-
-    def get_edit_widget_value(self):
-        raise NotImplementedError
-
-
-class NumberEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = ScientificSpinBox()
-        self.edit_widget.setDecimals(13)
-        self.edit_widget.setPrecision()
-        self.edit_widget.setRelativeStep()
-        self.edit_widget.setValue(float(initial_value))
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.value()
-
-
-class BoolEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = QtWidgets.QCheckBox()
-        self.edit_widget.setChecked(bool(initial_value))
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.isChecked()
-
-
-class StringEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = QtWidgets.QLineEdit()
-        self.edit_widget.setText(initial_value)
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.text()
-
-
-class Creator(QtWidgets.QDialog):
-    def __init__(self, parent, dataset_ctl):
+async def rename(key, new_key, value, metadata, persist, dataset_ctl):
+    if key != new_key:
+        await dataset_ctl.delete(key)
+    await dataset_ctl.set(new_key, value, metadata=metadata, persist=persist)
+
+
+class CreateEditDialog(QtWidgets.QDialog):
+    def __init__(self, parent, dataset_ctl, key=None, value=None, metadata=None, persist=False):
         QtWidgets.QDialog.__init__(self, parent=parent)
         self.dataset_ctl = dataset_ctl
 
-        self.setWindowTitle("Create dataset")
+        self.setWindowTitle("Create dataset" if key is None else "Edit dataset")
         grid = QtWidgets.QGridLayout()
         grid.setRowMinimumHeight(1, 40)
         grid.setColumnMinimumWidth(2, 60)

@@ -106,9 +43,21 @@ class Creator(QtWidgets.QDialog):
         grid.addWidget(self.data_type, 1, 2)
         self.value_widget.textChanged.connect(self.dtype)
 
-        grid.addWidget(QtWidgets.QLabel("Persist:"), 2, 0)
+        grid.addWidget(QtWidgets.QLabel("Unit:"), 2, 0)
+        self.unit_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.unit_widget, 2, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Scale:"), 3, 0)
+        self.scale_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.scale_widget, 3, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Precision:"), 4, 0)
+        self.precision_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.precision_widget, 4, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Persist:"), 5, 0)
         self.box_widget = QtWidgets.QCheckBox()
-        grid.addWidget(self.box_widget, 2, 1)
+        grid.addWidget(self.box_widget, 5, 1)
 
         self.ok = QtWidgets.QPushButton('&Ok')
         self.ok.setEnabled(False)

@@ -118,23 +67,62 @@ class Creator(QtWidgets.QDialog):
             self.ok, QtWidgets.QDialogButtonBox.AcceptRole)
         self.buttons.addButton(
             self.cancel, QtWidgets.QDialogButtonBox.RejectRole)
-        grid.setRowStretch(3, 1)
-        grid.addWidget(self.buttons, 4, 0, 1, 3)
+        grid.setRowStretch(6, 1)
+        grid.addWidget(self.buttons, 7, 0, 1, 3, alignment=QtCore.Qt.AlignHCenter)
         self.buttons.accepted.connect(self.accept)
         self.buttons.rejected.connect(self.reject)
 
+        self.key = key
+        self.name_widget.setText(key)
+
+        value_edit_string = self.value_to_edit_string(value)
+        if metadata is not None:
+            scale = scale_from_metadata(metadata)
+            t = value.dtype if value is np.ndarray else type(value)
+            if scale != 1 and np.issubdtype(t, np.number):
+                # degenerates to float type
+                value_edit_string = self.value_to_edit_string(
+                    np.float64(value / scale))
+            self.unit_widget.setText(metadata.get('unit', ''))
+            self.scale_widget.setText(str(metadata.get('scale', '')))
+            self.precision_widget.setText(str(metadata.get('precision', '')))
+
+        self.value_widget.setText(value_edit_string)
+        self.box_widget.setChecked(persist)
+
     def accept(self):
         key = self.name_widget.text()
         value = self.value_widget.text()
         persist = self.box_widget.isChecked()
-        asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(
-            key, pyon.decode(value), persist)))
+        unit = self.unit_widget.text()
+        scale = self.scale_widget.text()
+        precision = self.precision_widget.text()
+        metadata = {}
+        if unit != "":
+            metadata['unit'] = unit
+        if scale != "":
+            metadata['scale'] = float(scale)
+        if precision != "":
+            metadata['precision'] = int(precision)
+        scale = scale_from_metadata(metadata)
+        value = self.parse_edit_string(value)
+        t = value.dtype if value is np.ndarray else type(value)
+        if scale != 1 and np.issubdtype(t, np.number):
+            # degenerates to float type
+            value = np.float64(value * scale)
+        if self.key and self.key != key:
+            asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, self.dataset_ctl)))
+        else:
+            asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(key, value, metadata=metadata, persist=persist)))
+        self.key = key
         QtWidgets.QDialog.accept(self)
 
     def dtype(self):
         txt = self.value_widget.text()
         try:
-            result = pyon.decode(txt)
+            result = self.parse_edit_string(txt)
+            # ensure only pyon compatible types are permissible
+            pyon.encode(result)
         except:
             pixmap = self.style().standardPixmap(
                 QtWidgets.QStyle.SP_MessageBoxWarning)

@@ -144,24 +132,53 @@ class Creator(QtWidgets.QDialog):
         self.data_type.setText(type(result).__name__)
         self.ok.setEnabled(True)
 
+    @staticmethod
+    def parse_edit_string(s):
+        if s == "":
+            raise TypeError
+        _eval_dict = {
+            "__builtins__": {},
+            "array": np.array,
+            "null": np.nan,
+            "inf": np.inf
+        }
+        for t_ in pyon._numpy_scalar:
+            _eval_dict[t_] = eval("np.{}".format(t_), {"np": np})
+        return eval(s, _eval_dict, {})
+
+    @staticmethod
+    def value_to_edit_string(v):
+        t = type(v)
+        r = ""
+        if isinstance(v, np.generic):
+            r += t.__name__
+            r += "("
+            r += repr(v)
+            r += ")"
+        elif v is None:
+            return r
+        else:
+            r += repr(v)
+        return r
+
 
 class Model(DictSyncTreeSepModel):
     def __init__(self, init):
-        DictSyncTreeSepModel.__init__(
-            self, ".", ["Dataset", "Persistent", "Value"], init
-        )
+        DictSyncTreeSepModel.__init__(self, ".",
+                                      ["Dataset", "Persistent", "Value"],
+                                      init)
 
     def convert(self, k, v, column):
         if column == 1:
-            return "Y" if v["persist"] else "N"
+            return "Y" if v[0] else "N"
         elif column == 2:
-            return short_format(v["value"])
+            return short_format(v[1], v[2])
         else:
             raise ValueError
 
 
 class DatasetsDock(QtWidgets.QDockWidget):
-    def __init__(self, datasets_sub, dataset_ctl):
+    def __init__(self, dataset_sub, dataset_ctl):
         QtWidgets.QDockWidget.__init__(self, "Datasets")
         self.setObjectName("Datasets")
         self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -201,7 +218,7 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table.addAction(delete_action)
 
         self.table_model = Model(dict())
-        datasets_sub.add_setmodel_callback(self.set_model)
+        dataset_sub.add_setmodel_callback(self.set_model)
 
     def _search_datasets(self):
         if hasattr(self, "table_model_filter"):

@@ -215,7 +232,7 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table.setModel(self.table_model_filter)
 
     def create_clicked(self):
-        Creator(self, self.dataset_ctl).open()
+        CreateEditDialog(self, self.dataset_ctl).open()
 
     def edit_clicked(self):
         idx = self.table.selectedIndexes()

@@ -223,19 +240,8 @@ class DatasetsDock(QtWidgets.QDockWidget):
             idx = self.table_model_filter.mapToSource(idx[0])
             key = self.table_model.index_to_key(idx)
             if key is not None:
-                dataset = self.table_model.backing_store[key]
-                t = type(dataset["value"])
-                if np.issubdtype(t, np.number):
-                    dialog_cls = NumberEditor
-                elif np.issubdtype(t, np.bool_):
-                    dialog_cls = BoolEditor
-                elif np.issubdtype(t, np.unicode_):
-                    dialog_cls = StringEditor
-                else:
-                    logger.error("Cannot edit dataset %s: "
-                                 "type %s is not supported", key, t)
-                    return
-                dialog_cls(self, self.dataset_ctl, key, dataset["value"]).open()
+                persist, value, metadata = self.table_model.backing_store[key]
+                CreateEditDialog(self, self.dataset_ctl, key, value, metadata, persist).open()
 
     def delete_clicked(self):
         idx = self.table.selectedIndexes()
@ -11,7 +11,9 @@ from sipyco import pyon
|
||||||
|
|
||||||
from artiq.gui.entries import procdesc_to_entry, ScanEntry
|
from artiq.gui.entries import procdesc_to_entry, ScanEntry
|
||||||
from artiq.gui.fuzzy_select import FuzzySelectWidget
|
from artiq.gui.fuzzy_select import FuzzySelectWidget
|
||||||
from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
|
from artiq.gui.tools import (LayoutWidget, WheelFilter,
|
||||||
|
log_level_to_name, get_open_file_name)
|
||||||
|
from artiq.tools import parse_devarg_override, unparse_devarg_override
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
@ -23,15 +25,6 @@ logger = logging.getLogger(__name__)
|
||||||
# 2. file:<class name>@<file name>
|
# 2. file:<class name>@<file name>
|
||||||
|
|
||||||
|
|
||||||
class _WheelFilter(QtCore.QObject):
|
|
||||||
def eventFilter(self, obj, event):
|
|
||||||
if (event.type() == QtCore.QEvent.Wheel and
|
|
||||||
event.modifiers() != QtCore.Qt.NoModifier):
|
|
||||||
event.ignore()
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class _ArgumentEditor(QtWidgets.QTreeWidget):
|
class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
def __init__(self, manager, dock, expurl):
|
def __init__(self, manager, dock, expurl):
|
||||||
self.manager = manager
|
self.manager = manager
|
||||||
|
@ -55,7 +48,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
self.setStyleSheet("QTreeWidget {background: " +
|
self.setStyleSheet("QTreeWidget {background: " +
|
||||||
self.palette().midlight().color().name() + " ;}")
|
self.palette().midlight().color().name() + " ;}")
|
||||||
|
|
||||||
self.viewport().installEventFilter(_WheelFilter(self.viewport()))
|
self.viewport().installEventFilter(WheelFilter(self.viewport(), True))
|
||||||
|
|
||||||
self._groups = dict()
|
self._groups = dict()
|
||||||
self._arg_to_widgets = dict()
|
self._arg_to_widgets = dict()
|
||||||
|
@ -159,12 +152,29 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
self._groups[name] = group
|
self._groups[name] = group
|
||||||
return group
|
return group
|
||||||
|
|
||||||
|
def update_argument(self, name, argument):
|
||||||
|
widgets = self._arg_to_widgets[name]
|
||||||
|
|
||||||
|
# Qt needs a setItemWidget() to handle layout correctly,
|
||||||
|
# simply replacing the entry inside the LayoutWidget
|
||||||
|
# results in a bug.
|
||||||
|
|
||||||
|
widgets["entry"].deleteLater()
|
||||||
|
widgets["entry"] = procdesc_to_entry(argument["desc"])(argument)
|
||||||
|
widgets["disable_other_scans"].setVisible(
|
||||||
|
isinstance(widgets["entry"], ScanEntry))
|
||||||
|
widgets["fix_layout"].deleteLater()
|
||||||
|
widgets["fix_layout"] = LayoutWidget()
|
||||||
|
widgets["fix_layout"].addWidget(widgets["entry"])
|
||||||
|
self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
|
||||||
|
self.updateGeometries()
|
||||||
|
|
||||||
def _recompute_argument_clicked(self, name):
|
def _recompute_argument_clicked(self, name):
|
||||||
asyncio.ensure_future(self._recompute_argument(name))
|
asyncio.ensure_future(self._recompute_argument(name))
|
||||||
|
|
||||||
async def _recompute_argument(self, name):
|
async def _recompute_argument(self, name):
|
||||||
try:
|
try:
|
||||||
expdesc = await self.manager.compute_expdesc(self.expurl)
|
expdesc, _ = await self.manager.compute_expdesc(self.expurl)
|
||||||
except:
|
except:
|
||||||
logger.error("Could not recompute argument '%s' of '%s'",
|
logger.error("Could not recompute argument '%s' of '%s'",
|
||||||
name, self.expurl, exc_info=True)
|
name, self.expurl, exc_info=True)
|
||||||
|
@ -175,22 +185,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
||||||
argument["desc"] = procdesc
|
argument["desc"] = procdesc
|
||||||
argument["state"] = state
|
argument["state"] = state
|
||||||
|
self.update_argument(name, argument)
|
||||||
# Qt needs a setItemWidget() to handle layout correctly,
|
|
||||||
# simply replacing the entry inside the LayoutWidget
|
|
||||||
# results in a bug.
|
|
||||||
|
|
||||||
widgets = self._arg_to_widgets[name]
|
|
||||||
|
|
||||||
widgets["entry"].deleteLater()
|
|
||||||
widgets["entry"] = procdesc_to_entry(procdesc)(argument)
|
|
||||||
widgets["disable_other_scans"].setVisible(
|
|
||||||
isinstance(widgets["entry"], ScanEntry))
|
|
||||||
widgets["fix_layout"].deleteLater()
|
|
||||||
widgets["fix_layout"] = LayoutWidget()
|
|
||||||
widgets["fix_layout"].addWidget(widgets["entry"])
|
|
||||||
self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
|
|
||||||
self.updateGeometries()
|
|
||||||
|
|
||||||
def _disable_other_scans(self, current_name):
|
def _disable_other_scans(self, current_name):
|
||||||
for name, widgets in self._arg_to_widgets.items():
|
for name, widgets in self._arg_to_widgets.items():
|
||||||
|
@ -216,6 +211,15 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
pass
|
pass
|
||||||
self.verticalScrollBar().setValue(state["scroll"])
|
self.verticalScrollBar().setValue(state["scroll"])
|
||||||
|
|
||||||
|
# Hooks that allow user-supplied argument editors to react to imminent user
|
||||||
|
# actions. Here, we always keep the manager-stored submission arguments
|
||||||
|
# up-to-date, so no further action is required.
|
||||||
|
def about_to_submit(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def about_to_close(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
|
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
|
||||||
|
|
||||||
|
@ -241,7 +245,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
self.manager = manager
|
self.manager = manager
|
||||||
self.expurl = expurl
|
self.expurl = expurl
|
||||||
|
|
||||||
self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
|
editor_class = self.manager.get_argument_editor_class(expurl)
|
||||||
|
self.argeditor = editor_class(self.manager, self, self.expurl)
|
||||||
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
||||||
self.layout.setRowStretch(0, 1)
|
self.layout.setRowStretch(0, 1)
|
||||||
|
|
||||||
|
@ -258,7 +263,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
datetime.setDate(QtCore.QDate.currentDate())
|
datetime.setDate(QtCore.QDate.currentDate())
|
||||||
else:
|
else:
|
||||||
datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch(
|
datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch(
|
||||||
scheduling["due_date"]*1000))
|
int(scheduling["due_date"]*1000)))
|
||||||
datetime_en.setChecked(scheduling["due_date"] is not None)
|
datetime_en.setChecked(scheduling["due_date"] is not None)
|
||||||
|
|
||||||
def update_datetime(dt):
|
def update_datetime(dt):
|
||||||
|
@@ -301,7 +306,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         flush = self.flush
         flush.setToolTip("Flush the pipeline (of current- and higher-priority "
                          "experiments) before starting the experiment")
-        self.layout.addWidget(flush, 2, 2, 1, 2)
+        self.layout.addWidget(flush, 2, 2)

         flush.setChecked(scheduling["flush"])

@@ -309,6 +314,20 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
             scheduling["flush"] = bool(checked)
         flush.stateChanged.connect(update_flush)

+        devarg_override = QtWidgets.QComboBox()
+        devarg_override.setEditable(True)
+        devarg_override.lineEdit().setPlaceholderText("Override device arguments")
+        devarg_override.lineEdit().setClearButtonEnabled(True)
+        devarg_override.insertItem(0, "core:analyze_at_run_end=True")
+        self.layout.addWidget(devarg_override, 2, 3)
+
+        devarg_override.setCurrentText(options["devarg_override"])
+
+        def update_devarg_override(text):
+            options["devarg_override"] = text
+        devarg_override.editTextChanged.connect(update_devarg_override)
+        self.devarg_override = devarg_override
+
         log_level = QtWidgets.QComboBox()
         log_level.addItems(log_levels)
         log_level.setCurrentIndex(1)
@@ -329,6 +348,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         if "repo_rev" in options:
             repo_rev = QtWidgets.QLineEdit()
             repo_rev.setPlaceholderText("current")
+            repo_rev.setClearButtonEnabled(True)
             repo_rev_label = QtWidgets.QLabel("Revision:")
             repo_rev_label.setToolTip("Experiment repository revision "
                                       "(commit ID) to use")
@@ -369,6 +389,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         self.hdf5_load_directory = os.path.expanduser("~")

     def submit_clicked(self):
+        self.argeditor.about_to_submit()
         try:
             self.manager.submit(self.expurl)
         except:
@@ -391,7 +412,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):

     async def _recompute_arguments_task(self, overrides=dict()):
         try:
-            expdesc = await self.manager.compute_expdesc(self.expurl)
+            expdesc, ui_name = await self.manager.compute_expdesc(self.expurl)
         except:
             logger.error("Could not recompute experiment description of '%s'",
                          self.expurl, exc_info=True)
@@ -404,12 +425,13 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
                 arginfo[k][0]["default"].insert(0, v)
             else:
                 arginfo[k][0]["default"] = v
-        self.manager.initialize_submission_arguments(self.expurl, arginfo)
+        self.manager.initialize_submission_arguments(self.expurl, arginfo, ui_name)

         argeditor_state = self.argeditor.save_state()
         self.argeditor.deleteLater()

-        self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
+        editor_class = self.manager.get_argument_editor_class(self.expurl)
+        self.argeditor = editor_class(self.manager, self, self.expurl)
         self.argeditor.restore_state(argeditor_state)
         self.layout.addWidget(self.argeditor, 0, 0, 1, 5)

@@ -422,7 +444,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):

     async def _recompute_sched_options_task(self):
         try:
-            expdesc = await self.manager.compute_expdesc(self.expurl)
+            expdesc, _ = await self.manager.compute_expdesc(self.expurl)
         except:
             logger.error("Could not recompute experiment description of '%s'",
                          self.expurl, exc_info=True)
@@ -459,6 +481,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
            return

        try:
+            if "devarg_override" in expid:
+                self.devarg_override.setCurrentText(
+                    unparse_devarg_override(expid["devarg_override"]))
            self.log_level.setCurrentIndex(log_levels.index(
                log_level_to_name(expid["log_level"])))
            if ("repo_rev" in expid and
@@ -473,6 +498,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
         await self._recompute_arguments_task(arguments)

     def closeEvent(self, event):
+        self.argeditor.about_to_close()
         self.sigClosed.emit()
         QtWidgets.QMdiSubWindow.closeEvent(self, event)

@@ -544,7 +570,13 @@ class _QuickOpenDialog(QtWidgets.QDialog):


 class ExperimentManager:
-    def __init__(self, main_window,
+    #: Global registry for custom argument editor classes, indexed by the experiment
+    #: `argument_ui` string; can be populated by dashboard plugins such as ndscan.
+    #: If no handler for a requested UI name is found, the default built-in argument
+    #: editor will be used.
+    argument_ui_classes = dict()
+
+    def __init__(self, main_window, dataset_sub,
                  explist_sub, schedule_sub,
                  schedule_ctl, experiment_db_ctl):
         self.main_window = main_window
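The `argument_ui_classes` registry introduced above is how an external dashboard plugin supplies its own argument editor. A minimal sketch of the registration side, assuming the module path `artiq.dashboard.experiments` and a hypothetical editor class and UI name; only the registry dict, the `_ArgumentEditor` base and the `about_to_submit`/`about_to_close` hooks come from this diff:

from artiq.dashboard.experiments import ExperimentManager, _ArgumentEditor

class MyArgumentEditor(_ArgumentEditor):
    # Reuse the stock tree editor, only hooking the new lifecycle callbacks.
    def about_to_submit(self):
        pass  # e.g. push locally cached edits into the manager-held arguments

    def about_to_close(self):
        pass

# Experiments whose "argument_ui" string is "my_ui" now get MyArgumentEditor
# instead of the built-in editor; unknown names fall back to _ArgumentEditor.
ExperimentManager.argument_ui_classes["my_ui"] = MyArgumentEditor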
@@ -555,7 +587,10 @@ class ExperimentManager:
         self.submission_scheduling = dict()
         self.submission_options = dict()
         self.submission_arguments = dict()
+        self.argument_ui_names = dict()

+        self.datasets = dict()
+        dataset_sub.add_setmodel_callback(self.set_dataset_model)
         self.explist = dict()
         explist_sub.add_setmodel_callback(self.set_explist_model)
         self.schedule = dict()
@@ -570,6 +605,9 @@ class ExperimentManager:
         quick_open_shortcut.setContext(QtCore.Qt.ApplicationShortcut)
         quick_open_shortcut.activated.connect(self.show_quick_open)

+    def set_dataset_model(self, model):
+        self.datasets = model
+
     def set_explist_model(self, model):
         self.explist = model.backing_store

@@ -586,6 +624,17 @@ class ExperimentManager:
         else:
             raise ValueError("Malformed experiment URL")

+    def get_argument_editor_class(self, expurl):
+        ui_name = self.argument_ui_names.get(expurl, None)
+        if not ui_name and expurl[:5] == "repo:":
+            ui_name = self.explist.get(expurl[5:], {}).get("argument_ui", None)
+        if ui_name:
+            result = self.argument_ui_classes.get(ui_name, None)
+            if result:
+                return result
+            logger.warning("Ignoring unknown argument UI '%s'", ui_name)
+        return _ArgumentEditor
+
     def get_submission_scheduling(self, expurl):
         if expurl in self.submission_scheduling:
             return self.submission_scheduling[expurl]
@@ -608,14 +657,15 @@ class ExperimentManager:
         else:
             # mutated by _ExperimentDock
             options = {
-                "log_level": logging.WARNING
+                "log_level": logging.WARNING,
+                "devarg_override": ""
             }
             if expurl[:5] == "repo:":
                 options["repo_rev"] = None
             self.submission_options[expurl] = options
             return options

-    def initialize_submission_arguments(self, expurl, arginfo):
+    def initialize_submission_arguments(self, expurl, arginfo, ui_name):
         arguments = OrderedDict()
         for name, (procdesc, group, tooltip) in arginfo.items():
             state = procdesc_to_entry(procdesc).default_state(procdesc)
|
||||||
"state": state, # mutated by entries
|
"state": state, # mutated by entries
|
||||||
}
|
}
|
||||||
self.submission_arguments[expurl] = arguments
|
self.submission_arguments[expurl] = arguments
|
||||||
|
self.argument_ui_names[expurl] = ui_name
|
||||||
return arguments
|
return arguments
|
||||||
|
|
||||||
|
def set_argument_value(self, expurl, name, value):
|
||||||
|
try:
|
||||||
|
argument = self.submission_arguments[expurl][name]
|
||||||
|
if argument["desc"]["ty"] == "Scannable":
|
||||||
|
ty = value["ty"]
|
||||||
|
argument["state"]["selected"] = ty
|
||||||
|
argument["state"][ty] = value
|
||||||
|
else:
|
||||||
|
argument["state"] = value
|
||||||
|
if expurl in self.open_experiments.keys():
|
||||||
|
self.open_experiments[expurl].argeditor.update_argument(name, argument)
|
||||||
|
except:
|
||||||
|
logger.warn("Failed to set value for argument \"{}\" in experiment: {}.".format(name, expurl), exc_info=1)
|
||||||
|
|
||||||
def get_submission_arguments(self, expurl):
|
def get_submission_arguments(self, expurl):
|
||||||
if expurl in self.submission_arguments:
|
if expurl in self.submission_arguments:
|
||||||
return self.submission_arguments[expurl]
|
return self.submission_arguments[expurl]
|
||||||
|
@ -635,9 +700,9 @@ class ExperimentManager:
|
||||||
if expurl[:5] != "repo:":
|
if expurl[:5] != "repo:":
|
||||||
raise ValueError("Submission arguments must be preinitialized "
|
raise ValueError("Submission arguments must be preinitialized "
|
||||||
"when not using repository")
|
"when not using repository")
|
||||||
arginfo = self.explist[expurl[5:]]["arginfo"]
|
class_desc = self.explist[expurl[5:]]
|
||||||
arguments = self.initialize_submission_arguments(expurl, arginfo)
|
return self.initialize_submission_arguments(expurl,
|
||||||
return arguments
|
class_desc["arginfo"], class_desc.get("argument_ui", None))
|
||||||
|
|
||||||
def open_experiment(self, expurl):
|
def open_experiment(self, expurl):
|
||||||
if expurl in self.open_experiments:
|
if expurl in self.open_experiments:
|
||||||
|
@@ -688,7 +753,14 @@ class ExperimentManager:
             entry_cls = procdesc_to_entry(argument["desc"])
             argument_values[name] = entry_cls.state_to_value(argument["state"])

+        try:
+            devarg_override = parse_devarg_override(options["devarg_override"])
+        except:
+            logger.error("Failed to parse device argument overrides for %s", expurl)
+            return
+
         expid = {
+            "devarg_override": devarg_override,
             "log_level": options["log_level"],
             "file": file,
             "class_name": class_name,
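`parse_devarg_override` (imported elsewhere, not shown in this diff) turns the free-text string from the device-argument override combo box into the `devarg_override` dict placed in the expid above. A rough sketch of the expected behaviour, assuming the string is a space-separated list of device:key=value items with PYON-encoded values (the real helper may differ in detail):

from sipyco import pyon

def parse_devarg_override_sketch(s):
    # "core:analyze_at_run_end=True" -> {"core": {"analyze_at_run_end": True}}
    overrides = {}
    for item in s.split():
        device, kv = item.split(":", 1)
        key, value = kv.split("=", 1)
        overrides.setdefault(device, {})[key] = pyon.decode(value)
    return overrides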
@@ -726,7 +798,7 @@ class ExperimentManager:
             else:
                 repo_match = "repo_rev" not in expid
             if (repo_match and
-                    expid["file"] == file and
+                    ("file" in expid and expid["file"] == file) and
                     expid["class_name"] == class_name):
                 rids.append(rid)
         asyncio.ensure_future(self._request_term_multiple(rids))
@@ -739,13 +811,15 @@ class ExperimentManager:
             revision = None
         description = await self.experiment_db_ctl.examine(
             file, use_repository, revision)
-        return description[class_name]
+        class_desc = description[class_name]
+        return class_desc, class_desc.get("argument_ui", None)

     async def open_file(self, file):
         description = await self.experiment_db_ctl.examine(file, False)
         for class_name, class_desc in description.items():
             expurl = "file:{}@{}".format(class_name, file)
-            self.initialize_submission_arguments(expurl, class_desc["arginfo"])
+            self.initialize_submission_arguments(expurl, class_desc["arginfo"],
+                class_desc.get("argument_ui", None))
             if expurl in self.open_experiments:
                 self.open_experiments[expurl].close()
             self.open_experiment(expurl)
@@ -758,6 +832,7 @@ class ExperimentManager:
             "options": self.submission_options,
             "arguments": self.submission_arguments,
             "docks": self.dock_states,
+            "argument_uis": self.argument_ui_names,
             "open_docks": set(self.open_experiments.keys())
         }

@@ -768,6 +843,7 @@ class ExperimentManager:
         self.submission_scheduling = state["scheduling"]
         self.submission_options = state["options"]
         self.submission_arguments = state["arguments"]
+        self.argument_ui_names = state.get("argument_uis", {})
         for expurl in state["open_docks"]:
             self.open_experiment(expurl)

@@ -159,7 +159,7 @@ class WaitingPanel(LayoutWidget):
 class ExplorerDock(QtWidgets.QDockWidget):
     def __init__(self, exp_manager, d_shortcuts,
                  explist_sub, explist_status_sub,
-                 schedule_ctl, experiment_db_ctl):
+                 schedule_ctl, experiment_db_ctl, device_db_ctl):
         QtWidgets.QDockWidget.__init__(self, "Explorer")
         self.setObjectName("Explorer")
         self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
@@ -251,6 +251,12 @@ class ExplorerDock(QtWidgets.QDockWidget):
         scan_repository_action.triggered.connect(scan_repository)
         self.el.addAction(scan_repository_action)

+        scan_ddb_action = QtWidgets.QAction("Scan device database", self.el)
+        def scan_ddb():
+            asyncio.ensure_future(device_db_ctl.scan())
+        scan_ddb_action.triggered.connect(scan_ddb)
+        self.el.addAction(scan_ddb_action)
+
         self.current_directory = ""
         open_file_action = QtWidgets.QAction("Open file outside repository",
                                              self.el)
@@ -1,5 +1,6 @@
 import asyncio
 import logging
+import textwrap
 from collections import namedtuple

 from PyQt5 import QtCore, QtWidgets, QtGui
@@ -7,12 +8,27 @@ from PyQt5 import QtCore, QtWidgets, QtGui
 from sipyco.sync_struct import Subscriber

 from artiq.coredevice.comm_moninj import *
+from artiq.coredevice.ad9910 import (
+    _AD9910_REG_PROFILE0, _AD9910_REG_PROFILE7,
+    _AD9910_REG_FTW, _AD9910_REG_CFR1
+)
+from artiq.coredevice.ad9912_reg import AD9912_POW1, AD9912_SER_CONF
 from artiq.gui.tools import LayoutWidget
 from artiq.gui.flowlayout import FlowLayout


 logger = logging.getLogger(__name__)


+class _CancellableLineEdit(QtWidgets.QLineEdit):
+    def escapePressedConnect(self, cb):
+        self.esc_cb = cb
+
+    def keyPressEvent(self, event):
+        key = event.key()
+        if key == QtCore.Qt.Key_Escape:
+            self.esc_cb(event)
+        QtWidgets.QLineEdit.keyPressEvent(self, event)
+
+
 class _TTLWidget(QtWidgets.QFrame):
     def __init__(self, dm, channel, force_out, title):
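Usage of the `_CancellableLineEdit` added above is straightforward: it behaves like a normal QLineEdit, plus a stored callback that fires when Escape is pressed before the default key handling runs. A small sketch with hypothetical callbacks:

edit = _CancellableLineEdit()
edit.returnPressed.connect(lambda: print("apply"))        # stock QLineEdit signal
edit.escapePressedConnect(lambda event: print("cancel"))  # new hook, fires on Esc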
@ -168,15 +184,172 @@ class _SimpleDisplayWidget(QtWidgets.QFrame):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
|
|
||||||
class _DDSWidget(_SimpleDisplayWidget):
|
class _DDSModel:
|
||||||
def __init__(self, dm, bus_channel, channel, title):
|
def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0):
|
||||||
|
self.cpld = cpld
|
||||||
|
self.cur_frequency = 0
|
||||||
|
self.cur_reg = 0
|
||||||
|
self.dds_type = dds_type
|
||||||
|
self.is_urukul = dds_type in ["AD9910", "AD9912"]
|
||||||
|
|
||||||
|
if dds_type == "AD9914":
|
||||||
|
self.ftw_per_hz = 2**32 / ref_clk
|
||||||
|
else:
|
||||||
|
if dds_type == "AD9910":
|
||||||
|
max_freq = 1 << 32
|
||||||
|
clk_mult = [4, 1, 2, 4]
|
||||||
|
elif dds_type == "AD9912": # AD9912
|
||||||
|
max_freq = 1 << 48
|
||||||
|
clk_mult = [1, 1, 2, 4]
|
||||||
|
else:
|
||||||
|
raise NotImplementedError
|
||||||
|
sysclk = ref_clk / clk_mult[clk_div] * pll
|
||||||
|
self.ftw_per_hz = 1 / sysclk * max_freq
|
||||||
|
|
||||||
|
def monitor_update(self, probe, value):
|
||||||
|
if self.dds_type == "AD9912":
|
||||||
|
value = value << 16
|
||||||
|
self.cur_frequency = self._ftw_to_freq(value)
|
||||||
|
|
||||||
|
def _ftw_to_freq(self, ftw):
|
||||||
|
return ftw / self.ftw_per_hz
|
||||||
|
|
||||||
|
|
||||||
|
class _DDSWidget(QtWidgets.QFrame):
|
||||||
|
def __init__(self, dm, title, bus_channel=0, channel=0, dds_model=None):
|
||||||
|
self.dm = dm
|
||||||
self.bus_channel = bus_channel
|
self.bus_channel = bus_channel
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
|
self.dds_name = title
|
||||||
self.cur_frequency = 0
|
self.cur_frequency = 0
|
||||||
_SimpleDisplayWidget.__init__(self, title)
|
self.dds_model = dds_model
|
||||||
|
|
||||||
|
QtWidgets.QFrame.__init__(self)
|
||||||
|
|
||||||
|
self.setFrameShape(QtWidgets.QFrame.Box)
|
||||||
|
self.setFrameShadow(QtWidgets.QFrame.Raised)
|
||||||
|
|
||||||
|
grid = QtWidgets.QGridLayout()
|
||||||
|
grid.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid.setHorizontalSpacing(0)
|
||||||
|
grid.setVerticalSpacing(0)
|
||||||
|
self.setLayout(grid)
|
||||||
|
label = QtWidgets.QLabel(title)
|
||||||
|
label.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid.addWidget(label, 1, 1)
|
||||||
|
|
||||||
|
# FREQ DATA/EDIT FIELD
|
||||||
|
self.data_stack = QtWidgets.QStackedWidget()
|
||||||
|
|
||||||
|
# page 1: display data
|
||||||
|
grid_disp = LayoutWidget()
|
||||||
|
grid_disp.layout.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid_disp.layout.setHorizontalSpacing(0)
|
||||||
|
grid_disp.layout.setVerticalSpacing(0)
|
||||||
|
|
||||||
|
self.value_label = QtWidgets.QLabel()
|
||||||
|
self.value_label.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_disp.addWidget(self.value_label, 0, 1, 1, 2)
|
||||||
|
|
||||||
|
unit = QtWidgets.QLabel("MHz")
|
||||||
|
unit.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_disp.addWidget(unit, 0, 3, 1, 1)
|
||||||
|
|
||||||
|
self.data_stack.addWidget(grid_disp)
|
||||||
|
|
||||||
|
# page 2: edit data
|
||||||
|
grid_edit = LayoutWidget()
|
||||||
|
grid_edit.layout.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid_edit.layout.setHorizontalSpacing(0)
|
||||||
|
grid_edit.layout.setVerticalSpacing(0)
|
||||||
|
|
||||||
|
self.value_edit = _CancellableLineEdit(self)
|
||||||
|
self.value_edit.setAlignment(QtCore.Qt.AlignRight)
|
||||||
|
grid_edit.addWidget(self.value_edit, 0, 1, 1, 2)
|
||||||
|
unit = QtWidgets.QLabel("MHz")
|
||||||
|
unit.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_edit.addWidget(unit, 0, 3, 1, 1)
|
||||||
|
self.data_stack.addWidget(grid_edit)
|
||||||
|
|
||||||
|
grid.addWidget(self.data_stack, 2, 1)
|
||||||
|
|
||||||
|
# BUTTONS
|
||||||
|
self.button_stack = QtWidgets.QStackedWidget()
|
||||||
|
|
||||||
|
# page 1: SET button
|
||||||
|
set_grid = LayoutWidget()
|
||||||
|
|
||||||
|
set_btn = QtWidgets.QToolButton()
|
||||||
|
set_btn.setText("Set")
|
||||||
|
set_btn.setToolTip("Set frequency")
|
||||||
|
set_grid.addWidget(set_btn, 0, 1, 1, 1)
|
||||||
|
|
||||||
|
# for urukuls also allow switching off RF
|
||||||
|
if self.dds_model.is_urukul:
|
||||||
|
off_btn = QtWidgets.QToolButton()
|
||||||
|
off_btn.setText("Off")
|
||||||
|
off_btn.setToolTip("Switch off the output")
|
||||||
|
set_grid.addWidget(off_btn, 0, 2, 1, 1)
|
||||||
|
|
||||||
|
self.button_stack.addWidget(set_grid)
|
||||||
|
|
||||||
|
# page 2: apply/cancel buttons
|
||||||
|
apply_grid = LayoutWidget()
|
||||||
|
apply = QtWidgets.QToolButton()
|
||||||
|
apply.setText("Apply")
|
||||||
|
apply.setToolTip("Apply changes")
|
||||||
|
apply_grid.addWidget(apply, 0, 1, 1, 1)
|
||||||
|
cancel = QtWidgets.QToolButton()
|
||||||
|
cancel.setText("Cancel")
|
||||||
|
cancel.setToolTip("Cancel changes")
|
||||||
|
apply_grid.addWidget(cancel, 0, 2, 1, 1)
|
||||||
|
self.button_stack.addWidget(apply_grid)
|
||||||
|
grid.addWidget(self.button_stack, 3, 1)
|
||||||
|
|
||||||
|
grid.setRowStretch(1, 1)
|
||||||
|
grid.setRowStretch(2, 1)
|
||||||
|
grid.setRowStretch(3, 1)
|
||||||
|
|
||||||
|
set_btn.clicked.connect(self.set_clicked)
|
||||||
|
apply.clicked.connect(self.apply_changes)
|
||||||
|
if self.dds_model.is_urukul:
|
||||||
|
off_btn.clicked.connect(self.off_clicked)
|
||||||
|
off_btn.setToolTip(textwrap.dedent(
|
||||||
|
"""Note: If TTL RTIO sw for the channel is switched high,
|
||||||
|
this button will not disable the channel.
|
||||||
|
Use the TTL override instead."""))
|
||||||
|
self.value_edit.returnPressed.connect(lambda: self.apply_changes(None))
|
||||||
|
self.value_edit.escapePressedConnect(self.cancel_changes)
|
||||||
|
cancel.clicked.connect(self.cancel_changes)
|
||||||
|
|
||||||
|
self.refresh_display()
|
||||||
|
|
||||||
|
def set_clicked(self, set):
|
||||||
|
self.data_stack.setCurrentIndex(1)
|
||||||
|
self.button_stack.setCurrentIndex(1)
|
||||||
|
self.value_edit.setText("{:.7f}"
|
||||||
|
.format(self.cur_frequency/1e6))
|
||||||
|
self.value_edit.setFocus()
|
||||||
|
self.value_edit.selectAll()
|
||||||
|
|
||||||
|
def off_clicked(self, set):
|
||||||
|
self.dm.dds_channel_toggle(self.dds_name, self.dds_model, sw=False)
|
||||||
|
|
||||||
|
def apply_changes(self, apply):
|
||||||
|
self.data_stack.setCurrentIndex(0)
|
||||||
|
self.button_stack.setCurrentIndex(0)
|
||||||
|
frequency = float(self.value_edit.text())*1e6
|
||||||
|
self.dm.dds_set_frequency(self.dds_name, self.dds_model, frequency)
|
||||||
|
|
||||||
|
def cancel_changes(self, cancel):
|
||||||
|
self.data_stack.setCurrentIndex(0)
|
||||||
|
self.button_stack.setCurrentIndex(0)
|
||||||
|
|
||||||
def refresh_display(self):
|
def refresh_display(self):
|
||||||
self.value.setText("<font size=\"4\">{:.7f}</font><font size=\"2\"> MHz</font>"
|
self.cur_frequency = self.dds_model.cur_frequency
|
||||||
|
self.value_label.setText("<font size=\"4\">{:.7f}</font>"
|
||||||
|
.format(self.cur_frequency/1e6))
|
||||||
|
self.value_edit.setText("{:.7f}"
|
||||||
.format(self.cur_frequency/1e6))
|
.format(self.cur_frequency/1e6))
|
||||||
|
|
||||||
def sort_key(self):
|
def sort_key(self):
|
||||||
|
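For reference, the frequency shown by the new `_DDSWidget` is derived from the raw tuning word reported over moninj using `_DDSModel.ftw_per_hz` above. The same arithmetic, worked through for an AD9910 with example clocking values (the refclk and pll_n numbers are assumptions, not taken from a real device_db):

ref_clk = 125e6                                  # CPLD refclk (example)
pll = 32                                         # pll_n (example)
clk_div = 0
sysclk = ref_clk / [4, 1, 2, 4][clk_div] * pll   # 1 GHz DDS system clock
ftw_per_hz = (1 << 32) / sysclk                  # AD9910 uses a 32-bit FTW
ftw = 429496730                                  # ~0.1 * 2**32, as reported by moninj
print(ftw / ftw_per_hz)                          # ~100 MHz
# AD9912 is analogous with a 48-bit FTW; its monitor value is first shifted
# left by 16 bits, as done in _DDSModel.monitor_update() above.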
@ -202,19 +375,19 @@ _WidgetDesc = namedtuple("_WidgetDesc", "uid comment cls arguments")
|
||||||
|
|
||||||
|
|
||||||
def setup_from_ddb(ddb):
|
def setup_from_ddb(ddb):
|
||||||
core_addr = None
|
mi_addr = None
|
||||||
|
mi_port = None
|
||||||
dds_sysclk = None
|
dds_sysclk = None
|
||||||
description = set()
|
description = set()
|
||||||
|
|
||||||
for k, v in ddb.items():
|
for k, v in ddb.items():
|
||||||
comment = None
|
|
||||||
if "comment" in v:
|
|
||||||
comment = v["comment"]
|
|
||||||
try:
|
try:
|
||||||
if isinstance(v, dict) and v["type"] == "local":
|
if isinstance(v, dict):
|
||||||
if k == "core":
|
comment = v.get("comment")
|
||||||
core_addr = v["arguments"]["host"]
|
if v["type"] == "local":
|
||||||
elif v["module"] == "artiq.coredevice.ttl":
|
if v["module"] == "artiq.coredevice.ttl":
|
||||||
|
if "ttl_urukul" in k:
|
||||||
|
continue
|
||||||
channel = v["arguments"]["channel"]
|
channel = v["arguments"]["channel"]
|
||||||
force_out = v["class"] == "TTLOut"
|
force_out = v["class"] == "TTLOut"
|
||||||
widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
|
widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
|
||||||
|
@ -224,9 +397,26 @@ def setup_from_ddb(ddb):
|
||||||
bus_channel = v["arguments"]["bus_channel"]
|
bus_channel = v["arguments"]["bus_channel"]
|
||||||
channel = v["arguments"]["channel"]
|
channel = v["arguments"]["channel"]
|
||||||
dds_sysclk = v["arguments"]["sysclk"]
|
dds_sysclk = v["arguments"]["sysclk"]
|
||||||
widget = _WidgetDesc(k, comment, _DDSWidget, (bus_channel, channel, k))
|
model = _DDSModel(v["class"], dds_sysclk)
|
||||||
|
widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
|
||||||
description.add(widget)
|
description.add(widget)
|
||||||
elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53XX")
|
elif (v["module"] == "artiq.coredevice.ad9910"
|
||||||
|
and v["class"] == "AD9910") or \
|
||||||
|
(v["module"] == "artiq.coredevice.ad9912"
|
||||||
|
and v["class"] == "AD9912"):
|
||||||
|
channel = v["arguments"]["chip_select"] - 4
|
||||||
|
if channel < 0:
|
||||||
|
continue
|
||||||
|
dds_cpld = v["arguments"]["cpld_device"]
|
||||||
|
spi_dev = ddb[dds_cpld]["arguments"]["spi_device"]
|
||||||
|
bus_channel = ddb[spi_dev]["arguments"]["channel"]
|
||||||
|
pll = v["arguments"]["pll_n"]
|
||||||
|
refclk = ddb[dds_cpld]["arguments"]["refclk"]
|
||||||
|
clk_div = v["arguments"].get("clk_div", 0)
|
||||||
|
model = _DDSModel( v["class"], refclk, dds_cpld, pll, clk_div)
|
||||||
|
widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
|
||||||
|
description.add(widget)
|
||||||
|
elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx")
|
||||||
or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
|
or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
|
||||||
spi_device = v["arguments"]["spi_device"]
|
spi_device = v["arguments"]["spi_device"]
|
||||||
spi_device = ddb[spi_device]
|
spi_device = ddb[spi_device]
|
||||||
|
@ -236,17 +426,23 @@ def setup_from_ddb(ddb):
|
||||||
for channel in range(32):
|
for channel in range(32):
|
||||||
widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
|
widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
|
||||||
description.add(widget)
|
description.add(widget)
|
||||||
|
elif v["type"] == "controller" and k == "core_moninj":
|
||||||
|
mi_addr = v["host"]
|
||||||
|
mi_port = v.get("port_proxy", 1383)
|
||||||
except KeyError:
|
except KeyError:
|
||||||
pass
|
pass
|
||||||
return core_addr, dds_sysclk, description
|
return mi_addr, mi_port, description
|
||||||
|
|
||||||
|
|
||||||
class _DeviceManager:
|
class _DeviceManager:
|
||||||
def __init__(self):
|
def __init__(self, schedule_ctl):
|
||||||
self.core_addr = None
|
self.mi_addr = None
|
||||||
self.reconnect_core = asyncio.Event()
|
self.mi_port = None
|
||||||
self.core_connection = None
|
self.reconnect_mi = asyncio.Event()
|
||||||
self.core_connector_task = asyncio.ensure_future(self.core_connector())
|
self.mi_connection = None
|
||||||
|
self.mi_connector_task = asyncio.ensure_future(self.mi_connector())
|
||||||
|
|
||||||
|
self.schedule_ctl = schedule_ctl
|
||||||
|
|
||||||
self.ddb = dict()
|
self.ddb = dict()
|
||||||
self.description = set()
|
self.description = set()
|
||||||
|
@ -265,13 +461,12 @@ class _DeviceManager:
|
||||||
return ddb
|
return ddb
|
||||||
|
|
||||||
def notify(self, mod):
|
def notify(self, mod):
|
||||||
core_addr, dds_sysclk, description = setup_from_ddb(self.ddb)
|
mi_addr, mi_port, description = setup_from_ddb(self.ddb)
|
||||||
|
|
||||||
if core_addr != self.core_addr:
|
if (mi_addr, mi_port) != (self.mi_addr, self.mi_port):
|
||||||
self.core_addr = core_addr
|
self.mi_addr = mi_addr
|
||||||
self.reconnect_core.set()
|
self.mi_port = mi_port
|
||||||
|
self.reconnect_mi.set()
|
||||||
self.dds_sysclk = dds_sysclk
|
|
||||||
|
|
||||||
for to_remove in self.description - description:
|
for to_remove in self.description - description:
|
||||||
widget = self.widgets_by_uid[to_remove.uid]
|
widget = self.widgets_by_uid[to_remove.uid]
|
||||||
|
@ -319,44 +514,172 @@ class _DeviceManager:
|
||||||
self.description = description
|
self.description = description
|
||||||
|
|
||||||
def ttl_set_mode(self, channel, mode):
|
def ttl_set_mode(self, channel, mode):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
widget = self.ttl_widgets[channel]
|
widget = self.ttl_widgets[channel]
|
||||||
if mode == "0":
|
if mode == "0":
|
||||||
widget.cur_override = True
|
widget.cur_override = True
|
||||||
widget.cur_level = False
|
widget.cur_level = False
|
||||||
self.core_connection.inject(channel, TTLOverride.level.value, 0)
|
self.mi_connection.inject(channel, TTLOverride.level.value, 0)
|
||||||
self.core_connection.inject(channel, TTLOverride.oe.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 1)
|
||||||
elif mode == "1":
|
elif mode == "1":
|
||||||
widget.cur_override = True
|
widget.cur_override = True
|
||||||
widget.cur_level = True
|
widget.cur_level = True
|
||||||
self.core_connection.inject(channel, TTLOverride.level.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.level.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.oe.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 1)
|
||||||
elif mode == "exp":
|
elif mode == "exp":
|
||||||
widget.cur_override = False
|
widget.cur_override = False
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 0)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 0)
|
||||||
else:
|
else:
|
||||||
raise ValueError
|
raise ValueError
|
||||||
# override state may have changed
|
# override state may have changed
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
|
||||||
|
async def _submit_by_content(self, content, class_name, title):
|
||||||
|
expid = {
|
||||||
|
"log_level": logging.WARNING,
|
||||||
|
"content": content,
|
||||||
|
"class_name": class_name,
|
||||||
|
"arguments": {}
|
||||||
|
}
|
||||||
|
scheduling = {
|
||||||
|
"pipeline_name": "main",
|
||||||
|
"priority": 0,
|
||||||
|
"due_date": None,
|
||||||
|
"flush": False
|
||||||
|
}
|
||||||
|
rid = await self.schedule_ctl.submit(
|
||||||
|
scheduling["pipeline_name"],
|
||||||
|
expid,
|
||||||
|
scheduling["priority"], scheduling["due_date"],
|
||||||
|
scheduling["flush"])
|
||||||
|
logger.info("Submitted '%s', RID is %d", title, rid)
|
||||||
|
|
||||||
|
def _dds_faux_injection(self, dds_channel, dds_model, action, title, log_msg):
|
||||||
|
# create kernel and fill it in and send-by-content
|
||||||
|
|
||||||
|
# initialize CPLD (if applicable)
|
||||||
|
if dds_model.is_urukul:
|
||||||
|
# urukuls need CPLD init and switch to on
|
||||||
|
cpld_dev = """self.setattr_device("core_cache")
|
||||||
|
self.setattr_device("{}")""".format(dds_model.cpld)
|
||||||
|
|
||||||
|
# `sta`/`rf_sw`` variables are guaranteed for urukuls
|
||||||
|
# so {action} can use it
|
||||||
|
# if there's no RF enabled, CPLD may have not been initialized
|
||||||
|
# but if there is, it has been initialised - no need to do again
|
||||||
|
cpld_init = """delay(15*ms)
|
||||||
|
was_init = self.core_cache.get("_{cpld}_init")
|
||||||
|
sta = self.{cpld}.sta_read()
|
||||||
|
rf_sw = urukul_sta_rf_sw(sta)
|
||||||
|
if rf_sw == 0 and len(was_init) == 0:
|
||||||
|
delay(15*ms)
|
||||||
|
self.{cpld}.init()
|
||||||
|
self.core_cache.put("_{cpld}_init", [1])
|
||||||
|
""".format(cpld=dds_model.cpld)
|
||||||
|
else:
|
||||||
|
cpld_dev = ""
|
||||||
|
cpld_init = ""
|
||||||
|
|
||||||
|
# AD9912/9910: init channel (if uninitialized)
|
||||||
|
if dds_model.dds_type == "AD9912":
|
||||||
|
# 0xFF before init, 0x99 after
|
||||||
|
channel_init = """
|
||||||
|
if self.{dds_channel}.read({cfgreg}, length=1) == 0xFF:
|
||||||
|
delay(10*ms)
|
||||||
|
self.{dds_channel}.init()
|
||||||
|
""".format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
|
||||||
|
elif dds_model.dds_type == "AD9910":
|
||||||
|
# -1 before init, 2 after
|
||||||
|
channel_init = """
|
||||||
|
if self.{dds_channel}.read32({cfgreg}) == -1:
|
||||||
|
delay(10*ms)
|
||||||
|
self.{dds_channel}.init()
|
||||||
|
""".format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
|
||||||
|
else:
|
||||||
|
channel_init = "self.{dds_channel}.init()".format(dds_channel=dds_channel)
|
||||||
|
|
||||||
|
dds_exp = textwrap.dedent("""
|
||||||
|
from artiq.experiment import *
|
||||||
|
from artiq.coredevice.urukul import *
|
||||||
|
|
||||||
|
class {title}(EnvExperiment):
|
||||||
|
def build(self):
|
||||||
|
self.setattr_device("core")
|
||||||
|
self.setattr_device("{dds_channel}")
|
||||||
|
{cpld_dev}
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def run(self):
|
||||||
|
self.core.break_realtime()
|
||||||
|
{cpld_init}
|
||||||
|
delay(10*ms)
|
||||||
|
{channel_init}
|
||||||
|
delay(15*ms)
|
||||||
|
{action}
|
||||||
|
""".format(title=title, action=action,
|
||||||
|
dds_channel=dds_channel,
|
||||||
|
cpld_dev=cpld_dev, cpld_init=cpld_init,
|
||||||
|
channel_init=channel_init))
|
||||||
|
asyncio.ensure_future(
|
||||||
|
self._submit_by_content(
|
||||||
|
dds_exp,
|
||||||
|
title,
|
||||||
|
log_msg))
|
||||||
|
|
||||||
|
def dds_set_frequency(self, dds_channel, dds_model, freq):
|
||||||
|
action = "self.{ch}.set({freq})".format(
|
||||||
|
freq=freq, ch=dds_channel)
|
||||||
|
if dds_model.is_urukul:
|
||||||
|
action += """
|
||||||
|
ch_no = self.{ch}.chip_select - 4
|
||||||
|
self.{cpld}.cfg_switches(rf_sw | 1 << ch_no)
|
||||||
|
""".format(ch=dds_channel, cpld=dds_model.cpld)
|
||||||
|
self._dds_faux_injection(
|
||||||
|
dds_channel,
|
||||||
|
dds_model,
|
||||||
|
action,
|
||||||
|
"SetDDS",
|
||||||
|
"Set DDS {} {}MHz".format(dds_channel, freq/1e6))
|
||||||
|
|
||||||
|
def dds_channel_toggle(self, dds_channel, dds_model, sw=True):
|
||||||
|
# urukul only
|
||||||
|
if sw:
|
||||||
|
switch = "| 1 << ch_no"
|
||||||
|
else:
|
||||||
|
switch = "& ~(1 << ch_no)"
|
||||||
|
action = """
|
||||||
|
ch_no = self.{dds_channel}.chip_select - 4
|
||||||
|
self.{cpld}.cfg_switches(rf_sw {switch})
|
||||||
|
""".format(
|
||||||
|
dds_channel=dds_channel,
|
||||||
|
cpld=dds_model.cpld,
|
||||||
|
switch=switch
|
||||||
|
)
|
||||||
|
self._dds_faux_injection(
|
||||||
|
dds_channel,
|
||||||
|
dds_model,
|
||||||
|
action,
|
||||||
|
"ToggleDDS",
|
||||||
|
"Toggle DDS {} {}".format(dds_channel, "on" if sw else "off"))
|
||||||
|
|
||||||
def setup_ttl_monitoring(self, enable, channel):
|
def setup_ttl_monitoring(self, enable, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, channel, TTLProbe.level.value)
|
self.mi_connection.monitor_probe(enable, channel, TTLProbe.level.value)
|
||||||
self.core_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
|
self.mi_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
|
||||||
self.core_connection.monitor_injection(enable, channel, TTLOverride.en.value)
|
self.mi_connection.monitor_injection(enable, channel, TTLOverride.en.value)
|
||||||
self.core_connection.monitor_injection(enable, channel, TTLOverride.level.value)
|
self.mi_connection.monitor_injection(enable, channel, TTLOverride.level.value)
|
||||||
if enable:
|
if enable:
|
||||||
self.core_connection.get_injection_status(channel, TTLOverride.en.value)
|
self.mi_connection.get_injection_status(channel, TTLOverride.en.value)
|
||||||
|
|
||||||
def setup_dds_monitoring(self, enable, bus_channel, channel):
|
def setup_dds_monitoring(self, enable, bus_channel, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, bus_channel, channel)
|
self.mi_connection.monitor_probe(enable, bus_channel, channel)
|
||||||
|
|
||||||
def setup_dac_monitoring(self, enable, spi_channel, channel):
|
def setup_dac_monitoring(self, enable, spi_channel, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, spi_channel, channel)
|
self.mi_connection.monitor_probe(enable, spi_channel, channel)
|
||||||
|
|
||||||
def monitor_cb(self, channel, probe, value):
|
def monitor_cb(self, channel, probe, value):
|
||||||
if channel in self.ttl_widgets:
|
if channel in self.ttl_widgets:
|
||||||
|
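Because moninj cannot program a DDS directly, `dds_set_frequency()` above builds a small kernel experiment as source text and submits it by content through the scheduler (`_submit_by_content`). For a hypothetical AD9910 channel `urukul0_ch0` on `urukul0_cpld`, the generated `SetDDS` experiment is roughly equivalent to the sketch below; the first-time CPLD/channel initialisation guarded by `core_cache` in the real template is elided here:

from artiq.experiment import *
from artiq.coredevice.urukul import *

class SetDDS(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("urukul0_ch0")
        self.setattr_device("core_cache")
        self.setattr_device("urukul0_cpld")

    @kernel
    def run(self):
        self.core.break_realtime()
        delay(15*ms)
        sta = self.urukul0_cpld.sta_read()        # current RF switch state
        rf_sw = urukul_sta_rf_sw(sta)
        delay(15*ms)
        self.urukul0_ch0.set(100000000.0)         # requested frequency in Hz
        ch_no = self.urukul0_ch0.chip_select - 4
        self.urukul0_cpld.cfg_switches(rf_sw | 1 << ch_no)

Submitting by content also means the resulting expid carries a "content" key and no "file" key, which is what the `v["expid"].get("file", "<none>")` change in the schedule model further below accounts for.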
@ -366,11 +689,11 @@ class _DeviceManager:
|
||||||
elif probe == TTLProbe.oe.value:
|
elif probe == TTLProbe.oe.value:
|
||||||
widget.cur_oe = bool(value)
|
widget.cur_oe = bool(value)
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
if (channel, probe) in self.dds_widgets:
|
elif (channel, probe) in self.dds_widgets:
|
||||||
widget = self.dds_widgets[(channel, probe)]
|
widget = self.dds_widgets[(channel, probe)]
|
||||||
widget.cur_frequency = value*self.dds_sysclk/2**32
|
widget.dds_model.monitor_update(probe, value)
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
if (channel, probe) in self.dac_widgets:
|
elif (channel, probe) in self.dac_widgets:
|
||||||
widget = self.dac_widgets[(channel, probe)]
|
widget = self.dac_widgets[(channel, probe)]
|
||||||
widget.cur_value = value
|
widget.cur_value = value
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
@ -385,29 +708,31 @@ class _DeviceManager:
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
|
||||||
def disconnect_cb(self):
|
def disconnect_cb(self):
|
||||||
logger.error("lost connection to core device moninj")
|
logger.error("lost connection to moninj")
|
||||||
self.reconnect_core.set()
|
self.reconnect_mi.set()
|
||||||
|
|
||||||
async def core_connector(self):
|
async def mi_connector(self):
|
||||||
while True:
|
while True:
|
||||||
await self.reconnect_core.wait()
|
await self.reconnect_mi.wait()
|
||||||
self.reconnect_core.clear()
|
self.reconnect_mi.clear()
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
await self.core_connection.close()
|
await self.mi_connection.close()
|
||||||
self.core_connection = None
|
self.mi_connection = None
|
||||||
new_core_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
|
new_mi_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
|
||||||
self.disconnect_cb)
|
self.disconnect_cb)
|
||||||
try:
|
try:
|
||||||
await new_core_connection.connect(self.core_addr, 1383)
|
await new_mi_connection.connect(self.mi_addr, self.mi_port)
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
logger.info("cancelled connection to core device moninj")
|
logger.info("cancelled connection to moninj")
|
||||||
break
|
break
|
||||||
except:
|
except:
|
||||||
logger.error("failed to connect to core device moninj", exc_info=True)
|
logger.error("failed to connect to moninj. Is aqctl_moninj_proxy running?", exc_info=True)
|
||||||
await asyncio.sleep(10.)
|
await asyncio.sleep(10.)
|
||||||
self.reconnect_core.set()
|
self.reconnect_mi.set()
|
||||||
else:
|
else:
|
||||||
self.core_connection = new_core_connection
|
logger.info("ARTIQ dashboard connected to moninj (%s)",
|
||||||
|
self.mi_addr)
|
||||||
|
self.mi_connection = new_mi_connection
|
||||||
for ttl_channel in self.ttl_widgets.keys():
|
for ttl_channel in self.ttl_widgets.keys():
|
||||||
self.setup_ttl_monitoring(True, ttl_channel)
|
self.setup_ttl_monitoring(True, ttl_channel)
|
||||||
for bus_channel, channel in self.dds_widgets.keys():
|
for bus_channel, channel in self.dds_widgets.keys():
|
||||||
|
@ -416,13 +741,13 @@ class _DeviceManager:
|
||||||
self.setup_dac_monitoring(True, spi_channel, channel)
|
self.setup_dac_monitoring(True, spi_channel, channel)
|
||||||
|
|
||||||
async def close(self):
|
async def close(self):
|
||||||
self.core_connector_task.cancel()
|
self.mi_connector_task.cancel()
|
||||||
try:
|
try:
|
||||||
await asyncio.wait_for(self.core_connector_task, None)
|
await asyncio.wait_for(self.mi_connector_task, None)
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
pass
|
pass
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
await self.core_connection.close()
|
await self.mi_connection.close()
|
||||||
|
|
||||||
|
|
||||||
class _MonInjDock(QtWidgets.QDockWidget):
|
class _MonInjDock(QtWidgets.QDockWidget):
|
||||||
|
@ -448,12 +773,12 @@ class _MonInjDock(QtWidgets.QDockWidget):
|
||||||
|
|
||||||
|
|
||||||
class MonInj:
|
class MonInj:
|
||||||
def __init__(self):
|
def __init__(self, schedule_ctl):
|
||||||
self.ttl_dock = _MonInjDock("TTL")
|
self.ttl_dock = _MonInjDock("TTL")
|
||||||
self.dds_dock = _MonInjDock("DDS")
|
self.dds_dock = _MonInjDock("DDS")
|
||||||
self.dac_dock = _MonInjDock("DAC")
|
self.dac_dock = _MonInjDock("DAC")
|
||||||
|
|
||||||
self.dm = _DeviceManager()
|
self.dm = _DeviceManager(schedule_ctl)
|
||||||
self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(
|
self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(
|
||||||
self.dm.ttl_widgets.values())
|
self.dm.ttl_widgets.values())
|
||||||
self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(
|
self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(
|
||||||
|
|
|
@@ -48,7 +48,7 @@ class Model(DictSyncModel):
             else:
                 return "Outside repo."
         elif column == 6:
-            return v["expid"]["file"]
+            return v["expid"].get("file", "<none>")
         elif column == 7:
             if v["expid"]["class_name"] is None:
                 return ""
@@ -7,7 +7,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1e-9}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -15,6 +19,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
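With these entries the dashboard no longer connects to the core device directly for monitoring; it looks up the `core_moninj` controller and connects to its `port_proxy` (1383 by default, as in the `_DeviceManager` changes above). When the proxies are not managed by a controller manager, they can be started by hand with arguments equivalent to what the `command` templates expand to; the core device address shown is only an example:

aqctl_moninj_proxy --port-proxy 1383 --port-control 1384 --bind ::1 192.168.1.70
aqctl_coreanalyzer_proxy --port-proxy 1385 --port-control 1386 --bind ::1 192.168.1.70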
@@ -29,13 +47,13 @@ device_db = {
     "i2c_switch0": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
         "arguments": {"address": 0xe0}
     },
     "i2c_switch1": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
        "arguments": {"address": 0xe2}
     },
 }
@@ -5,7 +5,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -13,6 +17,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
@ -1,177 +0,0 @@
|
||||||
core_addr = "192.168.1.70"
|
|
||||||
|
|
||||||
device_db = {
|
|
||||||
"core": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.core",
|
|
||||||
"class": "Core",
|
|
||||||
"arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
|
|
||||||
},
|
|
||||||
"core_log": {
|
|
||||||
"type": "controller",
|
|
||||||
"host": "::1",
|
|
||||||
"port": 1068,
|
|
||||||
"command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
|
|
||||||
},
|
|
||||||
"core_cache": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.cache",
|
|
||||||
"class": "CoreCache"
|
|
||||||
},
|
|
||||||
"core_dma": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.dma",
|
|
||||||
"class": "CoreDMA"
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
device_db.update(
|
|
||||||
spi_urukul0={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.spi2",
|
|
||||||
"class": "SPIMaster",
|
|
||||||
"arguments": {"channel": 0}
|
|
||||||
},
|
|
||||||
ttl_urukul0_io_update={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 1}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw0={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 2}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw1={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 3}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw2={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 4}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw3={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 5}
|
|
||||||
},
|
|
||||||
urukul0_cpld={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.urukul",
|
|
||||||
"class": "CPLD",
|
|
||||||
"arguments": {
|
|
||||||
"spi_device": "spi_urukul0",
|
|
||||||
"io_update_device": "ttl_urukul0_io_update",
|
|
||||||
"refclk": 150e6,
|
|
||||||
"clk_sel": 2
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
for i in range(4):
|
|
||||||
device_db["urukul0_ch" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ad9910",
|
|
||||||
"class": "AD9910",
|
|
||||||
"arguments": {
|
|
||||||
"pll_n": 16, # 600MHz sample rate
|
|
||||||
"pll_vco": 2,
|
|
||||||
"chip_select": 4 + i,
|
|
||||||
"cpld_device": "urukul0_cpld",
|
|
||||||
"sw_device": "ttl_urukul0_sw" + str(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
"""
|
|
||||||
artiq_route routing.bin init
|
|
||||||
artiq_route routing.bin set 0 0
|
|
||||||
artiq_route routing.bin set 1 1 0
|
|
||||||
artiq_route routing.bin set 2 1 1 0
|
|
||||||
artiq_route routing.bin set 3 2 0
|
|
||||||
artiq_route routing.bin set 4 2 1 0
|
|
||||||
artiq_coremgmt -D kasli config write -f routing_table routing.bin
|
|
||||||
"""
|
|
||||||
|
|
||||||
for sayma in range(2):
|
|
||||||
amc_base = 0x010000 + sayma*0x020000
|
|
||||||
rtm_base = 0x020000 + sayma*0x020000
|
|
||||||
for i in range(4):
|
|
||||||
device_db["led" + str(4*sayma+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + i}
|
|
||||||
}
|
|
||||||
for i in range(2):
|
|
||||||
device_db["ttl_mcx" + str(2*sayma+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLInOut",
|
|
||||||
"arguments": {"channel": amc_base + 4 + i}
|
|
||||||
}
|
|
||||||
for i in range(8):
|
|
||||||
device_db["sawg" + str(8*sayma+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.sawg",
|
|
||||||
"class": "SAWG",
|
|
||||||
"arguments": {"channel_base": amc_base + 6 + i*10, "parallelism": 4}
|
|
||||||
}
|
|
||||||
for basemod in range(2):
|
|
||||||
for i in range(4):
|
|
||||||
device_db["sawg_sw" + str(8*sayma+4*basemod+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + i}
|
|
||||||
}
|
|
||||||
att_idx = 2*sayma + basemod
|
|
||||||
device_db["basemod_att_rst_n"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + 4}
|
|
||||||
}
|
|
||||||
device_db["basemod_att_clk"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + 5}
|
|
||||||
}
|
|
||||||
device_db["basemod_att_le"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + 6}
|
|
||||||
}
|
|
||||||
device_db["basemod_att_mosi"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + 7}
|
|
||||||
}
|
|
||||||
device_db["basemod_att_miso"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLInOut",
|
|
||||||
"arguments": {"channel": rtm_base + basemod*9 + 8}
|
|
||||||
}
|
|
||||||
device_db["basemod_att"+str(att_idx)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.basemod_att",
|
|
||||||
"class": "BaseModAtt",
|
|
||||||
"arguments": {
|
|
||||||
"rst_n": "basemod_att_rst_n"+str(att_idx),
|
|
||||||
"clk": "basemod_att_clk"+str(att_idx),
|
|
||||||
"le": "basemod_att_le"+str(att_idx),
|
|
||||||
"mosi": "basemod_att_mosi"+str(att_idx),
|
|
||||||
"miso": "basemod_att_miso"+str(att_idx),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
from artiq.experiment import *
|
|
||||||
|
|
||||||
|
|
||||||
class BaseMod(EnvExperiment):
|
|
||||||
def build(self):
|
|
||||||
self.setattr_device("core")
|
|
||||||
self.basemods = [self.get_device("basemod_att0"), self.get_device("basemod_att1")]
|
|
||||||
self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(8)]
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def run(self):
|
|
||||||
self.core.reset()
|
|
||||||
for basemod in self.basemods:
|
|
||||||
self.core.break_realtime()
|
|
||||||
delay(10*ms)
|
|
||||||
basemod.reset()
|
|
||||||
delay(10*ms)
|
|
||||||
basemod.set(0.0, 0.0, 0.0, 0.0)
|
|
||||||
delay(10*ms)
|
|
||||||
print(basemod.get_mu())
|
|
||||||
|
|
||||||
self.core.break_realtime()
|
|
||||||
for rfsw in self.rfsws:
|
|
||||||
rfsw.on()
|
|
||||||
delay(1*ms)
|
|
|
@@ -1,37 +0,0 @@
from artiq.experiment import *


class Sines2Sayma(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.sawgs = [self.get_device("sawg"+str(i)) for i in range(16)]

    @kernel
    def drtio_is_up(self):
        for i in range(5):
            if not self.core.get_rtio_destination_status(i):
                return False
        return True

    @kernel
    def run(self):
        while True:
            print("waiting for DRTIO ready...")
            while not self.drtio_is_up():
                pass
            print("OK")

            self.core.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.amplitude1.set(.4)
                # Do not use a sub-multiple of oscilloscope sample rates.
                sawg.frequency0.set(9*MHz)

            while self.drtio_is_up():
                pass

@@ -1,89 +0,0 @@
from artiq.experiment import *


class SinesUrukulSayma(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("urukul0_cpld")

        # Urukul clock output syntonized to the RTIO clock.
        # Can be used as HMC830 reference on Sayma RTM.
        # When using this reference, Sayma must be recalibrated every time Urukul
        # is rebooted, as Urukul is not synchronized to the Kasli.
        self.urukul_hmc_ref = self.get_device("urukul0_ch3")

        # Urukul measurement channels - compare with SAWG outputs.
        # When testing sync, do not reboot Urukul, as it is not
        # synchronized to the Kasli.
        self.urukul_meas = [self.get_device("urukul0_ch" + str(i)) for i in range(3)]
        # The same waveform is output on all first 4 SAWG channels (first DAC).
        self.sawgs = [self.get_device("sawg"+str(i)) for i in range(4)]
        self.basemod = self.get_device("basemod_att0")
        self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(4)]


    # DRTIO destinations:
    # 0: local
    # 1: Sayma AMC
    # 2: Sayma RTM
    @kernel
    def drtio_is_up(self):
        for i in range(3):
            if not self.core.get_rtio_destination_status(i):
                return False
        return True

    @kernel
    def run(self):
        f = 9*MHz
        dds_ftw = self.urukul_meas[0].frequency_to_ftw(f)
        sawg_ftw = self.sawgs[0].frequency0.to_mu(f)
        if dds_ftw != sawg_ftw:
            print("DDS and SAWG FTWs do not match:", dds_ftw, sawg_ftw)
            return

        self.core.reset()
        self.urukul0_cpld.init()

        delay(1*ms)
        self.urukul_hmc_ref.init()
        self.urukul_hmc_ref.set_mu(0x40000000, asf=self.urukul_hmc_ref.amplitude_to_asf(0.6))
        self.urukul_hmc_ref.set_att(6.)
        self.urukul_hmc_ref.sw.on()

        for urukul_ch in self.urukul_meas:
            delay(1*ms)
            urukul_ch.init()
            urukul_ch.set_mu(dds_ftw, asf=urukul_ch.amplitude_to_asf(0.5))
            urukul_ch.set_att(6.)
            urukul_ch.sw.on()

        while True:
            print("waiting for DRTIO ready...")
            while not self.drtio_is_up():
                pass
            print("OK")

            self.core.reset()

            delay(10*ms)
            self.basemod.reset()
            delay(10*ms)
            self.basemod.set(3.0, 3.0, 3.0, 3.0)
            delay(10*ms)
            for rfsw in self.rfsws:
                delay(1*ms)
                rfsw.on()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.amplitude1.set(.4)
                sawg.frequency0.set_mu(sawg_ftw)
                sawg.phase0.set_mu(sawg_ftw*now_mu() >> 17)

            while self.drtio_is_up():
                pass

@@ -0,0 +1,18 @@
{
    "target": "kasli",
    "variant": "shuttlerdemo",
    "hw_rev": "v2.0",
    "drtio_role": "master",
    "peripherals": [
        {
            "type": "shuttler",
            "ports": [0]
        },
        {
            "type": "dio",
            "ports": [1],
            "bank_direction_low": "input",
            "bank_direction_high": "output"
        }
    ]
}
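This system description is what the usual ARTIQ JSON tooling consumes to build the gateware variant and device database; the exact command line is not part of this diff. As a minimal, hedged sketch (the file name "shuttlerdemo.json" is an assumption, not taken from the diff), the description can at least be loaded and sanity-checked on the host:

import json

# Assumed file name for the system description added above.
with open("shuttlerdemo.json") as f:
    desc = json.load(f)

# Check the fields this demo relies on: a Kasli DRTIO master with a
# Shuttler on port 0 and a DIO card on port 1.
assert desc["target"] == "kasli" and desc["drtio_role"] == "master"
assert [p["type"] for p in desc["peripherals"]] == ["shuttler", "dio"]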

@@ -0,0 +1,330 @@
from artiq.experiment import *
from artiq.coredevice.shuttler import shuttler_volt_to_mu

DAC_Fs_MHZ = 125
CORDIC_GAIN = 1.64676

@portable
def shuttler_phase_offset(offset_degree):
    return round(offset_degree / 360 * (2 ** 16))

@portable
def shuttler_freq_mu(freq_mhz):
    return round(float(2) ** 32 / DAC_Fs_MHZ * freq_mhz)

@portable
def shuttler_chirp_rate_mu(freq_mhz_per_us):
    return round(float(2) ** 32 * freq_mhz_per_us / (DAC_Fs_MHZ ** 2))

@portable
def shuttler_freq_sweep(start_f_MHz, end_f_MHz, time_us):
    return shuttler_chirp_rate_mu((end_f_MHz - start_f_MHz)/(time_us))

@portable
def shuttler_volt_amp_mu(volt):
    return shuttler_volt_to_mu(volt)

@portable
def shuttler_volt_damp_mu(volt_per_us):
    return round(float(2) ** 32 * (volt_per_us / 20) / DAC_Fs_MHZ)

@portable
def shuttler_volt_ddamp_mu(volt_per_us_square):
    return round(float(2) ** 48 * (volt_per_us_square / 20) * 2 / (DAC_Fs_MHZ ** 2))

@portable
def shuttler_volt_dddamp_mu(volt_per_us_cube):
    return round(float(2) ** 48 * (volt_per_us_cube / 20) * 6 / (DAC_Fs_MHZ ** 3))

@portable
def shuttler_dds_amp_mu(volt):
    return shuttler_volt_amp_mu(volt / CORDIC_GAIN)

@portable
def shuttler_dds_damp_mu(volt_per_us):
    return shuttler_volt_damp_mu(volt_per_us / CORDIC_GAIN)

@portable
def shuttler_dds_ddamp_mu(volt_per_us_square):
    return shuttler_volt_ddamp_mu(volt_per_us_square / CORDIC_GAIN)

@portable
def shuttler_dds_dddamp_mu(volt_per_us_cube):
    return shuttler_volt_dddamp_mu(volt_per_us_cube / CORDIC_GAIN)

class Shuttler(EnvExperiment):
|
||||||
|
def build(self):
|
||||||
|
self.setattr_device("core")
|
||||||
|
self.setattr_device("core_dma")
|
||||||
|
self.setattr_device("scheduler")
|
||||||
|
self.shuttler0_leds = [ self.get_device("shuttler0_led{}".format(i)) for i in range(2) ]
|
||||||
|
self.setattr_device("shuttler0_config")
|
||||||
|
self.setattr_device("shuttler0_trigger")
|
||||||
|
self.shuttler0_dcbias = [ self.get_device("shuttler0_dcbias{}".format(i)) for i in range(16) ]
|
||||||
|
self.shuttler0_dds = [ self.get_device("shuttler0_dds{}".format(i)) for i in range(16) ]
|
||||||
|
self.setattr_device("shuttler0_relay")
|
||||||
|
self.setattr_device("shuttler0_adc")
|
||||||
|
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def record(self):
|
||||||
|
with self.core_dma.record("example_waveform"):
|
||||||
|
self.example_waveform()
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def init(self):
|
||||||
|
self.led()
|
||||||
|
self.relay_init()
|
||||||
|
self.adc_init()
|
||||||
|
self.shuttler_reset()
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def run(self):
|
||||||
|
self.core.reset()
|
||||||
|
self.core.break_realtime()
|
||||||
|
self.init()
|
||||||
|
|
||||||
|
self.record()
|
||||||
|
example_waveform_handle = self.core_dma.get_handle("example_waveform")
|
||||||
|
|
||||||
|
print("Example Waveforms are on OUT0 and OUT1")
|
||||||
|
self.core.break_realtime()
|
||||||
|
while not(self.scheduler.check_termination()):
|
||||||
|
delay(1*s)
|
||||||
|
self.core_dma.playback_handle(example_waveform_handle)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def shuttler_reset(self):
|
||||||
|
for i in range(16):
|
||||||
|
self.shuttler_channel_reset(i)
|
||||||
|
# To avoid RTIO Underflow
|
||||||
|
delay(50*us)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def shuttler_channel_reset(self, ch):
|
||||||
|
self.shuttler0_dcbias[ch].set_waveform(
|
||||||
|
a0=0,
|
||||||
|
a1=0,
|
||||||
|
a2=0,
|
||||||
|
a3=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[ch].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=0,
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(1 << ch)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def example_waveform(self):
|
||||||
|
# Equation of Output Waveform
|
||||||
|
# w(t_us) = a(t_us) + b(t_us) * cos(c(t_us))
|
||||||
|
# Step 1:
|
||||||
|
# Enable the Output Relay of OUT0 and OUT1
|
||||||
|
# Step 2: Cosine Wave Frequency Sweep from 10kHz to 50kHz in 500us
|
||||||
|
# OUT0: b(t_us) = 1
|
||||||
|
# c(t_us) = 2 * pi * (0.08 * t_us ^ 2 + 0.01 * t_us)
|
||||||
|
# OUT1: b(t_us) = 1
|
||||||
|
# c(t_us) = 2 * pi * (0.05 * t_us)
|
||||||
|
# Step 3(after 500us): Cosine Wave with 180 Degree Phase Offset
|
||||||
|
# OUT0: b(t_us) = 1
|
||||||
|
# c(t_us) = 2 * pi * (0.05 * t_us) + pi
|
||||||
|
# OUT1: b(t_us) = 1
|
||||||
|
# c(t_us) = 2 * pi * (0.05 * t_us)
|
||||||
|
# Step 4(after 500us): Cosine Wave with Amplitude Envelop
|
||||||
|
# OUT0: b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
|
||||||
|
# c(t_us) = 2 * pi * (0.05 * t_us)
|
||||||
|
# OUT1: b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
|
||||||
|
# c(t_us) = 0
|
||||||
|
# Step 5(after 500us): Sawtooth Wave Modulated with 50kHz Cosine Wave
|
||||||
|
# OUT0: a(t_us) = 0.01 * t_us - 5
|
||||||
|
# b(t_us) = 1
|
||||||
|
# c(t_us) = 2 * pi * (0.05 * t_us)
|
||||||
|
# OUT1: a(t_us) = 0.01 * t_us - 5
|
||||||
|
# Step 6(after 1000us): A Combination of Previous Waveforms
|
||||||
|
# OUT0: a(t_us) = 0.01 * t_us - 5
|
||||||
|
# b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
|
||||||
|
# c(t_us) = 2 * pi * (0.08 * t_us ^ 2 + 0.01 * t_us)
|
||||||
|
# Step 7(after 500us): Mirrored Waveform in Step 6
|
||||||
|
# OUT0: a(t_us) = 2.5 + -0.01 * (1000 ^ 2) * t_us
|
||||||
|
# b(t_us) = 0.0001367187 * t_us ^ 2 - 0.06835937 * t_us
|
||||||
|
# c(t_us) = 2 * pi * (-0.08 * t_us ^ 2 + 0.05 * t_us) + pi
|
||||||
|
# Step 8(after 500us):
|
||||||
|
# Disable Output Relay of OUT0 and OUT1
|
||||||
|
# Reset OUT0 and OUT1
|
||||||
|
|
||||||
|
## Step 1 ##
|
||||||
|
self.shuttler0_relay.enable(0b11)
|
||||||
|
|
||||||
|
## Step 2 ##
|
||||||
|
start_f_MHz = 0.01
|
||||||
|
end_f_MHz = 0.05
|
||||||
|
duration_us = 500
|
||||||
|
# OUT0 and OUT1 have their frequency and phase aligned at 500us
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=shuttler_dds_amp_mu(1.0),
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=shuttler_freq_mu(start_f_MHz),
|
||||||
|
c2=shuttler_freq_sweep(start_f_MHz, end_f_MHz, duration_us),
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[1].set_waveform(
|
||||||
|
b0=shuttler_dds_amp_mu(1.0),
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=shuttler_freq_mu(end_f_MHz),
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(0b11)
|
||||||
|
delay(500*us)
|
||||||
|
|
||||||
|
## Step 3 ##
|
||||||
|
# OUT0 and OUT1 has 180 degree phase difference
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=shuttler_dds_amp_mu(1.0),
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=shuttler_phase_offset(180.0),
|
||||||
|
c1=shuttler_freq_mu(end_f_MHz),
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
# Phase and Output Setting of OUT1 is retained
|
||||||
|
# if the channel is not triggered or config is not cleared
|
||||||
|
self.shuttler0_trigger.trigger(0b1)
|
||||||
|
delay(500*us)
|
||||||
|
|
||||||
|
## Step 4 ##
|
||||||
|
# b(0) = 0, b(250) = 8.545, b(500) = 0
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=shuttler_dds_damp_mu(0.06835937),
|
||||||
|
b2=shuttler_dds_ddamp_mu(-0.0001367187),
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=shuttler_freq_mu(end_f_MHz),
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[1].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=shuttler_dds_damp_mu(0.06835937),
|
||||||
|
b2=shuttler_dds_ddamp_mu(-0.0001367187),
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=0,
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(0b11)
|
||||||
|
delay(500*us)
|
||||||
|
|
||||||
|
## Step 5 ##
|
||||||
|
self.shuttler0_dcbias[0].set_waveform(
|
||||||
|
a0=shuttler_volt_amp_mu(-5.0),
|
||||||
|
a1=int32(shuttler_volt_damp_mu(0.01)),
|
||||||
|
a2=0,
|
||||||
|
a3=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=shuttler_dds_amp_mu(1.0),
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=shuttler_freq_mu(end_f_MHz),
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dcbias[1].set_waveform(
|
||||||
|
a0=shuttler_volt_amp_mu(-5.0),
|
||||||
|
a1=int32(shuttler_volt_damp_mu(0.01)),
|
||||||
|
a2=0,
|
||||||
|
a3=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[1].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=0,
|
||||||
|
b2=0,
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=0,
|
||||||
|
c2=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(0b11)
|
||||||
|
delay(1000*us)
|
||||||
|
|
||||||
|
## Step 6 ##
|
||||||
|
self.shuttler0_dcbias[0].set_waveform(
|
||||||
|
a0=shuttler_volt_amp_mu(-2.5),
|
||||||
|
a1=int32(shuttler_volt_damp_mu(0.01)),
|
||||||
|
a2=0,
|
||||||
|
a3=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=shuttler_dds_damp_mu(0.06835937),
|
||||||
|
b2=shuttler_dds_ddamp_mu(-0.0001367187),
|
||||||
|
b3=0,
|
||||||
|
c0=0,
|
||||||
|
c1=shuttler_freq_mu(start_f_MHz),
|
||||||
|
c2=shuttler_freq_sweep(start_f_MHz, end_f_MHz, duration_us),
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(0b1)
|
||||||
|
self.shuttler_channel_reset(1)
|
||||||
|
delay(500*us)
|
||||||
|
|
||||||
|
## Step 7 ##
|
||||||
|
self.shuttler0_dcbias[0].set_waveform(
|
||||||
|
a0=shuttler_volt_amp_mu(2.5),
|
||||||
|
a1=int32(shuttler_volt_damp_mu(-0.01)),
|
||||||
|
a2=0,
|
||||||
|
a3=0,
|
||||||
|
)
|
||||||
|
self.shuttler0_dds[0].set_waveform(
|
||||||
|
b0=0,
|
||||||
|
b1=shuttler_dds_damp_mu(-0.06835937),
|
||||||
|
b2=shuttler_dds_ddamp_mu(0.0001367187),
|
||||||
|
b3=0,
|
||||||
|
c0=shuttler_phase_offset(180.0),
|
||||||
|
c1=shuttler_freq_mu(end_f_MHz),
|
||||||
|
c2=shuttler_freq_sweep(end_f_MHz, start_f_MHz, duration_us),
|
||||||
|
)
|
||||||
|
self.shuttler0_trigger.trigger(0b1)
|
||||||
|
delay(500*us)
|
||||||
|
|
||||||
|
## Step 8 ##
|
||||||
|
self.shuttler0_relay.enable(0)
|
||||||
|
self.shuttler_channel_reset(0)
|
||||||
|
self.shuttler_channel_reset(1)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def led(self):
|
||||||
|
for i in range(2):
|
||||||
|
for j in range(3):
|
||||||
|
self.shuttler0_leds[i].pulse(.1*s)
|
||||||
|
delay(.1*s)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def relay_init(self):
|
||||||
|
self.shuttler0_relay.init()
|
||||||
|
self.shuttler0_relay.enable(0x0000)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def adc_init(self):
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
|
||||||
|
self.shuttler0_adc.power_up()
|
||||||
|
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
|
||||||
|
assert self.shuttler0_adc.read_id() >> 4 == 0x038d
|
||||||
|
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
|
||||||
|
# The actual output voltage is limited by the hardware, the calculated calibration gain and offset.
|
||||||
|
# For example, if the system has a calibration gain of 1.06, then the max output voltage = 10 / 1.06 = 9.43V.
|
||||||
|
# Setting a value larger than 9.43V will result in overflow.
|
||||||
|
self.shuttler0_adc.calibrate(self.shuttler0_dcbias, self.shuttler0_trigger, self.shuttler0_config)
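The machine-unit helpers at the top of this example are plain host-side arithmetic, so their output can be checked without hardware. A small sketch, with values computed from the definitions above (DAC_Fs_MHZ = 125); the printed numbers are simply the rounded results of those formulas:

# 50 kHz as a frequency tuning word: round(2**32 / 125 * 0.05)
print(shuttler_freq_mu(0.05))          # -> 1717987

# 180 degrees as a phase offset word: round(180 / 360 * 2**16)
print(shuttler_phase_offset(180.0))    # -> 32768

# Sweep rate for 10 kHz -> 50 kHz over 500 us, as used in Step 2 above
print(shuttler_freq_sweep(0.01, 0.05, 500))  # -> 22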
|
|
@@ -5,7 +5,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1e-9}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -13,6 +17,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
@@ -27,13 +45,13 @@ device_db = {
     "i2c_switch0": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
         "arguments": {"address": 0xe0}
     },
     "i2c_switch1": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
         "arguments": {"address": 0xe2}
     },

@@ -9,7 +9,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1e-9}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -17,6 +21,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
@@ -31,7 +49,7 @@ device_db = {
     "i2c_switch": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548"
+        "class": "I2CSwitch"
     },

     # Generic TTL

@@ -20,7 +20,7 @@ class DDSSetter(EnvExperiment):
                 "driver": self.get_device(k),
                 "frequency": self.get_argument(
                     "{}_frequency".format(k),
-                    NumberValue(100e6, scale=1e6, unit="MHz", ndecimals=6))
+                    NumberValue(100e6, scale=1e6, unit="MHz", precision=6))
             }

     @kernel
@@ -12,8 +12,8 @@ class PhotonHistogram(EnvExperiment):
         self.setattr_device("bdd_sw")
         self.setattr_device("pmt")

-        self.setattr_argument("nbins", NumberValue(100, ndecimals=0, step=1))
-        self.setattr_argument("repeats", NumberValue(100, ndecimals=0, step=1))
+        self.setattr_argument("nbins", NumberValue(100, precision=0, step=1))
+        self.setattr_argument("repeats", NumberValue(100, precision=0, step=1))

         self.setattr_dataset("cool_f", 230*MHz)
         self.setattr_dataset("detect_f", 220*MHz)
@@ -0,0 +1,20 @@
from artiq.experiment import *


class Precompile(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.hello_str = "hello ARTIQ"

    def prepare(self):
        self.precompiled = self.core.precompile(self.hello, "world")

    @kernel
    def hello(self, arg):
        print(self.hello_str, arg)
        self.hello_str = "nowriteback"

    def run(self):
        self.precompiled()
        self.hello_str = "noupdate"
        self.precompiled()

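A hedged sketch extending the pattern above (the class name is hypothetical; it assumes only the core.precompile(kernel, argument) usage shown in this example and the same "from artiq.experiment import *" import): each precompiled callable has its argument baked in when prepare() runs, so separate callables are created per argument value and can then be invoked repeatedly from run() without recompiling.

class PrecompileTwice(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.hello_str = "hello ARTIQ"

    @kernel
    def hello(self, arg):
        print(self.hello_str, arg)

    def prepare(self):
        # One precompiled callable per argument value, mirroring the example above.
        self.greet_world = self.core.precompile(self.hello, "world")
        self.greet_artiq = self.core.precompile(self.hello, "ARTIQ")

    def run(self):
        self.greet_world()
        self.greet_artiq()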
@@ -79,7 +79,7 @@ class SpeedBenchmark(EnvExperiment):
                                                         "CoreSend1MB",
                                                         "CorePrimes"]))
         self.setattr_argument("nruns", NumberValue(10, min=1, max=1000,
-                                                   ndecimals=0, step=1))
+                                                   precision=0, step=1))
         self.setattr_device("core")
         self.setattr_device("scheduler")

@ -1,95 +0,0 @@
|
||||||
core_addr = "192.168.1.65"
|
|
||||||
|
|
||||||
device_db = {
|
|
||||||
"core": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.core",
|
|
||||||
"class": "Core",
|
|
||||||
"arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
|
|
||||||
},
|
|
||||||
"core_log": {
|
|
||||||
"type": "controller",
|
|
||||||
"host": "::1",
|
|
||||||
"port": 1068,
|
|
||||||
"command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
|
|
||||||
},
|
|
||||||
"core_cache": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.cache",
|
|
||||||
"class": "CoreCache"
|
|
||||||
},
|
|
||||||
"core_dma": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.dma",
|
|
||||||
"class": "CoreDMA"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
# master peripherals
|
|
||||||
for i in range(4):
|
|
||||||
device_db["led" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": i},
|
|
||||||
}
|
|
||||||
|
|
||||||
# DEST#1 peripherals
|
|
||||||
amc_base = 0x070000
|
|
||||||
rtm_base = 0x020000
|
|
||||||
|
|
||||||
for i in range(4):
|
|
||||||
device_db["led" + str(4+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + i},
|
|
||||||
}
|
|
||||||
|
|
||||||
#DIO (EEM0) starting at RTIO channel 0x000056
|
|
||||||
for i in range(8):
|
|
||||||
device_db["ttl" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + 0x000056 + i},
|
|
||||||
}
|
|
||||||
|
|
||||||
#DIO (EEM1) starting at RTIO channel 0x00005e
|
|
||||||
for i in range(8):
|
|
||||||
device_db["ttl" + str(8+i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + 0x00005e + i},
|
|
||||||
}
|
|
||||||
|
|
||||||
device_db["fmcdio_dirctl_clk"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + 0x000066}
|
|
||||||
}
|
|
||||||
|
|
||||||
device_db["fmcdio_dirctl_ser"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + 0x000067}
|
|
||||||
}
|
|
||||||
|
|
||||||
device_db["fmcdio_dirctl_latch"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": amc_base + 0x000068}
|
|
||||||
}
|
|
||||||
|
|
||||||
device_db["fmcdio_dirctl"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.shiftreg",
|
|
||||||
"class": "ShiftReg",
|
|
||||||
"arguments": {"clk": "fmcdio_dirctl_clk",
|
|
||||||
"ser": "fmcdio_dirctl_ser",
|
|
||||||
"latch": "fmcdio_dirctl_latch"}
|
|
||||||
}
|
|
|
@ -1,129 +0,0 @@
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
import select
|
|
||||||
|
|
||||||
from artiq.experiment import *
|
|
||||||
from artiq.coredevice.fmcdio_vhdci_eem import *
|
|
||||||
|
|
||||||
|
|
||||||
def chunker(seq, size):
|
|
||||||
res = []
|
|
||||||
for el in seq:
|
|
||||||
res.append(el)
|
|
||||||
if len(res) == size:
|
|
||||||
yield res
|
|
||||||
res = []
|
|
||||||
if res:
|
|
||||||
yield res
|
|
||||||
|
|
||||||
|
|
||||||
def is_enter_pressed() -> TBool:
|
|
||||||
if os.name == "nt":
|
|
||||||
if msvcrt.kbhit() and msvcrt.getch() == b"\r":
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
if select.select([sys.stdin, ], [], [], 0.0)[0]:
|
|
||||||
sys.stdin.read(1)
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
class Demo(EnvExperiment):
|
|
||||||
def build(self):
|
|
||||||
self.setattr_device("core")
|
|
||||||
self.setattr_device("fmcdio_dirctl")
|
|
||||||
|
|
||||||
self.leds = dict()
|
|
||||||
self.ttl_outs = dict()
|
|
||||||
|
|
||||||
ddb = self.get_device_db()
|
|
||||||
for name, desc in ddb.items():
|
|
||||||
if isinstance(desc, dict) and desc["type"] == "local":
|
|
||||||
module, cls = desc["module"], desc["class"]
|
|
||||||
if (module, cls) == ("artiq.coredevice.ttl", "TTLOut"):
|
|
||||||
dev = self.get_device(name)
|
|
||||||
if "led" in name: # guess
|
|
||||||
self.leds[name] = dev
|
|
||||||
elif "ttl" in name: # to exclude fmcdio_dirctl
|
|
||||||
self.ttl_outs[name] = dev
|
|
||||||
|
|
||||||
self.leds = sorted(self.leds.items(), key=lambda x: x[1].channel)
|
|
||||||
self.ttl_outs = sorted(self.ttl_outs.items(), key=lambda x: x[1].channel)
|
|
||||||
|
|
||||||
self.dirctl_word = (
|
|
||||||
shiftreg_bits(0, dio_bank0_out_pins | dio_bank1_out_pins) |
|
|
||||||
shiftreg_bits(1, dio_bank0_out_pins | dio_bank1_out_pins)
|
|
||||||
)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def init(self):
|
|
||||||
self.core.break_realtime()
|
|
||||||
print("*** Waiting for DRTIO ready...")
|
|
||||||
drtio_indices = [7]
|
|
||||||
for i in drtio_indices:
|
|
||||||
while not self.drtio_is_up(i):
|
|
||||||
pass
|
|
||||||
|
|
||||||
self.fmcdio_dirctl.set(self.dirctl_word)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def drtio_is_up(self, drtio_index):
|
|
||||||
if not self.core.get_rtio_destination_status(drtio_index):
|
|
||||||
return False
|
|
||||||
print("DRTIO #", drtio_index, "is ready\n")
|
|
||||||
return True
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def test_led(self, led):
|
|
||||||
while not is_enter_pressed():
|
|
||||||
self.core.break_realtime()
|
|
||||||
# do not fill the FIFOs too much to avoid long response times
|
|
||||||
t = now_mu() - self.core.seconds_to_mu(0.2)
|
|
||||||
while self.core.get_rtio_counter_mu() < t:
|
|
||||||
pass
|
|
||||||
for i in range(3):
|
|
||||||
led.pulse(100*ms)
|
|
||||||
delay(100*ms)
|
|
||||||
|
|
||||||
def test_leds(self):
|
|
||||||
print("*** Testing LEDs.")
|
|
||||||
print("Check for blinking. Press ENTER when done.")
|
|
||||||
|
|
||||||
for led_name, led_dev in self.leds:
|
|
||||||
print("Testing LED: {}".format(led_name))
|
|
||||||
self.test_led(led_dev)
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def test_ttl_out_chunk(self, ttl_chunk):
|
|
||||||
while not is_enter_pressed():
|
|
||||||
self.core.break_realtime()
|
|
||||||
for _ in range(50000):
|
|
||||||
i = 0
|
|
||||||
for ttl in ttl_chunk:
|
|
||||||
i += 1
|
|
||||||
for _ in range(i):
|
|
||||||
ttl.pulse(1*us)
|
|
||||||
delay(1*us)
|
|
||||||
delay(10*us)
|
|
||||||
|
|
||||||
def test_ttl_outs(self):
|
|
||||||
print("*** Testing TTL outputs.")
|
|
||||||
print("Outputs are tested in groups of 4. Touch each TTL connector")
|
|
||||||
print("with the oscilloscope probe tip, and check that the number of")
|
|
||||||
print("pulses corresponds to its number in the group.")
|
|
||||||
print("Press ENTER when done.")
|
|
||||||
|
|
||||||
for ttl_chunk in chunker(self.ttl_outs, 4):
|
|
||||||
print("Testing TTL outputs: {}.".format(", ".join(name for name, dev in ttl_chunk)))
|
|
||||||
self.test_ttl_out_chunk([dev for name, dev in ttl_chunk])
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
self.core.reset()
|
|
||||||
|
|
||||||
if self.leds:
|
|
||||||
self.test_leds()
|
|
||||||
if self.ttl_outs:
|
|
||||||
self.test_ttl_outs()
|
|
|
@@ -45,13 +45,13 @@ class ArgumentsDemo(EnvExperiment):
             PYONValue(self.get_dataset("foo", default=42)))
         self.setattr_argument("number", NumberValue(42e-6,
                                                     unit="us",
-                                                    ndecimals=4))
+                                                    precision=4))
         self.setattr_argument("integer", NumberValue(42,
-                                                     step=1, ndecimals=0))
+                                                     step=1, precision=0))
         self.setattr_argument("string", StringValue("Hello World"))
         self.setattr_argument("scan", Scannable(global_max=400,
                                                 default=NoScan(325),
-                                                ndecimals=6))
+                                                precision=6))
         self.setattr_argument("boolean", BooleanValue(True), "Group")
         self.setattr_argument("enum", EnumerationValue(
             ["foo", "bar", "quux"], "foo"), "Group")
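The ndecimals-to-precision substitutions in this and the surrounding hunks are the same mechanical rename of the GUI argument keyword on NumberValue and Scannable. A minimal before/after sketch, with names taken from the hunk above:

# before
self.setattr_argument("number", NumberValue(42e-6, unit="us", ndecimals=4))
# after
self.setattr_argument("number", NumberValue(42e-6, unit="us", precision=4))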
@@ -4,13 +4,13 @@ from artiq.applets.simple import SimpleApplet


 class DemoWidget(QtWidgets.QLabel):
-    def __init__(self, args):
+    def __init__(self, args, ctl):
         QtWidgets.QLabel.__init__(self)
         self.dataset_name = args.dataset

-    def data_changed(self, data, mods):
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            n = str(data[self.dataset_name][1])
+            n = str(value[self.dataset_name])
         except (KeyError, ValueError, TypeError):
             n = "---"
         n = "<font size=15>" + n + "</font>"

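The applet hunk above changes two interfaces at once: the widget constructor now also receives a controller object (ctl), and data_changed receives the dataset values, metadata and persist flags as separate dictionaries instead of a single data dict of (persist, value) tuples. A hedged sketch of a complete applet adapted to the new signature; only what this hunk shows is taken as given, while the Qt binding import and the SimpleApplet main() boilerplate are assumptions based on the stock ARTIQ applets:

from PyQt5 import QtWidgets  # Qt binding assumed, not shown in this hunk

from artiq.applets.simple import SimpleApplet


class NumberWidget(QtWidgets.QLabel):
    def __init__(self, args, ctl):
        QtWidgets.QLabel.__init__(self)
        self.dataset_name = args.dataset

    def data_changed(self, value, metadata, persist, mods):
        # Dataset values now arrive in a plain `value` dict keyed by dataset name.
        try:
            n = str(value[self.dataset_name])
        except (KeyError, ValueError, TypeError):
            n = "---"
        self.setText("<font size=15>" + n + "</font>")


def main():
    applet = SimpleApplet(NumberWidget)
    applet.add_dataset("dataset", "dataset to show")
    applet.run()


if __name__ == "__main__":
    main()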
@ -1,166 +0,0 @@
|
||||||
core_addr = "192.168.1.60"
|
|
||||||
|
|
||||||
device_db = {
|
|
||||||
"core": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.core",
|
|
||||||
"class": "Core",
|
|
||||||
"arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
|
|
||||||
},
|
|
||||||
"core_log": {
|
|
||||||
"type": "controller",
|
|
||||||
"host": "::1",
|
|
||||||
"port": 1068,
|
|
||||||
"command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
|
|
||||||
},
|
|
||||||
"core_cache": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.cache",
|
|
||||||
"class": "CoreCache"
|
|
||||||
},
|
|
||||||
"core_dma": {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.dma",
|
|
||||||
"class": "CoreDMA"
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i in range(4):
|
|
||||||
device_db["led" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": i},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
for i in range(2):
|
|
||||||
device_db["ttl" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLInOut",
|
|
||||||
"arguments": {"channel": 4 + i},
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
device_db.update(
|
|
||||||
fmcdio_dirctl_clk={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 6}
|
|
||||||
},
|
|
||||||
fmcdio_dirctl_ser={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 7}
|
|
||||||
},
|
|
||||||
fmcdio_dirctl_latch={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 8}
|
|
||||||
},
|
|
||||||
fmcdio_dirctl={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.shiftreg",
|
|
||||||
"class": "ShiftReg",
|
|
||||||
"arguments": {"clk": "fmcdio_dirctl_clk",
|
|
||||||
"ser": "fmcdio_dirctl_ser",
|
|
||||||
"latch": "fmcdio_dirctl_latch"}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
device_db.update(
|
|
||||||
spi_urukul0={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.spi2",
|
|
||||||
"class": "SPIMaster",
|
|
||||||
"arguments": {"channel": 17}
|
|
||||||
},
|
|
||||||
ttl_urukul0_io_update={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 18}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw0={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 19}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw1={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 20}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw2={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 21}
|
|
||||||
},
|
|
||||||
ttl_urukul0_sw3={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 22}
|
|
||||||
},
|
|
||||||
urukul0_cpld={
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.urukul",
|
|
||||||
"class": "CPLD",
|
|
||||||
"arguments": {
|
|
||||||
"spi_device": "spi_urukul0",
|
|
||||||
"io_update_device": "ttl_urukul0_io_update",
|
|
||||||
"refclk": 125e6,
|
|
||||||
"clk_sel": 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
for i in range(4):
|
|
||||||
device_db["urukul0_ch" + str(i)] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ad9910",
|
|
||||||
"class": "AD9910",
|
|
||||||
"arguments": {
|
|
||||||
"pll_n": 32,
|
|
||||||
"chip_select": 4 + i,
|
|
||||||
"cpld_device": "urukul0_cpld",
|
|
||||||
"sw_device": "ttl_urukul0_sw" + str(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
device_db["spi_zotino0"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.spi2",
|
|
||||||
"class": "SPIMaster",
|
|
||||||
"arguments": {"channel": 23}
|
|
||||||
}
|
|
||||||
device_db["ttl_zotino0_ldac"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 24}
|
|
||||||
}
|
|
||||||
device_db["ttl_zotino0_clr"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.ttl",
|
|
||||||
"class": "TTLOut",
|
|
||||||
"arguments": {"channel": 25}
|
|
||||||
}
|
|
||||||
device_db["zotino0"] = {
|
|
||||||
"type": "local",
|
|
||||||
"module": "artiq.coredevice.zotino",
|
|
||||||
"class": "Zotino",
|
|
||||||
"arguments": {
|
|
||||||
"spi_device": "spi_zotino0",
|
|
||||||
"ldac_device": "ttl_zotino0_ldac",
|
|
||||||
"clr_device": "ttl_zotino0_clr"
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,41 +0,0 @@
|
||||||
from artiq.experiment import *
|
|
||||||
from artiq.coredevice.fmcdio_vhdci_eem import *
|
|
||||||
|
|
||||||
|
|
||||||
class Demo(EnvExperiment):
|
|
||||||
def build(self):
|
|
||||||
self.setattr_device("core")
|
|
||||||
self.setattr_device("fmcdio_dirctl")
|
|
||||||
|
|
||||||
self.ttls = [self.get_device("ttl" + str(i)) for i in range(8)]
|
|
||||||
self.setattr_device("urukul0_cpld")
|
|
||||||
self.urukul_chs = [self.get_device("urukul0_ch" + str(i)) for i in range(4)]
|
|
||||||
self.setattr_device("zotino0")
|
|
||||||
|
|
||||||
self.dirctl_word = (
|
|
||||||
shiftreg_bits(1, urukul_out_pins) |
|
|
||||||
shiftreg_bits(0, urukul_aux_out_pins) |
|
|
||||||
shiftreg_bits(2, dio_bank0_out_pins | dio_bank1_out_pins) |
|
|
||||||
shiftreg_bits(3, zotino_out_pins))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def run(self):
|
|
||||||
self.core.reset()
|
|
||||||
delay(10*ms)
|
|
||||||
self.fmcdio_dirctl.set(self.dirctl_word)
|
|
||||||
delay(10*ms)
|
|
||||||
|
|
||||||
self.urukul0_cpld.init()
|
|
||||||
delay(10*ms)
|
|
||||||
|
|
||||||
self.zotino0.init()
|
|
||||||
delay(1*ms)
|
|
||||||
for i in range(32):
|
|
||||||
self.zotino0.write_dac(i, i/4)
|
|
||||||
delay(1*ms)
|
|
||||||
|
|
||||||
while True:
|
|
||||||
for ttl in self.ttls:
|
|
||||||
ttl.pulse(100*ms)
|
|
||||||
for urukul_ch in self.urukul_chs:
|
|
||||||
urukul_ch.sw.pulse(100*ms)
|
|
|
@ -13,6 +13,12 @@ dependencies = [
|
||||||
name = "alloc_list"
|
name = "alloc_list"
|
||||||
version = "0.0.0"
|
version = "0.0.0"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "arrayvec"
|
||||||
|
version = "0.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "bare-metal"
|
name = "bare-metal"
|
||||||
version = "0.2.5"
|
version = "0.2.5"
|
||||||
|
@ -240,6 +246,12 @@ version = "0.7.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "c75de51135344a4f8ed3cfe2720dc27736f7711989703a0b43aadf3753c55577"
|
checksum = "c75de51135344a4f8ed3cfe2720dc27736f7711989703a0b43aadf3753c55577"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "managed"
|
||||||
|
version = "0.8.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "memchr"
|
name = "memchr"
|
||||||
version = "2.4.1"
|
version = "2.4.1"
|
||||||
|
@ -253,6 +265,7 @@ dependencies = [
|
||||||
"byteorder",
|
"byteorder",
|
||||||
"cslice",
|
"cslice",
|
||||||
"dyld",
|
"dyld",
|
||||||
|
"eh",
|
||||||
"failure",
|
"failure",
|
||||||
"failure_derive",
|
"failure_derive",
|
||||||
"io",
|
"io",
|
||||||
|
@ -313,6 +326,7 @@ dependencies = [
|
||||||
"build_misoc",
|
"build_misoc",
|
||||||
"byteorder",
|
"byteorder",
|
||||||
"cslice",
|
"cslice",
|
||||||
|
"dyld",
|
||||||
"eh",
|
"eh",
|
||||||
"failure",
|
"failure",
|
||||||
"failure_derive",
|
"failure_derive",
|
||||||
|
@ -320,10 +334,11 @@ dependencies = [
|
||||||
"io",
|
"io",
|
||||||
"log",
|
"log",
|
||||||
"logger_artiq",
|
"logger_artiq",
|
||||||
"managed",
|
"managed 0.7.2",
|
||||||
"proto_artiq",
|
"proto_artiq",
|
||||||
"riscv",
|
"riscv",
|
||||||
"smoltcp",
|
"smoltcp",
|
||||||
|
"tar-no-std",
|
||||||
"unwind_backtrace",
|
"unwind_backtrace",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -340,10 +355,15 @@ dependencies = [
|
||||||
name = "satman"
|
name = "satman"
|
||||||
version = "0.0.0"
|
version = "0.0.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"alloc_list",
|
||||||
"board_artiq",
|
"board_artiq",
|
||||||
"board_misoc",
|
"board_misoc",
|
||||||
"build_misoc",
|
"build_misoc",
|
||||||
|
"cslice",
|
||||||
|
"eh",
|
||||||
|
"io",
|
||||||
"log",
|
"log",
|
||||||
|
"proto_artiq",
|
||||||
"riscv",
|
"riscv",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@ -364,13 +384,13 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "smoltcp"
|
name = "smoltcp"
|
||||||
version = "0.6.0"
|
version = "0.8.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0fe46639fd2ec79eadf8fe719f237a7a0bd4dac5d957f1ca5bbdbc1c3c39e53a"
|
checksum = "ee34c1e1bfc7e9206cc0fb8030a90129b4e319ab53856249bb27642cab914fb3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"byteorder",
|
"byteorder",
|
||||||
"managed",
|
"managed 0.8.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
@ -403,6 +423,16 @@ dependencies = [
|
||||||
"syn",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tar-no-std"
|
||||||
|
version = "0.1.8"
|
||||||
|
source = "git+https://git.m-labs.hk/M-Labs/tar-no-std?rev=2ab6dc5#2ab6dc58e5249c59c4eb03eaf3a119bcdd678d32"
|
||||||
|
dependencies = [
|
||||||
|
"arrayvec",
|
||||||
|
"bitflags",
|
||||||
|
"log",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-xid"
|
name = "unicode-xid"
|
||||||
version = "0.0.4"
|
version = "0.0.4"
|
||||||
|
|
|
@ -16,5 +16,5 @@ build_misoc = { path = "../libbuild_misoc" }
|
||||||
byteorder = { version = "1.0", default-features = false }
|
byteorder = { version = "1.0", default-features = false }
|
||||||
crc = { version = "1.7", default-features = false }
|
crc = { version = "1.7", default-features = false }
|
||||||
board_misoc = { path = "../libboard_misoc", features = ["uart_console", "smoltcp"] }
|
board_misoc = { path = "../libboard_misoc", features = ["uart_console", "smoltcp"] }
|
||||||
smoltcp = { version = "0.6.0", default-features = false, features = ["ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp"] }
|
smoltcp = { version = "0.8.2", default-features = false, features = ["medium-ethernet", "proto-ipv4", "proto-ipv6", "socket-tcp"] }
|
||||||
riscv = { version = "0.6.0", features = ["inline-asm"] }
|
riscv = { version = "0.6.0", features = ["inline-asm"] }
|
||||||
|
|
|
@@ -18,6 +18,10 @@ use board_misoc::slave_fpga;
 use board_misoc::{clock, ethmac, net_settings};
 use board_misoc::uart_console::Console;
 use riscv::register::{mcause, mepc, mtval};
+#[cfg(has_ethmac)]
+use smoltcp::iface::{Routes, SocketStorage};
+#[cfg(has_ethmac)]
+use smoltcp::wire::{HardwareAddress, IpAddress, Ipv4Address, Ipv6Address};

 fn check_integrity() -> bool {
     extern {
@@ -65,8 +69,8 @@ fn memory_test(total: &mut usize, wrong: &mut usize) -> bool {
     })
 }

-fn prng32(seed: &mut u32) -> u32 { *seed = 1664525 * *seed + 1013904223; *seed }
-fn prng16(seed: &mut u16) -> u16 { *seed = 25173 * *seed + 13849; *seed }
+fn prng32(seed: &mut u32) -> u32 { *seed = u32::wrapping_add(u32::wrapping_mul(1664525, *seed), 1013904223); *seed }
+fn prng16(seed: &mut u16) -> u16 { *seed = u16::wrapping_add(u16::wrapping_mul(25173, *seed), 13849); *seed }

     for _ in 0..4 {
         // Test data bus
@@ -396,6 +400,9 @@ fn network_boot() {

     println!("Initializing network...");

+    // Assuming only one socket is ever needed by the bootloader.
+    // The smoltcp reuses the listening socket when the connection is established.
+    let mut sockets = [SocketStorage::EMPTY];
     let mut net_device = unsafe { ethmac::EthernetDevice::new() };
     net_device.reset_phy_if_any();

@ -405,38 +412,38 @@ fn network_boot() {
|
||||||
let net_addresses = net_settings::get_adresses();
|
let net_addresses = net_settings::get_adresses();
|
||||||
println!("Network addresses: {}", net_addresses);
|
println!("Network addresses: {}", net_addresses);
|
||||||
let mut ip_addrs = [
|
let mut ip_addrs = [
|
||||||
IpCidr::new(net_addresses.ipv4_addr, 0),
|
IpCidr::new(IpAddress::Ipv4(Ipv4Address::UNSPECIFIED), 0),
|
||||||
IpCidr::new(net_addresses.ipv6_ll_addr, 0),
|
net_addresses.ipv6_ll_addr,
|
||||||
IpCidr::new(net_addresses.ipv6_ll_addr, 0)
|
IpCidr::new(IpAddress::Ipv6(Ipv6Address::UNSPECIFIED), 0)
|
||||||
];
|
];
|
||||||
let mut interface = match net_addresses.ipv6_addr {
|
if let net_settings::Ipv4AddrConfig::Static(ipv4) = net_addresses.ipv4_addr {
|
||||||
Some(addr) => {
|
ip_addrs[0] = IpCidr::Ipv4(ipv4);
|
||||||
ip_addrs[2] = IpCidr::new(addr, 0);
|
}
|
||||||
smoltcp::iface::EthernetInterfaceBuilder::new(net_device)
|
if let Some(ipv6) = net_addresses.ipv6_addr {
|
||||||
.ethernet_addr(net_addresses.hardware_addr)
|
ip_addrs[2] = IpCidr::Ipv6(ipv6);
|
||||||
|
};
|
||||||
|
let mut routes = [None; 2];
|
||||||
|
let mut interface = smoltcp::iface::InterfaceBuilder::new(net_device, &mut sockets[..])
|
||||||
|
.hardware_addr(HardwareAddress::Ethernet(net_addresses.hardware_addr))
|
||||||
.ip_addrs(&mut ip_addrs[..])
|
.ip_addrs(&mut ip_addrs[..])
|
||||||
.neighbor_cache(neighbor_cache)
|
.neighbor_cache(neighbor_cache)
|
||||||
.finalize()
|
.routes(Routes::new(&mut routes[..]))
|
||||||
|
.finalize();
|
||||||
|
|
||||||
|
if let Some(default_route) = net_addresses.ipv4_default_route {
|
||||||
|
interface.routes_mut().add_default_ipv4_route(default_route).unwrap();
|
||||||
|
}
|
||||||
|
if let Some(default_route) = net_addresses.ipv6_default_route {
|
||||||
|
interface.routes_mut().add_default_ipv6_route(default_route).unwrap();
|
||||||
}
|
}
|
||||||
None =>
|
|
||||||
smoltcp::iface::EthernetInterfaceBuilder::new(net_device)
|
|
||||||
.ethernet_addr(net_addresses.hardware_addr)
|
|
||||||
.ip_addrs(&mut ip_addrs[..2])
|
|
||||||
.neighbor_cache(neighbor_cache)
|
|
||||||
.finalize()
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut rx_storage = [0; 4096];
|
let mut rx_storage = [0; 4096];
|
||||||
let mut tx_storage = [0; 128];
|
let mut tx_storage = [0; 128];
|
||||||
|
|
||||||
let mut socket_set_entries: [_; 1] = Default::default();
|
|
||||||
let mut sockets =
|
|
||||||
smoltcp::socket::SocketSet::new(&mut socket_set_entries[..]);
|
|
||||||
|
|
||||||
let tcp_rx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut rx_storage[..]);
|
let tcp_rx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut rx_storage[..]);
|
||||||
let tcp_tx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut tx_storage[..]);
|
let tcp_tx_buffer = smoltcp::socket::TcpSocketBuffer::new(&mut tx_storage[..]);
|
||||||
let tcp_socket = smoltcp::socket::TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
|
let tcp_socket = smoltcp::socket::TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
|
||||||
let tcp_handle = sockets.add(tcp_socket);
|
let tcp_handle = interface.add_socket(tcp_socket);
|
||||||
|
|
||||||
let mut net_conn = NetConn::new();
|
let mut net_conn = NetConn::new();
|
||||||
let mut boot_time = None;
|
let mut boot_time = None;
|
||||||
|
@ -446,7 +453,7 @@ fn network_boot() {
|
||||||
loop {
|
loop {
|
||||||
let timestamp = clock::get_ms() as i64;
|
let timestamp = clock::get_ms() as i64;
|
||||||
{
|
{
|
||||||
let socket = &mut *sockets.get::<smoltcp::socket::TcpSocket>(tcp_handle);
|
let socket = &mut *interface.get_socket::<smoltcp::socket::TcpSocket>(tcp_handle);
|
||||||
|
|
||||||
match boot_time {
|
match boot_time {
|
||||||
None => {
|
None => {
|
||||||
|
@ -475,7 +482,7 @@ fn network_boot() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
match interface.poll(&mut sockets, smoltcp::time::Instant::from_millis(timestamp)) {
|
match interface.poll(smoltcp::time::Instant::from_millis(timestamp)) {
|
||||||
Ok(_) => (),
|
Ok(_) => (),
|
||||||
Err(smoltcp::Error::Unrecognized) => (),
|
Err(smoltcp::Error::Unrecognized) => (),
|
||||||
Err(err) => println!("Network error: {}", err)
|
Err(err) => println!("Network error: {}", err)
|
||||||
|
@ -493,7 +500,7 @@ pub extern fn main() -> i32 {
|
||||||
println!(r"|_| |_|_|____/ \___/ \____|");
|
println!(r"|_| |_|_|____/ \___/ \____|");
|
||||||
println!("");
|
println!("");
|
||||||
println!("MiSoC Bootloader");
|
println!("MiSoC Bootloader");
|
||||||
println!("Copyright (c) 2017-2021 M-Labs Limited");
|
println!("Copyright (c) 2017-2023 M-Labs Limited");
|
||||||
println!("");
|
println!("");
|
||||||
|
|
||||||
#[cfg(has_ethmac)]
|
#[cfg(has_ethmac)]
|
||||||
|
|
|
@ -6,7 +6,7 @@ ENTRY(_reset_handler)
|
||||||
* ld does not allow this expression here.
|
* ld does not allow this expression here.
|
||||||
*/
|
*/
|
||||||
MEMORY {
|
MEMORY {
|
||||||
runtime (RWX) : ORIGIN = 0x40000000, LENGTH = 0x4000000 /* 64M */
|
firmware (RWX) : ORIGIN = 0x40000000, LENGTH = 0x4000000 /* 64M */
|
||||||
}
|
}
|
||||||
|
|
||||||
SECTIONS
|
SECTIONS
|
||||||
|
@ -14,24 +14,24 @@ SECTIONS
|
||||||
.vectors :
|
.vectors :
|
||||||
{
|
{
|
||||||
*(.vectors)
|
*(.vectors)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.text :
|
.text :
|
||||||
{
|
{
|
||||||
*(.text .text.*)
|
*(.text .text.*)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.eh_frame :
|
.eh_frame :
|
||||||
{
|
{
|
||||||
__eh_frame_start = .;
|
__eh_frame_start = .;
|
||||||
KEEP(*(.eh_frame))
|
KEEP(*(.eh_frame))
|
||||||
__eh_frame_end = .;
|
__eh_frame_end = .;
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.eh_frame_hdr :
|
.eh_frame_hdr :
|
||||||
{
|
{
|
||||||
KEEP(*(.eh_frame_hdr))
|
KEEP(*(.eh_frame_hdr))
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
__eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0;
|
__eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0;
|
||||||
__eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0;
|
__eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0;
|
||||||
|
@ -39,35 +39,35 @@ SECTIONS
|
||||||
.gcc_except_table :
|
.gcc_except_table :
|
||||||
{
|
{
|
||||||
*(.gcc_except_table)
|
*(.gcc_except_table)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
/* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */
|
/* https://sourceware.org/bugzilla/show_bug.cgi?id=20475 */
|
||||||
.got :
|
.got :
|
||||||
{
|
{
|
||||||
*(.got)
|
*(.got)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.got.plt :
|
.got.plt :
|
||||||
{
|
{
|
||||||
*(.got.plt)
|
*(.got.plt)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.rodata :
|
.rodata :
|
||||||
{
|
{
|
||||||
*(.rodata .rodata.*)
|
*(.rodata .rodata.*)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.data :
|
.data :
|
||||||
{
|
{
|
||||||
*(.data .data.*)
|
*(.data .data.*)
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.bss (NOLOAD) : ALIGN(4)
|
.bss (NOLOAD) : ALIGN(4)
|
||||||
{
|
{
|
||||||
_fbss = .;
|
_fbss = .;
|
||||||
*(.sbss .sbss.* .bss .bss.*);
|
*(.sbss .sbss.* .bss .bss.*);
|
||||||
_ebss = .;
|
_ebss = .;
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.stack (NOLOAD) : ALIGN(0x1000)
|
.stack (NOLOAD) : ALIGN(0x1000)
|
||||||
{
|
{
|
||||||
|
@ -76,12 +76,12 @@ SECTIONS
|
||||||
_estack = .;
|
_estack = .;
|
||||||
. += 0x10000;
|
. += 0x10000;
|
||||||
_fstack = . - 16;
|
_fstack = . - 16;
|
||||||
} > runtime
|
} > firmware
|
||||||
|
|
||||||
.heap (NOLOAD) : ALIGN(16)
|
.heap (NOLOAD) : ALIGN(16)
|
||||||
{
|
{
|
||||||
_fheap = .;
|
_fheap = .;
|
||||||
. = ORIGIN(runtime) + LENGTH(runtime);
|
. = ORIGIN(firmware) + LENGTH(firmware);
|
||||||
_eheap = .;
|
_eheap = .;
|
||||||
} > runtime
|
} > firmware
|
||||||
}
|
}
|
|
@@ -26,7 +26,7 @@ $(RUSTOUT)/libksupport.a:

 ksupport.elf: $(RUSTOUT)/libksupport.a glue.o
 	$(link) -T $(KSUPPORT_DIRECTORY)/ksupport.ld \
-		-lunwind-$(CPU)-elf -lprintf-float -lm
+		-lunwind-$(CPU)-libc -lprintf-float -lm

 %.o: $(KSUPPORT_DIRECTORY)/%.c
 	$(compile)
