forked from M-Labs/artiq
Compare commits
1044 Commits
Author | SHA1 | Date |
---|---|---|
Simon Renblad | 76fba538b1 | |
Sebastien Bourdeauducq | 8dd8cfa6b0 | |
Sebastien Bourdeauducq | 5df0721811 | |
Sebastien Bourdeauducq | 6326051052 | |
Sebastien Bourdeauducq | 44a95b5dda | |
Sebastien Bourdeauducq | 645b9b8c5f | |
Sebastien Bourdeauducq | 858f0479ba | |
Sebastien Bourdeauducq | 133b26b6ce | |
Sebastien Bourdeauducq | d96213dbbc | |
Sebastien Bourdeauducq | 413d33c3d1 | |
Sebastien Bourdeauducq | c2b53ecb43 | |
Sebastien Bourdeauducq | ede0b37c6e | |
Sebastien Bourdeauducq | 795c4372fa | |
Sebastien Bourdeauducq | 402a5d3376 | |
Sebastien Bourdeauducq | 85850ad9e8 | |
Sebastien Bourdeauducq | 7a863b4f5e | |
Sebastien Bourdeauducq | a26cee6ca7 | |
Sebastien Bourdeauducq | be08862606 | |
Sebastien Bourdeauducq | 05a9422e67 | |
Simon Renblad | b09a39c82e | |
mwojcik | 49267671f9 | |
Sebastien Bourdeauducq | 8ca75a3fb9 | |
Florian Agbuya | 8381b34a79 | |
Sebastien Bourdeauducq | d458fc27bf | |
mwojcik | 9f4b8db2de | |
Florian Agbuya | 1108cebd75 | |
Florian Agbuya | cf7cbd0c3b | |
mwojcik | 1a28069aa2 | |
Sebastien Bourdeauducq | 56418e342e | |
Sebastien Bourdeauducq | 77c6553725 | |
Sebastien Bourdeauducq | e81e8f28cf | |
mwojcik | de10e584f6 | |
Florian Agbuya | 875666f3ec | |
Sebastien Bourdeauducq | 3ad3fac828 | |
Simon Renblad | 49afa116b3 | |
Simon Renblad | 363afb5fc9 | |
Simon Renblad | e7af219505 | |
linuswck | ec2b86b08d | |
linuswck | 8f7d138dbd | |
Sebastien Bourdeauducq | bbe6ff8cac | |
Sebastien Bourdeauducq | c0a6252e77 | |
mwojcik | 6640bf0e82 | |
mwojcik | b3c0d084d4 | |
linuswck | bb0b8a6c00 | |
Sebastien Bourdeauducq | ce80bf5717 | |
Florian Agbuya | 378dd0e5ca | |
jfniedermeyer | 9c68451cae | |
linuswck | 93c9d8bcdf | |
mwojcik | e480bbe8d8 | |
mwojcik | b168f0bb4b | |
Sebastien Bourdeauducq | 6705c9fbfb | |
mwojcik | 5f445f6b92 | |
occheung | 363f7327f1 | |
Sebastien Bourdeauducq | f7abc156cb | |
linuswck | de41bd6655 | |
Simon Renblad | 96941d7c04 | |
mwojcik | f3c79e71e1 | |
Simon Renblad | 333b81f789 | |
Sebastien Bourdeauducq | d070826911 | |
Sebastien Bourdeauducq | 9c90f923d2 | |
Sebastien Bourdeauducq | e23e4d39d7 | |
David Nadlinger | 08eea09d44 | |
mwojcik | 7ab52af603 | |
mwojcik | 973fd88b27 | |
mwojcik | 8d7194941e | |
mwojcik | 0a750c77e8 | |
mwojcik | 1a0fc317df | |
mwojcik | e05be2f8e4 | |
mwojcik | 6f4b8c641e | |
mwojcik | b42816582e | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | 76f1318bc0 | |
Sebastien Bourdeauducq | 0131a8bef2 | |
mwojcik | e63e2a2897 | |
Simon Renblad | 47fc640f75 | |
Simon Renblad | bb7caacb5f | |
Simon Renblad | da9f7cb58a | |
occheung | 43926574da | |
Simon Renblad | 4f3e58db52 | |
Simon Renblad | 13271cea64 | |
occheung | 0e8fa8933f | |
David Nadlinger | 2eb89cb168 | |
occheung | a772dee1cc | |
Simon Renblad | bafb85a274 | |
mwojcik | 0e8aa33979 | |
mwojcik | fcf6c90ba2 | |
linuswck | 0c1b572872 | |
linuswck | ab0d4c41c3 | |
Jonathan Coates | 6eb81494c5 | |
Jonathan Coates | 586d97c6cb | |
David Nadlinger | 892b0eaca2 | |
linuswck | eedac7cf71 | |
linuswck | a61bbf5618 | |
occheung | b7b8f0efa2 | |
occheung | b52f253dbd | |
occheung | 73ab71f443 | |
linuswck | ab8247b3d7 | |
mwojcik | 36b3678853 | |
mwojcik | af77885dfc | |
mwojcik | eb57b3b393 | |
Simon Renblad | 40ac2e03ab | |
occheung | a2fbcb8bfd | |
occheung | 5c64eac8d2 | |
occheung | 477a7b693c | |
occheung | f2694f25eb | |
occheung | 9e1447d104 | |
occheung | 870020bc9f | |
occheung | c2d136f669 | |
occheung | 06426e0ed9 | |
occheung | e443e06e62 | |
occheung | 55150ebdbb | |
occheung | eb08c55abe | |
occheung | 67b6588d95 | |
occheung | 1bb7e9ceef | |
Florian Agbuya | c02a14ba37 | |
Simon Renblad | 1f3b2ef645 | |
linuswck | 372008cb66 | |
linuswck | 85abb1da2c | |
David Nadlinger | 9e5b62a6b1 | |
David Nadlinger | 22ab62324c | |
David Nadlinger | fc74b78a45 | |
Simon Renblad | f01e654b9c | |
David Nadlinger | e45dc948e9 | |
David Mak | 460cbf4499 | |
Florian Agbuya | 6df85478e4 | |
Jonathan Coates | 5c85cef0c2 | |
linuswck | ccb140a929 | |
linuswck | 7c8073c1ce | |
Florian Agbuya | 2f3329181c | |
Sebastien Bourdeauducq | 1ec1ab0502 | |
linuswck | b49fb841ce | |
Florian Agbuya | a619c9f3c2 | |
Florian Agbuya | 0188f31f3a | |
Florian Agbuya | 4e770509db | |
occheung | 7f63bb322d | |
occheung | 5e5d671f4c | |
occheung | 98904ef4c3 | |
Sebastien Bourdeauducq | 73ac414912 | |
occheung | 838cc80922 | |
Simon Renblad | 904afe1632 | |
Simon Renblad | 01d777c977 | |
Sebastien Bourdeauducq | 9556ca53de | |
occheung | df99450faa | |
Sebastien Bourdeauducq | 1f58cd505c | |
linuswck | ddb2b5e3a1 | |
linuswck | b56f7e429a | |
Sebastien Bourdeauducq | 3452d0c423 | |
Sebastien Bourdeauducq | 2139456f80 | |
Sebastien Bourdeauducq | a2a780a3f2 | |
Sebastien Bourdeauducq | 3620358f12 | |
Sebastien Bourdeauducq | 72b0a17542 | |
Sebastien Bourdeauducq | f5cbca9c29 | |
linuswck | 737ff79ae7 | |
linuswck | dc97d3aee6 | |
Sebastien Bourdeauducq | 5d38db19d0 | |
Sebastien Bourdeauducq | 9bee4b9697 | |
linuswck | cd22e42cb4 | |
linuswck | b7bac8c9d8 | |
mwojcik | e8818c812c | |
occheung | 68dd0e029f | |
occheung | 64d3f867a0 | |
Sebastien Bourdeauducq | df662c4262 | |
Sebastien Bourdeauducq | d2ac6aceb3 | |
Sebastien Bourdeauducq | 9b94a09477 | |
David Nadlinger | efbae51f9d | |
David Nadlinger | 8acfa82586 | |
David Nadlinger | 4d636ea593 | |
Sebastien Bourdeauducq | 3ed7e0ed06 | |
Simon Renblad | c4259dab18 | |
mwojcik | c46ac6f87d | |
linuswck | 758b97426a | |
linuswck | c206e92f29 | |
linuswck | cb547c8a46 | |
linuswck | 72a5231493 | |
Denis Ovchinnikov | 07714be8a7 | |
Simon Renblad | 361088ae72 | |
Simon Renblad | a384df17a4 | |
Simon Renblad | 6592b6ea1d | |
Simon Renblad | 2fb085f1a2 | |
Simon Renblad | a7569a0b2d | |
Simon Renblad | 4fbff1648c | |
Simon Renblad | 8f4c8387f9 | |
Simon Renblad | a2d62e6006 | |
Simon Renblad | 3d0feef614 | |
Simon Renblad | 59ad873831 | |
Simon Renblad | 8589da0723 | |
Simon Renblad | 94e076e976 | |
Simon Renblad | a0094aafbb | |
Simon Renblad | 0befadee96 | |
sven-oxionics | b3dc199e6a | |
Florian Agbuya | d73889fb27 | |
Simon Renblad | 9f8bb6445f | |
Simon Renblad | 068a2d1663 | |
Simon Renblad | 6c588b83d7 | |
Simon Renblad | c17f69a51b | |
Simon Renblad | ac504069d2 | |
Simon Renblad | b6a83904b5 | |
Simon Renblad | 25959d0cd6 | |
Simon Renblad | 5695e9f77e | |
Simon Renblad | fe0f6d8a2c | |
Simon Renblad | d1f2727126 | |
Simon Renblad | 16a3ce274f | |
Simon Renblad | af7622d7ab | |
Jonathan Coates | 9a84575649 | |
Simon Renblad | faf85e815a | |
Simon Renblad | 3663a6b8e8 | |
Simon Renblad | 91442e2914 | |
Simon Renblad | 50a6dac178 | |
Simon Renblad | 5292a8de82 | |
Sebastien Bourdeauducq | 7791f85a1a | |
Sebastien Bourdeauducq | 48bc8a2ecc | |
Denis Ovchinnikov | 93882eb3ce | |
Simon Renblad | 7ca02a119d | |
Simon Renblad | 373fe3dbe7 | |
Simon Renblad | 1af98727b7 | |
Simon Renblad | 376f36c965 | |
Simon Renblad | e710d4badd | |
Simon Renblad | bfbe13e51b | |
Simon Renblad | bf38fc8b0f | |
Simon Renblad | 337273acb6 | |
Simon Renblad | 748707e157 | |
Leon Riesebos | 833fd8760e | |
Florian Agbuya | 454597915a | |
Sebastien Bourdeauducq | 77293d53e3 | |
Sebastien Bourdeauducq | a792bc5456 | |
Sebastien Bourdeauducq | 20d4712815 | |
Spaqin | 82bd913f63 | |
Sebastien Bourdeauducq | 115415d120 | |
Florian Agbuya | d140c960bb | |
Egor Savkin | c25c0bd55a | |
Egor Savkin | 30ef8d8cb4 | |
Florian Agbuya | 7ad32d903a | |
Florian Agbuya | bf46ce4a92 | |
den512is | 1f306a2859 | |
Florian Agbuya | 150d325fc1 | |
Florian Agbuya | c298ec4c2e | |
Sebastien Bourdeauducq | 69bf2dfb81 | |
mwojcik | 29cb7e785d | |
Sebastien Bourdeauducq | b97f6a9e44 | |
Sebastien Bourdeauducq | e0ebc1b21d | |
Sebastien Bourdeauducq | c6ddd3af17 | |
Florian Agbuya | e12219e803 | |
Sebastien Bourdeauducq | ff11b5df71 | |
Sebastien Bourdeauducq | c8dc2cbf09 | |
Sebastien Bourdeauducq | c6b29b30fb | |
Sebastien Bourdeauducq | b20d09aad5 | |
Sebastien Bourdeauducq | 6276182c96 | |
Sebastien Bourdeauducq | d103cbea31 | |
Sebastien Bourdeauducq | 9a6bc6dc7b | |
Sebastien Bourdeauducq | fabe88065b | |
Egor Savkin | 748969c21e | |
Sebastien Bourdeauducq | 75f6bdb6a1 | |
Sebastien Bourdeauducq | 41caec797e | |
Sebastien Bourdeauducq | 953a8a9555 | |
Sebastien Bourdeauducq | 444bab2186 | |
Sebastien Bourdeauducq | 0941d3a29a | |
Denis Ovchinnikov | 22e2514ce6 | |
mwojcik | a4895b591a | |
Sebastien Bourdeauducq | ef2cc2cc12 | |
Sebastien Bourdeauducq | 779810163f | |
Sebastien Bourdeauducq | b9c7905b20 | |
Charles Baynham | c2b0c97640 | |
Sebastien Bourdeauducq | 58cc3b8d0a | |
Sebastien Bourdeauducq | 598c7b1d25 | |
Jonathan Coates | ea9fe9b4e1 | |
mwojcik | c1d6fd4bbe | |
mwojcik | ab52748cac | |
mwojcik | ddfe51e7ac | |
mwojcik | 6c96033d41 | |
mwojcik | 0b03126038 | |
mwojcik | fdca1ab7fc | |
mwojcik | c36b6b3b65 | |
mwojcik | c0ca27e6cf | |
Jonathan Coates | 3ca47537b8 | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | df15f53ee9 | |
Sebastien Bourdeauducq | e015483e48 | |
Sebastien Bourdeauducq | c53d333d46 | |
Sebastien Bourdeauducq | 5b94ce82e4 | |
Sebastien Bourdeauducq | 45cd438fb8 | |
Sebastien Bourdeauducq | 0e7e30d46e | |
Sebastien Bourdeauducq | d5a7755584 | |
Sebastien Bourdeauducq | 3ff0be6540 | |
Sebastien Bourdeauducq | 8409a6bb94 | |
Sebastien Bourdeauducq | 2c1438c4b9 | |
Egor Savkin | 5199bea353 | |
mwojcik | a533f2a0cd | |
Jonathan Coates | 0bf57f4ebd | |
Sebastien Bourdeauducq | 4417acd13b | |
Sebastien Bourdeauducq | 4056168875 | |
Egor Savkin | 9331911139 | |
Spaqin | 2f35869eb1 | |
Egor Savkin | aed47d79ff | |
mwojcik | 918d30b900 | |
Egor Savkin | b5d9062ba9 | |
Egor Savkin | 8984f5104a | |
Egor Savkin | d0b8818688 | |
Sebastien Bourdeauducq | 757c00b0fe | |
Sebastien Bourdeauducq | c1474c134a | |
Sebastien Bourdeauducq | dc3db8bb66 | |
Sebastien Bourdeauducq | 97161a3df2 | |
Ikko Eltociear Ashimine | 7ba06bfe61 | |
Spaqin | b225717ddb | |
mwojcik | 696bda5c03 | |
mwojcik | 9150230ea7 | |
Spaqin | e9a153b985 | |
David Nadlinger | 8b1f38b015 | |
Egor Savkin | bbf80875fb | |
Egor Savkin | 1ca09b9484 | |
Spaqin | 84e7515721 | |
Ikko Eltociear Ashimine | 15c18bdc81 | |
Sebastien Bourdeauducq | a9360823b1 | |
Egor Savkin | 1ec0abbfcf | |
mwojcik | 90a6fe1c35 | |
mwojcik | d0437f5672 | |
Michael Hartmann | 07d684a35d | |
Michael Hartmann | 2371c825f5 | |
Egor Savkin | 394138f00f | |
Sebastien Bourdeauducq | 3f5cc4aa10 | |
Sebastien Bourdeauducq | e9c65abebe | |
Sebastien Bourdeauducq | 20e8f17b3d | |
Sebastien Bourdeauducq | 57e87c9717 | |
Sebastien Bourdeauducq | 248cd69673 | |
Sebastien Bourdeauducq | b8968262d7 | |
Sebastien Bourdeauducq | babbbfadb3 | |
Sebastien Bourdeauducq | 514ac953ce | |
Sebastien Bourdeauducq | 0a37a1a4c1 | |
Sebastien Bourdeauducq | 6d37d9d52c | |
Sebastien Bourdeauducq | 5f77d4f5fa | |
Sebastien Bourdeauducq | 2f289c552f | |
Sebastien Bourdeauducq | 9e8bb3c701 | |
Sebastien Bourdeauducq | d872c3ab4d | |
Sebastien Bourdeauducq | f8d93813e9 | |
Sebastien Bourdeauducq | 628b671433 | |
Sebastien Bourdeauducq | daad3d263a | |
Sebastien Bourdeauducq | 80f261437a | |
Sebastien Bourdeauducq | 7fd6dead8f | |
Sebastien Bourdeauducq | 73a4ef89ec | |
mwojcik | 70edc9c5c6 | |
mwojcik | 9042426872 | |
mwojcik | cd860beda2 | |
mwojcik | 627504b60e | |
Sebastien Bourdeauducq | c8ab6c1b2b | |
Sebastien Bourdeauducq | a96bbd8508 | |
Sebastien Bourdeauducq | 6cfd1480a7 | |
Sebastien Bourdeauducq | c401559ed5 | |
Sebastien Bourdeauducq | ea21f474a7 | |
Sebastien Bourdeauducq | cee9f3f44e | |
Sebastien Bourdeauducq | b9bfe090f4 | |
mwojcik | eb3742fb08 | |
Egor Savkin | 070fed755b | |
Sebastien Bourdeauducq | 63f1a6d197 | |
Sebastien Bourdeauducq | 7dafdfe2f7 | |
Sebastien Bourdeauducq | ec893222a4 | |
Sebastien Bourdeauducq | 573a895c1e | |
Sebastien Bourdeauducq | cf2a4972f7 | |
Sebastien Bourdeauducq | 668997a451 | |
Sebastien Bourdeauducq | 5da9794895 | |
Spaqin | 3838dfc1d1 | |
Sebastien Bourdeauducq | 1be7e2a2e1 | |
Sebastien Bourdeauducq | 1bf7188dec | |
mwojcik | bdae594c79 | |
mwojcik | 8dc6902c23 | |
Norman Krackow | dbb77b5356 | |
Sebastien Bourdeauducq | 1fc127c770 | |
David Nadlinger | 88684dbd2a | |
David Nadlinger | b9f13d48aa | |
David Nadlinger | 4bb2a3b9e0 | |
David Nadlinger | f5c408d8d9 | |
Sebastien Bourdeauducq | 4be7f302e4 | |
Spaqin | 17efc28dbe | |
David Nadlinger | 1e0102379b | |
David Nadlinger | ceabeb8d84 | |
SingularitySurfer | 8e476dd502 | |
David Nadlinger | 874d298ceb | |
Egor Savkin | d75ade7be6 | |
Egor Savkin | 2a58981822 | |
Egor Savkin | e80442811e | |
Egor Savkin | 12649720f1 | |
Egor Savkin | 454ae39c5d | |
David Nadlinger | 3c7a394eff | |
David Nadlinger | 740543d4e2 | |
Egor Savkin | b2b559e73b | |
Egor Savkin | 1852491102 | |
Egor Savkin | c591e7e305 | |
David Nadlinger | 261dc6b933 | |
David Nadlinger | 1abedba6dc | |
Egor Savkin | aa2febca53 | |
Egor Savkin | d60a96a715 | |
wlph17 | 3f93f16955 | |
Sebastien Bourdeauducq | 3735b7ea9d | |
Sebastien Bourdeauducq | 195d2aea6a | |
Sebastien Bourdeauducq | 6d179b2bf5 | |
Sebastien Bourdeauducq | 275b00bfc2 | |
Jonathan Coates | b8b6ce14cc | |
Nico Pulido | 88c5109627 | |
David Nadlinger | dee154b35b | |
David Nadlinger | 950b9ac4d6 | |
Egor Savkin | 6c47aac760 | |
mwojcik | f2c1e663a7 | |
Egor Savkin | f7f027001e | |
David Nadlinger | 0b3c232819 | |
Etienne Wodey | d45f9b6950 | |
Sebastien Bourdeauducq | 2fe02cee6f | |
Sebastien Bourdeauducq | 404f24af6b | |
David Nadlinger | 3d25092cbd | |
David Nadlinger | dbbe8e8ed4 | |
David Nadlinger | 8740ec3dd5 | |
David Nadlinger | 6caa779c74 | |
David Nadlinger | 4819016a3c | |
David Nadlinger | 00a27b105a | |
David Nadlinger | beff15de5e | |
火焚 富良 | defc69d9c3 | |
火焚 富良 | e2178f6c86 | |
Sebastien Bourdeauducq | f3f068036a | |
mwojcik | ad000609ce | |
mwojcik | af0b94bb34 | |
mwojcik | 5cd57e8688 | |
mwojcik | f8eb695c0f | |
mwojcik | 458bd8a927 | |
mwojcik | a6856a5e4a | |
mwojcik | 1eb87164be | |
Sebastien Bourdeauducq | f75ddf78b0 | |
Sebastien Bourdeauducq | e0b1098bc0 | |
Robert Jördens | e5c621751f | |
Robert Jördens | 07db770423 | |
Robert Jördens | eb7a0714b3 | |
Robert Jördens | e15b5b50d8 | |
Robert Jördens | 1820e1f715 | |
Robert Jördens | 118b7aca1d | |
Fabian Schwartau | d5e267fadf | |
Sebastien Bourdeauducq | 286f151d9a | |
Sebastien Bourdeauducq | 19b8d28a2e | |
Sebastien Bourdeauducq | 3ffbc5681e | |
Sebastien Bourdeauducq | 192cab887f | |
wlph17 | 9846ee653c | |
fanmingyu212 | 56e6b1428c | |
Michael Birtwell | b895846322 | |
Robert Jördens | a1a4545ed4 | |
Robert Jördens | a0053f7a2b | |
Robert Jördens | 740f3d220b | |
Robert Jördens | 513f9f00f3 | |
Robert Jördens | 5cfa8d9a42 | |
Robert Jördens | 0e4a87826c | |
Sebastien Bourdeauducq | 1709cf9717 | |
Sebastien Bourdeauducq | 4266beeb9c | |
mwojcik | c955ac15ed | |
mwojcik | 81ef484864 | |
mwojcik | f2c3f95040 | |
mwojcik | 616ed3dcc2 | |
Robert Jördens | aedcf205c7 | |
Robert Jördens | 14ab1d4bbc | |
Sebastien Bourdeauducq | a028b5c9f7 | |
Sebastien Bourdeauducq | 6085fe3319 | |
Robert Jördens | af28bf3550 | |
Robert Jördens | 4df880faf6 | |
Robert Jördens | 857fb4ecec | |
Robert Jördens | a91836e5fe | |
Robert Jördens | c5c5c30617 | |
Robert Jördens | 27e3c044ed | |
Robert Jördens | c26fa5eb90 | |
Sebastien Bourdeauducq | 411afbdc23 | |
Sebastien Bourdeauducq | b4287ac9f4 | |
Robert Jördens | 1cc57e2345 | |
Robert Jördens | 263c2751b3 | |
Robert Jördens | 876f26ee30 | |
Robert Jördens | fa3678f8a3 | |
Robert Jördens | f4d325112c | |
Robert Jördens | b6586cd7e4 | |
Robert Jördens | 3809ac5470 | |
Robert Jördens | b9727fdfce | |
Robert Jördens | d6d0c2c866 | |
Robert Jördens | 0df2cadcd3 | |
Robert Jördens | 25c0dc4688 | |
Robert Jördens | cf48232a90 | |
Robert Jördens | a20087848d | |
Robert Jördens | 31663556b8 | |
Robert Jördens | 47f90a58cc | |
Mikołaj Sowiński | 3c7ab498d1 | |
Deepskyhunter | 7c306d5609 | |
mwojcik | b705862ecd | |
fanmingyu212 | 20cb99061e | |
Sebastien Bourdeauducq | 5ef94d30dd | |
kk1050 | 3c72b8d646 | |
Sebastien Bourdeauducq | 27397625ba | |
cc78078 | 3535d0f1ae | |
cc78078 | 185c91f522 | |
Deepskyhunter | f31279411e | |
Alex Wong Tat Hang | a3ae82502c | |
Deepskyhunter | 0cdb06fdf5 | |
Deepskyhunter | 2a7a72b27a | |
kk1050 | 748e28be38 | |
Sebastien Bourdeauducq | 4b1715c80b | |
Robert Jördens | 5985595845 | |
Robert Jördens | a8f498b478 | |
Sebastien Bourdeauducq | db4bccda7e | |
Sebastien Bourdeauducq | 5c461443e4 | |
Sebastien Bourdeauducq | cb711e0ee3 | |
Sebastien Bourdeauducq | 9ba239b8b2 | |
Robert Jördens | 4ea11f4609 | |
SingularitySurfer | 57ac6ec003 | |
Robert Jördens | d2dacc6433 | |
Sebastien Bourdeauducq | 734b2a6747 | |
Deepskyhunter | c7394802bd | |
kk1050 | 7aa6104872 | |
mwojcik | 46f2842d38 | |
mwojcik | c9fb7b410f | |
Spaqin | 8be945d5c7 | |
SingularitySurfer | 9c8ffa54b2 | |
Sebastien Bourdeauducq | d17675e9b5 | |
Sebastien Bourdeauducq | 388b81af19 | |
Deepskyhunter | 02b086c9e5 | |
SingularitySurfer | 953dd899fd | |
SingularitySurfer | 689a2ef8ba | |
SingularitySurfer | d8cfe22501 | |
Deepskyhunter | b4f24dd326 | |
Deepskyhunter | da6d35e7c6 | |
Deepskyhunter | 745f440597 | |
SingularitySurfer | 2e834cf406 | |
SingularitySurfer | 3f8a221c76 | |
SingularitySurfer | ab097b8ef9 | |
SingularitySurfer | 24b4ec46bd | |
Norman Krackow | 56c59e38f0 | |
SingularitySurfer | c0581178d6 | |
SingularitySurfer | 43c94577ce | |
SingularitySurfer | ce4055db3b | |
SingularitySurfer | b67a70392d | |
SingularitySurfer | 57176fedb2 | |
SingularitySurfer | 8bea821f93 | |
SingularitySurfer | 0388161754 | |
SingularitySurfer | 751af3144e | |
SingularitySurfer | 5df766e6da | |
David Nadlinger | e1f9feae8b | |
David Nadlinger | dd928fc014 | |
Sebastien Bourdeauducq | 48cb111035 | |
hartytp | d8597e9dc8 | |
David Nadlinger | 32db6ff978 | |
David Nadlinger | dbc87f08ff | |
David Nadlinger | c4068e6896 | |
David Nadlinger | 85895ab89b | |
kk1050 | 46fb8916bb | |
David Nadlinger | 2d6fc154db | |
David Nadlinger | 4c42f65909 | |
David Nadlinger | f4d639242d | |
SingularitySurfer | d09153411f | |
Norman Krackow | dc49372d57 | |
Norman Krackow | 2044dc3ae5 | |
SingularitySurfer | ae3f1c1c71 | |
Sebastien Bourdeauducq | bf3b155a31 | |
SingularitySurfer | 1bddadc6e2 | |
SingularitySurfer | b0f9fd9c4c | |
Michael Birtwell | 69c4026d2b | |
Deepskyhunter | e47834d82e | |
Spaqin | 4ede14b14d | |
kk1050 | 4ddd2739ee | |
Sebastien Bourdeauducq | e702624720 | |
Sebastien Bourdeauducq | 68ef0073ea | |
Sebastien Bourdeauducq | 71a37bb408 | |
occheung | f79f7db3a2 | |
occheung | 872f8f039f | |
occheung | 50495097e5 | |
Sebastien Bourdeauducq | ca614a3eea | |
Sebastien Bourdeauducq | 8bf6bc4d1f | |
occheung | 6d46c886d7 | |
Sebastien Bourdeauducq | a5b7e958f8 | |
Sebastien Bourdeauducq | 667f36a2e7 | |
Sebastien Bourdeauducq | 7cff63e539 | |
Sebastien Bourdeauducq | df1b19082c | |
Sebastien Bourdeauducq | d478086119 | |
Sebastien Bourdeauducq | 18a08954c1 | |
Sebastien Bourdeauducq | 57086e2349 | |
mwojcik | cf8e583847 | |
mwojcik | d24a36a02a | |
mwojcik | 4bdb4c8e11 | |
Sebastien Bourdeauducq | 8599be5550 | |
Sebastien Bourdeauducq | 9896d78e07 | |
kk1050 | 70503bee6f | |
Laurent Stephenson | 16393efa7c | |
David Nadlinger | 8a7af3f75c | |
Spaqin | 35f30ddf05 | |
Sebastien Bourdeauducq | c440f9fe1b | |
Sebastien Bourdeauducq | 69b6426800 | |
Michael Birtwell | 50dbda4f43 | |
Michael Birtwell | 95378cf9c9 | |
Michael Birtwell | 671453938b | |
Michael Birtwell | 1fe59d27dc | |
Michael Birtwell | 73082d116f | |
Michael Birtwell | 596b9a265c | |
Michael Birtwell | 6ffb1f83ee | |
Michael Birtwell | c60de48a30 | |
Suthep Pomjaksilp | 06ad76b6ab | |
David Nadlinger | b2b84b1fd6 | |
David Nadlinger | 6b5c390d48 | |
David Nadlinger | 2cb08814e8 | |
Sebastien Bourdeauducq | 58b59b99ff | |
Sebastien Bourdeauducq | fa3ee8ad23 | |
Michael Birtwell | cab9d90d01 | |
Sebastien Bourdeauducq | 0a029748ee | |
Leon Riesebos | 386391e3f9 | |
Leon Riesebos | b5dc9fd640 | |
Sebastien Bourdeauducq | c82c358f3a | |
Sebastien Bourdeauducq | 723f41c78b | |
Sebastien Bourdeauducq | 866a83796a | |
Timothy Ballance | f91e106586 | |
Timothy Ballance | a289d69883 | |
Sebastien Bourdeauducq | f89275b02a | |
Sebastien Bourdeauducq | 65d2dd0173 | |
Sebastien Bourdeauducq | 6b33f3b719 | |
Sebastien Bourdeauducq | 80d412a8bf | |
Sebastien Bourdeauducq | 922d2b1619 | |
Sebastien Bourdeauducq | d644e982c8 | |
Sebastien Bourdeauducq | ec1efd7af9 | |
Sebastien Bourdeauducq | 735133a2b4 | |
Sebastien Bourdeauducq | 207717c740 | |
Sebastien Bourdeauducq | 6d92e539b1 | |
Sebastien Bourdeauducq | 6a49b8cb58 | |
Sebastien Bourdeauducq | df1513f0e9 | |
Sebastien Bourdeauducq | d3073022ac | |
Sebastien Bourdeauducq | bbb2c75194 | |
Sebastien Bourdeauducq | 710786388c | |
Sebastien Bourdeauducq | aff569b2c3 | |
Sebastien Bourdeauducq | a159ef642d | |
Sebastien Bourdeauducq | 1a26eb8cf2 | |
Sebastien Bourdeauducq | c1c2d21ba7 | |
Sebastien Bourdeauducq | e5e4d55f84 | |
Sebastien Bourdeauducq | 71e8b49246 | |
pca006132 | ebfeb1869f | |
pca006132 | eb6817c8f1 | |
Sebastien Bourdeauducq | 8415151866 | |
ciciwu | 67ca48fa84 | |
ciciwu | 9a96387dfe | |
Sebastien Bourdeauducq | b02abc2bf4 | |
Sebastien Bourdeauducq | ac55da81d8 | |
spaqin | 232f28c0e8 | |
spaqin | 51fa1b5e5e | |
spaqin | 17ecd35530 | |
Spaqin | a85b4d5f5e | |
David Nadlinger | 9bfbd39fa3 | |
Sebastien Bourdeauducq | 338bb189b4 | |
Leon Riesebos | c4292770f8 | |
Sebastien Bourdeauducq | 2b918ac6f7 | |
Michael Birtwell | 1b80746f48 | |
Michael Birtwell | 2d6215158f | |
mwojcik | c000af9985 | |
mwojcik | 35f91aef68 | |
Sebastien Bourdeauducq | 0da7b83176 | |
Steve Fan | ad656d1e53 | |
Sebastien Bourdeauducq | 69ce09c7c0 | |
Sebastien Bourdeauducq | 6a586c2e4d | |
Sebastien Bourdeauducq | e84056f7e0 | |
Mike Birtwell | a106ed0295 | |
Robert Jördens | c8b9eed9c9 | |
Robert Jördens | 08b65470cd | |
Sebastien Bourdeauducq | 65eab31f23 | |
Sebastien Bourdeauducq | 6dfc854673 | |
Sebastien Bourdeauducq | 5a8928fbf3 | |
Sebastien Bourdeauducq | b3b73948a2 | |
Sebastien Bourdeauducq | 8433cc6731 | |
Sebastien Bourdeauducq | 0649e69d94 | |
Sebastien Bourdeauducq | bbfa926fa6 | |
Sebastien Bourdeauducq | 9e37fb95d6 | |
Sebastien Bourdeauducq | 034a0fdb35 | |
Sebastien Bourdeauducq | 0e178e40ac | |
Sebastien Bourdeauducq | a0070d4396 | |
Sebastien Bourdeauducq | 03a367f565 | |
Sebastien Bourdeauducq | b893d97d7b | |
Sebastien Bourdeauducq | b6f5ba8b5b | |
Sebastien Bourdeauducq | cc69482dad | |
Sebastien Bourdeauducq | 833acb6925 | |
occheung | d5eec652ee | |
occheung | a74196aa27 | |
Steve Fan | 798a412c6f | |
David Nadlinger | e45cb217be | |
Sebastien Bourdeauducq | 8866ab301a | |
Sebastien Bourdeauducq | 3cddb14174 | |
Sebastien Bourdeauducq | 245fe6e9ea | |
Sebastien Bourdeauducq | ef25640937 | |
Sebastien Bourdeauducq | dd3279e506 | |
Sebastien Bourdeauducq | afb98a1903 | |
Steve Fan | 34008b7a21 | |
pca006132 | 93328ad8ee | |
Steve Fan | 234a82aaa9 | |
Sebastien Bourdeauducq | ee511758ce | |
Sebastien Bourdeauducq | e6c18364ae | |
pca006132 | 9d43762695 | |
pca006132 | 4132c450a5 | |
pca006132 | 536b3e0c26 | |
pca006132 | ba34700798 | |
pca006132 | 6ec003c1c9 | |
pca006132 | da4ff44377 | |
pca006132 | 4644e105b1 | |
hartytp | 715bff3ebf | |
Sebastien Bourdeauducq | f58aa3bdf6 | |
Sebastien Bourdeauducq | 4e420fc297 | |
Sebastien Bourdeauducq | 5597be3356 | |
Sebastien Bourdeauducq | f542f045da | |
Sebastien Bourdeauducq | 53878fe1d4 | |
Sebastien Bourdeauducq | 735cd1eb3e | |
Steve Fan | 3f812c4c2c | |
occheung | b6c59a0cb3 | |
Steve Fan | de5892a00a | |
Peter Drmota | 4eee49f889 | |
occheung | 9eee0e5a7b | |
Steve Fan | d7dd75e833 | |
Spaqin | 095fb9e333 | |
Sebastien Bourdeauducq | 4e3e0d129c | |
pca006132 | 12ee326fb4 | |
occheung | 61349f9685 | |
occheung | cea0a15e1e | |
occheung | 8b45f917d1 | |
pca006132 | 6542b65db3 | |
pca006132 | 9f90088fa6 | |
occheung | 5e1847e7c1 | |
occheung | 6f3c49528d | |
Sebastien Bourdeauducq | eaa1505c94 | |
Leon Riesebos | f42bea06a8 | |
occheung | 9d493028e5 | |
Sebastien Bourdeauducq | bbac477092 | |
Steve Fan | c0a7be0a90 | |
Sebastien Bourdeauducq | 9e5e234af3 | |
Sebastien Bourdeauducq | 352317df11 | |
Sebastien Bourdeauducq | a518963a47 | |
Sebastien Bourdeauducq | 37f14d94d0 | |
Sebastien Bourdeauducq | 4f723e19a6 | |
Peter Drmota | 7c664142a5 | |
Etienne Wodey | 33a9ca2684 | |
Sébastien Bourdeauducq | 311a818a49 | |
Sébastien Bourdeauducq | 1def0d98c5 | |
Leon Riesebos | 7ffe4dc2e3 | |
Leon Riesebos | 9e3ea4e8ef | |
Sebastien Bourdeauducq | 12512bfb2f | |
Steve Fan | 4a6bea479a | |
Sebastien Bourdeauducq | 9bbf7eb485 | |
mwojcik | f8a649deda | |
mwojcik | 7953f3d705 | |
mwojcik | f281112779 | |
mwojcik | eec3ea6589 | |
Sebastien Bourdeauducq | 163f5d9128 | |
Etienne Wodey | 9f830b86c0 | |
Sebastien Bourdeauducq | b8e7add785 | |
Sebastien Bourdeauducq | 5a923a0956 | |
David Nadlinger | c6039479e4 | |
David Nadlinger | 63b5727a0c | |
David Nadlinger | 9b01db3d11 | |
Sebastien Bourdeauducq | 6a433b2fce | |
occheung | 5ed9e49b94 | |
occheung | 9423428bb0 | |
Sebastien Bourdeauducq | 7307b30213 | |
Harry Ho | b49f813b17 | |
Peter Drmota | 20e079a381 | |
Sebastien Bourdeauducq | f0c50c80e6 | |
Sebastien Bourdeauducq | 46604300a2 | |
Sebastien Bourdeauducq | c029977a27 | |
Sebastien Bourdeauducq | 80115fcc02 | |
occheung | ac2f55b3ff | |
occheung | db3e5e83e6 | |
occheung | 09945ecc4d | |
occheung | 02119282b8 | |
occheung | 750b0ce46d | |
occheung | 531670d6c5 | |
occheung | 0f660735bf | |
occheung | 0755757601 | |
occheung | 0d708cd61a | |
occheung | 03b803e764 | |
occheung | b3e315e24a | |
occheung | 0898e101e2 | |
occheung | cb247f235f | |
occheung | 90f944481c | |
occheung | d84ad0095b | |
occheung | dd68b4ab82 | |
occheung | c6e0e26440 | |
occheung | 8da924ec0f | |
Robert Jördens | 591507a7c0 | |
Robert Jördens | 5a5b0cc7c0 | |
Spaqin | 69cddc6b86 | |
Spaqin | 9b1d7e297d | |
Harry Ho | 21b07dc667 | |
Robert Jördens | 1ff474893d | |
Robert Jördens | 10c37b87ec | |
Harry Ho | c940f104f1 | |
Harry Ho | 0aa8a739aa | |
Sebastien Bourdeauducq | 43eab14f56 | |
Sebastien Bourdeauducq | cc15a4f572 | |
Sebastien Bourdeauducq | df6aeb99f6 | |
Sebastien Bourdeauducq | bb61f2dae6 | |
Sebastien Bourdeauducq | b0cbad530b | |
Sebastien Bourdeauducq | 92cdfac35a | |
occheung | bf180c168c | |
occheung | d5fa3d131a | |
occheung | 6d3164a912 | |
occheung | 46326716fd | |
occheung | 0a59c889de | |
occheung | 27a7a96626 | |
occheung | a0bf11b465 | |
occheung | 790a20edf6 | |
fanmingyu212 | 178a86bcda | |
Sebastien Bourdeauducq | 35d21c98d3 | |
Sebastien Bourdeauducq | f5100702f6 | |
Sebastien Bourdeauducq | 3c1cbf47d2 | |
Robert Jördens | 3f6bf33298 | |
Harry Ho | 501eb1fa23 | |
Harry Ho | ea9bc04407 | |
occheung | 59065c4663 | |
Spaqin | 1894f0f626 | |
Sebastien Bourdeauducq | 4bfd010f03 | |
Etienne Wodey | a8333053c9 | |
occheung | 7a7e17f7e3 | |
Sebastien Bourdeauducq | 3ed10221d8 | |
Sebastien Bourdeauducq | e8a7a8f41e | |
Sebastien Bourdeauducq | 4834966798 | |
Sebastien Bourdeauducq | 7209e6f279 | |
Sebastien Bourdeauducq | ffb1e3ec2d | |
Sebastien Bourdeauducq | 2d79d824f9 | |
Sebastien Bourdeauducq | 1a0c4219ec | |
Sebastien Bourdeauducq | 2e5c32878f | |
occheung | a573dcf3f9 | |
occheung | 448974fe11 | |
occheung | b091d8cb66 | |
Sebastien Bourdeauducq | d50e24acb1 | |
occheung | 5394d04669 | |
occheung | b8ed5a0d91 | |
occheung | 2213e7ffac | |
occheung | 09ffd9de1e | |
occheung | 051a14abf2 | |
occheung | c6ba0f3cf4 | |
occheung | c812a837ab | |
occheung | a596db404d | |
Sebastien Bourdeauducq | eff7ae5aff | |
Sebastien Bourdeauducq | c78fbe9bd2 | |
Sebastien Bourdeauducq | 17b9d2fc5a | |
Sebastien Bourdeauducq | 5e2664ae7e | |
Sebastien Bourdeauducq | 64ce7e498b | |
Sebastien Bourdeauducq | 952acce65b | |
Sebastien Bourdeauducq | 7ae4b2d9bb | |
Sebastien Bourdeauducq | ce0964e25f | |
occheung | 4fab267593 | |
occheung | dcbd9f905c | |
occheung | 9f6b3f6014 | |
Sebastien Bourdeauducq | 9697ec33eb | |
Sebastien Bourdeauducq | eee80c7697 | |
Sebastien Bourdeauducq | b7efb2f633 | |
Sebastien Bourdeauducq | 9ee03bd438 | |
occheung | 4619a33db4 | |
occheung | 5985f7efb5 | |
Sebastien Bourdeauducq | 6db7280b09 | |
occheung | d8ac429059 | |
occheung | 798774192d | |
occheung | eecd825d23 | |
occheung | 1da0554a49 | |
Sebastien Bourdeauducq | 035d15af9d | |
Sebastien Bourdeauducq | 9addd08587 | |
Sebastien Bourdeauducq | 3e09e48152 | |
occheung | 5d0a8cf9ac | |
occheung | 70507e1b72 | |
occheung | c113cd6bf5 | |
Sebastien Bourdeauducq | 251cd4dcc6 | |
occheung | 61b0170a12 | |
occheung | af263ffe1f | |
occheung | a833974b50 | |
occheung | d623acc29d | |
occheung | 8fa47b8119 | |
occheung | de0f2d4a28 | |
occheung | 9afe63c08a | |
occheung | 29a2f106d1 | |
occheung | b30ed75e69 | |
occheung | 279593f984 | |
occheung | 1ba8c8dfee | |
Sebastien Bourdeauducq | 942bd1a95d | |
occheung | 3d629006df | |
occheung | 7542105f0f | |
occheung | 01ca114c66 | |
occheung | 36171f2c61 | |
occheung | 01e357e5d3 | |
occheung | f77b607b56 | |
occheung | 1293e0750e | |
occheung | fc42d053d9 | |
Sebastien Bourdeauducq | 9adab6c817 | |
Sebastien Bourdeauducq | 8c468d0346 | |
occheung | 1b516b16e2 | |
Sebastien Bourdeauducq | be5ae5c5b4 | |
Sebastien Bourdeauducq | d13efd6587 | |
Sebastien Bourdeauducq | e8fe8409b2 | |
Sebastien Bourdeauducq | cabe5ace8e | |
Sebastien Bourdeauducq | 6629a49e86 | |
Sebastien Bourdeauducq | 43d120359d | |
Sebastien Bourdeauducq | 5656e52581 | |
occheung | 1b8b4baf6a | |
occheung | 905330b0f1 | |
occheung | 50a62b3d42 | |
occheung | 7f0bc9f7f0 | |
occheung | c42adfe6fd | |
occheung | f56152e72f | |
occheung | c800b6c8d3 | |
occheung | e99061b013 | |
occheung | ecedec577c | |
occheung | 252594a606 | |
occheung | 31bf17563c | |
occheung | bfddd8a30f | |
occheung | ad3037d0f6 | |
occheung | daaf6c3401 | |
occheung | 6d9cebfd42 | |
occheung | 96438c9da7 | |
occheung | 6535b2f089 | |
occheung | 45adaa1d98 | |
occheung | 869a282410 | |
occheung | ebb9f298b5 | |
occheung | 97a0132f15 | |
occheung | 37ea863004 | |
occheung | 3ff74e0693 | |
occheung | 448fe0e8cf | |
occheung | 8294d7fea5 | |
occheung | 13032272fd | |
occheung | 46102ee737 | |
occheung | b87ea79d51 | |
occheung | 9aee42f0f2 | |
occheung | 82b4052cd6 | |
Leon Riesebos | 2cf144a60c | |
Robert Jördens | e7a46ec767 | |
Etienne Wodey | 4d7bd3ee32 | |
Etienne Wodey | 075cb26dd7 | |
Etienne Wodey | 7aebf02f84 | |
Etienne Wodey | 61b44d40dd | |
Etienne Wodey | 65f8a97b56 | |
Robert Jördens | 11790c6d7c | |
SingularitySurfer | 65f63e6927 | |
Robert Jördens | a53162d01d | |
SingularitySurfer | 4d21a72407 | |
Mikołaj Sowiński | 898122f3e5 | |
Sebastien Bourdeauducq | 420891ba54 | |
Sebastien Bourdeauducq | 9f94bc61ae | |
Sebastien Bourdeauducq | c69a1316ad | |
Sebastien Bourdeauducq | 477b1516d3 | |
Sebastien Bourdeauducq | e3edb505e3 | |
Sebastien Bourdeauducq | 67847f98f4 | |
mwojcik | 7879d3630b | |
Sebastien Bourdeauducq | 242dfae38e | |
Star Chen | 5111132ef0 | |
Sebastien Bourdeauducq | dc546630e4 | |
Robert Jördens | fd824f7ad0 | |
Harry Ho | c9608c0a89 | |
Star Chen | 6b88ea563d | |
Sebastien Bourdeauducq | 97e994700b | |
Sebastien Bourdeauducq | c3d765f745 | |
Robert Jördens | 1e869aedd3 | |
Sebastien Bourdeauducq | 53a98acfe4 | |
Star Chen | 30e5e06a33 | |
Star Chen | ebb67eaeee | |
Star Chen | 943a95e07a | |
Star Chen | e996b5f635 | |
StarChen | 796aeabb53 | |
Sebastien Bourdeauducq | 4fb8ea5b73 | |
Star Chen | 5cd721c514 | |
Sebastien Bourdeauducq | d327d2a505 | |
Sebastien Bourdeauducq | bc7ce7d6aa | |
Star Chen | 6ce9c26402 | |
occheung | 2204fd2b22 | |
pca006132 | b10d1bdd37 | |
pca006132 | 4ede58e44b | |
Sebastien Bourdeauducq | 51d2861e63 | |
Sebastien Bourdeauducq | 29fd58e34b | |
pca006132 | 0257ecc332 | |
pca006132 | 822e8565f7 | |
pca006132 | 6fb31a7abb | |
pca006132 | 0806b67dbf | |
pca006132 | f531af510c | |
pca006132 | c29a149d16 | |
Etienne Wodey | 094a346974 | |
Etienne Wodey | 68268e3db8 | |
Etienne Wodey | cca654bd47 | |
Etienne Wodey | 8bedf278f0 | |
Etienne Wodey | 12ef907f34 | |
Etienne Wodey | d8b1e59538 | |
Etienne Wodey | b8ab5f2607 | |
Etienne Wodey | 5c23e6edb6 | |
Sebastien Bourdeauducq | 7046aa9c23 | |
Sebastien Bourdeauducq | ea0c7b6173 | |
Star Chen | 9dee8bb9c9 | |
pca006132 | bcb030cc9c | |
Sebastien Bourdeauducq | 522c2f5995 | |
Sebastien Bourdeauducq | ea1dd2da43 | |
Leon Riesebos | 07bd1e27c1 | |
David Nadlinger | b89610bbcd | |
pca006132 | 4c743cf8af | |
pca006132 | 1e9a131386 | |
Harry Ho | 43b2a3791c | |
Sebastien Bourdeauducq | 935e18c1be | |
Robert Jördens | 67d474e6cf | |
fanmingyu212 | 91832aa886 | |
Marius Weber | 129cf8c1dd | |
Charles Baynham | 011f3bdb2e | |
Marius Weber | fb6fad7c64 | |
Marius Weber | 043c9c20d7 | |
Marius Weber | f97baa8aec | |
Marius Weber | 4fa2028671 | |
Marius Weber | 515cfa7dfb | |
Marius Weber | 4f812cc4ed | |
Marius Weber | 407fba232d | |
Marius Weber | 75445fe5f0 | |
Marius Weber | 1c96797de5 | |
Marius Weber | 7404152e4c | |
Marius Weber | eb477ee06b | |
Marius Weber | c7e992e26d | |
Sebastien Bourdeauducq | eb38b664e3 | |
Peter Drmota | 47bf5d36af | |
Leon Riesebos | af4fadcd54 | |
Leon Riesebos | a0cea3a011 | |
Leon Riesebos | 2671c271d4 | |
Leon Riesebos | d745d50245 | |
Leon Riesebos | 4a6201c083 | |
Robert Jördens | ffe1c9f9b1 | |
Marius Weber | bda5aa7c7e | |
Sebastien Bourdeauducq | 78490bef5d | |
David Nadlinger | b7f3eaebf9 | |
Harry Ho | fc59791583 | |
Harry Ho | 8002fcf8bb | |
Harry Ho | 5f32cb7196 | |
Harry Ho | 75efb8985c | |
Sebastien Bourdeauducq | 523fa01343 | |
David Nadlinger | bdaaf3c1d7 | |
David Nadlinger | 6fd088e339 | |
David Nadlinger | be4669d7a5 | |
David Nadlinger | 1f40f3ce15 | |
David Nadlinger | b8cd163978 | |
David Nadlinger | 888696f588 | |
Leon Riesebos | d04bcd8754 | |
Leon Riesebos | c22f731a61 | |
David Nadlinger | 5ba22c11c3 | |
David Nadlinger | c707ccf7d7 | |
David Nadlinger | 557671b7db | |
David Nadlinger | 75c255425d | |
Leon Riesebos | b8f4c6b9bb | |
Leon Riesebos | 1deaa758ce | |
Leon Riesebos | 3c68223337 | |
Leon Riesebos | cd7f9531d7 | |
jonathanpritchard | e577542f6b | |
Sebastien Bourdeauducq | 92fd705990 | |
Sebastien Bourdeauducq | 8deb269b9a | |
Harry Ho | a0fd5261ea | |
Harry Ho | 7c4eed7a11 | |
Harry Ho | 88b14082b6 | |
Harry Ho | 9daf77bd58 | |
Harry Ho | 52afd4ef6b | |
Harry Ho | f6d39fd6ba | |
Harry Ho | f25e86e934 | |
Harry Ho | cff7bcc122 | |
Harry Ho | dc7addf394 |
@@ -29,6 +29,7 @@ __pycache__/
/repository/
/results
/last_rid.pyon
/dataset_db.pyon
/dataset_db.mdb
/dataset_db.mdb-lock
/device_db*.py
/test*
@@ -7,8 +7,7 @@ Reporting Issues/Bugs
Thanks for `reporting issues to ARTIQ
<https://github.com/m-labs/artiq/issues/new>`_! You can also discuss issues and
-ask questions on IRC (the `#m-labs channel on freenode
-<https://webchat.freenode.net/?channels=m-labs>`_), the `Mattermost chat
+ask questions on IRC (the #m-labs channel on OFTC), the `Mattermost chat
<https://chat.m-labs.hk>`_, or on the `forum <https://forum.m-labs.hk>`_.

The best bug reports are those which contain sufficient information. With

@@ -27,7 +26,6 @@ report if possible:
* Operating System
* ARTIQ version (with recent versions of ARTIQ, run ``artiq_client --version``)
* Version of the gateware and runtime loaded in the core device (in the output of ``artiq_coremgmt -D .... log``)
* If using Conda, output of `conda list`
* Hardware involved
@@ -1 +0,0 @@
-6
@@ -4,3 +4,5 @@ include artiq/gui/logo*.svg
include versioneer.py
include artiq/_version.py
include artiq/coredevice/coredevice_generic.schema.json
include artiq/compiler/kernel.ld
include artiq/afws.pem
@@ -13,14 +13,14 @@ ARTIQ uses FPGA hardware to perform its time-critical tasks. The `Sinara hardwar
ARTIQ is designed to be portable to hardware platforms from different vendors and FPGA manufacturers.
Several different configurations of a `FPGA evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-k7-kc705-g.html>`_ and of a `Zynq evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-z7-zc706-g.html>`_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort.

-ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and Conda packages (for Windows and Linux). See `the manual <https://m-labs.hk/experiment-control/resources/>`_ for installation instructions.
+ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and MSYS2 packages (for Windows). See `the manual <https://m-labs.hk/experiment-control/resources/>`_ for installation instructions.
Packages containing pre-compiled binary images to be loaded onto the hardware platforms are supplied for each configuration.
Like any open source software ARTIQ can equally be built and installed directly from `source <https://github.com/m-labs/artiq>`_.

ARTIQ is supported by M-Labs and developed openly.
Components, features, fixes, improvements, and extensions are often `funded <https://m-labs.hk/experiment-control/funding/>`_ by and developed for the partnering research groups.

-Core technologies employed include `Python <https://www.python.org/>`_, `Migen <https://github.com/m-labs/migen>`_, `Migen-AXI <https://github.com/peteut/migen-axi>`_, `Rust <https://www.rust-lang.org/>`_, `MiSoC <https://github.com/m-labs/misoc>`_/`mor1kx <https://github.com/openrisc/mor1kx>`_, `LLVM <https://llvm.org/>`_/`llvmlite <https://github.com/numba/llvmlite>`_, and `Qt5 <https://www.qt.io/>`_.
+Core technologies employed include `Python <https://www.python.org/>`_, `Migen <https://github.com/m-labs/migen>`_, `Migen-AXI <https://github.com/peteut/migen-axi>`_, `Rust <https://www.rust-lang.org/>`_, `MiSoC <https://github.com/m-labs/misoc>`_/`VexRiscv <https://github.com/SpinalHDL/VexRiscv>`_, `LLVM <https://llvm.org/>`_/`llvmlite <https://github.com/numba/llvmlite>`_, and `Qt5 <https://www.qt.io/>`_.

Website: https://m-labs.hk/artiq

@@ -29,7 +29,7 @@ Website: https://m-labs.hk/artiq
License
=======

-Copyright (C) 2014-2021 M-Labs Limited.
+Copyright (C) 2014-2023 M-Labs Limited.

ARTIQ is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
@@ -3,6 +3,170 @@
Release notes
=============

ARTIQ-8 (Unreleased)
--------------------

Highlights:

* New hardware support:
  - Support for Shuttler, a 16-channel 125MSPS DAC card intended for ion transport.
    Waveform generator and user API are similar to the NIST PDQ.
  - Implemented Phaser-servo. This requires recent gateware on Phaser.
  - Almazny v1.2 with finer RF switch control.
  - Metlino and Sayma support has been dropped due to complications with synchronous RTIO clocking.
  - More user LEDs are exposed to RTIO on Kasli.
  - Implemented Phaser-MIQRO support. This requires the proprietary Phaser MIQRO gateware
    variant from QUARTIQ.
  - Sampler: fixed ADC MU to Volt conversion factor for Sampler v2.2+.
    For earlier hardware versions, specify the hardware version in the device
    database file (e.g. ``"hw_rev": "v2.1"``) to use the correct conversion factor.
* Support for distributed DMA, where DMA is run directly on satellites for corresponding
  RTIO events, increasing bandwidth in scenarios with heavy satellite usage.
* Support for subkernels, where kernels are run on satellite device CPUs to offload some
  of the processing and RTIO operations.
* CPU (on softcore platforms) and AXI bus (on Zynq) are now clocked synchronously with the RTIO
  clock, to facilitate implementation of local processing on DRTIO satellites, and to slightly
  reduce RTIO latency.
* Support for DRTIO-over-EEM, used with Shuttler.
* Added channel names to RTIO error messages.
* GUI:
  - Implemented Applet Request Interfaces which allow applets to modify datasets and set the
    current values of widgets in the dashboard's experiment windows.
  - Implemented a new EntryArea widget which allows argument entry widgets to be used in applets.
  - The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets,
    making it a convenient way to clean up after exploratory work without destroying a
    carefully arranged default workspace.
  - Hotkeys now organize experiment windows in the order they were last interacted with:
    + CTRL+SHIFT+T tiles experiment windows
    + CTRL+SHIFT+C cascades experiment windows
* Persistent datasets are now stored in a LMDB database for improved performance.
* Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on
  kernel functions (a minimal sketch follows this list).
* Full Python 3.10 support.
* MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to
  support legacy installations, but may be removed in a future release.
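
A minimal sketch of kernel type annotations with built-in types (the experiment and method
names here are illustrative, not taken from the ARTIQ sources)::

    from typing import List

    from artiq.experiment import EnvExperiment, kernel


    class AnnotatedExperiment(EnvExperiment):
        def build(self):
            self.setattr_device("core")

        @kernel
        def weighted_sum(self, gain: float, counts: List[int]) -> float:
            # plain Python type annotations on a kernel function
            total = 0.0
            for c in counts:
                total += gain * c
            return total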

Breaking changes:

* ``SimpleApplet`` now calls widget constructors with an additional ``ctl`` parameter for control
  operations, which includes dataset operations. It can be ignored if not needed. For an example usage,
  refer to the ``big_number.py`` applet.
* ``SimpleApplet`` and ``TitleApplet`` now call ``data_changed`` with additional parameters. Derived applets
  should change the function signature as below:

  ::

    # SimpleApplet
    def data_changed(self, value, metadata, persist, mods)
    # SimpleApplet (old version)
    def data_changed(self, data, mods)
    # TitleApplet
    def data_changed(self, value, metadata, persist, mods, title)
    # TitleApplet (old version)
    def data_changed(self, data, mods, title)

  Accesses to the data argument should be replaced as below:

  ::

    data[key][0] ==> persist[key]
    data[key][1] ==> value[key]

* The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``.
  Parameters after and including ``scale`` in both constructors are now keyword-only.
  Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage
  (a short before/after sketch also follows this list).
* Almazny v1.2 is incompatible with the legacy versions and is the default.
  To use legacy versions, specify ``almazny_hw_rev`` in the JSON description.
* kasli_generic.py has been merged into kasli.py, and the demonstration designs without JSON descriptions
  have been removed. The base classes remain present in kasli.py to support third-party flows without
  JSON descriptions.
* Legacy PYON databases should be converted to LMDB with the script below:

  ::

    from sipyco import pyon
    import lmdb

    old = pyon.load_file("dataset_db.pyon")
    new = lmdb.open("dataset_db.mdb", subdir=False, map_size=2**30)
    with new.begin(write=True) as txn:
        for key, value in old.items():
            txn.put(key.encode(), pyon.encode((value, {})).encode())
    new.close()

* ``artiq.wavesynth`` has been removed.
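
A minimal before/after sketch of the ``precision`` rename (the argument name used here is
illustrative only)::

    # ARTIQ 7 and earlier
    self.setattr_argument("frequency", NumberValue(100e6, unit="MHz", ndecimals=4))
    # ARTIQ 8
    self.setattr_argument("frequency", NumberValue(100e6, unit="MHz", precision=4))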

ARTIQ-7
-------

Highlights:

* New hardware support:
  - Kasli-SoC, a new EEM carrier based on a Zynq SoC, enabling much faster kernel execution
    (see: https://arxiv.org/abs/2111.15290).
  - DRTIO support on Zynq-based devices (Kasli-SoC and ZC706).
  - DRTIO support on KC705.
  - HVAMP_8CH 8 channel HV amplifier for Fastino / Zotinos
  - Almazny mezzanine board for Mirny
  - Phaser: improved documentation, exposed the DAC coarse mixer and ``sif_sync``, exposed upconverter calibration
    and enabling/disabling of upconverter LO & RF outputs, added helpers to align Phaser updates to the
    RTIO timeline (``get_next_frame_mu()``).
  - Urukul: ``get()``, ``get_mu()``, ``get_att()``, and ``get_att_mu()`` functions added for AD9910 and AD9912.
* Softcore targets now use the RISC-V architecture (VexRiscv) instead of OR1K (mor1kx).
* Gateware FPU is supported on KC705 and Kasli 2.0.
* Faster compilation for large arrays/lists.
* Faster exception handling.
* Several exception handling bugs fixed.
* Support for a simpler shared library system with faster calls into the runtime. This is only used by the NAC3
  compiler (nac3ld) and improves RTIO output performance (test_pulse_rate) by 9-10%.
* Moninj improvements:
  - Urukul monitoring and frequency setting (through dashboard) is now supported.
  - Core device moninj is now proxied via the ``aqctl_moninj_proxy`` controller.
* The configuration entry ``rtio_clock`` supports multiple clocking settings, deprecating the usage
  of compile-time options.
* Added support for 100MHz RTIO clock in DRTIO.
* Previously detected RTIO async errors are reported to the host after each kernel terminates and a
  warning is logged. The warning is additional to the one already printed in the core device log
  immediately upon detection of the error.
* Extended Kasli gateware JSON description with configuration for SPI over DIO.
* TTL outputs can be now configured to work as a clock generator from the JSON.
* On Kasli, the number of FIFO lanes in the scalable events dispatcher (SED) can now be configured in
  the JSON.
* ``artiq_ddb_template`` generates edge-counter keys that start with the key of the corresponding
  TTL device (e.g. ``ttl_0_counter`` for the edge counter on TTL device ``ttl_0``).
* ``artiq_master`` now has an ``--experiment-subdir`` option to scan only a subdirectory of the
  repository when building the list of experiments.
* Experiments can now be submitted by-content.
* The master can now optionally log all experiments submitted into a CSV file.
* Removed worker DB warning for writing a dataset that is also in the archive.
* Experiments can now call ``scheduler.check_termination()`` to test if the user
  has requested graceful termination (a short sketch follows this list).
* ARTIQ command-line programs and controllers now exit cleanly on Ctrl-C.
* ``artiq_coremgmt reboot`` now reloads gateware as well, providing a more thorough and reliable
  device reset (7-series FPGAs only).
* Firmware and gateware can now be built on-demand on the M-Labs server using ``afws_client``
  (subscribers only). Self-compilation remains possible.
* Easier-to-use packaging via Nix Flakes.
* Python 3.10 support (experimental).
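
A short sketch of cooperative termination in a host-side ``run()`` method (the scan and
measurement helpers are hypothetical)::

    def run(self):
        # assumes self.setattr_device("scheduler") was called in build()
        for point in self.scan_points:
            if self.scheduler.check_termination():
                break
            self.measure(point)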

Breaking changes:

* Due to the new RISC-V CPU, the device database entry for the core device needs to be updated.
  The ``target`` parameter needs to be set to ``rv32ima`` for Kasli 1.x and to ``rv32g`` for all
  other boards. Freshly generated device database templates already contain this update.
* Updated Phaser-Upconverter default frequency 2.875 GHz. The new default uses the target PFD
  frequency of the hardware design.
* ``Phaser.init()`` now disables all Kasli-oscillators. This avoids full power RF output being
  generated for some configurations.
* Phaser: fixed coarse mixer frequency configuration
* Mirny: Added extra delays in ``ADF5356.sync()``. This avoids the need of an extra delay before
  calling ``ADF5356.init()``.
* The deprecated ``set_dataset(..., save=...)`` is no longer supported (a migration sketch
  follows this list).
* The ``PCA9548`` I2C switch class was renamed to ``I2CSwitch``, to accommodate support for PCA9547,
  and possibly other switches in future. Readback has been removed, and now only one channel per
  switch is supported.
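
A migration sketch for the removed ``save`` keyword, assuming the ``archive`` parameter is the
intended replacement::

    # deprecated form, no longer accepted
    self.set_dataset("counts", counts, save=True)
    # current form
    self.set_dataset("counts", counts, archive=True)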

ARTIQ-6
-------

@@ -11,7 +175,7 @@ Highlights:
* New hardware support:
  - Phaser, a quad channel 1GS/s RF generator card with dual IQ upconverter and dual 5MS/s
    ADC and FPGA.
-  - Zynq SoC core devices, enabling kernels to run on 1 GHz CPU core with a floating-point
+  - Zynq SoC core device (ZC706), enabling kernels to run on 1 GHz CPU core with a floating-point
    unit for faster computations. This currently requires an external
    repository (https://git.m-labs.hk/m-labs/artiq-zynq).
  - Mirny 4-channel wide-band PLL/VCO-based microwave frequency synthesiser
@@ -33,8 +197,11 @@ Highlights:
  - Improved performance for kernel RPC involving list and array.
* Coredevice SI to mu conversions now always return valid codes, or raise a ``ValueError``.
* Zotino now exposes ``voltage_to_mu()``
-* ``ad9910``: The maximum amplitude scale factor is now ``0x3fff`` (was ``0x3ffe``
-  before).
+* ``ad9910``:
+  - The maximum amplitude scale factor is now ``0x3fff`` (was ``0x3ffe`` before).
+  - The default single-tone profile is now 7 (was 0).
+  - Added option to ``set_mu()`` that affects the ASF, FTW and POW registers
+    instead of the single-tone profile register.
* Mirny now supports HW revision independent, human readable ``clk_sel`` parameters:
  "XO", "SMA", and "MMCX". Passing an integer is backwards compatible.
* Dashboard:
@@ -67,6 +234,9 @@ Breaking changes:
* ``quamash`` has been replaced with ``qasync``.
* Protocols are updated to use device endian.
* Analyzer dump format includes a byte for device endianness.
* To support variable numbers of Urukul cards in the future, the
  ``artiq.coredevice.suservo.SUServo`` constructor now accepts two device name lists,
  ``cpld_devices`` and ``dds_devices``, rather than four individual arguments.
* Experiment classes with underscore-prefixed names are now ignored when ``artiq_client``
  determines which experiment to submit (consistent with ``artiq_run``).
@@ -1,13 +1,7 @@
 import os


 def get_rev():
     return os.getenv("VERSIONEER_REV", default="unknown")


 def get_version():
-    override = os.getenv("VERSIONEER_OVERRIDE")
-    if override:
-        return override
-    srcroot = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir)
-    with open(os.path.join(srcroot, "MAJOR_VERSION"), "r") as f:
-        version = f.read().strip()
-    version += ".unknown"
-    if os.path.exists(os.path.join(srcroot, "BETA")):
-        version += ".beta"
-    return version
+    return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta")
@@ -1,22 +1,96 @@
 #!/usr/bin/env python3

-from PyQt5 import QtWidgets
+from PyQt5 import QtWidgets, QtCore, QtGui

 from artiq.applets.simple import SimpleApplet
+from artiq.tools import scale_from_metadata
+from artiq.gui.tools import LayoutWidget


-class NumberWidget(QtWidgets.QLCDNumber):
-    def __init__(self, args):
-        QtWidgets.QLCDNumber.__init__(self)
-        self.setDigitCount(args.digit_count)
+class QResponsiveLCDNumber(QtWidgets.QLCDNumber):
+    doubleClicked = QtCore.pyqtSignal()
+
+    def mouseDoubleClickEvent(self, event):
+        self.doubleClicked.emit()
+
+
+class QCancellableLineEdit(QtWidgets.QLineEdit):
+    editCancelled = QtCore.pyqtSignal()
+
+    def keyPressEvent(self, event):
+        if event.key() == QtCore.Qt.Key_Escape:
+            self.editCancelled.emit()
+        else:
+            super().keyPressEvent(event)
+
+
+class NumberWidget(LayoutWidget):
+    def __init__(self, args, req):
+        LayoutWidget.__init__(self)
         self.dataset_name = args.dataset
+        self.req = req
+        self.metadata = dict()

-    def data_changed(self, data, mods):
+        self.number_area = QtWidgets.QStackedWidget()
+        self.addWidget(self.number_area, 0, 0)
+
+        self.unit_area = QtWidgets.QLabel()
+        self.unit_area.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
+        self.addWidget(self.unit_area, 0, 1)
+
+        self.lcd_widget = QResponsiveLCDNumber()
+        self.lcd_widget.setDigitCount(args.digit_count)
+        self.lcd_widget.doubleClicked.connect(self.start_edit)
+        self.number_area.addWidget(self.lcd_widget)
+
+        self.edit_widget = QCancellableLineEdit()
+        self.edit_widget.setValidator(QtGui.QDoubleValidator())
+        self.edit_widget.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
+        self.edit_widget.editCancelled.connect(self.cancel_edit)
+        self.edit_widget.returnPressed.connect(self.confirm_edit)
+        self.number_area.addWidget(self.edit_widget)
+
+        font = QtGui.QFont()
+        font.setPointSize(60)
+        self.edit_widget.setFont(font)
+
+        unit_font = QtGui.QFont()
+        unit_font.setPointSize(20)
+        self.unit_area.setFont(unit_font)
+
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def start_edit(self):
+        # QLCDNumber value property contains the value of zero
+        # if the displayed value is not a number.
+        self.edit_widget.setText(str(self.lcd_widget.value()))
+        self.edit_widget.selectAll()
+        self.edit_widget.setFocus()
+        self.number_area.setCurrentWidget(self.edit_widget)
+
+    def confirm_edit(self):
+        scale = scale_from_metadata(self.metadata)
+        val = float(self.edit_widget.text())
+        val *= scale
+        self.req.set_dataset(self.dataset_name, val, **self.metadata)
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def cancel_edit(self):
+        self.number_area.setCurrentWidget(self.lcd_widget)
+
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            n = float(data[self.dataset_name][1])
+            self.metadata = metadata[self.dataset_name]
+            # This applet will degenerate other scalar types to native float on edit
+            # Use the dashboard ChangeEditDialog for consistent type casting
+            val = float(value[self.dataset_name])
+            scale = scale_from_metadata(self.metadata)
+            val /= scale
         except (KeyError, ValueError, TypeError):
-            n = "---"
-        self.display(n)
+            val = "---"
+
+        unit = self.metadata.get("unit", "")
+        self.unit_area.setText(unit)
+        self.lcd_widget.display(val)


 def main():
@@ -7,13 +7,13 @@ from artiq.applets.simple import SimpleApplet


 class Image(pyqtgraph.ImageView):
-    def __init__(self, args):
+    def __init__(self, args, req):
         pyqtgraph.ImageView.__init__(self)
         self.args = args

-    def data_changed(self, data, mods):
+    def data_changed(self, value, metadata, persist, mods):
         try:
-            img = data[self.args.img][1]
+            img = value[self.args.img]
         except KeyError:
             return
         self.setImage(img)
@@ -1,33 +1,47 @@
 #!/usr/bin/env python3

-import PyQt5 # make sure pyqtgraph imports Qt5
+import PyQt5  # make sure pyqtgraph imports Qt5
+from PyQt5.QtCore import QTimer
 import pyqtgraph

 from artiq.applets.simple import TitleApplet


 class HistogramPlot(pyqtgraph.PlotWidget):
-    def __init__(self, args):
+    def __init__(self, args, req):
         pyqtgraph.PlotWidget.__init__(self)
         self.args = args
+        self.timer = QTimer()
+        self.timer.setSingleShot(True)
+        self.timer.timeout.connect(self.length_warning)

-    def data_changed(self, data, mods, title):
+    def data_changed(self, value, metadata, persist, mods, title):
         try:
-            y = data[self.args.y][1]
+            y = value[self.args.y]
             if self.args.x is None:
                 x = None
             else:
-                x = data[self.args.x][1]
+                x = value[self.args.x]
         except KeyError:
             return
         if x is None:
             x = list(range(len(y)+1))

         if len(y) and len(x) == len(y) + 1:
+            self.timer.stop()
             self.clear()
             self.plot(x, y, stepMode=True, fillLevel=0,
                       brush=(0, 0, 255, 150))
             self.setTitle(title)
+        else:
+            if not self.timer.isActive():
+                self.timer.start(1000)
+
+    def length_warning(self):
+        self.clear()
+        text = "⚠️ dataset lengths mismatch:\n"\
+            "There should be one more bin boundaries than there are Y values"
+        self.addItem(pyqtgraph.TextItem(text))


 def main():
@@ -2,39 +2,58 @@
import numpy as np
import PyQt5  # make sure pyqtgraph imports Qt5
from PyQt5.QtCore import QTimer
import pyqtgraph

from artiq.applets.simple import TitleApplet


class XYPlot(pyqtgraph.PlotWidget):
    def __init__(self, args):
    def __init__(self, args, req):
        pyqtgraph.PlotWidget.__init__(self)
        self.args = args
        self.timer = QTimer()
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(self.length_warning)
        self.mismatch = {'X values': False,
                         'Error bars': False,
                         'Fit values': False}

    def data_changed(self, data, mods, title):
    def data_changed(self, value, metadata, persist, mods, title):
        try:
            y = data[self.args.y][1]
            y = value[self.args.y]
        except KeyError:
            return
        x = data.get(self.args.x, (False, None))[1]
        x = value.get(self.args.x, (False, None))
        if x is None:
            x = np.arange(len(y))
        error = data.get(self.args.error, (False, None))[1]
        fit = data.get(self.args.fit, (False, None))[1]
        error = value.get(self.args.error, (False, None))
        fit = value.get(self.args.fit, (False, None))

        if not len(y) or len(y) != len(x):
            return
            self.mismatch['X values'] = True
        else:
            self.mismatch['X values'] = False
        if error is not None and hasattr(error, "__len__"):
            if not len(error):
                error = None
            elif len(error) != len(y):
                return
                self.mismatch['Error bars'] = True
            else:
                self.mismatch['Error bars'] = False
        if fit is not None:
            if not len(fit):
                fit = None
            elif len(fit) != len(y):
                return
                self.mismatch['Fit values'] = True
            else:
                self.mismatch['Fit values'] = False
        if not any(self.mismatch.values()):
            self.timer.stop()
        else:
            if not self.timer.isActive():
                self.timer.start(1000)
            return

        self.clear()
        self.plot(x, y, pen=None, symbol="x")

@@ -50,6 +69,13 @@ class XYPlot(pyqtgraph.PlotWidget):
        xi = np.argsort(x)
        self.plot(x[xi], fit[xi])

    def length_warning(self):
        self.clear()
        text = "⚠️ dataset lengths mismatch:\n"
        errors = ', '.join([k for k, v in self.mismatch.items() if v])
        text = ' '.join([errors, "should have the same length as Y values"])
        self.addItem(pyqtgraph.TextItem(text))


def main():
    applet = TitleApplet(XYPlot)
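To stay out of the mismatch-warning path above, the x, error and fit datasets must all have the same length as y. A small sketch under assumed array values:

    import numpy as np

    y = np.array([0.1, 0.4, 0.9, 1.6])
    x = np.arange(len(y))          # same length as y
    error = np.full(len(y), 0.05)  # same length as y
    fit = x ** 2 / 10              # same length as y

    # Any of these having a different length would set the corresponding
    # self.mismatch flag and trigger length_warning() after one second.
    assert len(x) == len(error) == len(fit) == len(y)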
@@ -2,6 +2,7 @@
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
import pyqtgraph

from artiq.applets.simple import SimpleApplet

@@ -21,7 +22,7 @@ def _compute_ys(histogram_bins, histograms_counts):
# pyqtgraph.GraphicsWindow fails to behave like a regular Qt widget
# and breaks embedding. Do not use as top widget.
class XYHistPlot(QtWidgets.QSplitter):
    def __init__(self, args):
    def __init__(self, args, req):
        QtWidgets.QSplitter.__init__(self)
        self.resize(1000, 600)
        self.setWindowTitle("XY/Histogram")

@@ -37,6 +38,10 @@ class XYHistPlot(QtWidgets.QSplitter):
        self.hist_plot_data = None

        self.args = args
        self.timer = QTimer()
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(self.length_warning)
        self.mismatch = {'bins': False, 'xs': False}

    def _set_full_data(self, xs, histogram_bins, histograms_counts):
        self.xy_plot.clear()

@@ -59,9 +64,9 @@ class XYHistPlot(QtWidgets.QSplitter):
            point.histogram_index = index
            point.histogram_counts = counts

        self.hist_plot_data = self.hist_plot.plot(
            stepMode=True, fillLevel=0,
            brush=(0, 0, 255, 150))
        text = "click on a data point at the left\n"\
               "to see the corresponding histogram"
        self.hist_plot.addItem(pyqtgraph.TextItem(text))

    def _set_partial_data(self, xs, histograms_counts):
        ys = _compute_ys(self.histogram_bins, histograms_counts)

@@ -87,8 +92,17 @@ class XYHistPlot(QtWidgets.QSplitter):
        else:
            self.arrow.setPos(position)
        self.selected_index = spot_item.histogram_index
        self.hist_plot_data.setData(x=self.histogram_bins,
                                    y=spot_item.histogram_counts)

        if self.hist_plot_data is None:
            self.hist_plot.clear()
            self.hist_plot_data = self.hist_plot.plot(
                x=self.histogram_bins,
                y=spot_item.histogram_counts,
                stepMode=True, fillLevel=0,
                brush=(0, 0, 255, 150))
        else:
            self.hist_plot_data.setData(x=self.histogram_bins,
                                        y=spot_item.histogram_counts)

    def _can_use_partial(self, mods):
        if self.hist_plot_data is None:

@@ -110,18 +124,48 @@ class XYHistPlot(QtWidgets.QSplitter):
            return False
        return True

    def data_changed(self, data, mods):
    def data_changed(self, value, metadata, persist, mods):
        try:
            xs = data[self.args.xs][1]
            histogram_bins = data[self.args.histogram_bins][1]
            histograms_counts = data[self.args.histograms_counts][1]
            xs = value[self.args.xs]
            histogram_bins = value[self.args.histogram_bins]
            histograms_counts = value[self.args.histograms_counts]
        except KeyError:
            return
        if len(xs) != histograms_counts.shape[0]:
            self.mismatch['xs'] = True
        else:
            self.mismatch['xs'] = False
        if histograms_counts.shape[1] != len(histogram_bins) - 1:
            self.mismatch['bins'] = True
        else:
            self.mismatch['bins'] = False
        if any(self.mismatch.values()):
            if not self.timer.isActive():
                self.timer.start(1000)
            return
        else:
            self.timer.stop()
        if self._can_use_partial(mods):
            self._set_partial_data(xs, histograms_counts)
        else:
            self._set_full_data(xs, histogram_bins, histograms_counts)

    def length_warning(self):
        self.xy_plot.clear()
        self.hist_plot.clear()
        text = "⚠️ dataset lengths mismatch:\n\n"
        if self.mismatch['bins']:
            text = ''.join([text,
                            "bin boundaries should have the same length\n"
                            "as the first dimension of histogram counts."])
        if self.mismatch['bins'] and self.mismatch['xs']:
            text = ''.join([text, '\n\n'])
        if self.mismatch['xs']:
            text = ''.join([text,
                            "point abscissas should have the same length\n"
                            "as the second dimension of histogram counts."])
        self.xy_plot.addItem(pyqtgraph.TextItem(text))


def main():
    applet = SimpleApplet(XYHistPlot)
@@ -0,0 +1,34 @@
#!/usr/bin/env python3

from PyQt5 import QtWidgets

from artiq.applets.simple import SimpleApplet


class ProgressWidget(QtWidgets.QProgressBar):
    def __init__(self, args, req):
        QtWidgets.QProgressBar.__init__(self)
        self.setMinimum(args.min)
        self.setMaximum(args.max)
        self.dataset_value = args.value

    def data_changed(self, value, metadata, persist, mods):
        try:
            val = round(value[self.dataset_value])
        except (KeyError, ValueError, TypeError):
            val = 0
        self.setValue(val)


def main():
    applet = SimpleApplet(ProgressWidget)
    applet.add_dataset("value", "counter")
    applet.argparser.add_argument("--min", type=int, default=0,
                                  help="minimum (left) value of the bar")
    applet.argparser.add_argument("--max", type=int, default=100,
                                  help="maximum (right) value of the bar")
    applet.run()

if __name__ == "__main__":
    main()
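Taken together, these applet changes define the pattern a user-written applet now follows: the widget constructor takes (args, req), data_changed takes (value, metadata, persist, mods), and req can be used to write datasets back to the master. A minimal, hypothetical example in the same style, not part of the tree:

    #!/usr/bin/env python3

    from PyQt5 import QtWidgets

    from artiq.applets.simple import SimpleApplet


    class Doubler(QtWidgets.QLabel):
        """Shows a dataset and writes back its double (illustration only)."""
        def __init__(self, args, req):
            QtWidgets.QLabel.__init__(self)
            self.dataset_name = args.dataset
            self.req = req

        def data_changed(self, value, metadata, persist, mods):
            try:
                v = float(value[self.dataset_name])
            except (KeyError, ValueError, TypeError):
                return
            self.setText(str(v))
            # Write a derived dataset back through the request interface.
            self.req.set_dataset(self.dataset_name + "_doubled", 2 * v)


    def main():
        applet = SimpleApplet(Doubler)
        applet.add_dataset("dataset", "dataset to display")
        applet.run()

    if __name__ == "__main__":
        main()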
@@ -7,13 +7,112 @@ import string
from qasync import QEventLoop, QtWidgets, QtCore

from sipyco.sync_struct import Subscriber, process_mod
from sipyco.pc_rpc import AsyncioClient as RPCClient
from sipyco import pyon
from sipyco.pipe_ipc import AsyncioChildComm

from artiq.language.scan import ScanObject


logger = logging.getLogger(__name__)

class _AppletRequestInterface:
    def __init__(self):
        raise NotImplementedError

    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
        """
        Set a dataset.
        See documentation of ``artiq.language.environment.set_dataset``.
        """
        raise NotImplementedError

    def mutate_dataset(self, key, index, value):
        """
        Mutate a dataset.
        See documentation of ``artiq.language.environment.mutate_dataset``.
        """
        raise NotImplementedError

    def append_to_dataset(self, key, value):
        """
        Append to a dataset.
        See documentation of ``artiq.language.environment.append_to_dataset``.
        """
        raise NotImplementedError

    def set_argument_value(self, expurl, name, value):
        """
        Temporarily set the value of an argument in an experiment in the dashboard.
        The value resets to its default when the argument is recomputed.

        :param expurl: Experiment URL identifying the experiment in the dashboard. Example: 'repo:ArgumentsDemo'.
        :param name: Name of the argument in the experiment.
        :param value: Object representing the new temporary value of the argument. For ``Scannable`` arguments, this parameter
            should be a ``ScanObject``. The type of the ``ScanObject`` will be set as the selected type when this function is called.
        """
        raise NotImplementedError

class AppletRequestIPC(_AppletRequestInterface):
    def __init__(self, ipc):
        self.ipc = ipc

    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
        metadata = {}
        if unit is not None:
            metadata["unit"] = unit
        if scale is not None:
            metadata["scale"] = scale
        if precision is not None:
            metadata["precision"] = precision
        self.ipc.set_dataset(key, value, metadata, persist)

    def mutate_dataset(self, key, index, value):
        mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
        self.ipc.update_dataset(mod)

    def append_to_dataset(self, key, value):
        mod = {"action": "append", "path": [key, 1], "x": value}
        self.ipc.update_dataset(mod)

    def set_argument_value(self, expurl, name, value):
        if isinstance(value, ScanObject):
            value = value.describe()
        self.ipc.set_argument_value(expurl, name, value)

class AppletRequestRPC(_AppletRequestInterface):
    def __init__(self, loop, dataset_ctl):
        self.loop = loop
        self.dataset_ctl = dataset_ctl
        self.background_tasks = set()

    def _background(self, coro, *args, **kwargs):
        task = self.loop.create_task(coro(*args, **kwargs))
        self.background_tasks.add(task)
        task.add_done_callback(self.background_tasks.discard)

    def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
        metadata = {}
        if unit is not None:
            metadata["unit"] = unit
        if scale is not None:
            metadata["scale"] = scale
        if precision is not None:
            metadata["precision"] = precision
        self._background(self.dataset_ctl.set, key, value, metadata=metadata, persist=persist)

    def mutate_dataset(self, key, index, value):
        mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
        self._background(self.dataset_ctl.update, mod)

    def append_to_dataset(self, key, value):
        mod = {"action": "append", "path": [key, 1], "x": value}
        self._background(self.dataset_ctl.update, mod)

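Both request classes translate mutations into sipyco sync_struct mods whose path is [key, 1], i.e. they address the value slot of the (persist, value, metadata) tuple stored per dataset key. A sketch of the resulting mod dictionaries, with an illustrative key and values:

    # req.mutate_dataset("scan.results", 3, 0.25) becomes:
    mod_setitem = {"action": "setitem", "path": ["scan.results", 1],
                   "key": 3, "value": 0.25}

    # req.append_to_dataset("scan.results", 0.5) becomes:
    mod_append = {"action": "append", "path": ["scan.results", 1], "x": 0.5}

    # In embedded mode these are sent to the dashboard over IPC via
    # update_dataset(); in standalone mode they go to the master's
    # dataset_db RPC target in a background task.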
class AppletIPCClient(AsyncioChildComm):
|
||||
def set_close_cb(self, close_cb):
|
||||
self.close_cb = close_cb
|
||||
|
@ -64,12 +163,30 @@ class AppletIPCClient(AsyncioChildComm):
|
|||
exc_info=True)
|
||||
self.close_cb()
|
||||
|
||||
def subscribe(self, datasets, init_cb, mod_cb):
|
||||
def subscribe(self, datasets, init_cb, mod_cb, dataset_prefixes=[], *, loop):
|
||||
self.write_pyon({"action": "subscribe",
|
||||
"datasets": datasets})
|
||||
"datasets": datasets,
|
||||
"dataset_prefixes": dataset_prefixes})
|
||||
self.init_cb = init_cb
|
||||
self.mod_cb = mod_cb
|
||||
asyncio.ensure_future(self.listen())
|
||||
self.listen_task = loop.create_task(self.listen())
|
||||
|
||||
def set_dataset(self, key, value, metadata, persist=None):
|
||||
self.write_pyon({"action": "set_dataset",
|
||||
"key": key,
|
||||
"value": value,
|
||||
"metadata": metadata,
|
||||
"persist": persist})
|
||||
|
||||
def update_dataset(self, mod):
|
||||
self.write_pyon({"action": "update_dataset",
|
||||
"mod": mod})
|
||||
|
||||
def set_argument_value(self, expurl, name, value):
|
||||
self.write_pyon({"action": "set_argument_value",
|
||||
"expurl": expurl,
|
||||
"name": name,
|
||||
"value": value})
|
||||
|
||||
|
||||
class SimpleApplet:
|
||||
|
@ -91,8 +208,11 @@ class SimpleApplet:
|
|||
"for dataset notifications "
|
||||
"(ignored in embedded mode)")
|
||||
group.add_argument(
|
||||
"--port", default=3250, type=int,
|
||||
help="TCP port to connect to")
|
||||
"--port-notify", default=3250, type=int,
|
||||
help="TCP port to connect to for notifications (ignored in embedded mode)")
|
||||
group.add_argument(
|
||||
"--port-control", default=3251, type=int,
|
||||
help="TCP port to connect to for control (ignored in embedded mode)")
|
||||
|
||||
self._arggroup_datasets = self.argparser.add_argument_group("datasets")
|
||||
|
||||
|
@ -113,6 +233,9 @@ class SimpleApplet:
|
|||
self.embed = os.getenv("ARTIQ_APPLET_EMBED")
|
||||
self.datasets = {getattr(self.args, arg.replace("-", "_"))
|
||||
for arg in self.dataset_args}
|
||||
# Optional prefixes (dataset sub-trees) to match subscriptions against;
|
||||
# currently only used by out-of-tree subclasses (ndscan).
|
||||
self.dataset_prefixes = []
|
||||
|
||||
def qasync_init(self):
|
||||
app = QtWidgets.QApplication([])
|
||||
|
@ -128,8 +251,21 @@ class SimpleApplet:
|
|||
if self.embed is not None:
|
||||
self.ipc.close()
|
||||
|
||||
def req_init(self):
|
||||
if self.embed is None:
|
||||
dataset_ctl = RPCClient()
|
||||
self.loop.run_until_complete(dataset_ctl.connect_rpc(
|
||||
self.args.server, self.args.port_control, "master_dataset_db"))
|
||||
self.req = AppletRequestRPC(self.loop, dataset_ctl)
|
||||
else:
|
||||
self.req = AppletRequestIPC(self.ipc)
|
||||
|
||||
def req_close(self):
|
||||
if self.embed is None:
|
||||
self.req.dataset_ctl.close_rpc()
|
||||
|
||||
def create_main_widget(self):
|
||||
self.main_widget = self.main_widget_class(self.args)
|
||||
self.main_widget = self.main_widget_class(self.args, self.req)
|
||||
if self.embed is not None:
|
||||
self.ipc.set_close_cb(self.main_widget.close)
|
||||
if os.name == "nt":
|
||||
|
@ -162,6 +298,14 @@ class SimpleApplet:
|
|||
self.data = data
|
||||
return data
|
||||
|
||||
    def is_dataset_subscribed(self, key):
        if key in self.datasets:
            return True
        for prefix in self.dataset_prefixes:
            if key.startswith(prefix):
                return True
        return False

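dataset_prefixes lets a subclass subscribe to whole sub-trees instead of listing each key; as noted earlier it is currently only used by out-of-tree code such as ndscan. A hedged sketch of how a subclass might use it (class and prefix are hypothetical):

    from artiq.applets.simple import SimpleApplet

    class PrefixApplet(SimpleApplet):
        def args_init(self):
            SimpleApplet.args_init(self)
            # Receive mods for every dataset under "ndscan." in addition
            # to the explicitly declared dataset arguments.
            self.dataset_prefixes.append("ndscan.")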
def filter_mod(self, mod):
|
||||
if self.embed is not None:
|
||||
# the parent already filters for us
|
||||
|
@ -170,14 +314,19 @@ class SimpleApplet:
|
|||
if mod["action"] == "init":
|
||||
return True
|
||||
if mod["path"]:
|
||||
return mod["path"][0] in self.datasets
|
||||
return self.is_dataset_subscribed(mod["path"][0])
|
||||
elif mod["action"] in {"setitem", "delitem"}:
|
||||
return mod["key"] in self.datasets
|
||||
return self.is_dataset_subscribed(mod["key"])
|
||||
else:
|
||||
return False
|
||||
|
||||
    def emit_data_changed(self, data, mod_buffer):
        self.main_widget.data_changed(data, mod_buffer)
        persist = dict()
        value = dict()
        metadata = dict()
        for k, d in data.items():
            persist[k], value[k], metadata[k] = d
        self.main_widget.data_changed(value, metadata, persist, mod_buffer)

def flush_mod_buffer(self):
|
||||
self.emit_data_changed(self.data, self.mod_buffer)
|
||||
|
@ -192,8 +341,8 @@ class SimpleApplet:
|
|||
self.mod_buffer.append(mod)
|
||||
else:
|
||||
self.mod_buffer = [mod]
|
||||
asyncio.get_event_loop().call_later(self.args.update_delay,
|
||||
self.flush_mod_buffer)
|
||||
self.loop.call_later(self.args.update_delay,
|
||||
self.flush_mod_buffer)
|
||||
else:
|
||||
self.emit_data_changed(self.data, [mod])
|
||||
|
||||
|
@ -202,9 +351,11 @@ class SimpleApplet:
|
|||
self.subscriber = Subscriber("datasets",
|
||||
self.sub_init, self.sub_mod)
|
||||
self.loop.run_until_complete(self.subscriber.connect(
|
||||
self.args.server, self.args.port))
|
||||
self.args.server, self.args.port_notify))
|
||||
else:
|
||||
self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod)
|
||||
self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod,
|
||||
dataset_prefixes=self.dataset_prefixes,
|
||||
loop=self.loop)
|
||||
|
||||
def unsubscribe(self):
|
||||
if self.embed is None:
|
||||
|
@ -216,12 +367,16 @@ class SimpleApplet:
|
|||
try:
|
||||
self.ipc_init()
|
||||
try:
|
||||
self.create_main_widget()
|
||||
self.subscribe()
|
||||
self.req_init()
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
self.create_main_widget()
|
||||
self.subscribe()
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
finally:
|
||||
self.unsubscribe()
|
||||
finally:
|
||||
self.unsubscribe()
|
||||
self.req_close()
|
||||
finally:
|
||||
self.ipc_close()
|
||||
finally:
|
||||
|
@ -260,4 +415,9 @@ class TitleApplet(SimpleApplet):
|
|||
title = self.args.title
|
||||
else:
|
||||
title = None
|
||||
self.main_widget.data_changed(data, mod_buffer, title)
|
||||
persist = dict()
|
||||
value = dict()
|
||||
metadata = dict()
|
||||
for k, d in data.items():
|
||||
persist[k], value[k], metadata[k] = d
|
||||
self.main_widget.data_changed(value, metadata, persist, mod_buffer, title)
|
||||
|
|
|
@@ -20,11 +20,46 @@ class Model(DictSyncTreeSepModel):
        DictSyncTreeSepModel.__init__(self, ".", ["Dataset", "Value"], init)

    def convert(self, k, v, column):
        return short_format(v[1])
        return short_format(v[1], v[2])


class DatasetCtl:
    def __init__(self, master_host, master_port):
        self.master_host = master_host
        self.master_port = master_port

    async def _execute_rpc(self, op_name, key_or_mod, value=None, persist=None, metadata=None):
        logger.info("Starting %s operation on %s", op_name, key_or_mod)
        try:
            remote = RPCClient()
            await remote.connect_rpc(self.master_host, self.master_port,
                                     "master_dataset_db")
            try:
                if op_name == "set":
                    await remote.set(key_or_mod, value, persist, metadata)
                elif op_name == "update":
                    await remote.update(key_or_mod)
                else:
                    logger.error("Invalid operation: %s", op_name)
                    return
            finally:
                remote.close_rpc()
        except:
            logger.error("Failed %s operation on %s", op_name,
                         key_or_mod, exc_info=True)
        else:
            logger.info("Finished %s operation on %s", op_name,
                        key_or_mod)

    async def set(self, key, value, persist=None, metadata=None):
        await self._execute_rpc("set", key, value, persist, metadata)

    async def update(self, mod):
        await self._execute_rpc("update", mod)

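DatasetCtl wraps the master's master_dataset_db RPC target so the dock no longer opens its own connections for uploads. A minimal usage sketch; host, port and keys are placeholders:

    import asyncio

    async def demo():
        ctl = DatasetCtl("::1", 3251)  # master host and control port
        # Upload a value together with its metadata, as upload_clicked() does.
        await ctl.set("calib.offset", 1.25, metadata={"unit": "V", "scale": 1.0})
        # Apply a sync_struct mod, as the applet request interface does.
        await ctl.update({"action": "append", "path": ["calib.history", 1], "x": 1.25})

    # asyncio.run(demo())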
class DatasetsDock(QtWidgets.QDockWidget):
|
||||
def __init__(self, datasets_sub, master_host, master_port):
|
||||
def __init__(self, dataset_sub, dataset_ctl):
|
||||
QtWidgets.QDockWidget.__init__(self, "Datasets")
|
||||
self.setObjectName("Datasets")
|
||||
self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
|
||||
|
@ -62,10 +97,9 @@ class DatasetsDock(QtWidgets.QDockWidget):
|
|||
self.table.addAction(upload_action)
|
||||
|
||||
self.set_model(Model(dict()))
|
||||
datasets_sub.add_setmodel_callback(self.set_model)
|
||||
dataset_sub.add_setmodel_callback(self.set_model)
|
||||
|
||||
self.master_host = master_host
|
||||
self.master_port = master_port
|
||||
self.dataset_ctl = dataset_ctl
|
||||
|
||||
def _search_datasets(self):
|
||||
if hasattr(self, "table_model_filter"):
|
||||
|
@ -82,30 +116,14 @@ class DatasetsDock(QtWidgets.QDockWidget):
|
|||
self.table_model_filter.setSourceModel(self.table_model)
|
||||
self.table.setModel(self.table_model_filter)
|
||||
|
||||
async def _upload_dataset(self, name, value,):
|
||||
logger.info("Uploading dataset '%s' to master...", name)
|
||||
try:
|
||||
remote = RPCClient()
|
||||
await remote.connect_rpc(self.master_host, self.master_port,
|
||||
"master_dataset_db")
|
||||
try:
|
||||
await remote.set(name, value)
|
||||
finally:
|
||||
remote.close_rpc()
|
||||
except:
|
||||
logger.error("Failed uploading dataset '%s'",
|
||||
name, exc_info=True)
|
||||
else:
|
||||
logger.info("Finished uploading dataset '%s'", name)
|
||||
|
||||
def upload_clicked(self):
|
||||
idx = self.table.selectedIndexes()
|
||||
if idx:
|
||||
idx = self.table_model_filter.mapToSource(idx[0])
|
||||
key = self.table_model.index_to_key(idx)
|
||||
if key is not None:
|
||||
persist, value = self.table_model.backing_store[key]
|
||||
asyncio.ensure_future(self._upload_dataset(key, value))
|
||||
persist, value, metadata = self.table_model.backing_store[key]
|
||||
asyncio.ensure_future(self.dataset_ctl.set(key, value, metadata=metadata))
|
||||
|
||||
def save_state(self):
|
||||
return bytes(self.table.header().saveState())
|
||||
|
|
|
@ -10,22 +10,14 @@ import h5py
|
|||
from sipyco import pyon
|
||||
|
||||
from artiq import __artiq_dir__ as artiq_dir
|
||||
from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
|
||||
from artiq.gui.tools import (LayoutWidget, WheelFilter,
|
||||
log_level_to_name, get_open_file_name)
|
||||
from artiq.gui.entries import procdesc_to_entry
|
||||
from artiq.master.worker import Worker, log_worker_exception
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class _WheelFilter(QtCore.QObject):
|
||||
def eventFilter(self, obj, event):
|
||||
if (event.type() == QtCore.QEvent.Wheel and
|
||||
event.modifiers() != QtCore.Qt.NoModifier):
|
||||
event.ignore()
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||
def __init__(self, dock):
|
||||
QtWidgets.QTreeWidget.__init__(self)
|
||||
|
@ -46,7 +38,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
|||
self.setStyleSheet("QTreeWidget {background: " +
|
||||
self.palette().midlight().color().name() + " ;}")
|
||||
|
||||
self.viewport().installEventFilter(_WheelFilter(self.viewport()))
|
||||
self.viewport().installEventFilter(WheelFilter(self.viewport(), True))
|
||||
|
||||
self._groups = dict()
|
||||
self._arg_to_widgets = dict()
|
||||
|
@ -378,9 +370,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
|||
|
||||
|
||||
class LocalDatasetDB:
|
||||
def __init__(self, datasets_sub):
|
||||
self.datasets_sub = datasets_sub
|
||||
datasets_sub.add_setmodel_callback(self.init)
|
||||
def __init__(self, dataset_sub):
|
||||
self.dataset_sub = dataset_sub
|
||||
dataset_sub.add_setmodel_callback(self.init)
|
||||
|
||||
def init(self, data):
|
||||
self._data = data
|
||||
|
@ -389,11 +381,11 @@ class LocalDatasetDB:
|
|||
return self._data.backing_store[key][1]
|
||||
|
||||
def update(self, mod):
|
||||
self.datasets_sub.update(mod)
|
||||
self.dataset_sub.update(mod)
|
||||
|
||||
|
||||
class ExperimentsArea(QtWidgets.QMdiArea):
|
||||
def __init__(self, root, datasets_sub):
|
||||
def __init__(self, root, dataset_sub):
|
||||
QtWidgets.QMdiArea.__init__(self)
|
||||
self.pixmap = QtGui.QPixmap(os.path.join(
|
||||
artiq_dir, "gui", "logo_ver.svg"))
|
||||
|
@ -402,11 +394,11 @@ class ExperimentsArea(QtWidgets.QMdiArea):
|
|||
|
||||
self.open_experiments = []
|
||||
|
||||
self._ddb = LocalDatasetDB(datasets_sub)
|
||||
self._ddb = LocalDatasetDB(dataset_sub)
|
||||
|
||||
self.worker_handlers = {
|
||||
"get_device_db": lambda: {},
|
||||
"get_device": lambda k: {"type": "dummy"},
|
||||
"get_device": lambda key, resolve_alias=False: {"type": "dummy"},
|
||||
"get_dataset": self._ddb.get,
|
||||
"update_dataset": self._ddb.update,
|
||||
}
|
||||
|
@ -516,5 +508,9 @@ class ExperimentsArea(QtWidgets.QMdiArea):
|
|||
self.open_experiments.append(dock)
|
||||
return dock
|
||||
|
||||
def set_argument_value(self, expurl, name, value):
|
||||
logger.warning("Unable to set argument '%s', dropping change. "
|
||||
"'set_argument_value' not supported in browser.", name)
|
||||
|
||||
def on_dock_closed(self, dock):
|
||||
self.open_experiments.remove(dock)
|
||||
|
|
|
@ -71,7 +71,7 @@ class ZoomIconView(QtWidgets.QListView):
|
|||
self._char_width = QtGui.QFontMetrics(self.font()).averageCharWidth()
|
||||
self.setViewMode(self.IconMode)
|
||||
w = self._char_width*self.default_size
|
||||
self.setIconSize(QtCore.QSize(w, w*self.aspect))
|
||||
self.setIconSize(QtCore.QSize(w, int(w*self.aspect)))
|
||||
self.setFlow(self.LeftToRight)
|
||||
self.setResizeMode(self.Adjust)
|
||||
self.setWrapping(True)
|
||||
|
@ -102,13 +102,14 @@ class Hdf5FileSystemModel(QtWidgets.QFileSystemModel):
|
|||
h5 = open_h5(info)
|
||||
if h5 is not None:
|
||||
try:
|
||||
expid = pyon.decode(h5["expid"][()])
|
||||
start_time = datetime.fromtimestamp(h5["start_time"][()])
|
||||
expid = pyon.decode(h5["expid"][()]) if "expid" in h5 else dict()
|
||||
start_time = datetime.fromtimestamp(h5["start_time"][()]) if "start_time" in h5 else "<none>"
|
||||
v = ("artiq_version: {}\nrepo_rev: {}\nfile: {}\n"
|
||||
"class_name: {}\nrid: {}\nstart_time: {}").format(
|
||||
h5["artiq_version"][()], expid["repo_rev"],
|
||||
expid["file"], expid["class_name"],
|
||||
h5["rid"][()], start_time)
|
||||
h5["artiq_version"].asstr()[()] if "artiq_version" in h5 else "<none>",
|
||||
expid.get("repo_rev", "<none>"),
|
||||
expid.get("file", "<none>"), expid.get("class_name", "<none>"),
|
||||
h5["rid"][()] if "rid" in h5 else "<none>", start_time)
|
||||
return v
|
||||
except:
|
||||
logger.warning("unable to read metadata from %s",
|
||||
|
@ -174,31 +175,45 @@ class FilesDock(QtWidgets.QDockWidget):
|
|||
logger.debug("loading datasets from %s", info.filePath())
|
||||
with f:
|
||||
try:
|
||||
expid = pyon.decode(f["expid"][()])
|
||||
start_time = datetime.fromtimestamp(f["start_time"][()])
|
||||
expid = pyon.decode(f["expid"][()]) if "expid" in f else dict()
|
||||
start_time = datetime.fromtimestamp(f["start_time"][()]) if "start_time" in f else "<none>"
|
||||
v = {
|
||||
"artiq_version": f["artiq_version"][()],
|
||||
"repo_rev": expid["repo_rev"],
|
||||
"file": expid["file"],
|
||||
"class_name": expid["class_name"],
|
||||
"rid": f["rid"][()],
|
||||
"artiq_version": f["artiq_version"].asstr()[()] if "artiq_version" in f else "<none>",
|
||||
"repo_rev": expid.get("repo_rev", "<none>"),
|
||||
"file": expid.get("file", "<none>"),
|
||||
"class_name": expid.get("class_name", "<none>"),
|
||||
"rid": f["rid"][()] if "rid" in f else "<none>",
|
||||
"start_time": start_time,
|
||||
}
|
||||
self.metadata_changed.emit(v)
|
||||
except:
|
||||
logger.warning("unable to read metadata from %s",
|
||||
info.filePath(), exc_info=True)
|
||||
rd = dict()
|
||||
|
||||
rd = {}
|
||||
if "archive" in f:
|
||||
rd = {k: (True, v[()]) for k, v in f["archive"].items()}
|
||||
def visitor(k, v):
|
||||
if isinstance(v, h5py.Dataset):
|
||||
# v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
|
||||
# See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
|
||||
rd[k] = (True, v[()], dict(v.attrs))
|
||||
|
||||
f["archive"].visititems(visitor)
|
||||
|
||||
if "datasets" in f:
|
||||
for k, v in f["datasets"].items():
|
||||
if k in rd:
|
||||
logger.warning("dataset '%s' is both in archive and "
|
||||
"outputs", k)
|
||||
rd[k] = (True, v[()])
|
||||
if rd:
|
||||
self.datasets.init(rd)
|
||||
def visitor(k, v):
|
||||
if isinstance(v, h5py.Dataset):
|
||||
if k in rd:
|
||||
logger.warning("dataset '%s' is both in archive "
|
||||
"and outputs", k)
|
||||
# v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
|
||||
# See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
|
||||
rd[k] = (True, v[()], dict(v.attrs))
|
||||
|
||||
f["datasets"].visititems(visitor)
|
||||
|
||||
self.datasets.init(rd)
|
||||
|
||||
self.dataset_changed.emit(info.filePath())
|
||||
|
||||
def list_activated(self, idx):
|
||||
|
|
|
@@ -2,6 +2,7 @@ import os
import subprocess

from migen import *
from migen.build.platforms.sinara import kasli
from misoc.interconnect.csr import *
from misoc.integration.builder import *


@@ -57,14 +58,19 @@ def build_artiq_soc(soc, argdict):
    builder = Builder(soc, **argdict)
    builder.software_packages = []
    builder.add_software_package("bootloader", os.path.join(firmware_dir, "bootloader"))
    if isinstance(soc, AMPSoC):
        builder.add_software_package("libm")
        builder.add_software_package("libprintf")
        is_kasli_v1 = isinstance(soc.platform, kasli.Platform) and soc.platform.hw_rev in ("v1.0", "v1.1")
        kernel_cpu_type = "vexriscv" if is_kasli_v1 else "vexriscv-g"
        builder.add_software_package("libm", cpu_type=kernel_cpu_type)
        builder.add_software_package("libprintf", cpu_type=kernel_cpu_type)
        builder.add_software_package("libunwind", cpu_type=kernel_cpu_type)
        builder.add_software_package("ksupport", os.path.join(firmware_dir, "ksupport"), cpu_type=kernel_cpu_type)
        # Generate unwinder for soft float target (ARTIQ runtime)
        # If the kernel lacks FPU, then the runtime unwinder is already generated
        if not is_kasli_v1:
            builder.add_software_package("libunwind")
        builder.add_software_package("ksupport", os.path.join(firmware_dir, "ksupport"))
        if not soc.config["DRTIO_ROLE"] == "satellite":
            builder.add_software_package("runtime", os.path.join(firmware_dir, "runtime"))
        else:
            # Assume DRTIO satellite.
            builder.add_software_package("satman", os.path.join(firmware_dir, "satman"))
    try:
        builder.build()

@@ -21,13 +21,19 @@ class scoped(object):
    set of variables resolved as globals
    """

class remote(object):
    """
    :ivar remote_fn: (bool) whether the function is run on a remote device,
        meaning arguments are received remotely and the return value is sent remotely
    """

# Typed versions of untyped nodes
class argT(ast.arg, commontyped):
    pass

class ClassDefT(ast.ClassDef):
    _types = ("constructor_type",)
class FunctionDefT(ast.FunctionDef, scoped):
class FunctionDefT(ast.FunctionDef, scoped, remote):
    _types = ("signature_type",)
class QuotedFunctionDefT(FunctionDefT):
    """

@@ -58,7 +64,7 @@ class BinOpT(ast.BinOp, commontyped):
    pass
class BoolOpT(ast.BoolOp, commontyped):
    pass
class CallT(ast.Call, commontyped):
class CallT(ast.Call, commontyped, remote):
    """
    :ivar iodelay: (:class:`iodelay.Expr`)
    :ivar arg_exprs: (dict of str to :class:`iodelay.Expr`)
|
|
@ -38,6 +38,9 @@ class TInt(types.TMono):
|
|||
def one():
|
||||
return 1
|
||||
|
||||
def TInt8():
|
||||
return TInt(types.TValue(8))
|
||||
|
||||
def TInt32():
|
||||
return TInt(types.TValue(32))
|
||||
|
||||
|
@ -123,18 +126,23 @@ class TException(types.TMono):
|
|||
# * File, line and column where it was raised (str, int, int).
|
||||
# * Message, which can contain substitutions {0}, {1} and {2} (str).
|
||||
# * Three 64-bit integers, parameterizing the message (numpy.int64).
|
||||
# These attributes are prefixed with `#` so that users cannot access them,
|
||||
# and we don't have to do string allocation in the runtime.
|
||||
# #__name__ is now a string key in the host. TStr may not be an actual
|
||||
# CSlice in the runtime, they might be a CSlice with length = i32::MAX and
|
||||
# ptr = string key in the host.
|
||||
|
||||
# Keep this in sync with the function ARTIQIRGenerator.alloc_exn.
|
||||
attributes = OrderedDict([
|
||||
("__name__", TStr()),
|
||||
("__file__", TStr()),
|
||||
("__line__", TInt32()),
|
||||
("__col__", TInt32()),
|
||||
("__func__", TStr()),
|
||||
("__message__", TStr()),
|
||||
("__param0__", TInt64()),
|
||||
("__param1__", TInt64()),
|
||||
("__param2__", TInt64()),
|
||||
("#__name__", TInt32()),
|
||||
("#__file__", TStr()),
|
||||
("#__line__", TInt32()),
|
||||
("#__col__", TInt32()),
|
||||
("#__func__", TStr()),
|
||||
("#__message__", TStr()),
|
||||
("#__param0__", TInt64()),
|
||||
("#__param1__", TInt64()),
|
||||
("#__param2__", TInt64()),
|
||||
])
|
||||
|
||||
def __init__(self, name="Exception", id=0):
|
||||
|
@ -169,7 +177,9 @@ def fn_list():
|
|||
return types.TConstructor(TList())
|
||||
|
||||
def fn_array():
|
||||
return types.TConstructor(TArray())
|
||||
# numpy.array() is actually a "magic" macro that is expanded in-place, but
|
||||
# just as for builtin functions, we do not want to quote it, etc.
|
||||
return types.TBuiltinFunction("array")
|
||||
|
||||
def fn_Exception():
|
||||
return types.TExceptionConstructor(TException("Exception"))
|
||||
|
@ -237,6 +247,12 @@ def fn_at_mu():
|
|||
def fn_rtio_log():
|
||||
return types.TBuiltinFunction("rtio_log")
|
||||
|
||||
def fn_subkernel_await():
|
||||
return types.TBuiltinFunction("subkernel_await")
|
||||
|
||||
def fn_subkernel_preload():
|
||||
return types.TBuiltinFunction("subkernel_preload")
|
||||
|
||||
# Accessors
|
||||
|
||||
def is_none(typ):
|
||||
|
@ -315,8 +331,11 @@ def is_iterable(typ):
|
|||
return is_listish(typ) or is_range(typ)
|
||||
|
||||
def get_iterable_elt(typ):
|
||||
# TODO: Arrays count as listish, but this returns the innermost element type for
|
||||
# n-dimensional arrays, rather than the n-1 dimensional result of iterating over
|
||||
# the first axis, which makes the name a bit misleading.
|
||||
if is_str(typ) or is_bytes(typ) or is_bytearray(typ):
|
||||
return TInt(types.TValue(8))
|
||||
return TInt8()
|
||||
elif types._is_pointer(typ) or is_iterable(typ):
|
||||
return typ.find()["elt"].find()
|
||||
else:
|
||||
|
@ -332,5 +351,5 @@ def is_allocated(typ):
|
|||
is_float(typ) or is_range(typ) or
|
||||
types._is_pointer(typ) or types.is_function(typ) or
|
||||
types.is_external_function(typ) or types.is_rpc(typ) or
|
||||
types.is_method(typ) or types.is_tuple(typ) or
|
||||
types.is_value(typ))
|
||||
types.is_subkernel(typ) or types.is_method(typ) or
|
||||
types.is_tuple(typ) or types.is_value(typ))
|
||||
|
|
|
@ -5,7 +5,8 @@ the references to the host objects and translates the functions
|
|||
annotated as ``@kernel`` when they are referenced.
|
||||
"""
|
||||
|
||||
import sys, os, re, linecache, inspect, textwrap, types as pytypes, numpy
|
||||
import typing
|
||||
import os, re, linecache, inspect, textwrap, types as pytypes, numpy
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
from pythonparser import ast, algorithm, source, diagnostic, parse_buffer
|
||||
|
@ -18,6 +19,13 @@ from . import types, builtins, asttyped, math_fns, prelude
|
|||
from .transforms import ASTTypedRewriter, Inferencer, IntMonomorphizer, TypedtreePrinter
|
||||
from .transforms.asttyped_rewriter import LocalExtractor
|
||||
|
||||
try:
|
||||
# From numpy=1.25.0 dispatching for `__array_function__` is done via
|
||||
# a C wrapper: https://github.com/numpy/numpy/pull/23020
|
||||
from numpy.core._multiarray_umath import _ArrayFunctionDispatcher
|
||||
except ImportError:
|
||||
_ArrayFunctionDispatcher = None
|
||||
|
||||
|
||||
class SpecializedFunction:
|
||||
def __init__(self, instance_type, host_function):
|
||||
|
@ -45,8 +53,48 @@ class EmbeddingMap:
|
|||
self.object_forward_map = {}
|
||||
self.object_reverse_map = {}
|
||||
self.module_map = {}
|
||||
|
||||
# type_map connects the host Python `type` to the pair of associated
|
||||
# `(TInstance, TConstructor)`s. The `used_…_names` sets cache the
|
||||
# respective `.name`s for O(1) collision avoidance.
|
||||
self.type_map = {}
|
||||
self.used_instance_type_names = set()
|
||||
self.used_constructor_type_names = set()
|
||||
|
||||
self.function_map = {}
|
||||
self.str_forward_map = {}
|
||||
self.str_reverse_map = {}
|
||||
|
||||
self.preallocate_runtime_exception_names(["RuntimeError",
|
||||
"RTIOUnderflow",
|
||||
"RTIOOverflow",
|
||||
"RTIODestinationUnreachable",
|
||||
"DMAError",
|
||||
"I2CError",
|
||||
"CacheError",
|
||||
"SPIError",
|
||||
"0:ZeroDivisionError",
|
||||
"0:IndexError",
|
||||
"UnwrapNoneError",
|
||||
"SubkernelError"])
|
||||
|
||||
    def preallocate_runtime_exception_names(self, names):
        for i, name in enumerate(names):
            if ":" not in name:
                name = "0:artiq.coredevice.exceptions." + name
            exn_id = self.store_str(name)
            assert exn_id == i

    def store_str(self, s):
        if s in self.str_forward_map:
            return self.str_forward_map[s]
        str_id = len(self.str_forward_map)
        self.str_forward_map[s] = str_id
        self.str_reverse_map[str_id] = s
        return str_id

    def retrieve_str(self, str_id):
        return self.str_reverse_map[str_id]
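Since exception names are now interned as host-side string keys, the preallocation above guarantees the runtime exception names receive ids 0..N-1 in the order listed, and store_str is idempotent for already-stored strings. A small illustrative sketch, assuming EmbeddingMap is constructed as in the stitcher:

    emap = EmbeddingMap()
    # Ids are handed out in insertion order; runtime exceptions come first.
    assert emap.store_str("0:artiq.coredevice.exceptions.RuntimeError") == 0
    sid = emap.store_str("my experiment string")
    assert emap.retrieve_str(sid) == "my experiment string"
    assert emap.store_str("my experiment string") == sid  # stable on re-store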
|
||||
|
||||
# Modules
|
||||
def store_module(self, module, module_type):
|
||||
|
@ -60,16 +108,6 @@ class EmbeddingMap:
|
|||
|
||||
# Types
|
||||
def store_type(self, host_type, instance_type, constructor_type):
|
||||
self._rename_type(instance_type)
|
||||
self.type_map[host_type] = (instance_type, constructor_type)
|
||||
|
||||
def retrieve_type(self, host_type):
|
||||
return self.type_map[host_type]
|
||||
|
||||
def has_type(self, host_type):
|
||||
return host_type in self.type_map
|
||||
|
||||
def _rename_type(self, new_instance_type):
|
||||
# Generally, user-defined types that have exact same name (which is to say, classes
|
||||
# defined inside functions) do not pose a problem to the compiler. The two places which
|
||||
# cannot handle this are:
|
||||
|
@ -78,12 +116,29 @@ class EmbeddingMap:
|
|||
# Since handling #2 requires renaming on ARTIQ side anyway, it's more straightforward
|
||||
# to do it once when embedding (since non-embedded code cannot define classes in
|
||||
# functions). Also, easier to debug.
|
||||
n = 0
|
||||
for host_type in self.type_map:
|
||||
instance_type, constructor_type = self.type_map[host_type]
|
||||
if instance_type.name == new_instance_type.name:
|
||||
n += 1
|
||||
new_instance_type.name = "{}.{}".format(new_instance_type.name, n)
|
||||
suffix = 0
|
||||
new_instance_name = instance_type.name
|
||||
new_constructor_name = constructor_type.name
|
||||
while True:
|
||||
if (new_instance_name not in self.used_instance_type_names
|
||||
and new_constructor_name not in self.used_constructor_type_names):
|
||||
break
|
||||
suffix += 1
|
||||
new_instance_name = f"{instance_type.name}.{suffix}"
|
||||
new_constructor_name = f"{constructor_type.name}.{suffix}"
|
||||
|
||||
self.used_instance_type_names.add(new_instance_name)
|
||||
instance_type.name = new_instance_name
|
||||
self.used_constructor_type_names.add(new_constructor_name)
|
||||
constructor_type.name = new_constructor_name
|
||||
|
||||
self.type_map[host_type] = (instance_type, constructor_type)
|
||||
|
||||
def retrieve_type(self, host_type):
|
||||
return self.type_map[host_type]
|
||||
|
||||
def has_type(self, host_type):
|
||||
return host_type in self.type_map
|
||||
|
||||
def attribute_count(self):
|
||||
count = 0
|
||||
|
@ -130,7 +185,22 @@ class EmbeddingMap:
|
|||
obj_typ, _ = self.type_map[type(obj_ref)]
|
||||
yield obj_id, obj_ref, obj_typ
|
||||
|
||||
def subkernels(self):
|
||||
subkernels = {}
|
||||
for k, v in self.object_forward_map.items():
|
||||
if hasattr(v, "artiq_embedded"):
|
||||
if v.artiq_embedded.destination is not None:
|
||||
subkernels[k] = v
|
||||
return subkernels
|
||||
|
||||
def has_rpc(self):
|
||||
return any(filter(
|
||||
lambda x: (inspect.isfunction(x) or inspect.ismethod(x)) and \
|
||||
(not hasattr(x, "artiq_embedded") or x.artiq_embedded.destination is None),
|
||||
self.object_forward_map.values()
|
||||
))
|
||||
|
||||
def has_rpc_or_subkernel(self):
|
||||
return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x),
|
||||
self.object_forward_map.values()))
|
||||
|
||||
|
@ -138,6 +208,7 @@ class EmbeddingMap:
|
|||
class ASTSynthesizer:
|
||||
def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None):
|
||||
self.source = ""
|
||||
self.source_last_new_line = 0
|
||||
self.source_buffer = source.Buffer(self.source, "<synthesized>")
|
||||
self.embedding_map = embedding_map
|
||||
self.value_map = value_map
|
||||
|
@ -156,20 +227,90 @@ class ASTSynthesizer:
|
|||
return source.Range(self.source_buffer, range_from, range_to,
|
||||
expanded_from=self.expanded_from)
|
||||
|
||||
def _add_iterable(self, fragment):
|
||||
# Since DILocation points on the beginning of the piece of source
|
||||
# we don't care if the fragment's end will overflow LLVM's limit.
|
||||
if len(self.source) - self.source_last_new_line >= 2**16:
|
||||
fragment = "\\\n" + fragment
|
||||
self.source_last_new_line = len(self.source) + 2
|
||||
return self._add(fragment)
|
||||
|
||||
def fast_quote_list(self, value):
|
||||
elts = [None] * len(value)
|
||||
is_T = False
|
||||
if len(value) > 0:
|
||||
v = value[0]
|
||||
is_T = True
|
||||
if isinstance(v, int):
|
||||
T = int
|
||||
elif isinstance(v, float):
|
||||
T = float
|
||||
elif isinstance(v, numpy.int32):
|
||||
T = numpy.int32
|
||||
elif isinstance(v, numpy.int64):
|
||||
T = numpy.int64
|
||||
else:
|
||||
is_T = False
|
||||
if is_T:
|
||||
for v in value:
|
||||
if not isinstance(v, T):
|
||||
is_T = False
|
||||
break
|
||||
if is_T:
|
||||
is_int = T != float
|
||||
if T == int:
|
||||
typ = builtins.TInt()
|
||||
elif T == float:
|
||||
typ = builtins.TFloat()
|
||||
elif T == numpy.int32:
|
||||
typ = builtins.TInt32()
|
||||
elif T == numpy.int64:
|
||||
typ = builtins.TInt64()
|
||||
else:
|
||||
assert False
|
||||
text = [repr(elt) for elt in value]
|
||||
start = len(self.source)
|
||||
self.source += ", ".join(text)
|
||||
if is_int:
|
||||
for i, (v, t) in enumerate(zip(value, text)):
|
||||
l = len(t)
|
||||
elts[i] = asttyped.NumT(
|
||||
n=int(v), ctx=None, type=typ,
|
||||
loc=source.Range(
|
||||
self.source_buffer, start, start + l,
|
||||
expanded_from=self.expanded_from))
|
||||
start += l + 2
|
||||
else:
|
||||
for i, (v, t) in enumerate(zip(value, text)):
|
||||
l = len(t)
|
||||
elts[i] = asttyped.NumT(
|
||||
n=v, ctx=None, type=typ,
|
||||
loc=source.Range(
|
||||
self.source_buffer, start, start + l,
|
||||
expanded_from=self.expanded_from))
|
||||
start += l + 2
|
||||
else:
|
||||
for index, elt in enumerate(value):
|
||||
elts[index] = self.quote(elt)
|
||||
if index < len(value) - 1:
|
||||
self._add_iterable(", ")
|
||||
return elts
|
||||
|
||||
def quote(self, value):
|
||||
"""Construct an AST fragment equal to `value`."""
|
||||
if value is None:
|
||||
typ = builtins.TNone()
|
||||
return asttyped.NameConstantT(value=value, type=typ,
|
||||
loc=self._add(repr(value)))
|
||||
elif value is True or value is False:
|
||||
elif isinstance(value, (bool, numpy.bool_)):
|
||||
typ = builtins.TBool()
|
||||
return asttyped.NameConstantT(value=value, type=typ,
|
||||
loc=self._add(repr(value)))
|
||||
elif value is numpy.float:
|
||||
coerced = bool(value)
|
||||
return asttyped.NameConstantT(value=coerced, type=typ,
|
||||
loc=self._add(repr(coerced)))
|
||||
elif value is float:
|
||||
typ = builtins.fn_float()
|
||||
return asttyped.NameConstantT(value=None, type=typ,
|
||||
loc=self._add("numpy.float"))
|
||||
loc=self._add("float"))
|
||||
elif value is numpy.int32:
|
||||
typ = builtins.fn_int32()
|
||||
return asttyped.NameConstantT(value=None, type=typ,
|
||||
|
@ -203,35 +344,28 @@ class ASTSynthesizer:
|
|||
loc=self._add(repr(value)))
|
||||
elif isinstance(value, str):
|
||||
return asttyped.StrT(s=value, ctx=None, type=builtins.TStr(),
|
||||
loc=self._add(repr(value)))
|
||||
loc=self._add_iterable(repr(value)))
|
||||
elif isinstance(value, bytes):
|
||||
return asttyped.StrT(s=value, ctx=None, type=builtins.TBytes(),
|
||||
loc=self._add(repr(value)))
|
||||
loc=self._add_iterable(repr(value)))
|
||||
elif isinstance(value, bytearray):
|
||||
quote_loc = self._add('`')
|
||||
repr_loc = self._add(repr(value))
|
||||
unquote_loc = self._add('`')
|
||||
quote_loc = self._add_iterable('`')
|
||||
repr_loc = self._add_iterable(repr(value))
|
||||
unquote_loc = self._add_iterable('`')
|
||||
loc = quote_loc.join(unquote_loc)
|
||||
|
||||
return asttyped.QuoteT(value=value, type=builtins.TByteArray(), loc=loc)
|
||||
elif isinstance(value, list):
|
||||
begin_loc = self._add("[")
|
||||
elts = []
|
||||
for index, elt in enumerate(value):
|
||||
elts.append(self.quote(elt))
|
||||
if index < len(value) - 1:
|
||||
self._add(", ")
|
||||
end_loc = self._add("]")
|
||||
begin_loc = self._add_iterable("[")
|
||||
elts = self.fast_quote_list(value)
|
||||
end_loc = self._add_iterable("]")
|
||||
return asttyped.ListT(elts=elts, ctx=None, type=builtins.TList(),
|
||||
begin_loc=begin_loc, end_loc=end_loc,
|
||||
loc=begin_loc.join(end_loc))
|
||||
elif isinstance(value, tuple):
|
||||
begin_loc = self._add("(")
|
||||
elts = []
|
||||
for index, elt in enumerate(value):
|
||||
elts.append(self.quote(elt))
|
||||
self._add(", ")
|
||||
end_loc = self._add(")")
|
||||
begin_loc = self._add_iterable("(")
|
||||
elts = self.fast_quote_list(value)
|
||||
end_loc = self._add_iterable(")")
|
||||
return asttyped.TupleT(elts=elts, ctx=None,
|
||||
type=types.TTuple([e.type for e in elts]),
|
||||
begin_loc=begin_loc, end_loc=end_loc,
|
||||
|
@ -241,7 +375,9 @@ class ASTSynthesizer:
|
|||
elif inspect.isfunction(value) or inspect.ismethod(value) or \
|
||||
isinstance(value, pytypes.BuiltinFunctionType) or \
|
||||
isinstance(value, SpecializedFunction) or \
|
||||
isinstance(value, numpy.ufunc):
|
||||
isinstance(value, numpy.ufunc) or \
|
||||
(isinstance(value, _ArrayFunctionDispatcher) if
|
||||
_ArrayFunctionDispatcher is not None else False):
|
||||
if inspect.ismethod(value):
|
||||
quoted_self = self.quote(value.__self__)
|
||||
function_type = self.quote_function(value.__func__, self.expanded_from)
|
||||
|
@ -350,7 +486,7 @@ class ASTSynthesizer:
|
|||
return asttyped.QuoteT(value=value, type=instance_type,
|
||||
loc=loc)
|
||||
|
||||
def call(self, callee, args, kwargs, callback=None):
|
||||
def call(self, callee, args, kwargs, callback=None, remote_fn=False):
|
||||
"""
|
||||
Construct an AST fragment calling a function specified by
|
||||
an AST node `function_node`, with given arguments.
|
||||
|
@ -394,7 +530,7 @@ class ASTSynthesizer:
|
|||
starargs=None, kwargs=None,
|
||||
type=types.TVar(), iodelay=None, arg_exprs={},
|
||||
begin_loc=begin_loc, end_loc=end_loc, star_loc=None, dstar_loc=None,
|
||||
loc=callee_node.loc.join(end_loc))
|
||||
loc=callee_node.loc.join(end_loc), remote_fn=remote_fn)
|
||||
|
||||
if callback is not None:
|
||||
node = asttyped.CallT(
|
||||
|
@ -429,7 +565,7 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
|
|||
arg=node.arg, annotation=None,
|
||||
arg_loc=node.arg_loc, colon_loc=node.colon_loc, loc=node.loc)
|
||||
|
||||
def visit_quoted_function(self, node, function):
|
||||
def visit_quoted_function(self, node, function, remote_fn):
|
||||
extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
|
||||
extractor.visit(node)
|
||||
|
||||
|
@ -446,11 +582,11 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
|
|||
node = asttyped.QuotedFunctionDefT(
|
||||
typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
|
||||
signature_type=types.TVar(), return_type=types.TVar(),
|
||||
name=node.name, args=node.args, returns=node.returns,
|
||||
name=node.name, args=node.args, returns=None,
|
||||
body=node.body, decorator_list=node.decorator_list,
|
||||
keyword_loc=node.keyword_loc, name_loc=node.name_loc,
|
||||
arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
|
||||
loc=node.loc)
|
||||
loc=node.loc, remote_fn=remote_fn)
|
||||
|
||||
try:
|
||||
self.env_stack.append(node.typing_env)
|
||||
|
@ -522,7 +658,7 @@ class StitchingInferencer(Inferencer):
|
|||
self.engine.process(diag)
|
||||
return
|
||||
|
||||
# Figure out what ARTIQ type does the value of the attribute have.
|
||||
# Figure out the ARTIQ type of the value of the attribute.
|
||||
# We do this by quoting it, as if to serialize. This has some
|
||||
# overhead (i.e. synthesizing a source buffer), but has the advantage
|
||||
# of having the host-to-ARTIQ mapping code in only one place and
|
||||
|
@ -658,7 +794,7 @@ class TypedtreeHasher(algorithm.Visitor):
|
|||
return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields))
|
||||
|
||||
class Stitcher:
|
||||
def __init__(self, core, dmgr, engine=None, print_as_rpc=True):
|
||||
def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]):
|
||||
self.core = core
|
||||
self.dmgr = dmgr
|
||||
if engine is None:
|
||||
|
@ -682,12 +818,21 @@ class Stitcher:
|
|||
|
||||
self.embedding_map = EmbeddingMap()
|
||||
self.value_map = defaultdict(lambda: [])
|
||||
self.definitely_changed = False
|
||||
|
||||
self.destination = destination
|
||||
self.first_call = True
|
||||
# for non-annotated subkernels:
|
||||
# main kernel inferencer output with types of arguments
|
||||
self.subkernel_arg_types = subkernel_arg_types
|
||||
|
||||
def stitch_call(self, function, args, kwargs, callback=None):
|
||||
# We synthesize source code for the initial call so that
|
||||
# diagnostics would have something meaningful to display to the user.
|
||||
synthesizer = self._synthesizer(self._function_loc(function.artiq_embedded.function))
|
||||
call_node = synthesizer.call(function, args, kwargs, callback)
|
||||
# first call of a subkernel will get its arguments from remote (DRTIO)
|
||||
remote_fn = self.destination != 0
|
||||
call_node = synthesizer.call(function, args, kwargs, callback, remote_fn=remote_fn)
|
||||
synthesizer.finalize()
|
||||
self.typedtree.append(call_node)
|
||||
|
||||
|
@ -702,13 +847,19 @@ class Stitcher:
|
|||
old_attr_count = None
|
||||
while True:
|
||||
inferencer.visit(self.typedtree)
|
||||
typedtree_hash = typedtree_hasher.visit(self.typedtree)
|
||||
attr_count = self.embedding_map.attribute_count()
|
||||
if self.definitely_changed:
|
||||
changed = True
|
||||
self.definitely_changed = False
|
||||
else:
|
||||
typedtree_hash = typedtree_hasher.visit(self.typedtree)
|
||||
attr_count = self.embedding_map.attribute_count()
|
||||
changed = old_attr_count != attr_count or \
|
||||
old_typedtree_hash != typedtree_hash
|
||||
old_typedtree_hash = typedtree_hash
|
||||
old_attr_count = attr_count
|
||||
|
||||
if old_typedtree_hash == typedtree_hash and old_attr_count == attr_count:
|
||||
if not changed:
|
||||
break
|
||||
old_typedtree_hash = typedtree_hash
|
||||
old_attr_count = attr_count
|
||||
|
||||
# After we've discovered every referenced attribute, check if any kernel_invariant
|
||||
# specifications refers to ones we didn't encounter.
|
||||
|
@ -793,6 +944,10 @@ class Stitcher:
|
|||
return [diagnostic.Diagnostic("note",
|
||||
"in kernel function here", {},
|
||||
call_loc)]
|
||||
elif fn_kind == 'subkernel':
|
||||
return [diagnostic.Diagnostic("note",
|
||||
"in subkernel call here", {},
|
||||
call_loc)]
|
||||
else:
|
||||
assert False
|
||||
else:
|
||||
|
@ -812,7 +967,7 @@ class Stitcher:
|
|||
self._function_loc(function),
|
||||
notes=self._call_site_note(loc, fn_kind))
|
||||
self.engine.process(diag)
|
||||
elif fn_kind == 'rpc' and param.default is not inspect.Parameter.empty:
|
||||
elif fn_kind == 'rpc' or fn_kind == 'subkernel' and param.default is not inspect.Parameter.empty:
|
||||
notes = []
|
||||
notes.append(diagnostic.Diagnostic("note",
|
||||
"expanded from here while trying to infer a type for an"
|
||||
|
@ -831,11 +986,21 @@ class Stitcher:
|
|||
Inferencer(engine=self.engine).visit(ast)
|
||||
IntMonomorphizer(engine=self.engine).visit(ast)
|
||||
return ast.type
|
||||
else:
|
||||
# Let the rest of the program decide.
|
||||
return types.TVar()
|
||||
elif fn_kind == 'kernel' and self.first_call and self.destination != 0:
|
||||
# subkernels do not have access to the main kernel code to infer
|
||||
# arg types - so these are cached and passed onto subkernel
|
||||
# compilation, to avoid having to annotate them fully
|
||||
for name, typ in self.subkernel_arg_types:
|
||||
if param.name == name:
|
||||
return typ
|
||||
|
||||
# Let the rest of the program decide.
|
||||
return types.TVar()
|
||||
|
||||
def _quote_embedded_function(self, function, flags, remote_fn=False):
|
||||
# we are now parsing new functions... definitely changed the type
|
||||
self.definitely_changed = True
|
||||
|
||||
def _quote_embedded_function(self, function, flags):
|
||||
if isinstance(function, SpecializedFunction):
|
||||
host_function = function.host_function
|
||||
else:
|
||||
|
@ -902,13 +1067,11 @@ class Stitcher:
|
|||
|
||||
# Parse.
|
||||
source_buffer = source.Buffer(source_code, filename, first_line)
|
||||
lexer = source_lexer.Lexer(source_buffer, version=sys.version_info[0:2],
|
||||
diagnostic_engine=self.engine)
|
||||
lexer = source_lexer.Lexer(source_buffer, version=(3, 6), diagnostic_engine=self.engine)
|
||||
lexer.indent = [(initial_indent,
|
||||
source.Range(source_buffer, 0, len(initial_whitespace)),
|
||||
initial_whitespace)]
|
||||
parser = source_parser.Parser(lexer, version=sys.version_info[0:2],
|
||||
diagnostic_engine=self.engine)
|
||||
parser = source_parser.Parser(lexer, version=(3, 6), diagnostic_engine=self.engine)
|
||||
function_node = parser.file_input().body[0]
|
||||
|
||||
# Mangle the name, since we put everything into a single module.
|
||||
|
@ -933,7 +1096,7 @@ class Stitcher:
|
|||
engine=self.engine, prelude=self.prelude,
|
||||
globals=self.globals, host_environment=host_environment,
|
||||
quote=self._quote)
|
||||
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function)
|
||||
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function, remote_fn)
|
||||
function_node.flags = flags
|
||||
|
||||
# Add it into our typedtree so that it gets inferenced and codegen'd.
|
||||
|
@ -945,26 +1108,108 @@ class Stitcher:
|
|||
return function_node
|
||||
|
||||
def _extract_annot(self, function, annot, kind, call_loc, fn_kind):
|
||||
if annot is None:
|
||||
annot = builtins.TNone()
|
||||
|
||||
if not isinstance(annot, types.Type):
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
"type annotation for {kind}, '{annot}', is not an ARTIQ type",
|
||||
{"kind": kind, "annot": repr(annot)},
|
||||
self._function_loc(function),
|
||||
notes=self._call_site_note(call_loc, fn_kind))
|
||||
self.engine.process(diag)
|
||||
|
||||
return types.TVar()
|
||||
if isinstance(function, SpecializedFunction):
|
||||
host_function = function.host_function
|
||||
else:
|
||||
host_function = function
|
||||
|
||||
if hasattr(host_function, 'artiq_embedded'):
|
||||
embedded_function = host_function.artiq_embedded.function
|
||||
else:
|
||||
embedded_function = host_function
|
||||
|
||||
if isinstance(embedded_function, str):
|
||||
embedded_function = host_function
|
||||
|
||||
return self._to_artiq_type(
|
||||
annot,
|
||||
function=function,
|
||||
kind=kind,
|
||||
eval_in_scope=lambda x: eval(x, embedded_function.__globals__),
|
||||
call_loc=call_loc,
|
||||
fn_kind=fn_kind)
|
||||
|
||||
def _to_artiq_type(
|
||||
self, annot, *, function, kind: str, eval_in_scope, call_loc: str, fn_kind: str
|
||||
) -> types.Type:
|
||||
if isinstance(annot, str):
|
||||
try:
|
||||
annot = eval_in_scope(annot)
|
||||
except Exception:
|
||||
diag = diagnostic.Diagnostic(
|
||||
"error",
|
||||
"type annotation for {kind}, {annot}, cannot be evaluated",
|
||||
{"kind": kind, "annot": repr(annot)},
|
||||
self._function_loc(function),
|
||||
notes=self._call_site_note(call_loc, fn_kind))
|
||||
self.engine.process(diag)
|
||||
|
||||
if isinstance(annot, types.Type):
|
||||
return annot
|
||||
|
||||
# Convert built-in Python types to ARTIQ ones.
|
||||
if annot is None:
|
||||
return builtins.TNone()
|
||||
elif annot is numpy.int64:
|
||||
return builtins.TInt64()
|
||||
elif annot is numpy.int32:
|
||||
return builtins.TInt32()
|
||||
elif annot is float:
|
||||
return builtins.TFloat()
|
||||
elif annot is bool:
|
||||
return builtins.TBool()
|
||||
elif annot is str:
|
||||
return builtins.TStr()
|
||||
elif annot is bytes:
|
||||
return builtins.TBytes()
|
||||
elif annot is bytearray:
|
||||
return builtins.TByteArray()
|
||||
|
||||
# Convert generic Python types to ARTIQ ones.
|
||||
generic_ty = typing.get_origin(annot)
|
||||
if generic_ty is not None:
|
||||
type_args = typing.get_args(annot)
|
||||
artiq_args = [
|
||||
self._to_artiq_type(
|
||||
x,
|
||||
function=function,
|
||||
kind=kind,
|
||||
eval_in_scope=eval_in_scope,
|
||||
call_loc=call_loc,
|
||||
fn_kind=fn_kind)
|
||||
for x in type_args
|
||||
]
|
||||
|
||||
if generic_ty is list and len(artiq_args) == 1:
|
||||
return builtins.TList(artiq_args[0])
|
||||
elif generic_ty is tuple:
|
||||
return types.TTuple(artiq_args)
|
||||
|
||||
# Otherwise report an unknown type and just use a fresh tyvar.
|
||||
|
||||
if annot is int:
|
||||
message = (
|
||||
"type annotation for {kind}, 'int' cannot be used as an ARTIQ type. "
|
||||
"Use numpy's int32 or int64 instead."
|
||||
)
|
||||
ty = builtins.TInt()
|
||||
else:
|
||||
message = "type annotation for {kind}, '{annot}', is not an ARTIQ type"
|
||||
ty = types.TVar()
|
||||
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
message,
|
||||
{"kind": kind, "annot": repr(annot)},
|
||||
self._function_loc(function),
|
||||
notes=self._call_site_note(call_loc, fn_kind))
|
||||
self.engine.process(diag)
|
||||
|
||||
return ty
|
||||
|
||||
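A minimal sketch of what the conversion above implies for host-side annotations; the kernel method below is hypothetical and only illustrates the mapping, assuming numpy is imported on the host side.
# Hypothetical host function; annotation handling follows _to_artiq_type above.
def pulse(self, duration: float, counts: list[numpy.int32]) -> numpy.int64:
    ...
# duration -> builtins.TFloat()
# counts   -> builtins.TList(builtins.TInt32())
# return   -> builtins.TInt64()
# Annotating "duration: int" would instead emit the diagnostic suggesting
# numpy.int32 or numpy.int64.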
def _quote_syscall(self, function, loc):
|
||||
signature = inspect.signature(function)
|
||||
|
||||
arg_types = OrderedDict()
|
||||
optarg_types = OrderedDict()
|
||||
for param in signature.parameters.values():
|
||||
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
|
@ -1002,6 +1247,40 @@ class Stitcher:
|
|||
self.functions[function] = function_type
|
||||
return function_type
|
||||
|
||||
def _quote_subkernel(self, function, loc):
|
||||
if isinstance(function, SpecializedFunction):
|
||||
host_function = function.host_function
|
||||
else:
|
||||
host_function = function
|
||||
ret_type = builtins.TNone()
|
||||
signature = inspect.signature(host_function)
|
||||
|
||||
if signature.return_annotation is not inspect.Signature.empty:
|
||||
ret_type = self._extract_annot(host_function, signature.return_annotation,
|
||||
"return type", loc, fn_kind='subkernel')
|
||||
arg_types = OrderedDict()
|
||||
optarg_types = OrderedDict()
|
||||
for param in signature.parameters.values():
|
||||
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
"subkernels must only use positional arguments; '{argument}' isn't",
|
||||
{"argument": param.name},
|
||||
self._function_loc(function),
|
||||
notes=self._call_site_note(loc, fn_kind='subkernel'))
|
||||
self.engine.process(diag)
|
||||
|
||||
arg_type = self._type_of_param(function, loc, param, fn_kind='subkernel')
|
||||
if param.default is inspect.Parameter.empty:
|
||||
arg_types[param.name] = arg_type
|
||||
else:
|
||||
optarg_types[param.name] = arg_type
|
||||
|
||||
function_type = types.TSubkernel(arg_types, optarg_types, ret_type,
|
||||
sid=self.embedding_map.store_object(host_function),
|
||||
destination=host_function.artiq_embedded.destination)
|
||||
self.functions[function] = function_type
|
||||
return function_type
|
||||
|
||||
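For context, a hedged sketch of the experiment-side declaration this quoting path is meant to handle; the decorator arguments and the method are illustrative only, not taken from this diff.
# Hypothetical subkernel: positional arguments only, destination in 1..255.
@subkernel(destination=1)
def measure(self, n: numpy.int32) -> numpy.int64:
    ...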
def _quote_rpc(self, function, loc):
|
||||
if isinstance(function, SpecializedFunction):
|
||||
host_function = function.host_function
|
||||
|
@ -1061,8 +1340,18 @@ class Stitcher:
|
|||
(host_function.artiq_embedded.core_name is None and
|
||||
host_function.artiq_embedded.portable is False and
|
||||
host_function.artiq_embedded.syscall is None and
|
||||
host_function.artiq_embedded.destination is None and
|
||||
host_function.artiq_embedded.forbidden is False):
|
||||
self._quote_rpc(function, loc)
|
||||
elif host_function.artiq_embedded.destination is not None and \
|
||||
host_function.artiq_embedded.destination != self.destination:
|
||||
# treat subkernels as kernels if running on the same device
|
||||
if not 0 < host_function.artiq_embedded.destination <= 255:
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
"subkernel destination must be between 1 and 255 (inclusive)", {},
|
||||
self._function_loc(host_function))
|
||||
self.engine.process(diag)
|
||||
self._quote_subkernel(function, loc)
|
||||
elif host_function.artiq_embedded.function is not None:
|
||||
if host_function.__name__ == "<lambda>":
|
||||
note = diagnostic.Diagnostic("note",
|
||||
|
@ -1086,8 +1375,13 @@ class Stitcher:
|
|||
notes=[note])
|
||||
self.engine.process(diag)
|
||||
|
||||
destination = host_function.artiq_embedded.destination
|
||||
# remote_fn only for first call in subkernels
|
||||
remote_fn = destination is not None and self.first_call
|
||||
self._quote_embedded_function(function,
|
||||
flags=host_function.artiq_embedded.flags)
|
||||
flags=host_function.artiq_embedded.flags,
|
||||
remote_fn=remote_fn)
|
||||
self.first_call = False
|
||||
elif host_function.artiq_embedded.syscall is not None:
|
||||
# Insert a storage-less global whose type instructs the compiler
|
||||
# to perform a system call instead of a regular call.
|
||||
|
|
|
@ -135,6 +135,7 @@ class NamedValue(Value):
|
|||
def __init__(self, typ, name):
|
||||
super().__init__(typ)
|
||||
self.name, self.function = name, None
|
||||
self.is_removed = False
|
||||
|
||||
def set_name(self, new_name):
|
||||
if self.function is not None:
|
||||
|
@ -235,7 +236,7 @@ class Instruction(User):
|
|||
self.drop_references()
|
||||
# Check this after drop_references in case this
|
||||
# is a self-referencing phi.
|
||||
assert not any(self.uses)
|
||||
assert all(use.is_removed for use in self.uses)
|
||||
|
||||
def replace_with(self, value):
|
||||
self.replace_all_uses_with(value)
|
||||
|
@ -370,7 +371,7 @@ class BasicBlock(NamedValue):
|
|||
self.remove_from_parent()
|
||||
# Check this after erasing instructions in case the block
|
||||
# loops into itself.
|
||||
assert not any(self.uses)
|
||||
assert all(use.is_removed for use in self.uses)
|
||||
|
||||
def prepend(self, insn):
|
||||
assert isinstance(insn, Instruction)
|
||||
|
@ -705,6 +706,81 @@ class SetLocal(Instruction):
|
|||
def value(self):
|
||||
return self.operands[1]
|
||||
|
||||
class GetArgFromRemote(Instruction):
|
||||
"""
|
||||
An instruction that receives function arguments from remote
|
||||
(i.e. a subkernel in DRTIO context)
|
||||
|
||||
:ivar arg_name: (string) argument name
|
||||
:ivar arg_type: argument type
|
||||
"""
|
||||
|
||||
"""
|
||||
:param arg_name: (string) argument name
|
||||
:param arg_type: argument type
|
||||
"""
|
||||
def __init__(self, arg_name, arg_type, name=""):
|
||||
assert isinstance(arg_name, str)
|
||||
super().__init__([], arg_type, name)
|
||||
self.arg_name = arg_name
|
||||
self.arg_type = arg_type
|
||||
|
||||
def copy(self, mapper):
|
||||
self_copy = super().copy(mapper)
|
||||
self_copy.arg_name = self.arg_name
|
||||
self_copy.arg_type = self.arg_type
|
||||
return self_copy
|
||||
|
||||
def opcode(self):
|
||||
return "getargfromremote({})".format(repr(self.arg_name))
|
||||
|
||||
class GetOptArgFromRemote(GetArgFromRemote):
|
||||
"""
|
||||
An instruction that may or may not retrieve an optional function argument
|
||||
from remote, depending on the number of values received by firmware.
|
||||
|
||||
:ivar rcv_count: number of received values,
|
||||
determined by firmware
|
||||
:ivar index: (integer) index of the current argument,
|
||||
in reference to remote arguments
|
||||
"""
|
||||
|
||||
"""
|
||||
:param rcv_count: number of received values
|
||||
:param index: (integer) index of the current argument,
|
||||
in reference to remote arguments
|
||||
"""
|
||||
def __init__(self, arg_name, arg_type, rcv_count, index, name=""):
|
||||
super().__init__(arg_name, arg_type, name)
|
||||
self.rcv_count = rcv_count
|
||||
self.index = index
|
||||
|
||||
def copy(self, mapper):
|
||||
self_copy = super().copy(mapper)
|
||||
self_copy.rcv_count = self.rcv_count
|
||||
self_copy.index = self.index
|
||||
return self_copy
|
||||
|
||||
def opcode(self):
|
||||
return "getoptargfromremote({})".format(repr(self.arg_name))
|
||||
|
||||
class SubkernelAwaitArgs(Instruction):
|
||||
"""
|
||||
A builtin instruction that takes the minimum and maximum number of received messages as operands,
|
||||
and a list of received types.
|
||||
|
||||
:ivar arg_types: (list of types) types of passed arguments (including optional)
|
||||
"""
|
||||
|
||||
"""
|
||||
:param arg_types: (list of types) types of passed arguments (including optional)
|
||||
"""
|
||||
|
||||
def __init__(self, operands, arg_types, name=None):
|
||||
assert isinstance(arg_types, list)
|
||||
self.arg_types = arg_types
|
||||
super().__init__(operands, builtins.TNone(), name)
|
||||
|
||||
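A rough, hypothetical sketch of how an IR generator could emit these new instructions; the argument names, counts and types are illustrative, not taken from this diff.
# Receive one mandatory and up to one optional argument from the remote side.
self.append(ir.SubkernelAwaitArgs([ir.Constant(1, builtins.TInt32()),
                                   ir.Constant(2, builtins.TInt32())],
                                  [builtins.TInt32(), builtins.TInt64()]))
n = self.append(ir.GetArgFromRemote("n", builtins.TInt32(), name="arg.n"))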
class GetAttr(Instruction):
|
||||
"""
|
||||
An instruction that loads an attribute from an object,
|
||||
|
@ -727,7 +803,7 @@ class GetAttr(Instruction):
|
|||
typ = obj.type.attributes[attr]
|
||||
else:
|
||||
typ = obj.type.constructor.attributes[attr]
|
||||
if types.is_function(typ) or types.is_rpc(typ):
|
||||
if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
|
||||
typ = types.TMethod(obj.type, typ)
|
||||
super().__init__([obj], typ, name)
|
||||
self.attr = attr
|
||||
|
@ -1189,14 +1265,18 @@ class IndirectBranch(Terminator):
|
|||
class Return(Terminator):
|
||||
"""
|
||||
A return instruction.
|
||||
:param remote_return: (bool)
|
||||
marks a return in subkernel context,
|
||||
where the return value is sent back through DRTIO
|
||||
"""
|
||||
|
||||
"""
|
||||
:param value: (:class:`Value`) return value
|
||||
"""
|
||||
def __init__(self, value, name=""):
|
||||
def __init__(self, value, remote_return=False, name=""):
|
||||
assert isinstance(value, Value)
|
||||
super().__init__([value], builtins.TNone(), name)
|
||||
self.remote_return = remote_return
|
||||
|
||||
def opcode(self):
|
||||
return "return"
|
||||
|
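The flag is consumed later in this diff by the IR generator (remote_return=self.current_remote_fn); as a minimal hypothetical illustration:
# Terminate a subkernel body so its result is sent back over DRTIO.
block.append(ir.Return(result, remote_return=True))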
@ -1245,9 +1325,9 @@ class Raise(Terminator):
|
|||
if len(self.operands) > 1:
|
||||
return self.operands[1]
|
||||
|
||||
class Reraise(Terminator):
|
||||
class Resume(Terminator):
|
||||
"""
|
||||
A reraise instruction.
|
||||
A resume instruction.
|
||||
"""
|
||||
|
||||
"""
|
||||
|
@ -1261,7 +1341,7 @@ class Reraise(Terminator):
|
|||
super().__init__(operands, builtins.TNone(), name)
|
||||
|
||||
def opcode(self):
|
||||
return "reraise"
|
||||
return "resume"
|
||||
|
||||
def exception_target(self):
|
||||
if len(self.operands) > 0:
|
||||
|
@ -1347,6 +1427,7 @@ class LandingPad(Terminator):
|
|||
def __init__(self, cleanup, name=""):
|
||||
super().__init__([cleanup], builtins.TException(), name)
|
||||
self.types = []
|
||||
self.has_cleanup = True
|
||||
|
||||
def copy(self, mapper):
|
||||
self_copy = super().copy(mapper)
|
||||
|
|
|
@ -0,0 +1,70 @@
|
|||
/* Force ld to make the ELF header loadable. */
|
||||
PHDRS
|
||||
{
|
||||
headers PT_LOAD FILEHDR PHDRS ;
|
||||
text PT_LOAD ;
|
||||
data PT_LOAD ;
|
||||
dynamic PT_DYNAMIC ;
|
||||
eh_frame PT_GNU_EH_FRAME ;
|
||||
}
|
||||
|
||||
SECTIONS
|
||||
{
|
||||
/* Push back the .text section enough so that ld.lld does not complain. */
|
||||
. = SIZEOF_HEADERS;
|
||||
|
||||
.text :
|
||||
{
|
||||
*(.text .text.*)
|
||||
} : text
|
||||
|
||||
.rodata :
|
||||
{
|
||||
*(.rodata .rodata.*)
|
||||
}
|
||||
|
||||
.eh_frame :
|
||||
{
|
||||
KEEP(*(.eh_frame))
|
||||
} : text
|
||||
|
||||
.eh_frame_hdr :
|
||||
{
|
||||
KEEP(*(.eh_frame_hdr))
|
||||
} : text : eh_frame
|
||||
|
||||
.got :
|
||||
{
|
||||
*(.got)
|
||||
} : text
|
||||
|
||||
.got.plt :
|
||||
{
|
||||
*(.got.plt)
|
||||
} : text
|
||||
|
||||
.data :
|
||||
{
|
||||
*(.data .data.*)
|
||||
} : data
|
||||
|
||||
.dynamic :
|
||||
{
|
||||
*(.dynamic)
|
||||
} : data : dynamic
|
||||
|
||||
.bss (NOLOAD) : ALIGN(4)
|
||||
{
|
||||
__bss_start = .;
|
||||
*(.sbss .sbss.* .bss .bss.*);
|
||||
. = ALIGN(4);
|
||||
_end = .;
|
||||
}
|
||||
|
||||
/* Kernel stack grows downward from end of memory, so put guard page after
|
||||
* all the program contents. Note: This requires all loaded sections (at
|
||||
* least those accessed) to be explicitly listed in the above!
|
||||
*/
|
||||
. = ALIGN(0x1000);
|
||||
_sstack_guard = .;
|
||||
}
|
|
@ -10,7 +10,7 @@ string and infers types for it using a trivial :module:`prelude`.
|
|||
|
||||
import os
|
||||
from pythonparser import source, diagnostic, parse_buffer
|
||||
from . import prelude, types, transforms, analyses, validators
|
||||
from . import prelude, types, transforms, analyses, validators, embedding
|
||||
|
||||
class Source:
|
||||
def __init__(self, source_buffer, engine=None):
|
||||
|
@ -18,7 +18,7 @@ class Source:
|
|||
self.engine = diagnostic.Engine(all_errors_are_fatal=True)
|
||||
else:
|
||||
self.engine = engine
|
||||
self.embedding_map = None
|
||||
self.embedding_map = embedding.EmbeddingMap()
|
||||
self.name, _ = os.path.splitext(os.path.basename(source_buffer.name))
|
||||
|
||||
asttyped_rewriter = transforms.ASTTypedRewriter(engine=engine,
|
||||
|
@ -57,7 +57,8 @@ class Module:
|
|||
constness_validator = validators.ConstnessValidator(engine=self.engine)
|
||||
artiq_ir_generator = transforms.ARTIQIRGenerator(engine=self.engine,
|
||||
module_name=src.name,
|
||||
ref_period=ref_period)
|
||||
ref_period=ref_period,
|
||||
embedding_map=self.embedding_map)
|
||||
dead_code_eliminator = transforms.DeadCodeEliminator(engine=self.engine)
|
||||
local_access_validator = validators.LocalAccessValidator(engine=self.engine)
|
||||
local_demoter = transforms.LocalDemoter()
|
||||
|
@ -83,6 +84,8 @@ class Module:
|
|||
constant_hoister.process(self.artiq_ir)
|
||||
if remarks:
|
||||
invariant_detection.process(self.artiq_ir)
|
||||
# for subkernels: main kernel inferencer output, to be passed to further compilations
|
||||
self.subkernel_arg_types = inferencer.subkernel_arg_types
|
||||
|
||||
def build_llvm_ir(self, target):
|
||||
"""Compile the module to LLVM IR for the specified target."""
|
||||
|
|
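A hedged sketch of the plain-source compilation path after this change; Source.from_filename and Module are used the same way in the tool scripts later in this diff, and the engine and filename here are placeholders.
# Hypothetical: a Source now always carries an EmbeddingMap, which the IR
# generator uses, e.g. to intern exception type names in alloc_exn.
source = Source.from_filename("experiment.py", engine=engine)
module = Module(source)
llmod = module.build_llvm_ir(target)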
|
@ -37,6 +37,7 @@ def globals():
|
|||
|
||||
# ARTIQ decorators
|
||||
"kernel": builtins.fn_kernel(),
|
||||
"subkernel": builtins.fn_kernel(),
|
||||
"portable": builtins.fn_kernel(),
|
||||
"rpc": builtins.fn_kernel(),
|
||||
|
||||
|
@ -54,4 +55,8 @@ def globals():
|
|||
# ARTIQ utility functions
|
||||
"rtio_log": builtins.fn_rtio_log(),
|
||||
"core_log": builtins.fn_print(),
|
||||
|
||||
# ARTIQ subkernel utility functions
|
||||
"subkernel_await": builtins.fn_subkernel_await(),
|
||||
"subkernel_preload": builtins.fn_subkernel_preload(),
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import os, sys, tempfile, subprocess, io
|
||||
from artiq.compiler import types, ir
|
||||
from llvmlite_artiq import ir as ll, binding as llvm
|
||||
from llvmlite import ir as ll, binding as llvm
|
||||
|
||||
llvm.initialize()
|
||||
llvm.initialize_all_targets()
|
||||
|
@ -28,8 +28,10 @@ class RunTool:
|
|||
for argument in self._pattern:
|
||||
cmdline.append(argument.format(**self._tempnames))
|
||||
|
||||
# https://bugs.python.org/issue17023
|
||||
windows = os.name == "nt"
|
||||
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
universal_newlines=True)
|
||||
universal_newlines=True, shell=windows)
|
||||
stdout, stderr = process.communicate()
|
||||
if process.returncode != 0:
|
||||
raise Exception("{} invocation failed: {}".
|
||||
|
@ -67,40 +69,41 @@ class Target:
|
|||
generated by the ARTIQ compiler will be deployed.
|
||||
|
||||
:var triple: (string)
|
||||
LLVM target triple, e.g. ``"or1k"``
|
||||
LLVM target triple, e.g. ``"riscv32"``
|
||||
:var data_layout: (string)
|
||||
LLVM target data layout, e.g. ``"E-m:e-p:32:32-i64:32-f64:32-v64:32-v128:32-a:0:32-n32"``
|
||||
:var features: (list of string)
|
||||
LLVM target CPU features, e.g. ``["mul", "div", "ffl1"]``
|
||||
:var additional_linker_options: (list of string)
|
||||
Linker options for the target in addition to the target-independent ones, e.g. ``["--target2=rel"]``
|
||||
:var print_function: (string)
|
||||
Name of a formatted print function (with the signature of ``printf``)
|
||||
provided by the target, e.g. ``"printf"``.
|
||||
:var little_endian: (boolean)
|
||||
Whether the code will be executed on a little-endian machine. This cannot always be
|
||||
determined from data_layout due to JIT.
|
||||
:var now_pinning: (boolean)
|
||||
Whether the target implements the now-pinning RTIO optimization.
|
||||
"""
|
||||
triple = "unknown"
|
||||
data_layout = ""
|
||||
features = []
|
||||
additional_linker_options = []
|
||||
print_function = "printf"
|
||||
little_endian = False
|
||||
now_pinning = True
|
||||
|
||||
tool_ld = "ld.lld"
|
||||
tool_strip = "llvm-strip"
|
||||
tool_addr2line = "llvm-addr2line"
|
||||
tool_symbolizer = "llvm-symbolizer"
|
||||
tool_cxxfilt = "llvm-cxxfilt"
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, subkernel_id=None):
|
||||
self.llcontext = ll.Context()
|
||||
self.subkernel_id = subkernel_id
|
||||
|
||||
def target_machine(self):
|
||||
lltarget = llvm.Target.from_triple(self.triple)
|
||||
llmachine = lltarget.create_target_machine(
|
||||
features=",".join(["+{}".format(f) for f in self.features]),
|
||||
reloc="pic", codemodel="default")
|
||||
reloc="pic", codemodel="default",
|
||||
abiname="ilp32d" if isinstance(self, RV32GTarget) else "")
|
||||
llmachine.set_asm_verbosity(True)
|
||||
return llmachine
|
||||
|
||||
|
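A hedged usage sketch, mirroring the compile/assemble/link calls that appear later in this diff; here subkernel_id is used for the dump suffix added below.
# Hypothetical: compile a subkernel module with its own id so ARTIQ_DUMP_IR /
# ARTIQ_DUMP_LLVM outputs get a "_subkernel_<id>" suffix (see _dump below).
target = RV32GTarget(subkernel_id=1)
llvm_ir = target.compile(module)
elf_obj = target.assemble(llvm_ir)
elf_shlib = target.link([elf_obj])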
@ -146,7 +149,8 @@ class Target:
|
|||
ir.BasicBlock._dump_loc = False
|
||||
|
||||
type_printer = types.TypePrinter()
|
||||
_dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt",
|
||||
suffix = "_subkernel_{}".format(self.subkernel_id) if self.subkernel_id is not None else ""
|
||||
_dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", suffix + ".txt",
|
||||
lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir))
|
||||
|
||||
llmod = module.build_llvm_ir(self)
|
||||
|
@ -158,12 +162,12 @@ class Target:
|
|||
_dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod))
|
||||
raise
|
||||
|
||||
_dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", "_unopt.ll",
|
||||
_dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", suffix + "_unopt.ll",
|
||||
lambda: str(llparsedmod))
|
||||
|
||||
self.optimize(llparsedmod)
|
||||
|
||||
_dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", ".ll",
|
||||
_dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", suffix + ".ll",
|
||||
lambda: str(llparsedmod))
|
||||
|
||||
return llparsedmod
|
||||
|
@ -182,6 +186,8 @@ class Target:
|
|||
def link(self, objects):
|
||||
"""Link the relocatable objects into a shared library for this target."""
|
||||
with RunTool([self.tool_ld, "-shared", "--eh-frame-hdr"] +
|
||||
self.additional_linker_options +
|
||||
["-T" + os.path.join(os.path.dirname(__file__), "kernel.ld")] +
|
||||
["{{obj{}}}".format(index) for index in range(len(objects))] +
|
||||
["-x"] +
|
||||
["-o", "{output}"],
|
||||
|
@ -212,9 +218,10 @@ class Target:
|
|||
# just after the call. Offset them back to get an address somewhere
|
||||
# inside the call instruction (or its delay slot), since that's what
|
||||
# the backtrace entry should point at.
|
||||
last_inlined = None
|
||||
offset_addresses = [hex(addr - 1) for addr in addresses]
|
||||
with RunTool([self.tool_addr2line, "--addresses", "--functions", "--inlines",
|
||||
"--demangle", "--exe={library}"] + offset_addresses,
|
||||
with RunTool([self.tool_symbolizer, "--addresses", "--functions", "--inlines",
|
||||
"--demangle", "--output-style=GNU", "--exe={library}"] + offset_addresses,
|
||||
library=library) \
|
||||
as results:
|
||||
lines = iter(results["__stdout__"].read().rstrip().split("\n"))
|
||||
|
@ -227,9 +234,11 @@ class Target:
|
|||
if address_or_function[:2] == "0x":
|
||||
address = int(address_or_function[2:], 16) + 1 # remove offset
|
||||
function = next(lines)
|
||||
inlined = False
|
||||
else:
|
||||
address = backtrace[-1][4] # inlined
|
||||
function = address_or_function
|
||||
inlined = True
|
||||
location = next(lines)
|
||||
|
||||
filename, line = location.rsplit(":", 1)
|
||||
|
@ -240,10 +249,17 @@ class Target:
|
|||
else:
|
||||
line = int(line)
|
||||
# can't get column out of addr2line D:
|
||||
backtrace.append((filename, line, -1, function, address))
|
||||
if inlined:
|
||||
last_inlined.append((filename, line, -1, function, address))
|
||||
else:
|
||||
last_inlined = []
|
||||
backtrace.append((filename, line, -1, function, address,
|
||||
last_inlined))
|
||||
return backtrace
|
||||
|
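A hedged sketch of consuming the new entry layout; the trailing list collects the additional frames reported by ``--inlines`` for the same address, and the loop below is hypothetical.
# Hypothetical consumer of Target.symbolize after this change.
for filename, line, column, function, address, inlined in target.symbolize(library, addresses):
    print("{} ({}:{})".format(function, filename, line))
    for ifilename, iline, _icol, ifunction, _iaddr in inlined:
        print("  via {} ({}:{})".format(ifunction, ifilename, iline))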
||||
def demangle(self, names):
|
||||
if not any(names):
|
||||
return names
|
||||
with RunTool([self.tool_cxxfilt] + names) as results:
|
||||
return results["__stdout__"].read().rstrip().split("\n")
|
||||
|
||||
|
@ -251,33 +267,43 @@ class NativeTarget(Target):
|
|||
def __init__(self):
|
||||
super().__init__()
|
||||
self.triple = llvm.get_default_triple()
|
||||
host_data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)
|
||||
assert host_data_layout[0] in "eE"
|
||||
self.little_endian = host_data_layout[0] == "e"
|
||||
self.data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)
|
||||
|
||||
class OR1KTarget(Target):
|
||||
triple = "or1k-linux"
|
||||
data_layout = "E-m:e-p:32:32-i8:8:8-i16:16:16-i64:32:32-" \
|
||||
"f64:32:32-v64:32:32-v128:32:32-a0:0:32-n32"
|
||||
features = ["mul", "div", "ffl1", "cmov", "addc"]
|
||||
class RV32IMATarget(Target):
|
||||
triple = "riscv32-unknown-linux"
|
||||
data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
|
||||
features = ["m", "a"]
|
||||
additional_linker_options = ["-m", "elf32lriscv"]
|
||||
print_function = "core_log"
|
||||
little_endian = False
|
||||
now_pinning = True
|
||||
|
||||
tool_ld = "or1k-linux-ld"
|
||||
tool_strip = "or1k-linux-strip"
|
||||
tool_addr2line = "or1k-linux-addr2line"
|
||||
tool_cxxfilt = "or1k-linux-c++filt"
|
||||
tool_ld = "ld.lld"
|
||||
tool_strip = "llvm-strip"
|
||||
tool_symbolizer = "llvm-symbolizer"
|
||||
tool_cxxfilt = "llvm-cxxfilt"
|
||||
|
||||
class RV32GTarget(Target):
|
||||
triple = "riscv32-unknown-linux"
|
||||
data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
|
||||
features = ["m", "a", "f", "d"]
|
||||
additional_linker_options = ["-m", "elf32lriscv"]
|
||||
print_function = "core_log"
|
||||
now_pinning = True
|
||||
|
||||
tool_ld = "ld.lld"
|
||||
tool_strip = "llvm-strip"
|
||||
tool_symbolizer = "llvm-symbolizer"
|
||||
tool_cxxfilt = "llvm-cxxfilt"
|
||||
|
||||
class CortexA9Target(Target):
|
||||
triple = "armv7-unknown-linux-gnueabihf"
|
||||
data_layout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
|
||||
features = ["dsp", "fp16", "neon", "vfp3"]
|
||||
additional_linker_options = ["-m", "armelf_linux_eabi", "--target2=rel"]
|
||||
print_function = "core_log"
|
||||
little_endian = True
|
||||
now_pinning = False
|
||||
|
||||
tool_ld = "armv7-unknown-linux-gnueabihf-ld"
|
||||
tool_strip = "armv7-unknown-linux-gnueabihf-strip"
|
||||
tool_addr2line = "armv7-unknown-linux-gnueabihf-addr2line"
|
||||
tool_cxxfilt = "armv7-unknown-linux-gnueabihf-c++filt"
|
||||
tool_ld = "ld.lld"
|
||||
tool_strip = "llvm-strip"
|
||||
tool_symbolizer = "llvm-symbolizer"
|
||||
tool_cxxfilt = "llvm-cxxfilt"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import os, sys, fileinput, ctypes
|
||||
from pythonparser import diagnostic
|
||||
from llvmlite_artiq import binding as llvm
|
||||
from llvmlite import binding as llvm
|
||||
from ..module import Module, Source
|
||||
from ..targets import NativeTarget
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import sys, fileinput
|
||||
from pythonparser import diagnostic
|
||||
from llvmlite_artiq import ir as ll
|
||||
from llvmlite import ir as ll
|
||||
from ..module import Module, Source
|
||||
from ..targets import NativeTarget
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import sys, os
|
||||
from pythonparser import diagnostic
|
||||
from ..module import Module, Source
|
||||
from ..targets import OR1KTarget
|
||||
from ..targets import RV32GTarget
|
||||
from . import benchmark
|
||||
|
||||
def main():
|
||||
|
@ -30,7 +30,7 @@ def main():
|
|||
benchmark(lambda: Module(source),
|
||||
"ARTIQ transforms and validators")
|
||||
|
||||
benchmark(lambda: OR1KTarget().compile_and_link([module]),
|
||||
benchmark(lambda: RV32GTarget().compile_and_link([module]),
|
||||
"LLVM optimization and linking")
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -5,7 +5,7 @@ from ...master.databases import DeviceDB, DatasetDB
|
|||
from ...master.worker_db import DeviceManager, DatasetManager
|
||||
from ..module import Module
|
||||
from ..embedding import Stitcher
|
||||
from ..targets import OR1KTarget
|
||||
from ..targets import RV32GTarget
|
||||
from . import benchmark
|
||||
|
||||
|
||||
|
@ -30,8 +30,9 @@ def main():
|
|||
device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
|
||||
device_mgr = DeviceManager(DeviceDB(device_db_path))
|
||||
|
||||
dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
|
||||
dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
|
||||
dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.mdb")
|
||||
dataset_db = DatasetDB(dataset_db_path)
|
||||
dataset_mgr = DatasetManager()
|
||||
|
||||
argument_mgr = ProcessArgumentManager({})
|
||||
|
||||
|
@ -45,7 +46,7 @@ def main():
|
|||
|
||||
stitcher = embed()
|
||||
module = Module(stitcher)
|
||||
target = OR1KTarget()
|
||||
target = RV32GTarget()
|
||||
llvm_ir = target.compile(module)
|
||||
elf_obj = target.assemble(llvm_ir)
|
||||
elf_shlib = target.link([elf_obj])
|
||||
|
@ -68,5 +69,7 @@ def main():
|
|||
benchmark(lambda: target.strip(elf_shlib),
|
||||
"Stripping debug information")
|
||||
|
||||
dataset_db.close_db()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import sys, os
|
||||
from pythonparser import diagnostic
|
||||
from ..module import Module, Source
|
||||
from ..targets import OR1KTarget
|
||||
from ..targets import RV32GTarget
|
||||
|
||||
def main():
|
||||
if not len(sys.argv) > 1:
|
||||
|
@ -20,7 +20,7 @@ def main():
|
|||
for filename in sys.argv[1:]:
|
||||
modules.append(Module(Source.from_filename(filename, engine=engine)))
|
||||
|
||||
llobj = OR1KTarget().compile_and_link(modules)
|
||||
llobj = RV32GTarget().compile_and_link(modules)
|
||||
|
||||
basename, ext = os.path.splitext(sys.argv[-1])
|
||||
with open(basename + ".so", "wb") as f:
|
||||
|
|
|
@ -8,6 +8,7 @@ semantics explicitly.
|
|||
|
||||
from collections import OrderedDict, defaultdict
|
||||
from functools import reduce
|
||||
from itertools import chain
|
||||
from pythonparser import algorithm, diagnostic, ast
|
||||
from .. import types, builtins, asttyped, ir, iodelay
|
||||
|
||||
|
@ -61,6 +62,9 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
the basic block to which ``return`` will transfer control
|
||||
:ivar unwind_target: (:class:`ir.BasicBlock` or None)
|
||||
the basic block to which unwinding will transfer control
|
||||
:ivar catch_clauses: (list of (:class:`ir.BasicBlock`, :class:`types.Type` or None))
|
||||
a list of catch clauses that should be appended to inner try block
|
||||
landingpad
|
||||
:ivar final_branch: (function (target: :class:`ir.BasicBlock`, block: :class:`ir.BasicBlock)
|
||||
or None)
|
||||
the function that appends to ``block`` a jump through the ``finally`` statement
|
||||
|
@ -88,8 +92,9 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
|
||||
_size_type = builtins.TInt32()
|
||||
|
||||
def __init__(self, module_name, engine, ref_period):
|
||||
def __init__(self, module_name, engine, ref_period, embedding_map):
|
||||
self.engine = engine
|
||||
self.embedding_map = embedding_map
|
||||
self.functions = []
|
||||
self.name = [module_name] if module_name != "" else []
|
||||
self.ref_period = ir.Constant(ref_period, builtins.TFloat())
|
||||
|
@ -102,10 +107,13 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.current_private_env = None
|
||||
self.current_args = None
|
||||
self.current_assign = None
|
||||
self.current_exception = None
|
||||
self.current_remote_fn = False
|
||||
self.break_target = None
|
||||
self.continue_target = None
|
||||
self.return_target = None
|
||||
self.unwind_target = None
|
||||
self.catch_clauses = []
|
||||
self.final_branch = None
|
||||
self.function_map = dict()
|
||||
self.variable_map = dict()
|
||||
|
@ -204,7 +212,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
old_priv_env, self.current_private_env = self.current_private_env, priv_env
|
||||
|
||||
self.generic_visit(node)
|
||||
self.terminate(ir.Return(ir.Constant(None, builtins.TNone())))
|
||||
self.terminate(ir.Return(ir.Constant(None, builtins.TNone()),
|
||||
remote_return=self.current_remote_fn))
|
||||
|
||||
return self.functions
|
||||
finally:
|
||||
|
@ -287,6 +296,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
old_block, self.current_block = self.current_block, entry
|
||||
|
||||
old_globals, self.current_globals = self.current_globals, node.globals_in_scope
|
||||
old_remote_fn = self.current_remote_fn
|
||||
self.current_remote_fn = getattr(node, "remote_fn", False)
|
||||
|
||||
env_without_globals = \
|
||||
{var: node.typing_env[var]
|
||||
|
@ -319,7 +330,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.terminate(ir.Return(result))
|
||||
elif builtins.is_none(typ.ret):
|
||||
if not self.current_block.is_terminated():
|
||||
self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone())))
|
||||
self.current_block.append(ir.Return(ir.Constant(None, builtins.TNone()),
|
||||
remote_return=self.current_remote_fn))
|
||||
else:
|
||||
if not self.current_block.is_terminated():
|
||||
if len(self.current_block.predecessors()) != 0:
|
||||
|
@ -338,6 +350,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.current_block = old_block
|
||||
self.current_globals = old_globals
|
||||
self.current_env = old_env
|
||||
self.current_remote_fn = old_remote_fn
|
||||
if not is_lambda:
|
||||
self.current_private_env = old_priv_env
|
||||
|
||||
|
@ -360,7 +373,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
return_value = self.visit(node.value)
|
||||
|
||||
if self.return_target is None:
|
||||
self.append(ir.Return(return_value))
|
||||
self.append(ir.Return(return_value,
|
||||
remote_return=self.current_remote_fn))
|
||||
else:
|
||||
self.append(ir.SetLocal(self.current_private_env, "$return", return_value))
|
||||
self.append(ir.Branch(self.return_target))
|
||||
|
@ -626,6 +640,11 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.final_branch(raise_proxy, self.current_block)
|
||||
self.current_block = raise_proxy
|
||||
|
||||
if exn is not None:
|
||||
# if we need to raise the exception in a final body, we have to
|
||||
# lazy-evaluate the exception object to make sure that we generate
|
||||
# it in the raise_proxy block
|
||||
exn = exn()
|
||||
if exn is not None:
|
||||
assert loc is not None
|
||||
loc_file = ir.Constant(loc.source_buffer.name, builtins.TStr())
|
||||
|
@ -633,10 +652,10 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
loc_column = ir.Constant(loc.column(), builtins.TInt32())
|
||||
loc_function = ir.Constant(".".join(self.name), builtins.TStr())
|
||||
|
||||
self.append(ir.SetAttr(exn, "__file__", loc_file))
|
||||
self.append(ir.SetAttr(exn, "__line__", loc_line))
|
||||
self.append(ir.SetAttr(exn, "__col__", loc_column))
|
||||
self.append(ir.SetAttr(exn, "__func__", loc_function))
|
||||
self.append(ir.SetAttr(exn, "#__file__", loc_file))
|
||||
self.append(ir.SetAttr(exn, "#__line__", loc_line))
|
||||
self.append(ir.SetAttr(exn, "#__col__", loc_column))
|
||||
self.append(ir.SetAttr(exn, "#__func__", loc_function))
|
||||
|
||||
if self.unwind_target is not None:
|
||||
self.append(ir.Raise(exn, self.unwind_target))
|
||||
|
@ -644,18 +663,21 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.append(ir.Raise(exn))
|
||||
else:
|
||||
if self.unwind_target is not None:
|
||||
self.append(ir.Reraise(self.unwind_target))
|
||||
self.append(ir.Resume(self.unwind_target))
|
||||
else:
|
||||
self.append(ir.Reraise())
|
||||
self.append(ir.Resume())
|
||||
|
||||
def visit_Raise(self, node):
|
||||
if node.exc is not None and types.is_exn_constructor(node.exc.type):
|
||||
self.raise_exn(self.alloc_exn(node.exc.type.instance), loc=self.current_loc)
|
||||
self.raise_exn(lambda: self.alloc_exn(node.exc.type.instance), loc=self.current_loc)
|
||||
else:
|
||||
self.raise_exn(self.visit(node.exc), loc=self.current_loc)
|
||||
self.raise_exn(lambda: self.visit(node.exc), loc=self.current_loc)
|
||||
|
||||
def visit_Try(self, node):
|
||||
dispatcher = self.add_block("try.dispatch")
|
||||
cleanup = self.add_block('handler.cleanup')
|
||||
landingpad = ir.LandingPad(cleanup)
|
||||
dispatcher.append(landingpad)
|
||||
|
||||
if any(node.finalbody):
|
||||
# k for continuation
|
||||
|
@ -690,16 +712,51 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
value = return_action.append(ir.GetLocal(self.current_private_env, "$return"))
|
||||
return_action.append(ir.Return(value))
|
||||
final_branch(return_action, return_proxy)
|
||||
else:
|
||||
landingpad.has_cleanup = False
|
||||
|
||||
# we should propagate the clauses to nested try catch blocks
|
||||
# so nested try catch will jump to our clause if the inner one does not
|
||||
# match
|
||||
# note that the phi instruction here requires some hack, see
|
||||
# llvm_ir_generator process_function for details
|
||||
clauses = []
|
||||
found_catch_all = False
|
||||
for handler_node in node.handlers:
|
||||
if found_catch_all:
|
||||
self.warn_unreachable(handler_node)
|
||||
continue
|
||||
exn_type = handler_node.name_type.find()
|
||||
if handler_node.filter is not None and \
|
||||
not builtins.is_exception(exn_type, 'Exception'):
|
||||
handler = self.add_block("handler." + exn_type.name)
|
||||
phi = ir.Phi(builtins.TException(), 'exn')
|
||||
handler.append(phi)
|
||||
clauses.append((handler, exn_type, phi))
|
||||
else:
|
||||
handler = self.add_block("handler.catchall")
|
||||
phi = ir.Phi(builtins.TException(), 'exn')
|
||||
handler.append(phi)
|
||||
clauses.append((handler, None, phi))
|
||||
found_catch_all = True
|
||||
|
||||
all_clauses = clauses[:]
|
||||
for clause in self.catch_clauses:
|
||||
# if the last clause is a catch-all, do not add further clauses
|
||||
if len(all_clauses) == 0 or all_clauses[-1][1] is not None:
|
||||
all_clauses.append(clause)
|
||||
|
||||
body = self.add_block("try.body")
|
||||
self.append(ir.Branch(body))
|
||||
self.current_block = body
|
||||
|
||||
old_unwind, self.unwind_target = self.unwind_target, dispatcher
|
||||
old_clauses, self.catch_clauses = self.catch_clauses, all_clauses
|
||||
try:
|
||||
old_unwind, self.unwind_target = self.unwind_target, dispatcher
|
||||
self.visit(node.body)
|
||||
finally:
|
||||
self.unwind_target = old_unwind
|
||||
self.catch_clauses = old_clauses
|
||||
|
||||
if not self.current_block.is_terminated():
|
||||
self.visit(node.orelse)
|
||||
|
@ -708,85 +765,149 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
body = self.current_block
|
||||
|
||||
if any(node.finalbody):
|
||||
# if we have a final block, we should not append clauses to our
|
||||
# landingpad or we will skip the finally block.
|
||||
# when the finally block calls resume, it will unwind to the outer
|
||||
# try catch block automatically
|
||||
all_clauses = clauses
|
||||
# reset targets
|
||||
if self.break_target:
|
||||
self.break_target = old_break
|
||||
if self.continue_target:
|
||||
self.continue_target = old_continue
|
||||
self.return_target = old_return
|
||||
|
||||
old_final_branch, self.final_branch = self.final_branch, final_branch
|
||||
if any(node.finalbody):
|
||||
# create new unwind target for cleanup
|
||||
final_dispatcher = self.add_block("try.final.dispatch")
|
||||
final_landingpad = ir.LandingPad(cleanup)
|
||||
final_dispatcher.append(final_landingpad)
|
||||
|
||||
cleanup = self.add_block('handler.cleanup')
|
||||
landingpad = dispatcher.append(ir.LandingPad(cleanup))
|
||||
# make sure that exception clauses are unwound to the finally block
|
||||
old_unwind, self.unwind_target = self.unwind_target, final_dispatcher
|
||||
|
||||
if any(node.finalbody):
|
||||
# if we have a while: try/finally, a continue must execute the finally block
|
||||
# before continuing the while
|
||||
redirect = final_branch
|
||||
else:
|
||||
redirect = lambda dest, proxy: proxy.append(ir.Branch(dest))
|
||||
|
||||
# we need to set break/continue/return to execute end_catch
|
||||
if self.break_target is not None:
|
||||
break_proxy = self.add_block("try.break")
|
||||
break_proxy.append(ir.Builtin("end_catch", [], builtins.TNone()))
|
||||
old_break, self.break_target = self.break_target, break_proxy
|
||||
redirect(old_break, break_proxy)
|
||||
|
||||
if self.continue_target is not None:
|
||||
continue_proxy = self.add_block("try.continue")
|
||||
continue_proxy.append(ir.Builtin("end_catch", [],
|
||||
builtins.TNone()))
|
||||
old_continue, self.continue_target = self.continue_target, continue_proxy
|
||||
redirect(old_continue, continue_proxy)
|
||||
|
||||
return_proxy = self.add_block("try.return")
|
||||
return_proxy.append(ir.Builtin("end_catch", [], builtins.TNone()))
|
||||
old_return, self.return_target = self.return_target, return_proxy
|
||||
old_return_target = old_return
|
||||
if old_return_target is None:
|
||||
old_return_target = self.add_block("try.doreturn")
|
||||
value = old_return_target.append(ir.GetLocal(self.current_private_env, "$return"))
|
||||
old_return_target.append(ir.Return(value))
|
||||
redirect(old_return_target, return_proxy)
|
||||
|
||||
handlers = []
|
||||
for handler_node in node.handlers:
|
||||
exn_type = handler_node.name_type.find()
|
||||
if handler_node.filter is not None and \
|
||||
not builtins.is_exception(exn_type, 'Exception'):
|
||||
handler = self.add_block("handler." + exn_type.name)
|
||||
landingpad.add_clause(handler, exn_type)
|
||||
else:
|
||||
handler = self.add_block("handler.catchall")
|
||||
landingpad.add_clause(handler, None)
|
||||
|
||||
for (handler_node, (handler, exn_type, phi)) in zip(node.handlers, clauses):
|
||||
self.current_block = handler
|
||||
if handler_node.name is not None:
|
||||
exn = self.append(ir.Builtin("exncast", [landingpad], handler_node.name_type))
|
||||
exn = self.append(ir.Builtin("exncast", [phi], handler_node.name_type))
|
||||
self._set_local(handler_node.name, exn)
|
||||
self.visit(handler_node.body)
|
||||
# only need to call end_catch if the current block is not terminated
|
||||
# other possible paths: break/continue/return/raise
|
||||
# we will call end_catch in the first 3 cases, and we should not call
|
||||
# end_catch in the last case for nested exception
|
||||
if not self.current_block.is_terminated():
|
||||
self.append(ir.Builtin("end_catch", [], builtins.TNone()))
|
||||
post_handler = self.current_block
|
||||
handlers.append(post_handler)
|
||||
|
||||
handlers.append((handler, post_handler))
|
||||
# branch to all possible clauses, including those from outer try catch
|
||||
# block
|
||||
# if we have a finally block, all_clauses will not include those from
|
||||
# the outer block
|
||||
for (handler, clause, phi) in all_clauses:
|
||||
phi.add_incoming(landingpad, dispatcher)
|
||||
landingpad.add_clause(handler, clause)
|
||||
|
||||
if self.break_target:
|
||||
self.break_target = old_break
|
||||
if self.continue_target:
|
||||
self.continue_target = old_continue
|
||||
self.return_target = old_return
|
||||
|
||||
if any(node.finalbody):
|
||||
# Finalize and continue after try statement.
|
||||
self.final_branch = old_final_branch
|
||||
|
||||
finalizer = self.add_block("finally")
|
||||
self.current_block = finalizer
|
||||
|
||||
self.visit(node.finalbody)
|
||||
post_finalizer = self.current_block
|
||||
|
||||
# Finalize and reraise. Separate from previous case to expose flow
|
||||
# to LocalAccessValidator.
|
||||
finalizer_reraise = self.add_block("finally.reraise")
|
||||
self.unwind_target = old_unwind
|
||||
# Exception path
|
||||
finalizer_reraise = self.add_block("finally.resume")
|
||||
self.current_block = finalizer_reraise
|
||||
|
||||
self.visit(node.finalbody)
|
||||
self.terminate(ir.Reraise(self.unwind_target))
|
||||
|
||||
self.current_block = tail = self.add_block("try.tail")
|
||||
if any(node.finalbody):
|
||||
final_targets.append(tail)
|
||||
|
||||
for block in final_paths:
|
||||
block.append(ir.Branch(finalizer))
|
||||
|
||||
if not body.is_terminated():
|
||||
body.append(ir.SetLocal(final_state, "$cont", tail))
|
||||
body.append(ir.Branch(finalizer))
|
||||
|
||||
self.terminate(ir.Resume(self.unwind_target))
|
||||
cleanup.append(ir.Branch(finalizer_reraise))
|
||||
|
||||
for handler, post_handler in handlers:
|
||||
if not post_handler.is_terminated():
|
||||
post_handler.append(ir.SetLocal(final_state, "$cont", tail))
|
||||
post_handler.append(ir.Branch(finalizer))
|
||||
# Normal path
|
||||
finalizer = self.add_block("finally")
|
||||
self.current_block = finalizer
|
||||
self.visit(node.finalbody)
|
||||
post_finalizer = self.current_block
|
||||
self.current_block = tail = self.add_block("try.tail")
|
||||
final_targets.append(tail)
|
||||
|
||||
# if final block is not terminated, branch to tail
|
||||
if not post_finalizer.is_terminated():
|
||||
dest = post_finalizer.append(ir.GetLocal(final_state, "$cont"))
|
||||
post_finalizer.append(ir.IndirectBranch(dest, final_targets))
|
||||
# make sure proxies will branch to finalizer
|
||||
for block in final_paths:
|
||||
if finalizer in block.predecessors():
|
||||
# avoid producing irreducible graphs
|
||||
# generate a new finalizer
|
||||
self.current_block = tmp_finalizer = self.add_block("finally.tmp")
|
||||
self.visit(node.finalbody)
|
||||
if not self.current_block.is_terminated():
|
||||
assert isinstance(block.instructions[-1], ir.SetLocal)
|
||||
self.current_block.append(ir.Branch(block.instructions[-1].operands[-1]))
|
||||
block.instructions[-1].erase()
|
||||
block.append(ir.Branch(tmp_finalizer))
|
||||
self.current_block = tail
|
||||
else:
|
||||
block.append(ir.Branch(finalizer))
|
||||
# if no raise in body/handlers, branch to finalizer
|
||||
for block in chain([body], handlers):
|
||||
if not block.is_terminated():
|
||||
if finalizer in block.predecessors():
|
||||
# similar to the above case
|
||||
self.current_block = tmp_finalizer = self.add_block("finally.tmp")
|
||||
self.visit(node.finalbody)
|
||||
self.terminate(ir.Branch(tail))
|
||||
block.append(ir.Branch(tmp_finalizer))
|
||||
self.current_block = tail
|
||||
else:
|
||||
block.append(ir.SetLocal(final_state, "$cont", tail))
|
||||
block.append(ir.Branch(finalizer))
|
||||
else:
|
||||
self.current_block = tail = self.add_block("try.tail")
|
||||
if not body.is_terminated():
|
||||
body.append(ir.Branch(tail))
|
||||
|
||||
cleanup.append(ir.Reraise(self.unwind_target))
|
||||
cleanup.append(ir.Resume(self.unwind_target))
|
||||
|
||||
for handler, post_handler in handlers:
|
||||
if not post_handler.is_terminated():
|
||||
post_handler.append(ir.Branch(tail))
|
||||
for handler in handlers:
|
||||
if not handler.is_terminated():
|
||||
handler.append(ir.Branch(tail))
|
||||
|
||||
def _try_finally(self, body_gen, finally_gen, name):
|
||||
dispatcher = self.add_block("{}.dispatch".format(name))
|
||||
|
@ -805,7 +926,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
self.current_block = self.add_block("{}.cleanup".format(name))
|
||||
dispatcher.append(ir.LandingPad(self.current_block))
|
||||
finally_gen()
|
||||
self.raise_exn()
|
||||
self.terminate(ir.Resume(self.unwind_target))
|
||||
|
||||
self.current_block = self.post_body
|
||||
|
||||
|
@ -995,7 +1116,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
|
||||
old_final_branch, self.final_branch = self.final_branch, None
|
||||
old_unwind, self.unwind_target = self.unwind_target, None
|
||||
self.raise_exn(exn_gen(*args[1:]), loc=loc)
|
||||
self.raise_exn(lambda: exn_gen(*args[1:]), loc=loc)
|
||||
finally:
|
||||
self.current_function = old_func
|
||||
self.current_block = old_block
|
||||
|
@ -1084,7 +1205,27 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
finally:
|
||||
self.current_assign = old_assign
|
||||
|
||||
if isinstance(node.slice, ast.Index):
|
||||
if types.is_tuple(node.value.type):
|
||||
assert isinstance(node.slice, ast.Index), \
|
||||
"Internal compiler error: tuple index should be an Index"
|
||||
assert isinstance(node.slice.value, ast.Num), \
|
||||
"Internal compiler error: tuple index should be a constant"
|
||||
|
||||
if self.current_assign is not None:
|
||||
diag = diagnostic.Diagnostic("error",
|
||||
"cannot assign to a tuple element",
|
||||
{}, node.loc)
|
||||
self.engine.process(diag)
|
||||
|
||||
index = node.slice.value.n
|
||||
indexed = self.append(
|
||||
ir.GetAttr(value, index, name="{}.e{}".format(value.name, index)),
|
||||
loc=node.loc
|
||||
)
|
||||
|
||||
return indexed
|
||||
|
||||
elif isinstance(node.slice, ast.Index):
|
||||
try:
|
||||
old_assign, self.current_assign = self.current_assign, None
|
||||
index = self.visit(node.slice.value)
|
||||
|
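As a hypothetical kernel-level illustration of the tuple-subscript handling added above:
x = (1, 2.0)
a = x[0]      # allowed: constant index, lowered to ir.GetAttr(value, 0)
x[0] = 5      # rejected with "cannot assign to a tuple element"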
@ -1116,7 +1257,11 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
_readable_name(index))))
|
||||
if self.current_assign is None:
|
||||
return indexed
|
||||
else: # Slice
|
||||
else:
|
||||
# This is a slice. The endpoint checking logic is the same for both lists
|
||||
# and NumPy arrays, but the actual implementations differ – while slices of
|
||||
# built-in lists are always copies in Python, they are views sharing the
|
||||
# same backing storage in NumPy.
|
||||
length = self.iterable_len(value, node.slice.type)
|
||||
|
||||
if node.slice.lower is not None:
|
||||
|
@ -1141,91 +1286,127 @@ class ARTIQIRGenerator(algorithm.Visitor):
|
|||
mapped_stop_index = self._map_index(length, stop_index, one_past_the_end=True,
|
||||
loc=node.begin_loc)
|
||||
|
||||
if node.slice.step is not None:
|
||||
try:
|
||||
old_assign, self.current_assign = self.current_assign, None
|
||||
step = self.visit(node.slice.step)
|
||||
finally:
|
||||
self.current_assign = old_assign
|
||||
if builtins.is_array(node.type):
|
||||
# To implement strided slicing with the proper NumPy reference
|
||||
# semantics, the pointer/length array representation will need to be
|
||||
# extended by another field to hold a variable stride.
|
||||
assert node.slice.step is None, (
|
||||
"array slices with non-trivial step "
|
||||
"should have been disallowed during type inference")
|
||||
|
||||
# One-dimensionally slicing an array only affects the outermost
|
||||
# dimension.
|
||||
shape = self.append(ir.GetAttr(value, "shape"))
|
||||
lengths = [
|
||||
self.append(ir.GetAttr(shape, i))
|
||||
for i in range(len(shape.type.elts))
|
||||
]
|
||||
|
||||
# Compute outermost length – zero for "backwards" indices.
|
||||
raw_len = self.append(
|
||||
ir.Arith(ast.Sub(loc=None), mapped_stop_index, mapped_start_index))
|
||||
is_neg_len = self.append(
|
||||
ir.Compare(ast.Lt(loc=None), raw_len, ir.Constant(0, raw_len.type)))
|
||||
outer_len = self.append(
|
||||
ir.Select(is_neg_len, ir.Constant(0, raw_len.type), raw_len))
|
||||
new_shape = self._make_array_shape([outer_len] + lengths[1:])
|
||||
|
||||
# Offset buffer pointer by start index (times stride for inner dims).
|
||||
stride = reduce(
|
||||
lambda l, r: self.append(ir.Arith(ast.Mult(loc=None), l, r)),
|
||||
lengths[1:], ir.Constant(1, lengths[0].type))
|
||||
offset = self.append(
|
||||
ir.Arith(ast.Mult(loc=None), stride, mapped_start_index))
|
||||
buffer = self.append(ir.GetAttr(value, "buffer"))
|
||||
new_buffer = self.append(ir.Offset(buffer, offset))
|
||||
|
||||
return self.append(ir.Alloc([new_buffer, new_shape], node.type))
|
||||
else:
|
||||
if node.slice.step is not None:
|
||||
try:
|
||||
old_assign, self.current_assign = self.current_assign, None
|
||||
step = self.visit(node.slice.step)
|
||||
finally:
|
||||
self.current_assign = old_assign
|
||||
|
||||
self._make_check(
|
||||
self.append(ir.Compare(ast.NotEq(loc=None), step, ir.Constant(0, step.type))),
|
||||
lambda: self.alloc_exn(builtins.TException("ValueError"),
|
||||
ir.Constant("step cannot be zero", builtins.TStr())),
|
||||
loc=node.slice.step.loc)
|
||||
else:
|
||||
step = ir.Constant(1, node.slice.type)
|
||||
counting_up = self.append(ir.Compare(ast.Gt(loc=None), step,
|
||||
ir.Constant(0, step.type)))
|
||||
|
||||
unstepped_size = self.append(ir.Arith(ast.Sub(loc=None),
|
||||
mapped_stop_index, mapped_start_index))
|
||||
slice_size_a = self.append(ir.Arith(ast.FloorDiv(loc=None), unstepped_size, step))
|
||||
slice_size_b = self.append(ir.Arith(ast.Mod(loc=None), unstepped_size, step))
|
||||
rem_not_empty = self.append(ir.Compare(ast.NotEq(loc=None), slice_size_b,
|
||||
ir.Constant(0, slice_size_b.type)))
|
||||
slice_size_c = self.append(ir.Arith(ast.Add(loc=None), slice_size_a,
|
||||
ir.Constant(1, slice_size_a.type)))
|
||||
slice_size = self.append(ir.Select(rem_not_empty,
|
||||
slice_size_c, slice_size_a,
|
||||
name="slice.size"))
|
||||
self._make_check(
|
||||
self.append(ir.Compare(ast.NotEq(loc=None), step, ir.Constant(0, step.type))),
|
||||
lambda: self.alloc_exn(builtins.TException("ValueError"),
|
||||
ir.Constant("step cannot be zero", builtins.TStr())),
|
||||
loc=node.slice.step.loc)
|
||||
else:
|
||||
step = ir.Constant(1, node.slice.type)
|
||||
counting_up = self.append(ir.Compare(ast.Gt(loc=None), step,
|
||||
ir.Constant(0, step.type)))
|
||||
self.append(ir.Compare(ast.LtE(loc=None), slice_size, length)),
|
||||
lambda slice_size, length: self.alloc_exn(builtins.TException("ValueError"),
|
||||
ir.Constant("slice size {0} is larger than iterable length {1}",
|
||||
builtins.TStr()),
|
||||
slice_size, length),
|
||||
params=[slice_size, length],
|
||||
loc=node.slice.loc)
|
||||
|
||||
unstepped_size = self.append(ir.Arith(ast.Sub(loc=None),
|
||||
mapped_stop_index, mapped_start_index))
|
||||
slice_size_a = self.append(ir.Arith(ast.FloorDiv(loc=None), unstepped_size, step))
|
||||
slice_size_b = self.append(ir.Arith(ast.Mod(loc=None), unstepped_size, step))
|
||||
rem_not_empty = self.append(ir.Compare(ast.NotEq(loc=None), slice_size_b,
|
||||
ir.Constant(0, slice_size_b.type)))
|
||||
slice_size_c = self.append(ir.Arith(ast.Add(loc=None), slice_size_a,
|
||||
ir.Constant(1, slice_size_a.type)))
|
||||
slice_size = self.append(ir.Select(rem_not_empty,
|
||||
slice_size_c, slice_size_a,
|
||||
name="slice.size"))
|
||||
self._make_check(
|
||||
self.append(ir.Compare(ast.LtE(loc=None), slice_size, length)),
|
||||
lambda slice_size, length: self.alloc_exn(builtins.TException("ValueError"),
|
||||
ir.Constant("slice size {0} is larger than iterable length {1}",
|
||||
builtins.TStr()),
|
||||
slice_size, length),
|
||||
params=[slice_size, length],
|
||||
loc=node.slice.loc)
|
||||
if self.current_assign is None:
|
||||
is_neg_size = self.append(ir.Compare(ast.Lt(loc=None),
|
||||
slice_size, ir.Constant(0, slice_size.type)))
|
||||
abs_slice_size = self.append(ir.Select(is_neg_size,
|
||||
ir.Constant(0, slice_size.type), slice_size))
|
||||
other_value = self.append(ir.Alloc([abs_slice_size], value.type,
|
||||
name="slice.result"))
|
||||
else:
|
||||
other_value = self.current_assign
|
||||
|
||||
if self.current_assign is None:
|
||||
is_neg_size = self.append(ir.Compare(ast.Lt(loc=None),
|
||||
slice_size, ir.Constant(0, slice_size.type)))
|
||||
abs_slice_size = self.append(ir.Select(is_neg_size,
|
||||
ir.Constant(0, slice_size.type), slice_size))
|
||||
other_value = self.append(ir.Alloc([abs_slice_size], value.type,
|
||||
name="slice.result"))
|
||||
else:
|
||||
other_value = self.current_assign
|
||||
prehead = self.current_block
|
||||
|
||||
prehead = self.current_block
|
||||
head = self.current_block = self.add_block("slice.head")
|
||||
prehead.append(ir.Branch(head))
|
||||
|
||||
head = self.current_block = self.add_block("slice.head")
|
||||
prehead.append(ir.Branch(head))
|
||||
index = self.append(ir.Phi(node.slice.type,
|
||||
name="slice.index"))
|
||||
index.add_incoming(mapped_start_index, prehead)
|
||||
other_index = self.append(ir.Phi(node.slice.type,
|
||||
name="slice.resindex"))
|
||||
other_index.add_incoming(ir.Constant(0, node.slice.type), prehead)
|
||||
|
||||
index = self.append(ir.Phi(node.slice.type,
|
||||
name="slice.index"))
|
||||
index.add_incoming(mapped_start_index, prehead)
|
||||
other_index = self.append(ir.Phi(node.slice.type,
|
||||
name="slice.resindex"))
|
||||
other_index.add_incoming(ir.Constant(0, node.slice.type), prehead)
|
||||
# Still within bounds?
|
||||
bounded_up = self.append(ir.Compare(ast.Lt(loc=None), index, mapped_stop_index))
|
||||
bounded_down = self.append(ir.Compare(ast.Gt(loc=None), index, mapped_stop_index))
|
||||
within_bounds = self.append(ir.Select(counting_up, bounded_up, bounded_down))
|
||||
|
||||
# Still within bounds?
|
||||
bounded_up = self.append(ir.Compare(ast.Lt(loc=None), index, mapped_stop_index))
|
||||
bounded_down = self.append(ir.Compare(ast.Gt(loc=None), index, mapped_stop_index))
|
||||
within_bounds = self.append(ir.Select(counting_up, bounded_up, bounded_down))
|
||||
body = self.current_block = self.add_block("slice.body")
|
||||
|
||||
body = self.current_block = self.add_block("slice.body")
|
||||
if self.current_assign is None:
|
||||
elem = self.iterable_get(value, index)
|
||||
self.append(ir.SetElem(other_value, other_index, elem))
|
||||
else:
|
||||
elem = self.append(ir.GetElem(self.current_assign, other_index))
|
||||
self.append(ir.SetElem(value, index, elem))
|
||||
|
||||
if self.current_assign is None:
|
||||
elem = self.iterable_get(value, index)
|
||||
self.append(ir.SetElem(other_value, other_index, elem))
|
||||
else:
|
||||
elem = self.append(ir.GetElem(self.current_assign, other_index))
|
||||
self.append(ir.SetElem(value, index, elem))
|
||||
next_index = self.append(ir.Arith(ast.Add(loc=None), index, step))
|
||||
index.add_incoming(next_index, body)
|
||||
next_other_index = self.append(ir.Arith(ast.Add(loc=None), other_index,
|
||||
ir.Constant(1, node.slice.type)))
|
||||
other_index.add_incoming(next_other_index, body)
|
||||
self.append(ir.Branch(head))
|
||||
|
||||
next_index = self.append(ir.Arith(ast.Add(loc=None), index, step))
|
||||
index.add_incoming(next_index, body)
|
||||
next_other_index = self.append(ir.Arith(ast.Add(loc=None), other_index,
|
||||
ir.Constant(1, node.slice.type)))
|
||||
other_index.add_incoming(next_other_index, body)
|
||||
self.append(ir.Branch(head))
|
||||
tail = self.current_block = self.add_block("slice.tail")
|
||||
head.append(ir.BranchIf(within_bounds, body, tail))
|
||||
|
||||
tail = self.current_block = self.add_block("slice.tail")
|
||||
head.append(ir.BranchIf(within_bounds, body, tail))
|
||||
|
||||
if self.current_assign is None:
|
||||
return other_value
|
||||
if self.current_assign is None:
|
||||
return other_value
|
||||
|
||||
def visit_TupleT(self, node):
|
||||
if self.current_assign is None:
|
||||
|
@@ -2038,11 +2219,13 @@ class ARTIQIRGenerator(algorithm.Visitor):
         return phi

     # Keep this function with builtins.TException.attributes.
-    def alloc_exn(self, typ, message=None, param0=None, param1=None, param2=None):
+    def alloc_exn(self, typ, message=None, param0=None, param1=None,
+                  param2=None, nomsgcheck=False):
         typ = typ.find()
         name = "{}:{}".format(typ.id, typ.name)
+        name_id = self.embedding_map.store_str(name)
         attributes = [
-            ir.Constant(name, builtins.TStr()),        # typeinfo
+            ir.Constant(name_id, builtins.TInt32()),   # typeinfo
             ir.Constant("<not thrown>", builtins.TStr()), # file
             ir.Constant(0, builtins.TInt32()),            # line
             ir.Constant(0, builtins.TInt32()),            # column

@@ -2051,8 +2234,16 @@ class ARTIQIRGenerator(algorithm.Visitor):

         if message is None:
             attributes.append(ir.Constant(typ.name, builtins.TStr()))
-        else:
+        elif isinstance(message, ir.Constant) or nomsgcheck:
             attributes.append(message) # message
+        else:
+            diag = diagnostic.Diagnostic(
+                "error",
+                "only constant exception messages are supported",
+                {},
+                self.current_loc if message.loc is None else message.loc
+            )
+            self.engine.process(diag)

         param_type = builtins.TInt64()
         for param in [param0, param1, param2]:
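A hedged sketch of the user-visible consequence of the message check above (illustrative kernel code, not from this hunk): exception messages raised in kernel code must be compile-time constants; only compiler-generated exceptions, such as the assert lowering further below, bypass the check via nomsgcheck.

@kernel
def run(self):
    raise ValueError("PLL lock timeout")       # accepted: constant message
    raise ValueError("timeout: " + reason)     # rejected: "only constant exception
                                               # messages are supported"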
@@ -2340,6 +2531,33 @@ class ARTIQIRGenerator(algorithm.Visitor):
                or types.is_builtin(typ, "at_mu"):
            return self.append(ir.Builtin(typ.name,
                [self.visit(arg) for arg in node.args], node.type))
        elif types.is_builtin(typ, "subkernel_await"):
            if len(node.args) == 2 and len(node.keywords) == 0:
                fn = node.args[0].type
                timeout = self.visit(node.args[1])
            elif len(node.args) == 1 and len(node.keywords) == 0:
                fn = node.args[0].type
                timeout = ir.Constant(10_000, builtins.TInt64())
            else:
                assert False
            if types.is_method(fn):
                fn = types.get_method_function(fn)
            sid = ir.Constant(fn.sid, builtins.TInt32())
            if not builtins.is_none(fn.ret):
                ret = self.append(ir.Builtin("subkernel_retrieve_return", [sid, timeout], fn.ret))
            else:
                ret = ir.Constant(None, builtins.TNone())
            self.append(ir.Builtin("subkernel_await_finish", [sid, timeout], builtins.TNone()))
            return ret
        elif types.is_builtin(typ, "subkernel_preload"):
            if len(node.args) == 1 and len(node.keywords) == 0:
                fn = node.args[0].type
            else:
                assert False
            if types.is_method(fn):
                fn = types.get_method_function(fn)
            sid = ir.Constant(fn.sid, builtins.TInt32())
            return self.append(ir.Builtin("subkernel_preload", [sid], builtins.TNone()))
        elif types.is_exn_constructor(typ):
            return self.alloc_exn(node.type, *[self.visit(arg_node) for arg_node in node.args])
        elif types.is_constructor(typ):
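A hedged sketch of the experiment-level API this lowers. The device and method names, and the interpretation of the timeout argument, are assumptions for illustration, not taken from this hunk; only the 10_000 default comes from the code above.

@subkernel(destination=1)
def measure(self) -> TInt32:
    return self.sampler_read()        # placeholder satellite-side work

@kernel
def run(self):
    subkernel_preload(self.measure)   # optional: ship the kernel ahead of time
    self.measure()                    # starts asynchronously on the satellite
    result = subkernel_await(self.measure, 10000)  # or omit the timeout and get
                                                   # the 10_000 default seen above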
@@ -2351,8 +2569,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
                 node.loc)
             self.engine.process(diag)

-    def _user_call(self, callee, positional, keywords, arg_exprs={}):
-        if types.is_function(callee.type) or types.is_rpc(callee.type):
+    def _user_call(self, callee, positional, keywords, arg_exprs={}, remote_fn=False):
+        if types.is_function(callee.type) or types.is_rpc(callee.type) or types.is_subkernel(callee.type):
             func     = callee
             self_arg = None
             fn_typ   = callee.type

@@ -2367,16 +2585,51 @@ class ARTIQIRGenerator(algorithm.Visitor):
         else:
             assert False

-        if types.is_rpc(fn_typ):
-            if self_arg is None:
+        if types.is_rpc(fn_typ) or types.is_subkernel(fn_typ):
+            if self_arg is None or types.is_subkernel(fn_typ):
+                # self is not passed to subkernels by remote
                 args = positional
-            else:
+            elif self_arg is not None:
                 args = [self_arg] + positional

             for keyword in keywords:
                 arg = keywords[keyword]
                 args.append(self.append(ir.Alloc([ir.Constant(keyword, builtins.TStr()), arg],
                                                  ir.TKeyword(arg.type))))
+        elif remote_fn:
+            assert self_arg is None
+            assert len(fn_typ.args) >= len(positional)
+            assert len(keywords) == 0  # no keyword support
+            args = [None] * fn_typ.arity()
+            index = 0
+            # fill in first available args
+            for arg in positional:
+                args[index] = arg
+                index += 1
+
+            # remaining args are received through DRTIO
+            if index < len(args):
+                # min/max args received remotely (minus already filled)
+                offset = index
+                min_args = ir.Constant(len(fn_typ.args)-offset, builtins.TInt8())
+                max_args = ir.Constant(fn_typ.arity()-offset, builtins.TInt8())
+
+                arg_types = list(fn_typ.args.items())[offset:]
+                arg_type_list = [a[1] for a in arg_types] + [a[1] for a in fn_typ.optargs.items()]
+                rcvd_count = self.append(ir.SubkernelAwaitArgs([min_args, max_args], arg_type_list))
+                # obligatory arguments
+                for arg_name, arg_type in arg_types:
+                    args[index] = self.append(ir.GetArgFromRemote(arg_name, arg_type,
+                                                                  name="ARG.{}".format(arg_name)))
+                    index += 1
+
+                # optional arguments
+                for optarg_name, optarg_type in fn_typ.optargs.items():
+                    idx = ir.Constant(index-offset, builtins.TInt8())
+                    args[index] = \
+                        self.append(ir.GetOptArgFromRemote(optarg_name, optarg_type, rcvd_count, idx))
+                    index += 1
+
         else:
             args = [None] * (len(fn_typ.args) + len(fn_typ.optargs))
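A rough model (not compiler code) of the argument plumbing the remote_fn branch sets up: positional arguments known at the call site are filled in directly, and whatever remains, obligatory arguments first and then optional ones, is awaited over DRTIO before the subkernel body runs. The transport call below is a placeholder name, not a real API.

def bind_subkernel_args(formal_args, formal_optargs, supplied):
    args = list(supplied)
    min_missing = len(formal_args) - len(supplied)                    # still-owed obligatory args
    max_missing = len(formal_args) + len(formal_optargs) - len(supplied)
    received = await_args_over_drtio(min_missing, max_missing)        # placeholder transport
    return args + received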
@@ -2462,7 +2715,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
             else:
                 assert False, "Broadcasting for {} arguments not implemented".format(len)
         else:
-            insn = self._user_call(callee, args, keywords, node.arg_exprs)
+            remote_fn = getattr(node, "remote_fn", False)
+            insn = self._user_call(callee, args, keywords, node.arg_exprs, remote_fn)
         if isinstance(node.func, asttyped.AttributeT):
             attr_node = node.func
             self.method_map[(attr_node.value.type.find(),
@@ -2513,11 +2767,12 @@ class ARTIQIRGenerator(algorithm.Visitor):
             old_final_branch, self.final_branch = self.final_branch, None
             old_unwind, self.unwind_target = self.unwind_target, None

-            exn = self.alloc_exn(builtins.TException("AssertionError"), message=msg)
-            self.append(ir.SetAttr(exn, "__file__", file))
-            self.append(ir.SetAttr(exn, "__line__", line))
-            self.append(ir.SetAttr(exn, "__col__", col))
-            self.append(ir.SetAttr(exn, "__func__", function))
+            exn = self.alloc_exn(builtins.TException("AssertionError"),
+                                 message=msg, nomsgcheck=True)
+            self.append(ir.SetAttr(exn, "#__file__", file))
+            self.append(ir.SetAttr(exn, "#__line__", line))
+            self.append(ir.SetAttr(exn, "#__col__", col))
+            self.append(ir.SetAttr(exn, "#__func__", function))
             self.append(ir.Raise(exn))
         finally:
             self.current_function = old_func
@@ -2653,14 +2908,15 @@ class ARTIQIRGenerator(algorithm.Visitor):

             format_string += ")"
         elif builtins.is_exception(value.type):
-            name    = self.append(ir.GetAttr(value, "__name__"))
-            message = self.append(ir.GetAttr(value, "__message__"))
-            param1  = self.append(ir.GetAttr(value, "__param0__"))
-            param2  = self.append(ir.GetAttr(value, "__param1__"))
-            param3  = self.append(ir.GetAttr(value, "__param2__"))
+            # message may not be an actual string...
+            # so we cannot really print it
+            name   = self.append(ir.GetAttr(value, "#__name__"))
+            param1 = self.append(ir.GetAttr(value, "#__param0__"))
+            param2 = self.append(ir.GetAttr(value, "#__param1__"))
+            param3 = self.append(ir.GetAttr(value, "#__param2__"))

-            format_string += "%.*s(%.*s, %lld, %lld, %lld)"
-            args += [name, message, param1, param2, param3]
+            format_string += "%ld(%lld, %lld, %lld)"
+            args += [name, param1, param2, param3]
         else:
             assert False
@@ -238,7 +238,7 @@ class ASTTypedRewriter(algorithm.Transformer):
                     body=node.body, decorator_list=node.decorator_list,
                     keyword_loc=node.keyword_loc, name_loc=node.name_loc,
                     arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
-                    loc=node.loc)
+                    loc=node.loc, remote_fn=False)

        try:
            self.env_stack.append(node.typing_env)
@@ -439,8 +439,9 @@ class ASTTypedRewriter(algorithm.Transformer):

     def visit_Call(self, node):
         node = self.generic_visit(node)
-        node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={},
-                              func=node.func, args=node.args, keywords=node.keywords,
+        node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={},
+                              remote_fn=False, func=node.func,
+                              args=node.args, keywords=node.keywords,
                               starargs=node.starargs, kwargs=node.kwargs,
                               star_loc=node.star_loc, dstar_loc=node.dstar_loc,
                               begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
@@ -15,13 +15,26 @@ class DeadCodeEliminator:
             self.process_function(func)

     def process_function(self, func):
-        modified = True
-        while modified:
-            modified = False
-            for block in list(func.basic_blocks):
-                if not any(block.predecessors()) and block != func.entry():
-                    self.remove_block(block)
-                    modified = True
+        # defer removing those blocks, so our use checks will ignore deleted blocks
+        preserve = [func.entry()]
+        work_list = [func.entry()]
+        while any(work_list):
+            block = work_list.pop()
+            for succ in block.successors():
+                if succ not in preserve:
+                    preserve.append(succ)
+                    work_list.append(succ)
+
+        to_be_removed = []
+        for block in func.basic_blocks:
+            if block not in preserve:
+                block.is_removed = True
+                to_be_removed.append(block)
+                for insn in block.instructions:
+                    insn.is_removed = True
+
+        for block in to_be_removed:
+            self.remove_block(block)

         modified = True
         while modified:
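A standalone illustration (plain Python, not compiler code) of why the old rule failed: two unreachable blocks that jump to each other each still have a predecessor, so "no predecessors" never fires, whereas a reachability walk from the entry block classifies both as dead.

def reachable_blocks(entry, successors):
    seen, work = {entry}, [entry]
    while work:
        block = work.pop()
        for succ in successors(block):
            if succ not in seen:
                seen.add(succ)
                work.append(succ)
    return seen   # anything outside this set is dead, even inside a cycle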
@@ -42,6 +55,8 @@ class DeadCodeEliminator:
     def remove_block(self, block):
         # block.uses are updated while iterating
         for use in set(block.uses):
+            if use.is_removed:
+                continue
             if isinstance(use, ir.Phi):
                 use.remove_incoming_block(block)
                 if not any(use.operands):

@@ -56,6 +71,8 @@ class DeadCodeEliminator:

     def remove_instruction(self, insn):
         for use in set(insn.uses):
+            if use.is_removed:
+                continue
             if isinstance(use, ir.Phi):
                 use.remove_incoming_value(insn)
                 if not any(use.operands):
@@ -6,6 +6,29 @@ from collections import OrderedDict
from pythonparser import algorithm, diagnostic, ast
from .. import asttyped, types, builtins
from .typedtree_printer import TypedtreePrinter
from artiq.experiment import kernel


def is_nested_empty_list(node):
    """If the passed AST node is an empty list, or a regularly nested list thereof,
    returns the number of nesting layers, or ``None`` otherwise.

    For instance, ``is_nested_empty_list([]) == 1`` and
    ``is_nested_empty_list([[], []]) == 2``, but
    ``is_nested_empty_list([[[]], []]) == None`` as the number of nesting layers doesn't
    match.
    """
    if not isinstance(node, ast.List):
        return None
    if not node.elts:
        return 1
    result = is_nested_empty_list(node.elts[0])
    if result is None:
        return None
    for elt in node.elts[1:]:
        if result != is_nested_empty_list(elt):
            return None
    return result + 1


class Inferencer(algorithm.Visitor):
@@ -23,6 +46,7 @@ class Inferencer(algorithm.Visitor):
        self.function = None  # currently visited function, for Return inference
        self.in_loop = False
        self.has_return = False
        self.subkernel_arg_types = dict()

    def _unify(self, typea, typeb, loca, locb, makenotes=None, when=""):
        try:
@@ -155,7 +179,7 @@ class Inferencer(algorithm.Visitor):
             # Convert to a method.
             attr_type = types.TMethod(object_type, attr_type)
             self._unify_method_self(attr_type, attr_name, attr_loc, loc, value_node.loc)
-        elif types.is_rpc(attr_type):
+        elif types.is_rpc(attr_type) or types.is_subkernel(attr_type):
             # Convert to a method. We don't have to bother typechecking
             # the self argument, since for RPCs anything goes.
             attr_type = types.TMethod(object_type, attr_type)
@@ -216,6 +240,7 @@ class Inferencer(algorithm.Visitor):
                        value.loc, None)

    def visit_SliceT(self, node):
        self.generic_visit(node)
        if (node.lower, node.upper, node.step) == (None, None, None):
            self._unify(node.type, builtins.TInt32(),
                        node.loc, None)
@@ -235,7 +260,31 @@ class Inferencer(algorithm.Visitor):

     def visit_SubscriptT(self, node):
         self.generic_visit(node)
-        if isinstance(node.slice, ast.Index):
+        if types.is_tuple(node.value.type):
+            if (not isinstance(node.slice, ast.Index) or
+                    not isinstance(node.slice.value, ast.Num)):
+                diag = diagnostic.Diagnostic(
+                    "error", "tuples can only be indexed by a constant", {},
+                    node.slice.loc, []
+                )
+                self.engine.process(diag)
+                return
+
+            tuple_type = node.value.type.find()
+            index = node.slice.value.n
+            if index < 0 or index >= len(tuple_type.elts):
+                diag = diagnostic.Diagnostic(
+                    "error",
+                    "index {index} is out of range for tuple of size {size}",
+                    {"index": index, "size": len(tuple_type.elts)},
+                    node.slice.loc, []
+                )
+                self.engine.process(diag)
+                return
+
+            self._unify(node.type, tuple_type.elts[index], node.loc, node.value.loc)
+        elif isinstance(node.slice, ast.Index):
             if types.is_tuple(node.slice.value.type):
                 if types.is_var(node.value.type):
                     return
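Hedged kernel-level examples of the new tuple-subscript rules (assumed experiment code, not from this hunk):

@kernel
def run(self):
    t = (1, 2.5, True)
    x = t[1]     # ok: constant index, x inferred as float
    i = 0
    y = t[i]     # error: "tuples can only be indexed by a constant"
    z = t[3]     # error: "index 3 is out of range for tuple of size 3"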
@@ -268,12 +317,21 @@ class Inferencer(algorithm.Visitor):
             else:
                 self._unify_iterable(element=node, collection=node.value)
         elif isinstance(node.slice, ast.Slice):
+            if builtins.is_array(node.value.type):
+                if node.slice.step is not None:
+                    diag = diagnostic.Diagnostic(
+                        "error",
+                        "strided slicing not yet supported for NumPy arrays", {},
+                        node.slice.step.loc, [])
+                    self.engine.process(diag)
+                    return
             self._unify(node.type, node.value.type, node.loc, node.value.loc)
         else:  # ExtSlice
             pass  # error emitted above

     def visit_IfExpT(self, node):
         self.generic_visit(node)
         self._unify(node.test.type, builtins.TBool(), node.test.loc, None)
         self._unify(node.body.type, node.orelse.type,
                     node.body.loc, node.orelse.loc)
         self._unify(node.type, node.body.type,
@@ -882,28 +940,45 @@ class Inferencer(algorithm.Visitor):
        if len(node.args) == 1 and keywords_acceptable:
            arg, = node.args

            num_empty_dims = is_nested_empty_list(arg)
            if num_empty_dims is not None:
                # As a special case, following the behaviour of numpy.array (and
                # repr() on ndarrays), consider empty lists to be exactly of the
                # number of dimensions given, instead of potentially containing an
                # unknown number of extra dimensions.
                num_dims = num_empty_dims

                # The ultimate element type will be TVar initially, but we might be
                # able to resolve it from context.
                elt = arg.type
                for _ in range(num_dims):
                    assert builtins.is_list(elt)
                    elt = elt.find()["elt"]
            else:
                # In the absence of any other information (there currently isn't a way
                # to specify any), assume that all iterables are expandable into a
                # (runtime-checked) rectangular array of the innermost element type.
                elt = arg.type
                num_dims = 0
                expected_dims = (node.type.find()["num_dims"].value
                                 if builtins.is_array(node.type) else -1)
                while True:
                    if num_dims == expected_dims:
                        # If we already know the number of dimensions of the result,
                        # stop so we can disambiguate the (innermost) element type of
                        # the argument if it is still unknown.
                        break
                    if types.is_var(elt):
                        # Can't make progress here because we don't know how many more
                        # dimensions might be "hidden" inside.
                        return
                    if not builtins.is_iterable(elt) or builtins.is_str(elt):
                        break
                    if builtins.is_array(elt):
                        num_dims += elt.find()["num_dims"].value
                    else:
                        num_dims += 1
                    elt = builtins.get_iterable_elt(elt)

            if explicit_dtype is not None:
                # TODO: Factor out type detection; support quoted type constructors
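Hedged examples of the effect of the empty-list special case (assumed kernel code, not from this hunk):

a = array([])                 # 1-dimensional, zero elements; element type resolved from context
b = array([[], []])           # exactly 2-dimensional (2 x 0), matching numpy.array's behaviour
c = array([[1, 2], [3, 4]])   # unchanged: 2 x 2 array of integers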
@@ -1219,6 +1294,55 @@ class Inferencer(algorithm.Visitor):
            # Ignored.
            self._unify(node.type, builtins.TNone(),
                        node.loc, None)
        elif types.is_builtin(typ, "subkernel_await"):
            valid_forms = lambda: [
                valid_form("subkernel_await(f: subkernel) -> f return type"),
                valid_form("subkernel_await(f: subkernel, timeout: numpy.int64) -> f return type")
            ]
            if 1 <= len(node.args) <= 2:
                arg0 = node.args[0].type
                if types.is_var(arg0):
                    pass  # undetermined yet
                else:
                    if types.is_method(arg0):
                        fn = types.get_method_function(arg0)
                    elif types.is_function(arg0) or types.is_subkernel(arg0):
                        fn = arg0
                    else:
                        diagnose(valid_forms())
                    self._unify(node.type, fn.ret,
                                node.loc, None)
                if len(node.args) == 2:
                    arg1 = node.args[1]
                    if types.is_var(arg1.type):
                        pass
                    elif builtins.is_int(arg1.type):
                        # promote to TInt64
                        self._unify(arg1.type, builtins.TInt64(),
                                    arg1.loc, None)
                    else:
                        diagnose(valid_forms())
            else:
                diagnose(valid_forms())
        elif types.is_builtin(typ, "subkernel_preload"):
            valid_forms = lambda: [
                valid_form("subkernel_preload(f: subkernel) -> None")
            ]
            if len(node.args) == 1:
                arg0 = node.args[0].type
                if types.is_var(arg0):
                    pass  # undetermined yet
                else:
                    if types.is_method(arg0):
                        fn = types.get_method_function(arg0)
                    elif types.is_function(arg0) or types.is_subkernel(arg0):
                        fn = arg0
                    else:
                        diagnose(valid_forms())
                    self._unify(node.type, fn.ret,
                                node.loc, None)
            else:
                diagnose(valid_forms())
        else:
            assert False
@@ -1257,6 +1381,7 @@ class Inferencer(algorithm.Visitor):
            typ_args = typ.args
            typ_optargs = typ.optargs
            typ_ret = typ.ret
            typ_func = typ
        else:
            typ_self = types.get_method_self(typ)
            typ_func = types.get_method_function(typ)
@@ -1314,12 +1439,23 @@ class Inferencer(algorithm.Visitor):
                             other_node=node.args[0])
                self._unify(node.type, ret, node.loc, None)
                return
+        if types.is_subkernel(typ_func) and typ_func.sid not in self.subkernel_arg_types:
+            self.subkernel_arg_types[typ_func.sid] = []

        for actualarg, (formalname, formaltyp) in \
                zip(node.args, list(typ_args.items()) + list(typ_optargs.items())):
            self._unify(actualarg.type, formaltyp,
                        actualarg.loc, None)
            passed_args[formalname] = actualarg.loc
+            if types.is_subkernel(typ_func):
+                if types.is_instance(actualarg.type):
+                    # objects cannot be passed to subkernels, as rpc code doesn't support them
+                    diag = diagnostic.Diagnostic("error",
+                        "argument '{name}' of type: {typ} is not supported in subkernels",
+                        {"name": formalname, "typ": actualarg.type},
+                        actualarg.loc, [])
+                    self.engine.process(diag)
+                self.subkernel_arg_types[typ_func.sid].append((formalname, formaltyp))

        for keyword in node.keywords:
            if keyword.arg in passed_args:
@@ -1350,7 +1486,7 @@ class Inferencer(algorithm.Visitor):
            passed_args[keyword.arg] = keyword.arg_loc

        for formalname in typ_args:
-            if formalname not in passed_args:
+            if formalname not in passed_args and not node.remote_fn:
                note = diagnostic.Diagnostic("note",
                    "the called function is of type {type}",
                    {"type": types.TypePrinter().name(node.func.type)},
@@ -1613,7 +1749,14 @@ class Inferencer(algorithm.Visitor):

     def visit_FunctionDefT(self, node):
         for index, decorator in enumerate(node.decorator_list):
-            if types.is_builtin(decorator.type, "kernel") or \
+            def eval_attr(attr):
+                if isinstance(attr.value, asttyped.QuoteT):
+                    return getattr(attr.value.value, attr.attr)
+                return getattr(eval_attr(attr.value), attr.attr)
+            if isinstance(decorator, asttyped.AttributeT):
+                decorator = eval_attr(decorator)
+            if id(decorator) == id(kernel) or \
+                    types.is_builtin(decorator.type, "kernel") or \
                     isinstance(decorator, asttyped.CallT) and \
                     types.is_builtin(decorator.func.type, "kernel"):
                 continue
@@ -280,7 +280,7 @@ class IODelayEstimator(algorithm.Visitor):
                                      context="as an argument for delay_mu()")
            call_delay = value
        elif not types.is_builtin(typ):
-            if types.is_function(typ) or types.is_rpc(typ):
+            if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
                offset = 0
            elif types.is_method(typ):
                offset = 1
@@ -288,7 +288,7 @@ class IODelayEstimator(algorithm.Visitor):
            else:
                assert False

-            if types.is_rpc(typ):
+            if types.is_rpc(typ) or types.is_subkernel(typ):
                call_delay = iodelay.Const(0)
            else:
                delay = typ.find().delay.find()
@@ -311,13 +311,20 @@ class IODelayEstimator(algorithm.Visitor):
                args[arg_name] = arg_node

            free_vars = delay.duration.free_vars()
-            node.arg_exprs = {
-                arg: self.evaluate(args[arg], abort=abort,
-                                   context="in the expression for argument '{}' "
-                                           "that affects I/O delay".format(arg))
-                for arg in free_vars
-            }
-            call_delay = delay.duration.fold(node.arg_exprs)
+            try:
+                node.arg_exprs = {
+                    arg: self.evaluate(args[arg], abort=abort,
+                                       context="in the expression for argument '{}' "
+                                               "that affects I/O delay".format(arg))
+                    for arg in free_vars
+                }
+                call_delay = delay.duration.fold(node.arg_exprs)
+            except KeyError as e:
+                if getattr(node, "remote_fn", False):
+                    note = diagnostic.Diagnostic("note",
+                        "function called here", {},
+                        node.loc)
+                    self.abort("due to arguments passed remotely", node.loc, note)
+                else:
+                    assert False
        else:
File diff suppressed because it is too large
@ -3,6 +3,7 @@ The :mod:`types` module contains the classes describing the types
|
|||
in :mod:`asttyped`.
|
||||
"""
|
||||
|
||||
import builtins
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
from . import iodelay
|
||||
|
@@ -55,40 +56,39 @@ class TVar(Type):

     def __init__(self):
         self.parent = self
+        self.rank = 0

     def find(self):
-        if self.parent is self:
+        parent = self.parent
+        if parent is self:
             return self
         else:
             # The recursive find() invocation is turned into a loop
             # because paths resulting from unification of large arrays
             # can easily cause a stack overflow.
             root = self
-            while root.__class__ == TVar:
-                if root is root.parent:
-                    break
-                else:
-                    root = root.parent
-
-            # path compression
-            iter = self
-            while iter.__class__ == TVar:
-                if iter is root:
-                    break
-                else:
-                    iter, iter.parent = iter.parent, root
-
-            return root
+            while parent.__class__ == TVar and root is not parent:
+                _, parent = root, root.parent = parent, parent.parent
+            return root.parent

     def unify(self, other):
         if other is self:
             return
-        other = other.find()
-
-        if self.parent is self:
-            self.parent = other
+        x = other.find()
+        y = self.find()
+        if x is y:
+            return
+        if y.__class__ == TVar:
+            if x.__class__ == TVar:
+                if x.rank < y.rank:
+                    x, y = y, x
+                y.parent = x
+                if x.rank == y.rank:
+                    x.rank += 1
+            else:
+                y.parent = x
         else:
-            self.find().unify(other)
+            y.unify(x)

     def fold(self, accum, fn):
         if self.parent is self:
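The same union-by-rank with path halving, shown standalone (plain Python, not the compiler's types classes); each find() step re-points a node one level closer to the root, which keeps chains short without recursion.

class Node:
    def __init__(self):
        self.parent, self.rank = self, 0

def find(n):
    while n.parent is not n:
        n.parent = n.parent.parent  # path halving: skip one level per step
        n = n.parent
    return n

def union(a, b):
    ra, rb = find(a), find(b)
    if ra is rb:
        return
    if ra.rank < rb.rank:        # attach the shallower tree under the deeper one
        ra, rb = rb, ra
    rb.parent = ra
    if ra.rank == rb.rank:
        ra.rank += 1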
@ -97,6 +97,8 @@ class TVar(Type):
|
|||
return self.find().fold(accum, fn)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
if self.parent is self:
|
||||
return "<artiq.compiler.types.TVar %d>" % id(self)
|
||||
else:
|
||||
|
@ -143,6 +145,8 @@ class TMono(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TMono(%s, %s)" % (repr(self.name), repr(self.params))
|
||||
|
||||
def __getitem__(self, param):
|
||||
|
@ -191,6 +195,8 @@ class TTuple(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TTuple(%s)" % repr(self.elts)
|
||||
|
||||
def __eq__(self, other):
|
||||
|
@ -269,6 +275,8 @@ class TFunction(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TFunction({}, {}, {})".format(
|
||||
repr(self.args), repr(self.optargs), repr(self.ret))
|
||||
|
||||
|
@ -296,7 +304,7 @@ class TExternalFunction(TFunction):
|
|||
mangling rules).
|
||||
:ivar flags: (set of str) function flags.
|
||||
Flag ``nounwind`` means the function never raises an exception.
|
||||
Flag ``nowrite`` means the function never writes any memory
|
||||
Flag ``nowrite`` means the function never accesses any memory
|
||||
that the ARTIQ Python code can observe.
|
||||
:ivar broadcast_across_arrays: (bool)
|
||||
If True, the function is transparently applied element-wise when called
|
||||
|
@ -362,6 +370,8 @@ class TRPC(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TRPC({})".format(repr(self.ret))
|
||||
|
||||
def __eq__(self, other):
|
||||
|
@@ -375,6 +385,50 @@ class TRPC(Type):
    def __hash__(self):
        return hash(self.service)


class TSubkernel(TFunction):
    """
    A kernel to be run on a satellite.

    :ivar args: (:class:`collections.OrderedDict` of string to :class:`Type`)
        function arguments
    :ivar ret: (:class:`Type`)
        return type
    :ivar sid: (int) subkernel ID number
    :ivar destination: (int) satellite destination number
    """

    attributes = OrderedDict()

    def __init__(self, args, optargs, ret, sid, destination):
        assert isinstance(ret, Type)
        super().__init__(args, optargs, ret)
        self.sid, self.destination = sid, destination
        self.delay = TFixedDelay(iodelay.Const(0))

    def unify(self, other):
        if other is self:
            return
        if isinstance(other, TSubkernel) and \
                self.sid == other.sid and \
                self.destination == other.destination:
            self.ret.unify(other.ret)
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TSubkernel({})".format(repr(self.ret))

    def __eq__(self, other):
        return isinstance(other, TSubkernel) and \
                self.sid == other.sid

    def __hash__(self):
        return hash(self.sid)


class TBuiltin(Type):
    """
    An instance of builtin type. Every instance of a builtin
@ -399,6 +453,8 @@ class TBuiltin(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.{}({})".format(type(self).__name__, repr(self.name))
|
||||
|
||||
def __eq__(self, other):
|
||||
|
@ -459,6 +515,8 @@ class TInstance(TMono):
|
|||
self.constant_attributes = set()
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TInstance({}, {})".format(
|
||||
repr(self.name), repr(self.attributes))
|
||||
|
||||
|
@ -474,6 +532,8 @@ class TModule(TMono):
|
|||
self.constant_attributes = set()
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TModule({}, {})".format(
|
||||
repr(self.name), repr(self.attributes))
|
||||
|
||||
|
@ -513,6 +573,8 @@ class TValue(Type):
|
|||
return fn(accum, self)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
return "artiq.compiler.types.TValue(%s)" % repr(self.value)
|
||||
|
||||
def __eq__(self, other):
|
||||
|
@ -571,6 +633,8 @@ class TDelay(Type):
|
|||
return not (self == other)
|
||||
|
||||
def __repr__(self):
|
||||
if getattr(builtins, "__in_sphinx__", False):
|
||||
return str(self)
|
||||
if self.duration is None:
|
||||
return "<{}.TIndeterminateDelay>".format(__name__)
|
||||
elif self.cause is None:
|
||||
|
@@ -624,6 +688,9 @@ def is_function(typ):
def is_rpc(typ):
    return isinstance(typ.find(), TRPC)

def is_subkernel(typ):
    return isinstance(typ.find(), TSubkernel)

def is_external_function(typ, name=None):
    typ = typ.find()
    if name is None:
@@ -790,6 +857,10 @@ class TypePrinter(object):
            return "[rpc{} #{}](...)->{}".format(typ.service,
                                                 " async" if typ.is_async else "",
                                                 self.name(typ.ret, depth + 1))
        elif isinstance(typ, TSubkernel):
            return "<subkernel{} dest#{}>->{}".format(typ.sid,
                                                      typ.destination,
                                                      self.name(typ.ret, depth + 1))
        elif isinstance(typ, TBuiltinFunction):
            return "<function {}>".format(typ.name)
        elif isinstance(typ, (TConstructor, TExceptionConstructor)):
@@ -102,8 +102,20 @@ class RegionOf(algorithm.Visitor):
         if types.is_external_function(node.func.type, "cache_get"):
             # The cache is borrow checked dynamically
             return Global()
-        else:
-            self.visit_sometimes_allocating(node)
+
+        if (types.is_builtin_function(node.func.type, "array")
+                or types.is_builtin_function(node.func.type, "make_array")
+                or types.is_builtin_function(node.func.type, "numpy.transpose")):
+            # While lifetime tracking across function calls in general is currently
+            # broken (see below), these special builtins that allocate an array on
+            # the stack of the caller _always_ allocate regardless of the parameters,
+            # and we can thus handle them without running into the precision issue
+            # mentioned in commit ae999db.
+            return self.visit_allocating(node)
+
+        # FIXME: Return statement missing here, but see m-labs/artiq#1497 and
+        # commit ae999db.
+        self.visit_sometimes_allocating(node)

     # Value lives as long as the object/container, if it's mutable,
     # or else forever
@ -127,9 +127,9 @@ class AD53xx:
|
|||
transactions (default: 1)
|
||||
:param div_write: SPI clock divider for write operations (default: 4,
|
||||
50MHz max SPI clock with {t_high, t_low} >=8ns)
|
||||
:param div_read: SPI clock divider for read operations (default: 8, not
|
||||
optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO
|
||||
valid)
|
||||
:param div_read: SPI clock divider for read operations (default: 16, not
|
||||
optimized for speed; datasheet says t22: 25ns min SCLK edge to SDO
|
||||
valid, and suggests the SPI speed for reads should be <=20 MHz)
|
||||
:param vref: DAC reference voltage (default: 5.)
|
||||
:param offset_dacs: Initial register value for the two offset DACs, device
|
||||
dependent and must be set correctly for correct voltage to mu
|
||||
|
@ -233,7 +233,7 @@ class AD53xx:
|
|||
def write_gain_mu(self, channel, gain=0xffff):
|
||||
"""Program the gain register for a DAC channel.
|
||||
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth load:).
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
|
||||
This method advances the timeline by the duration of one SPI transfer.
|
||||
|
||||
:param gain: 16-bit gain register value (default: 0xffff)
|
||||
|
@ -245,7 +245,7 @@ class AD53xx:
|
|||
def write_offset_mu(self, channel, offset=0x8000):
|
||||
"""Program the offset register for a DAC channel.
|
||||
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth load:).
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
|
||||
This method advances the timeline by the duration of one SPI transfer.
|
||||
|
||||
:param offset: 16-bit offset register value (default: 0x8000)
|
||||
|
@ -258,7 +258,7 @@ class AD53xx:
|
|||
"""Program the DAC offset voltage for a channel.
|
||||
|
||||
An offset of +V can be used to trim out a DAC offset error of -V.
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth load:).
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
|
||||
This method advances the timeline by the duration of one SPI transfer.
|
||||
|
||||
:param voltage: the offset voltage
|
||||
|
@ -270,7 +270,7 @@ class AD53xx:
|
|||
def write_dac_mu(self, channel, value):
|
||||
"""Program the DAC input register for a channel.
|
||||
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth load:).
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
|
||||
This method advances the timeline by the duration of one SPI transfer.
|
||||
"""
|
||||
self.bus.write(
|
||||
|
@ -280,7 +280,7 @@ class AD53xx:
|
|||
def write_dac(self, channel, voltage):
|
||||
"""Program the DAC output voltage for a channel.
|
||||
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth load:).
|
||||
The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
|
||||
This method advances the timeline by the duration of one SPI transfer.
|
||||
"""
|
||||
self.write_dac_mu(channel, voltage_to_mu(voltage, self.offset_dacs,
|
||||
|
@ -313,7 +313,7 @@ class AD53xx:
|
|||
|
||||
If no LDAC device was defined, the LDAC pulse is skipped.
|
||||
|
||||
See :meth load:.
|
||||
See :meth:`load`.
|
||||
|
||||
:param values: list of DAC values to program
|
||||
:param channels: list of DAC channels to program. If not specified,
|
||||
|
@ -355,7 +355,7 @@ class AD53xx:
|
|||
""" Two-point calibration of a DAC channel.
|
||||
|
||||
Programs the offset and gain register to trim out DAC errors. Does not
|
||||
take effect until LDAC is pulsed (see :meth load:).
|
||||
take effect until LDAC is pulsed (see :meth:`load`).
|
||||
|
||||
Calibration consists of measuring the DAC output voltage for a channel
|
||||
with the DAC set to zero-scale (0x0000) and full-scale (0xffff).
|
||||
|
|
File diff suppressed because it is too large
@@ -1,23 +0,0 @@
-from artiq.language.core import kernel
-
-
-class AD9154:
-    """Kernel interface to AD9154 registers, using non-realtime SPI."""
-
-    def __init__(self, dmgr, spi_device, chip_select):
-        self.core = dmgr.get("core")
-        self.bus = dmgr.get(spi_device)
-        self.chip_select = chip_select
-
-    @kernel
-    def setup_bus(self, div=16):
-        self.bus.set_config_mu(0, 24, div, self.chip_select)
-
-    @kernel
-    def write(self, addr, data):
-        self.bus.write((addr << 16) | (data << 8))
-
-    @kernel
-    def read(self, addr):
-        self.write((1 << 15) | addr, 0)
-        return self.bus.read()
@ -3,15 +3,16 @@ from numpy import int32, int64
|
|||
from artiq.language.core import (
|
||||
kernel, delay, portable, delay_mu, now_mu, at_mu)
|
||||
from artiq.language.units import us, ms
|
||||
from artiq.language.types import *
|
||||
from artiq.language.types import TBool, TInt32, TInt64, TFloat, TList, TTuple
|
||||
|
||||
from artiq.coredevice import spi2 as spi
|
||||
from artiq.coredevice import urukul
|
||||
from artiq.coredevice.urukul import DEFAULT_PROFILE
|
||||
|
||||
# Work around ARTIQ-Python import machinery
|
||||
urukul_sta_pll_lock = urukul.urukul_sta_pll_lock
|
||||
urukul_sta_smp_err = urukul.urukul_sta_smp_err
|
||||
|
||||
|
||||
__all__ = [
|
||||
"AD9910",
|
||||
"PHASE_MODE_CONTINUOUS", "PHASE_MODE_ABSOLUTE", "PHASE_MODE_TRACKING",
|
||||
|
@ -20,7 +21,6 @@ __all__ = [
|
|||
"RAM_MODE_CONT_BIDIR_RAMP", "RAM_MODE_CONT_RAMPUP",
|
||||
]
|
||||
|
||||
|
||||
_PHASE_MODE_DEFAULT = -1
|
||||
PHASE_MODE_CONTINUOUS = 0
|
||||
PHASE_MODE_ABSOLUTE = 1
|
||||
|
@ -61,6 +61,9 @@ RAM_MODE_BIDIR_RAMP = 2
|
|||
RAM_MODE_CONT_BIDIR_RAMP = 3
|
||||
RAM_MODE_CONT_RAMPUP = 4
|
||||
|
||||
# Default profile for RAM mode
|
||||
_DEFAULT_PROFILE_RAM = 0
|
||||
|
||||
|
||||
class SyncDataUser:
|
||||
def __init__(self, core, sync_delay_seed, io_update_delay):
|
||||
|
@ -124,22 +127,24 @@ class AD9910:
|
|||
To stabilize the SYNC_IN delay tuning, run :meth:`tune_sync_delay` once
|
||||
and set this to the delay tap number returned (default: -1 to signal no
|
||||
synchronization and no tuning during :meth:`init`).
|
||||
Can be a string of the form "eeprom_device:byte_offset" to read the value
|
||||
from a I2C EEPROM; in which case, `io_update_delay` must be set to the
|
||||
same string value.
|
||||
Can be a string of the form "eeprom_device:byte_offset" to read the
|
||||
value from a I2C EEPROM; in which case, `io_update_delay` must be set
|
||||
to the same string value.
|
||||
:param io_update_delay: IO_UPDATE pulse alignment delay.
|
||||
To align IO_UPDATE to SYNC_CLK, run :meth:`tune_io_update_delay` and
|
||||
set this to the delay tap number returned.
|
||||
Can be a string of the form "eeprom_device:byte_offset" to read the value
|
||||
from a I2C EEPROM; in which case, `sync_delay_seed` must be set to the
|
||||
same string value.
|
||||
Can be a string of the form "eeprom_device:byte_offset" to read the
|
||||
value from a I2C EEPROM; in which case, `sync_delay_seed` must be set
|
||||
to the same string value.
|
||||
"""
|
||||
kernel_invariants = {"chip_select", "cpld", "core", "bus",
|
||||
"ftw_per_hz", "sysclk_per_mu"}
|
||||
|
||||
def __init__(self, dmgr, chip_select, cpld_device, sw_device=None,
|
||||
pll_n=40, pll_cp=7, pll_vco=5, sync_delay_seed=-1,
|
||||
io_update_delay=0, pll_en=1):
|
||||
self.kernel_invariants = {"cpld", "core", "bus", "chip_select",
|
||||
"pll_en", "pll_n", "pll_vco", "pll_cp",
|
||||
"ftw_per_hz", "sysclk_per_mu", "sysclk",
|
||||
"sync_data"}
|
||||
self.cpld = dmgr.get(cpld_device)
|
||||
self.core = self.cpld.core
|
||||
self.bus = self.cpld.bus
|
||||
|
@ -148,38 +153,41 @@ class AD9910:
|
|||
if sw_device:
|
||||
self.sw = dmgr.get(sw_device)
|
||||
self.kernel_invariants.add("sw")
|
||||
clk = self.cpld.refclk/[4, 1, 2, 4][self.cpld.clk_div]
|
||||
clk = self.cpld.refclk / [4, 1, 2, 4][self.cpld.clk_div]
|
||||
self.pll_en = pll_en
|
||||
self.pll_n = pll_n
|
||||
self.pll_vco = pll_vco
|
||||
self.pll_cp = pll_cp
|
||||
if pll_en:
|
||||
sysclk = clk*pll_n
|
||||
sysclk = clk * pll_n
|
||||
assert clk <= 60e6
|
||||
assert 12 <= pll_n <= 127
|
||||
assert 0 <= pll_vco <= 5
|
||||
vco_min, vco_max = [(370, 510), (420, 590), (500, 700),
|
||||
(600, 880), (700, 950), (820, 1150)][pll_vco]
|
||||
assert vco_min <= sysclk/1e6 <= vco_max
|
||||
assert vco_min <= sysclk / 1e6 <= vco_max
|
||||
assert 0 <= pll_cp <= 7
|
||||
else:
|
||||
sysclk = clk
|
||||
assert sysclk <= 1e9
|
||||
self.ftw_per_hz = (1 << 32)/sysclk
|
||||
self.sysclk_per_mu = int(round(sysclk*self.core.ref_period))
|
||||
self.ftw_per_hz = (1 << 32) / sysclk
|
||||
self.sysclk_per_mu = int(round(sysclk * self.core.ref_period))
|
||||
self.sysclk = sysclk
|
||||
|
||||
if isinstance(sync_delay_seed, str) or isinstance(io_update_delay, str):
|
||||
if isinstance(sync_delay_seed, str) or isinstance(io_update_delay,
|
||||
str):
|
||||
if sync_delay_seed != io_update_delay:
|
||||
raise ValueError("When using EEPROM, sync_delay_seed must be equal to io_update_delay")
|
||||
raise ValueError("When using EEPROM, sync_delay_seed must be "
|
||||
"equal to io_update_delay")
|
||||
self.sync_data = SyncDataEeprom(dmgr, self.core, sync_delay_seed)
|
||||
else:
|
||||
self.sync_data = SyncDataUser(self.core, sync_delay_seed, io_update_delay)
|
||||
self.sync_data = SyncDataUser(self.core, sync_delay_seed,
|
||||
io_update_delay)
|
||||
|
||||
self.phase_mode = PHASE_MODE_CONTINUOUS
|
||||
|
||||
@kernel
|
||||
def set_phase_mode(self, phase_mode):
|
||||
def set_phase_mode(self, phase_mode: TInt32):
|
||||
r"""Set the default phase mode.
|
||||
|
||||
for future calls to :meth:`set` and
|
||||
|
@ -224,7 +232,7 @@ class AD9910:
|
|||
self.phase_mode = phase_mode
|
||||
|
||||
@kernel
|
||||
def write16(self, addr, data):
|
||||
def write16(self, addr: TInt32, data: TInt32):
|
||||
"""Write to 16 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -232,10 +240,10 @@ class AD9910:
|
|||
"""
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 24,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((addr << 24) | (data << 8))
|
||||
self.bus.write((addr << 24) | ((data & 0xffff) << 8))
|
||||
|
||||
@kernel
|
||||
def write32(self, addr, data):
|
||||
def write32(self, addr: TInt32, data: TInt32):
|
||||
"""Write to 32 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -249,7 +257,7 @@ class AD9910:
|
|||
self.bus.write(data)
|
||||
|
||||
@kernel
|
||||
def read16(self, addr):
|
||||
def read16(self, addr: TInt32) -> TInt32:
|
||||
"""Read from 16 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -264,7 +272,7 @@ class AD9910:
|
|||
return self.bus.read()
|
||||
|
||||
@kernel
|
||||
def read32(self, addr):
|
||||
def read32(self, addr: TInt32) -> TInt32:
|
||||
"""Read from 32 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -279,7 +287,7 @@ class AD9910:
|
|||
return self.bus.read()
|
||||
|
||||
@kernel
|
||||
def read64(self, addr):
|
||||
def read64(self, addr: TInt32) -> TInt64:
|
||||
"""Read from 64 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -302,7 +310,7 @@ class AD9910:
|
|||
return (int64(hi) << 32) | lo
|
||||
|
||||
@kernel
|
||||
def write64(self, addr, data_high, data_low):
|
||||
def write64(self, addr: TInt32, data_high: TInt32, data_low: TInt32):
|
||||
"""Write to 64 bit register.
|
||||
|
||||
:param addr: Register address
|
||||
|
@ -320,14 +328,14 @@ class AD9910:
|
|||
self.bus.write(data_low)
|
||||
|
||||
@kernel
|
||||
def write_ram(self, data):
|
||||
def write_ram(self, data: TList(TInt32)):
|
||||
"""Write data to RAM.
|
||||
|
||||
The profile to write to and the step, start, and end address
|
||||
need to be configured before and separately using
|
||||
:meth:`set_profile_ram` and the parent CPLD `set_profile`.
|
||||
|
||||
:param data List(int32): Data to be written to RAM.
|
||||
:param data: Data to be written to RAM.
|
||||
"""
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 8, urukul.SPIT_DDS_WR,
|
||||
self.chip_select)
|
||||
|
@ -341,14 +349,14 @@ class AD9910:
|
|||
self.bus.write(data[len(data) - 1])
|
||||
|
||||
@kernel
|
||||
def read_ram(self, data):
|
||||
def read_ram(self, data: TList(TInt32)):
|
||||
"""Read data from RAM.
|
||||
|
||||
The profile to read from and the step, start, and end address
|
||||
need to be configured before and separately using
|
||||
:meth:`set_profile_ram` and the parent CPLD `set_profile`.
|
||||
|
||||
:param data List(int32): List to be filled with data read from RAM.
|
||||
:param data: List to be filled with data read from RAM.
|
||||
"""
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 8, urukul.SPIT_DDS_WR,
|
||||
self.chip_select)
|
||||
|
@ -370,16 +378,25 @@ class AD9910:
|
|||
data[(n - preload) + i] = self.bus.read()
|
||||
|
||||
@kernel
|
||||
def set_cfr1(self, power_down=0b0000, phase_autoclear=0,
|
||||
drg_load_lrr=0, drg_autoclear=0,
|
||||
internal_profile=0, ram_destination=0, ram_enable=0,
|
||||
manual_osk_external=0, osk_enable=0, select_auto_osk=0):
|
||||
def set_cfr1(self,
|
||||
power_down: TInt32 = 0b0000,
|
||||
phase_autoclear: TInt32 = 0,
|
||||
drg_load_lrr: TInt32 = 0,
|
||||
drg_autoclear: TInt32 = 0,
|
||||
phase_clear: TInt32 = 0,
|
||||
internal_profile: TInt32 = 0,
|
||||
ram_destination: TInt32 = 0,
|
||||
ram_enable: TInt32 = 0,
|
||||
manual_osk_external: TInt32 = 0,
|
||||
osk_enable: TInt32 = 0,
|
||||
select_auto_osk: TInt32 = 0):
|
||||
"""Set CFR1. See the AD9910 datasheet for parameter meanings.
|
||||
|
||||
This method does not pulse IO_UPDATE.
|
||||
|
||||
:param power_down: Power down bits.
|
||||
:param phase_autoclear: Autoclear phase accumulator.
|
||||
:param phase_clear: Asynchronous, static reset of the phase accumulator.
|
||||
:param drg_load_lrr: Load digital ramp generator LRR.
|
||||
:param drg_autoclear: Autoclear digital ramp generator.
|
||||
:param internal_profile: Internal profile control.
|
||||
|
@ -399,13 +416,43 @@ class AD9910:
|
|||
(drg_load_lrr << 15) |
|
||||
(drg_autoclear << 14) |
|
||||
(phase_autoclear << 13) |
|
||||
(phase_clear << 11) |
|
||||
(osk_enable << 9) |
|
||||
(select_auto_osk << 8) |
|
||||
(power_down << 4) |
|
||||
2) # SDIO input only, MSB first
|
||||
|
||||
@kernel
|
||||
def init(self, blind=False):
|
||||
def set_cfr2(self,
|
||||
asf_profile_enable: TInt32 = 1,
|
||||
drg_enable: TInt32 = 0,
|
||||
effective_ftw: TInt32 = 1,
|
||||
sync_validation_disable: TInt32 = 0,
|
||||
matched_latency_enable: TInt32 = 0):
|
||||
"""Set CFR2. See the AD9910 datasheet for parameter meanings.
|
||||
|
||||
This method does not pulse IO_UPDATE.
|
||||
|
||||
:param asf_profile_enable: Enable amplitude scale from single tone profiles.
|
||||
:param drg_enable: Digital ramp enable.
|
||||
:param effective_ftw: Read effective FTW.
|
||||
:param sync_validation_disable: Disable the SYNC_SMP_ERR pin indicating
|
||||
(active high) detection of a synchronization pulse sampling error.
|
||||
:param matched_latency_enable: Simultaneous application of amplitude,
|
||||
phase, and frequency changes to the DDS arrive at the output
|
||||
|
||||
* matched_latency_enable = 0: in the order listed
|
||||
* matched_latency_enable = 1: simultaneously.
|
||||
"""
|
||||
self.write32(_AD9910_REG_CFR2,
|
||||
(asf_profile_enable << 24) |
|
||||
(drg_enable << 19) |
|
||||
(effective_ftw << 16) |
|
||||
(matched_latency_enable << 7) |
|
||||
(sync_validation_disable << 5))
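A hedged usage sketch of the new CFR2 helper (assumed experiment code, with self.dds being an AD9910 channel): configuration writes only take effect once IO_UPDATE is pulsed.

        self.dds.set_cfr2(drg_enable=1)        # enable the digital ramp generator
        self.dds.cpld.io_update.pulse(1 * us)  # latch the new CFR2 value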
|
||||
|
||||
@kernel
|
||||
def init(self, blind: TBool = False):
|
||||
"""Initialize and configure the DDS.
|
||||
|
||||
Sets up SPI mode, confirms chip presence, powers down unused blocks,
|
||||
|
@ -418,70 +465,71 @@ class AD9910:
|
|||
if self.sync_data.sync_delay_seed >= 0 and not self.cpld.sync_div:
|
||||
raise ValueError("parent cpld does not drive SYNC")
|
||||
if self.sync_data.sync_delay_seed >= 0:
|
||||
if self.sysclk_per_mu != self.sysclk*self.core.ref_period:
|
||||
if self.sysclk_per_mu != self.sysclk * self.core.ref_period:
|
||||
raise ValueError("incorrect clock ratio for synchronization")
|
||||
delay(50*ms) # slack
|
||||
delay(50 * ms) # slack
|
||||
|
||||
# Set SPI mode
|
||||
self.set_cfr1()
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
delay(1*ms)
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
delay(1 * ms)
|
||||
if not blind:
|
||||
# Use the AUX DAC setting to identify and confirm presence
|
||||
aux_dac = self.read32(_AD9910_REG_AUX_DAC)
|
||||
if aux_dac & 0xff != 0x7f:
|
||||
raise ValueError("Urukul AD9910 AUX_DAC mismatch")
|
||||
delay(50*us) # slack
|
||||
delay(50 * us) # slack
|
||||
# Configure PLL settings and bring up PLL
|
||||
# enable amplitude scale from profiles
|
||||
# read effective FTW
|
||||
# sync timing validation disable (enabled later)
|
||||
self.write32(_AD9910_REG_CFR2, 0x01010020)
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.set_cfr2(sync_validation_disable=1)
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
cfr3 = (0x0807c000 | (self.pll_vco << 24) |
|
||||
(self.pll_cp << 19) | (self.pll_en << 8) |
|
||||
(self.pll_n << 1))
|
||||
self.write32(_AD9910_REG_CFR3, cfr3 | 0x400) # PFD reset
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
if self.pll_en:
|
||||
self.write32(_AD9910_REG_CFR3, cfr3)
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
if blind:
|
||||
delay(100*ms)
|
||||
delay(100 * ms)
|
||||
else:
|
||||
# Wait for PLL lock, up to 100 ms
|
||||
for i in range(100):
|
||||
sta = self.cpld.sta_read()
|
||||
lock = urukul_sta_pll_lock(sta)
|
||||
delay(1*ms)
|
||||
delay(1 * ms)
|
||||
if lock & (1 << self.chip_select - 4):
|
||||
break
|
||||
if i >= 100 - 1:
|
||||
raise ValueError("PLL lock timeout")
|
||||
delay(10*us) # slack
|
||||
if self.sync_data.sync_delay_seed >= 0:
|
||||
delay(10 * us) # slack
|
||||
if self.sync_data.sync_delay_seed >= 0 and not blind:
|
||||
self.tune_sync_delay(self.sync_data.sync_delay_seed)
|
||||
delay(1*ms)
|
||||
delay(1 * ms)
|
||||
|
||||
@kernel
|
||||
def power_down(self, bits=0b1111):
|
||||
def power_down(self, bits: TInt32 = 0b1111):
|
||||
"""Power down DDS.
|
||||
|
||||
:param bits: Power down bits, see datasheet
|
||||
"""
|
||||
self.set_cfr1(power_down=bits)
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
|
||||
# KLUDGE: ref_time_mu default argument is explicitly marked int64() to
|
||||
# avoid silent truncation of explicitly passed timestamps. (Compiler bug?)
|
||||
@kernel
|
||||
def set_mu(self, ftw, pow_=0, asf=0x3fff, phase_mode=_PHASE_MODE_DEFAULT,
|
||||
ref_time_mu=int64(-1), profile=0):
|
||||
"""Set profile 0 data in machine units.
|
||||
def set_mu(self, ftw: TInt32 = 0, pow_: TInt32 = 0, asf: TInt32 = 0x3fff,
|
||||
phase_mode: TInt32 = _PHASE_MODE_DEFAULT,
|
||||
ref_time_mu: TInt64 = int64(-1),
|
||||
profile: TInt32 = DEFAULT_PROFILE,
|
||||
ram_destination: TInt32 = -1) -> TInt32:
|
||||
"""Set DDS data in machine units.
|
||||
|
||||
This uses machine units (FTW, POW, ASF). The frequency tuning word
|
||||
width is 32, the phase offset word width is 16, and the amplitude
|
||||
scale factor width is 12.
|
||||
scale factor width is 14.
|
||||
|
||||
After the SPI transfer, the shared IO update pin is pulsed to
|
||||
activate the data.
|
||||
|
@ -496,7 +544,13 @@ class AD9910:
|
|||
by :meth:`set_phase_mode` for this call.
|
||||
:param ref_time_mu: Fiducial time used to compute absolute or tracking
|
||||
phase updates. In machine units as obtained by `now_mu()`.
|
||||
:param profile: Profile number to set (0-7, default: 0).
|
||||
:param profile: Single tone profile number to set (0-7, default: 7).
|
||||
Ineffective if `ram_destination` is specified.
|
||||
:param ram_destination: RAM destination (:const:`RAM_DEST_FTW`,
|
||||
:const:`RAM_DEST_POW`, :const:`RAM_DEST_ASF`,
|
||||
:const:`RAM_DEST_POWASF`). If specified, write free DDS parameters
|
||||
to the ASF/FTW/POW registers instead of to the single tone profile
|
||||
register (default behaviour, see `profile`).
|
||||
:return: Resulting phase offset word after application of phase
|
||||
tracking offset. When using :const:`PHASE_MODE_CONTINUOUS` in
|
||||
subsequent calls, use this value as the "current" phase.
|
||||
|
@ -518,9 +572,18 @@ class AD9910:
|
|||
# Also no need to use IO_UPDATE time as this
|
||||
# is equivalent to an output pipeline latency.
|
||||
dt = int32(now_mu()) - int32(ref_time_mu)
|
||||
pow_ += dt*ftw*self.sysclk_per_mu >> 16
|
||||
self.write64(_AD9910_REG_PROFILE0 + profile,
|
||||
(asf << 16) | (pow_ & 0xffff), ftw)
|
||||
pow_ += dt * ftw * self.sysclk_per_mu >> 16
|
||||
if ram_destination == -1:
|
||||
self.write64(_AD9910_REG_PROFILE0 + profile,
|
||||
(asf << 16) | (pow_ & 0xffff), ftw)
|
||||
else:
|
||||
if not ram_destination == RAM_DEST_FTW:
|
||||
self.set_ftw(ftw)
|
||||
if not ram_destination == RAM_DEST_POWASF:
|
||||
if not ram_destination == RAM_DEST_ASF:
|
||||
self.set_asf(asf)
|
||||
if not ram_destination == RAM_DEST_POW:
|
||||
self.set_pow(pow_)
|
||||
delay_mu(int64(self.sync_data.io_update_delay))
|
||||
self.cpld.io_update.pulse_mu(8) # assumes 8 mu > t_SYN_CCLK
|
||||
at_mu(now_mu() & ~7) # clear fine TSC again
|
||||
|
@ -530,8 +593,30 @@ class AD9910:
|
|||
return pow_
|
||||
|
||||
@kernel
|
||||
def set_profile_ram(self, start, end, step=1, profile=0, nodwell_high=0,
|
||||
zero_crossing=0, mode=1):
|
||||
def get_mu(self, profile: TInt32 = DEFAULT_PROFILE
|
||||
) -> TTuple([TInt32, TInt32, TInt32]):
|
||||
"""Get the frequency tuning word, phase offset word,
|
||||
and amplitude scale factor.
|
||||
|
||||
.. seealso:: :meth:`get`
|
||||
|
||||
:param profile: Profile number to get (0-7, default: 7)
|
||||
:return: A tuple ``(ftw, pow, asf)``
|
||||
"""
|
||||
|
||||
# Read data
|
||||
data = int64(self.read64(_AD9910_REG_PROFILE0 + profile))
|
||||
# Extract and return fields
|
||||
ftw = int32(data)
|
||||
pow_ = int32((data >> 32) & 0xffff)
|
||||
asf = int32((data >> 48) & 0x3fff)
|
||||
return ftw, pow_, asf
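A hedged readback example for the new accessor (assumed experiment code; MHz from artiq.language.units):

        ftw, pow_, asf = self.dds.get_mu(profile=7)
        assert ftw == self.dds.frequency_to_ftw(80 * MHz)   # verify what set_mu programmed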
|
||||
|
||||
@kernel
|
||||
def set_profile_ram(self, start: TInt32, end: TInt32, step: TInt32 = 1,
|
||||
profile: TInt32 = _DEFAULT_PROFILE_RAM,
|
||||
nodwell_high: TInt32 = 0, zero_crossing: TInt32 = 0,
|
||||
mode: TInt32 = 1):
|
||||
"""Set the RAM profile settings.
|
||||
|
||||
:param start: Profile start address in RAM.
|
||||
|
@ -555,57 +640,87 @@ class AD9910:
|
|||
self.write64(_AD9910_REG_PROFILE0 + profile, hi, lo)
|
||||
|
||||
@kernel
|
||||
def set_ftw(self, ftw):
|
||||
"""Set the value stored to the AD9910's frequency tuning word (FTW) register.
|
||||
def set_ftw(self, ftw: TInt32):
|
||||
"""Set the value stored to the AD9910's frequency tuning word (FTW)
|
||||
register.
|
||||
|
||||
:param ftw: Frequency tuning word to be stored, range: 0 to 0xffffffff.
|
||||
"""
|
||||
self.write32(_AD9910_REG_FTW, ftw)
|
||||
|
||||
@kernel
|
||||
def set_asf(self, asf):
|
||||
"""Set the value stored to the AD9910's amplitude scale factor (ASF) register.
|
||||
def set_asf(self, asf: TInt32):
|
||||
"""Set the value stored to the AD9910's amplitude scale factor (ASF)
|
||||
register.
|
||||
|
||||
:param asf: Amplitude scale factor to be stored, range: 0 to 0x3fff.
|
||||
"""
|
||||
self.write32(_AD9910_REG_ASF, asf << 2)
|
||||
|
||||
@kernel
|
||||
def set_pow(self, pow_):
|
||||
"""Set the value stored to the AD9910's phase offset word (POW) register.
|
||||
def set_pow(self, pow_: TInt32):
|
||||
"""Set the value stored to the AD9910's phase offset word (POW)
|
||||
register.
|
||||
|
||||
:param pow_: Phase offset word to be stored, range: 0 to 0xffff.
|
||||
"""
|
||||
self.write16(_AD9910_REG_POW, pow_)
|
||||
|
||||
@kernel
|
||||
def get_ftw(self) -> TInt32:
|
||||
"""Get the value stored to the AD9910's frequency tuning word (FTW)
|
||||
register.
|
||||
|
||||
:return: Frequency tuning word
|
||||
"""
|
||||
return self.read32(_AD9910_REG_FTW)
|
||||
|
||||
@kernel
|
||||
def get_asf(self) -> TInt32:
|
||||
"""Get the value stored to the AD9910's amplitude scale factor (ASF)
|
||||
register.
|
||||
|
||||
:return: Amplitude scale factor
|
||||
"""
|
||||
return self.read32(_AD9910_REG_ASF) >> 2
|
||||
|
||||
@kernel
|
||||
def get_pow(self) -> TInt32:
|
||||
"""Get the value stored to the AD9910's phase offset word (POW)
|
||||
register.
|
||||
|
||||
:return: Phase offset word
|
||||
"""
|
||||
return self.read16(_AD9910_REG_POW)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def frequency_to_ftw(self, frequency) -> TInt32:
|
||||
def frequency_to_ftw(self, frequency: TFloat) -> TInt32:
|
||||
"""Return the 32-bit frequency tuning word corresponding to the given
|
||||
frequency.
|
||||
"""
|
||||
return int32(round(self.ftw_per_hz*frequency))
|
||||
return int32(round(self.ftw_per_hz * frequency))
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def ftw_to_frequency(self, ftw):
|
||||
def ftw_to_frequency(self, ftw: TInt32) -> TFloat:
|
||||
"""Return the frequency corresponding to the given frequency tuning
|
||||
word.
|
||||
"""
|
||||
return ftw / self.ftw_per_hz
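# Worked example (illustrative, not part of this diff): with a 1 GHz SYSCLK,
# ftw_per_hz is 2**32 / 1e9, so a 100 MHz output corresponds to
# round(0.1 * 2**32) = 0x1999999a and converts back to ~100 MHz.
ftw_per_hz = (1 << 32) / 1e9          # assumes 1 GHz SYSCLK
ftw = int(round(ftw_per_hz * 100e6))  # 0x1999999a
frequency = ftw / ftw_per_hz          # ~100 MHz again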
@portable(flags={"fast-math"})
|
||||
def turns_to_pow(self, turns) -> TInt32:
|
||||
def turns_to_pow(self, turns: TFloat) -> TInt32:
|
||||
"""Return the 16-bit phase offset word corresponding to the given phase
|
||||
in turns."""
|
||||
return int32(round(turns*0x10000)) & int32(0xffff)
|
||||
return int32(round(turns * 0x10000)) & int32(0xffff)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def pow_to_turns(self, pow_):
|
||||
def pow_to_turns(self, pow_: TInt32) -> TFloat:
|
||||
"""Return the phase in turns corresponding to a given phase offset
|
||||
word."""
|
||||
return pow_/0x10000
|
||||
return pow_ / 0x10000
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def amplitude_to_asf(self, amplitude) -> TInt32:
|
||||
def amplitude_to_asf(self, amplitude: TFloat) -> TInt32:
|
||||
"""Return 14-bit amplitude scale factor corresponding to given
|
||||
fractional amplitude."""
|
||||
code = int32(round(amplitude * 0x3fff))
|
||||
|
@ -614,13 +729,13 @@ class AD9910:
|
|||
return code
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def asf_to_amplitude(self, asf):
|
||||
def asf_to_amplitude(self, asf: TInt32) -> TFloat:
|
||||
"""Return amplitude as a fraction of full scale corresponding to given
|
||||
amplitude scale factor."""
|
||||
return asf / float(0x3fff)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def frequency_to_ram(self, frequency, ram):
|
||||
def frequency_to_ram(self, frequency: TList(TFloat), ram: TList(TInt32)):
|
||||
"""Convert frequency values to RAM profile data.
|
||||
|
||||
To be used with :const:`RAM_DEST_FTW`.
|
||||
|
@ -633,7 +748,7 @@ class AD9910:
|
|||
ram[i] = self.frequency_to_ftw(frequency[i])
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def turns_to_ram(self, turns, ram):
|
||||
def turns_to_ram(self, turns: TList(TFloat), ram: TList(TInt32)):
|
||||
"""Convert phase values to RAM profile data.
|
||||
|
||||
To be used with :const:`RAM_DEST_POW`.
|
||||
|
@ -646,7 +761,7 @@ class AD9910:
|
|||
ram[i] = self.turns_to_pow(turns[i]) << 16
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def amplitude_to_ram(self, amplitude, ram):
|
||||
def amplitude_to_ram(self, amplitude: TList(TFloat), ram: TList(TInt32)):
|
||||
"""Convert amplitude values to RAM profile data.
|
||||
|
||||
To be used with :const:`RAM_DEST_ASF`.
|
||||
|
@ -659,7 +774,8 @@ class AD9910:
|
|||
ram[i] = self.amplitude_to_asf(amplitude[i]) << 18
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def turns_amplitude_to_ram(self, turns, amplitude, ram):
|
||||
def turns_amplitude_to_ram(self, turns: TList(TFloat),
|
||||
amplitude: TList(TFloat), ram: TList(TInt32)):
|
||||
"""Convert phase and amplitude values to RAM profile data.
|
||||
|
||||
To be used with :const:`RAM_DEST_POWASF`.
|
||||
|
@ -674,33 +790,65 @@ class AD9910:
|
|||
self.amplitude_to_asf(amplitude[i]) << 2)
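# Host-side sketch (illustrative, not part of this diff): preparing a combined
# POW/ASF RAM profile with the helper above; `dds` is an assumed AD9910
# channel device. Each resulting word carries POW in bits 31:16 and ASF in
# bits 15:2, matching RAM_DEST_POWASF.
turns = [i / 16 for i in range(16)]
amplitude = [1.0] * 16
ram = [0] * 16
dds.turns_amplitude_to_ram(turns, amplitude, ram)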
@kernel
|
||||
def set_frequency(self, frequency):
|
||||
"""Set the value stored to the AD9910's frequency tuning word (FTW) register.
|
||||
def set_frequency(self, frequency: TFloat):
|
||||
"""Set the value stored to the AD9910's frequency tuning word (FTW)
|
||||
register.
|
||||
|
||||
:param frequency: frequency to be stored, in Hz.
|
||||
"""
|
||||
return self.set_ftw(self.frequency_to_ftw(frequency))
|
||||
self.set_ftw(self.frequency_to_ftw(frequency))
|
||||
|
||||
@kernel
|
||||
def set_amplitude(self, amplitude):
|
||||
"""Set the value stored to the AD9910's amplitude scale factor (ASF) register.
|
||||
def set_amplitude(self, amplitude: TFloat):
|
||||
"""Set the value stored to the AD9910's amplitude scale factor (ASF)
|
||||
register.
|
||||
|
||||
:param amplitude: amplitude to be stored, in units of full scale.
|
||||
"""
|
||||
return self.set_asf(self.amplitude_to_asf(amplitude))
|
||||
self.set_asf(self.amplitude_to_asf(amplitude))
|
||||
|
||||
@kernel
|
||||
def set_phase(self, turns):
|
||||
"""Set the value stored to the AD9910's phase offset word (POW) register.
|
||||
def set_phase(self, turns: TFloat):
|
||||
"""Set the value stored to the AD9910's phase offset word (POW)
|
||||
register.
|
||||
|
||||
:param turns: phase offset to be stored, in turns.
|
||||
"""
|
||||
return self.set_pow(self.turns_to_pow(turns))
|
||||
self.set_pow(self.turns_to_pow(turns))
|
||||
|
||||
@kernel
|
||||
def set(self, frequency, phase=0.0, amplitude=1.0,
|
||||
phase_mode=_PHASE_MODE_DEFAULT, ref_time_mu=int64(-1), profile=0):
|
||||
"""Set profile 0 data in SI units.
|
||||
def get_frequency(self) -> TFloat:
|
||||
"""Get the value stored to the AD9910's frequency tuning word (FTW)
|
||||
register.
|
||||
|
||||
:return: frequency in Hz.
|
||||
"""
|
||||
return self.ftw_to_frequency(self.get_ftw())
|
||||
|
||||
@kernel
|
||||
def get_amplitude(self) -> TFloat:
|
||||
"""Get the value stored to the AD9910's amplitude scale factor (ASF)
|
||||
register.
|
||||
|
||||
:return: amplitude in units of full scale.
|
||||
"""
|
||||
return self.asf_to_amplitude(self.get_asf())
|
||||
|
||||
@kernel
|
||||
def get_phase(self) -> TFloat:
|
||||
"""Get the value stored to the AD9910's phase offset word (POW)
|
||||
register.
|
||||
|
||||
:return: phase offset in turns.
|
||||
"""
|
||||
return self.pow_to_turns(self.get_pow())
|
||||
|
||||
@kernel
|
||||
def set(self, frequency: TFloat = 0.0, phase: TFloat = 0.0,
|
||||
amplitude: TFloat = 1.0, phase_mode: TInt32 = _PHASE_MODE_DEFAULT,
|
||||
ref_time_mu: TInt64 = int64(-1), profile: TInt32 = DEFAULT_PROFILE,
|
||||
ram_destination: TInt32 = -1) -> TFloat:
|
||||
"""Set DDS data in SI units.
|
||||
|
||||
.. seealso:: :meth:`set_mu`
|
||||
|
||||
|
@ -709,16 +857,34 @@ class AD9910:
|
|||
:param amplitude: Amplitude in units of full scale
|
||||
:param phase_mode: Phase mode constant
|
||||
:param ref_time_mu: Fiducial time stamp in machine units
|
||||
:param profile: Profile to affect
|
||||
:param profile: Single tone profile to affect.
|
||||
:param ram_destination: RAM destination.
|
||||
:return: Resulting phase offset in turns
|
||||
"""
|
||||
return self.pow_to_turns(self.set_mu(
|
||||
self.frequency_to_ftw(frequency), self.turns_to_pow(phase),
|
||||
self.amplitude_to_asf(amplitude), phase_mode, ref_time_mu,
|
||||
profile))
|
||||
profile, ram_destination))
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, att):
|
||||
def get(self, profile: TInt32 = DEFAULT_PROFILE
|
||||
) -> TTuple([TFloat, TFloat, TFloat]):
|
||||
"""Get the frequency, phase, and amplitude.
|
||||
|
||||
.. seealso:: :meth:`get_mu`
|
||||
|
||||
:param profile: Profile number to get (0-7, default: 7)
|
||||
:return: A tuple ``(frequency, phase, amplitude)``
|
||||
"""
|
||||
|
||||
# Get values
|
||||
ftw, pow_, asf = self.get_mu(profile)
|
||||
# Convert and return
|
||||
return (self.ftw_to_frequency(ftw), self.pow_to_turns(pow_),
|
||||
self.asf_to_amplitude(asf))
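# Usage sketch (illustrative, not part of this diff): set and read back a
# single tone profile; `self.dds` is an assumed AD9910 channel device, `MHz`
# comes from artiq.language.units, and break_realtime() provides slack for
# the SPI read-back.
@kernel
def check_profile(self):
    self.core.break_realtime()
    self.dds.set(frequency=80*MHz, amplitude=0.5, profile=7)
    freq, phase, amp = self.dds.get(profile=7)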
@kernel
|
||||
def set_att_mu(self, att: TInt32):
|
||||
"""Set digital step attenuator in machine units.
|
||||
|
||||
This method will write the attenuator settings of all four channels.
|
||||
|
@ -730,7 +896,7 @@ class AD9910:
|
|||
self.cpld.set_att_mu(self.chip_select - 4, att)
|
||||
|
||||
@kernel
|
||||
def set_att(self, att):
|
||||
def set_att(self, att: TFloat):
|
||||
"""Set digital step attenuator in SI units.
|
||||
|
||||
This method will write the attenuator settings of all four channels.
|
||||
|
@ -742,7 +908,27 @@ class AD9910:
|
|||
self.cpld.set_att(self.chip_select - 4, att)
|
||||
|
||||
@kernel
|
||||
def cfg_sw(self, state):
|
||||
def get_att_mu(self) -> TInt32:
|
||||
"""Get digital step attenuator value in machine units.
|
||||
|
||||
.. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att_mu`
|
||||
|
||||
:return: Attenuation setting, 8 bit digital.
|
||||
"""
|
||||
return self.cpld.get_channel_att_mu(self.chip_select - 4)
|
||||
|
||||
@kernel
|
||||
def get_att(self) -> TFloat:
|
||||
"""Get digital step attenuator value in SI units.
|
||||
|
||||
.. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att`
|
||||
|
||||
:return: Attenuation in dB.
|
||||
"""
|
||||
return self.cpld.get_channel_att(self.chip_select - 4)
|
||||
|
||||
@kernel
|
||||
def cfg_sw(self, state: TBool):
|
||||
"""Set CPLD CFG RF switch state. The RF switch is controlled by the
|
||||
logical or of the CPLD configuration shift register
|
||||
RF switch bit and the SW TTL line (if used).
|
||||
|
@ -752,20 +938,26 @@ class AD9910:
|
|||
self.cpld.cfg_sw(self.chip_select - 4, state)
|
||||
|
||||
@kernel
|
||||
def set_sync(self, in_delay, window):
|
||||
def set_sync(self,
|
||||
in_delay: TInt32,
|
||||
window: TInt32,
|
||||
en_sync_gen: TInt32 = 0):
|
||||
"""Set the relevant parameters in the multi device synchronization
|
||||
register. See the AD9910 datasheet for details. The SYNC clock
|
||||
generator preset value is set to zero, and the SYNC_OUT generator is
|
||||
disabled.
|
||||
disabled by default.
|
||||
|
||||
:param in_delay: SYNC_IN delay tap (0-31) in steps of ~75ps
|
||||
:param window: Symmetric SYNC_IN validation window (0-15) in
|
||||
steps of ~75ps for both hold and setup margin.
|
||||
:param en_sync_gen: Whether to enable the DDS-internal sync generator
|
||||
(SYNC_OUT, cf. sync_sel == 1). Should be left off for the normal
|
||||
use case, where the SYNC clock is supplied by the core device.
|
||||
"""
|
||||
self.write32(_AD9910_REG_SYNC,
|
||||
(window << 28) | # SYNC S/H validation delay
|
||||
(1 << 27) | # SYNC receiver enable
|
||||
(0 << 26) | # SYNC generator disable
|
||||
(en_sync_gen << 26) | # SYNC generator enable
|
||||
(0 << 25) | # SYNC generator SYS rising edge
|
||||
(0 << 18) | # SYNC preset
|
||||
(0 << 11) | # SYNC output delay
|
||||
|
@ -781,13 +973,15 @@ class AD9910:
|
|||
|
||||
Also modifies CFR2.
|
||||
"""
|
||||
self.write32(_AD9910_REG_CFR2, 0x01010020) # clear SMP_ERR
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.write32(_AD9910_REG_CFR2, 0x01010000) # enable SMP_ERR
|
||||
self.cpld.io_update.pulse(1*us)
|
||||
self.set_cfr2(sync_validation_disable=1) # clear SMP_ERR
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
delay(10 * us) # slack
|
||||
self.set_cfr2(sync_validation_disable=0) # enable SMP_ERR
|
||||
self.cpld.io_update.pulse(1 * us)
|
||||
|
||||
@kernel
|
||||
def tune_sync_delay(self, search_seed=15):
|
||||
def tune_sync_delay(self,
|
||||
search_seed: TInt32 = 15) -> TTuple([TInt32, TInt32]):
|
||||
"""Find a stable SYNC_IN delay.
|
||||
|
||||
This method first locates a valid SYNC_IN delay at zero validation
|
||||
|
@ -812,7 +1006,7 @@ class AD9910:
|
|||
margin = 1 # 1*75ps setup and hold
|
||||
for window in range(16):
|
||||
next_seed = -1
|
||||
for in_delay in range(search_span - 2*window):
|
||||
for in_delay in range(search_span - 2 * window):
|
||||
# alternate search direction around search_seed
|
||||
if in_delay & 1:
|
||||
in_delay = -in_delay
|
||||
|
@ -822,9 +1016,9 @@ class AD9910:
|
|||
self.set_sync(in_delay, window)
|
||||
self.clear_smp_err()
|
||||
# integrate SMP_ERR statistics for a few hundred cycles
|
||||
delay(100*us)
|
||||
delay(100 * us)
|
||||
err = urukul_sta_smp_err(self.cpld.sta_read())
|
||||
delay(100*us) # slack
|
||||
delay(100 * us) # slack
|
||||
if not (err >> (self.chip_select - 4)) & 1:
|
||||
next_seed = in_delay
|
||||
break
|
||||
|
@ -836,14 +1030,15 @@ class AD9910:
|
|||
window = max(min_window, window - 1 - margin)
|
||||
self.set_sync(search_seed, window)
|
||||
self.clear_smp_err()
|
||||
delay(100*us) # slack
|
||||
delay(100 * us) # slack
|
||||
return search_seed, window
|
||||
else:
|
||||
break
|
||||
raise ValueError("no valid window/delay")
|
||||
|
||||
@kernel
|
||||
def measure_io_update_alignment(self, delay_start, delay_stop):
|
||||
def measure_io_update_alignment(self, delay_start: TInt64,
|
||||
delay_stop: TInt64) -> TInt32:
|
||||
"""Use the digital ramp generator to locate the alignment between
|
||||
IO_UPDATE and SYNC_CLK.
|
||||
|
||||
|
@ -859,7 +1054,7 @@ class AD9910:
|
|||
# set up DRG
|
||||
self.set_cfr1(drg_load_lrr=1, drg_autoclear=1)
|
||||
# DRG -> FTW, DRG enable
|
||||
self.write32(_AD9910_REG_CFR2, 0x01090000)
|
||||
self.set_cfr2(drg_enable=1)
|
||||
# no limits
|
||||
self.write64(_AD9910_REG_RAMP_LIMIT, -1, 0)
|
||||
# DRCTL=0, dt=1 t_SYNC_CLK
|
||||
|
@ -878,14 +1073,14 @@ class AD9910:
|
|||
at_mu(t + 0x1000 + delay_stop)
|
||||
self.cpld.io_update.pulse_mu(16 - delay_stop) # realign
|
||||
ftw = self.read32(_AD9910_REG_FTW) # read out effective FTW
|
||||
delay(100*us) # slack
|
||||
delay(100 * us) # slack
|
||||
# disable DRG
|
||||
self.write32(_AD9910_REG_CFR2, 0x01010000)
|
||||
self.set_cfr2(drg_enable=0)
|
||||
self.cpld.io_update.pulse_mu(8)
|
||||
return ftw & 1
|
||||
|
||||
@kernel
|
||||
def tune_io_update_delay(self):
|
||||
def tune_io_update_delay(self) -> TInt32:
|
||||
"""Find a stable IO_UPDATE delay alignment.
|
||||
|
||||
Scan through increasing IO_UPDATE delays until a delay is found that
|
||||
|
@ -922,5 +1117,5 @@ class AD9910:
|
|||
"no clear IO_UPDATE-SYNC_CLK alignment edge found")
|
||||
else:
|
||||
# the good delay is period//2 after the edge
|
||||
return (i + 1 + period//2) & (period - 1)
|
||||
return (i + 1 + period // 2) & (period - 1)
|
||||
raise ValueError("no IO_UPDATE-SYNC_CLK alignment edge found")
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
from numpy import int32, int64
|
||||
|
||||
from artiq.language.types import TInt32, TInt64
|
||||
from artiq.language.types import TInt32, TInt64, TFloat, TTuple, TBool
|
||||
from artiq.language.core import kernel, delay, portable
|
||||
from artiq.language.units import ms, us, ns
|
||||
from artiq.coredevice.ad9912_reg import *
|
||||
|
@ -25,11 +25,14 @@ class AD9912:
|
|||
f_ref/clk_div*pll_n where f_ref is the reference frequency and clk_div
|
||||
is the reference clock divider (both set in the parent Urukul CPLD
|
||||
instance).
|
||||
:param pll_en: PLL enable bit, set to 0 to bypass PLL (default: 1).
|
||||
Note that when bypassing the PLL the red front panel LED may remain on.
|
||||
"""
|
||||
kernel_invariants = {"chip_select", "cpld", "core", "bus", "ftw_per_hz"}
|
||||
|
||||
def __init__(self, dmgr, chip_select, cpld_device, sw_device=None,
|
||||
pll_n=10):
|
||||
pll_n=10, pll_en=1):
|
||||
self.kernel_invariants = {"cpld", "core", "bus", "chip_select",
|
||||
"pll_n", "pll_en", "ftw_per_hz"}
|
||||
self.cpld = dmgr.get(cpld_device)
|
||||
self.core = self.cpld.core
|
||||
self.bus = self.cpld.bus
|
||||
|
@ -38,13 +41,17 @@ class AD9912:
|
|||
if sw_device:
|
||||
self.sw = dmgr.get(sw_device)
|
||||
self.kernel_invariants.add("sw")
|
||||
self.pll_en = pll_en
|
||||
self.pll_n = pll_n
|
||||
sysclk = self.cpld.refclk/[1, 1, 2, 4][self.cpld.clk_div]*pll_n
|
||||
if pll_en:
|
||||
sysclk = self.cpld.refclk / [1, 1, 2, 4][self.cpld.clk_div] * pll_n
|
||||
else:
|
||||
sysclk = self.cpld.refclk
|
||||
assert sysclk <= 1e9
|
||||
self.ftw_per_hz = 1/sysclk*(int64(1) << 48)
|
||||
self.ftw_per_hz = 1 / sysclk * (int64(1) << 48)
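# Worked example (not part of this diff): with refclk = 125 MHz, clk_div = 0
# (divide by 1) and pll_n = 8, SYSCLK is 1 GHz and one Hz corresponds to
# 2**48 / 1e9 FTW LSBs; with pll_en = 0 the reference is used directly.
sysclk = 125e6 / 1 * 8               # 1 GHz, PLL enabled
ftw_per_hz = (1 << 48) / sysclk      # ~281475 LSB per Hz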
@kernel
|
||||
def write(self, addr, data, length):
|
||||
def write(self, addr: TInt32, data: TInt32, length: TInt32):
|
||||
"""Variable length write to a register.
|
||||
Up to 4 bytes.
|
||||
|
||||
|
@ -55,14 +62,14 @@ class AD9912:
|
|||
assert length > 0
|
||||
assert length <= 4
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((addr | ((length - 1) << 13)) << 16)
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, length*8,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write(data << (32 - length*8))
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, length * 8,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write(data << (32 - length * 8))
|
||||
|
||||
@kernel
|
||||
def read(self, addr, length):
|
||||
def read(self, addr: TInt32, length: TInt32) -> TInt32:
|
||||
"""Variable length read from a register.
|
||||
Up to 4 bytes.
|
||||
|
||||
|
@ -73,15 +80,15 @@ class AD9912:
|
|||
assert length > 0
|
||||
assert length <= 4
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((addr | ((length - 1) << 13) | 0x8000) << 16)
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END
|
||||
| spi.SPI_INPUT, length*8,
|
||||
urukul.SPIT_DDS_RD, self.chip_select)
|
||||
| spi.SPI_INPUT, length * 8,
|
||||
urukul.SPIT_DDS_RD, self.chip_select)
|
||||
self.bus.write(0)
|
||||
data = self.bus.read()
|
||||
if length < 4:
|
||||
data &= (1 << (length*8)) - 1
|
||||
data &= (1 << (length * 8)) - 1
|
||||
return data
|
||||
|
||||
@kernel
|
||||
|
@ -94,24 +101,26 @@ class AD9912:
|
|||
"""
|
||||
# SPI mode
|
||||
self.write(AD9912_SER_CONF, 0x99, length=1)
|
||||
self.cpld.io_update.pulse(2*us)
|
||||
self.cpld.io_update.pulse(2 * us)
|
||||
# Verify chip ID and presence
|
||||
prodid = self.read(AD9912_PRODIDH, length=2)
|
||||
if (prodid != 0x1982) and (prodid != 0x1902):
|
||||
raise ValueError("Urukul AD9912 product id mismatch")
|
||||
delay(50*us)
|
||||
delay(50 * us)
|
||||
# HSTL power down, CMOS power down
|
||||
self.write(AD9912_PWRCNTRL1, 0x80, length=1)
|
||||
self.cpld.io_update.pulse(2*us)
|
||||
self.write(AD9912_N_DIV, self.pll_n//2 - 2, length=1)
|
||||
self.cpld.io_update.pulse(2*us)
|
||||
# I_cp = 375 µA, VCO high range
|
||||
self.write(AD9912_PLLCFG, 0b00000101, length=1)
|
||||
self.cpld.io_update.pulse(2*us)
|
||||
delay(1*ms)
|
||||
pwrcntrl1 = 0x80 | ((~self.pll_en & 1) << 4)
|
||||
self.write(AD9912_PWRCNTRL1, pwrcntrl1, length=1)
|
||||
self.cpld.io_update.pulse(2 * us)
|
||||
if self.pll_en:
|
||||
self.write(AD9912_N_DIV, self.pll_n // 2 - 2, length=1)
|
||||
self.cpld.io_update.pulse(2 * us)
|
||||
# I_cp = 375 µA, VCO high range
|
||||
self.write(AD9912_PLLCFG, 0b00000101, length=1)
|
||||
self.cpld.io_update.pulse(2 * us)
|
||||
delay(1 * ms)
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, att):
|
||||
def set_att_mu(self, att: TInt32):
|
||||
"""Set digital step attenuator in machine units.
|
||||
|
||||
This method will write the attenuator settings of all four channels.
|
||||
|
@ -123,7 +132,7 @@ class AD9912:
|
|||
self.cpld.set_att_mu(self.chip_select - 4, att)
|
||||
|
||||
@kernel
|
||||
def set_att(self, att):
|
||||
def set_att(self, att: TFloat):
|
||||
"""Set digital step attenuator in SI units.
|
||||
|
||||
This method will write the attenuator settings of all four channels.
|
||||
|
@ -135,62 +144,125 @@ class AD9912:
|
|||
self.cpld.set_att(self.chip_select - 4, att)
|
||||
|
||||
@kernel
|
||||
def set_mu(self, ftw, pow):
|
||||
def get_att_mu(self) -> TInt32:
|
||||
"""Get digital step attenuator value in machine units.
|
||||
|
||||
.. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att_mu`
|
||||
|
||||
:return: Attenuation setting, 8 bit digital.
|
||||
"""
|
||||
return self.cpld.get_channel_att_mu(self.chip_select - 4)
|
||||
|
||||
@kernel
|
||||
def get_att(self) -> TFloat:
|
||||
"""Get digital step attenuator value in SI units.
|
||||
|
||||
.. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att`
|
||||
|
||||
:return: Attenuation in dB.
|
||||
"""
|
||||
return self.cpld.get_channel_att(self.chip_select - 4)
|
||||
|
||||
@kernel
|
||||
def set_mu(self, ftw: TInt64, pow_: TInt32 = 0):
|
||||
"""Set profile 0 data in machine units.
|
||||
|
||||
After the SPI transfer, the shared IO update pin is pulsed to
|
||||
activate the data.
|
||||
|
||||
:param ftw: Frequency tuning word: 48 bit unsigned.
|
||||
:param pow: Phase tuning word: 16 bit unsigned.
|
||||
:param pow_: Phase tuning word: 16 bit unsigned.
|
||||
"""
|
||||
# streaming transfer of FTW and POW
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((AD9912_POW1 << 16) | (3 << 29))
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG, 32,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((pow << 16) | (int32(ftw >> 32) & 0xffff))
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write((pow_ << 16) | (int32(ftw >> 32) & 0xffff))
|
||||
self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32,
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
urukul.SPIT_DDS_WR, self.chip_select)
|
||||
self.bus.write(int32(ftw))
|
||||
self.cpld.io_update.pulse(10*ns)
|
||||
self.cpld.io_update.pulse(10 * ns)
|
||||
|
||||
@kernel
|
||||
def get_mu(self) -> TTuple([TInt64, TInt32]):
|
||||
"""Get the frequency tuning word and phase offset word.
|
||||
|
||||
.. seealso:: :meth:`get`
|
||||
|
||||
:return: A tuple ``(ftw, pow)``.
|
||||
"""
|
||||
|
||||
# Read data
|
||||
high = self.read(AD9912_POW1, 4)
|
||||
self.core.break_realtime() # Regain slack to perform second read
|
||||
low = self.read(AD9912_FTW3, 4)
|
||||
# Extract and return fields
|
||||
ftw = (int64(high & 0xffff) << 32) | (int64(low) & int64(0xffffffff))
|
||||
pow_ = (high >> 16) & 0x3fff
|
||||
return ftw, pow_
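# Illustrative check (not part of this diff) of the reconstruction above: the
# POW1 read supplies the phase word and FTW bits 47:32, the FTW3 read the low
# 32 FTW bits. Register values here are made up.
high, low = 0x20001234, 0x89abcdef                 # assumed raw reads
ftw = ((high & 0xffff) << 32) | (low & 0xffffffff) # 0x123489abcdef
pow_ = (high >> 16) & 0x3fff                       # 0x2000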
@portable(flags={"fast-math"})
|
||||
def frequency_to_ftw(self, frequency) -> TInt64:
|
||||
def frequency_to_ftw(self, frequency: TFloat) -> TInt64:
|
||||
"""Returns the 48-bit frequency tuning word corresponding to the given
|
||||
frequency.
|
||||
"""
|
||||
return int64(round(self.ftw_per_hz*frequency)) & ((int64(1) << 48) - 1)
|
||||
return int64(round(self.ftw_per_hz * frequency)) & (
|
||||
(int64(1) << 48) - 1)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def ftw_to_frequency(self, ftw):
|
||||
def ftw_to_frequency(self, ftw: TInt64) -> TFloat:
|
||||
"""Returns the frequency corresponding to the given
|
||||
frequency tuning word.
|
||||
"""
|
||||
return ftw/self.ftw_per_hz
|
||||
return ftw / self.ftw_per_hz
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def turns_to_pow(self, phase) -> TInt32:
|
||||
def turns_to_pow(self, phase: TFloat) -> TInt32:
|
||||
"""Returns the 16-bit phase offset word corresponding to the given
|
||||
phase.
|
||||
"""
|
||||
return int32(round((1 << 14)*phase)) & 0xffff
|
||||
return int32(round((1 << 14) * phase)) & 0xffff
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def pow_to_turns(self, pow_: TInt32) -> TFloat:
|
||||
"""Return the phase in turns corresponding to a given phase offset
|
||||
word.
|
||||
|
||||
:param pow_: Phase offset word.
|
||||
:return: Phase in turns.
|
||||
"""
|
||||
return pow_ / (1 << 14)
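# Worked example (not part of this diff): the AD9912 phase word has 2**14
# steps per turn, so a quarter turn maps to 0x1000 and converts back exactly.
pow_ = int(round((1 << 14) * 0.25)) & 0xffff   # 0x1000
turns = pow_ / (1 << 14)                       # 0.25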
@kernel
|
||||
def set(self, frequency, phase=0.0):
|
||||
def set(self, frequency: TFloat, phase: TFloat = 0.0):
|
||||
"""Set profile 0 data in SI units.
|
||||
|
||||
.. seealso:: :meth:`set_mu`
|
||||
|
||||
:param ftw: Frequency in Hz
|
||||
:param pow: Phase tuning word in turns
|
||||
:param frequency: Frequency in Hz
|
||||
:param phase: Phase tuning word in turns
|
||||
"""
|
||||
self.set_mu(self.frequency_to_ftw(frequency),
|
||||
self.turns_to_pow(phase))
|
||||
self.turns_to_pow(phase))
|
||||
|
||||
@kernel
|
||||
def cfg_sw(self, state):
|
||||
def get(self) -> TTuple([TFloat, TFloat]):
|
||||
"""Get the frequency and phase.
|
||||
|
||||
.. seealso:: :meth:`get_mu`
|
||||
|
||||
:return: A tuple ``(frequency, phase)``.
|
||||
"""
|
||||
|
||||
# Get values
|
||||
ftw, pow_ = self.get_mu()
|
||||
# Convert and return
|
||||
return self.ftw_to_frequency(ftw), self.pow_to_turns(pow_)
|
||||
|
||||
@kernel
|
||||
def cfg_sw(self, state: TBool):
|
||||
"""Set CPLD CFG RF switch state. The RF switch is controlled by the
|
||||
logical or of the CPLD configuration shift register
|
||||
RF switch bit and the SW TTL line (if used).
|
||||
|
|
|
@ -80,6 +80,13 @@ class AD9914:
|
|||
self.set_x_duration_mu = 7 * self.write_duration_mu
|
||||
self.exit_x_duration_mu = 3 * self.write_duration_mu
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(bus_channel, channel, **kwargs):
|
||||
# return only first entry, as there are several devices with the same RTIO channel
|
||||
if channel == 0:
|
||||
return [(bus_channel, None)]
|
||||
return []
|
||||
|
||||
@kernel
|
||||
def write(self, addr, data):
|
||||
rtio_output((self.bus_channel << 8) | addr, data)
|
||||
|
|
|
@ -73,6 +73,10 @@ class ADF5356:
|
|||
|
||||
self._init_registers()
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(**kwargs):
|
||||
return []
|
||||
|
||||
@kernel
|
||||
def init(self, blind=False):
|
||||
"""
|
||||
|
@ -102,6 +106,18 @@ class ADF5356:
|
|||
else:
|
||||
self.sync()
|
||||
|
||||
@kernel
|
||||
def set_att(self, att):
|
||||
"""Set digital step attenuator in SI units.
|
||||
|
||||
This method will write the attenuator settings of the channel.
|
||||
|
||||
.. seealso:: :meth:`artiq.coredevice.mirny.Mirny.set_att`
|
||||
|
||||
:param att: Attenuation in dB.
|
||||
"""
|
||||
self.cpld.set_att(self.channel, att)
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, att):
|
||||
"""Set digital step attenuator in machine units.
|
||||
|
@ -236,6 +252,7 @@ class ADF5356:
|
|||
Write all registers to the device. Attempts to lock the PLL.
|
||||
"""
|
||||
f_pfd = self.f_pfd()
|
||||
delay(200 * us) # Slack
|
||||
|
||||
if f_pfd <= 75.0 * MHz:
|
||||
for i in range(13, 0, -1):
|
||||
|
@ -249,6 +266,7 @@ class ADF5356:
|
|||
n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb) = calculate_pll(
|
||||
self.f_vco(), f_pfd >> 1
|
||||
)
|
||||
delay(200 * us) # Slack
|
||||
|
||||
self.write(
|
||||
13
|
||||
|
|
|
@ -0,0 +1,185 @@
|
|||
from artiq.language.core import kernel, portable, delay
from artiq.language.units import us
|
||||
|
||||
from numpy import int32
|
||||
|
||||
|
||||
# almazny-specific data
|
||||
ALMAZNY_LEGACY_REG_BASE = 0x0C
|
||||
ALMAZNY_LEGACY_OE_SHIFT = 12
|
||||
|
||||
# higher SPI write divider to match almazny shift register timing
|
||||
# min SER time before SRCLK rise = 125ns
|
||||
# -> div=32 gives 125ns for data before clock rise
|
||||
# works at faster dividers too but could be less reliable
|
||||
ALMAZNY_LEGACY_SPIT_WR = 32
|
||||
|
||||
|
||||
class AlmaznyLegacy:
|
||||
"""
|
||||
Almazny (High frequency mezzanine board for Mirny)
|
||||
|
||||
This applies to Almazny hardware v1.1 and earlier.
|
||||
Use :class:`artiq.coredevice.almazny.AlmaznyChannel` for Almazny v1.2 and later.
|
||||
|
||||
:param host_mirny: Mirny device Almazny is connected to
|
||||
"""
|
||||
|
||||
def __init__(self, dmgr, host_mirny):
|
||||
self.mirny_cpld = dmgr.get(host_mirny)
|
||||
self.att_mu = [0x3f] * 4
|
||||
self.channel_sw = [0] * 4
|
||||
self.output_enable = False
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
self.output_toggle(self.output_enable)
|
||||
|
||||
@kernel
|
||||
def att_to_mu(self, att):
|
||||
"""
|
||||
Convert an attenuator setting in dB to machine units.
|
||||
|
||||
:param att: attenuator setting in dB [0-31.5]
|
||||
:return: attenuator setting in machine units
|
||||
"""
|
||||
mu = round(att * 2.0)
|
||||
if mu > 63 or mu < 0:
|
||||
raise ValueError("Invalid Almazny attenuator settings!")
|
||||
return mu
|
||||
|
||||
@kernel
|
||||
def mu_to_att(self, att_mu):
|
||||
"""
|
||||
Convert a digital attenuator setting to dB.
|
||||
|
||||
:param att_mu: attenuator setting in machine units
|
||||
:return: attenuator setting in dB
|
||||
"""
|
||||
return att_mu / 2
|
||||
|
||||
@kernel
|
||||
def set_att(self, channel, att, rf_switch=True):
|
||||
"""
|
||||
Sets attenuators on chosen shift register (channel).
|
||||
|
||||
:param channel: index of the register [0-3]
|
||||
:param att: attenuation setting in dB [0-31.5]
|
||||
:param rf_switch: rf switch (bool)
|
||||
"""
|
||||
self.set_att_mu(channel, self.att_to_mu(att), rf_switch)
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, channel, att_mu, rf_switch=True):
|
||||
"""
|
||||
Sets attenuators on chosen shift register (channel).
|
||||
|
||||
:param channel: index of the register [0-3]
|
||||
:param att_mu: attenuation setting in machine units [0-63]
|
||||
:param rf_switch: rf switch (bool)
|
||||
"""
|
||||
self.channel_sw[channel] = 1 if rf_switch else 0
|
||||
self.att_mu[channel] = att_mu
|
||||
self._update_register(channel)
|
||||
|
||||
@kernel
|
||||
def output_toggle(self, oe):
|
||||
"""
|
||||
Toggles output on all shift registers on or off.
|
||||
|
||||
:param oe: toggle output enable (bool)
|
||||
"""
|
||||
self.output_enable = oe
|
||||
cfg_reg = self.mirny_cpld.read_reg(1)
|
||||
en = 1 if self.output_enable else 0
|
||||
delay(100 * us)
|
||||
new_reg = (en << ALMAZNY_LEGACY_OE_SHIFT) | (cfg_reg & 0x3FF)
|
||||
self.mirny_cpld.write_reg(1, new_reg)
|
||||
delay(100 * us)
|
||||
|
||||
@kernel
|
||||
def _flip_mu_bits(self, mu):
|
||||
# in this form MSB is actually 0.5dB attenuator
|
||||
# unnatural for users, so we flip the six bits
|
||||
return (((mu & 0x01) << 5)
|
||||
| ((mu & 0x02) << 3)
|
||||
| ((mu & 0x04) << 1)
|
||||
| ((mu & 0x08) >> 1)
|
||||
| ((mu & 0x10) >> 3)
|
||||
| ((mu & 0x20) >> 5))
|
||||
|
||||
@kernel
|
||||
def _update_register(self, ch):
|
||||
self.mirny_cpld.write_ext(
|
||||
ALMAZNY_LEGACY_REG_BASE + ch,
|
||||
8,
|
||||
self._flip_mu_bits(self.att_mu[ch]) | (self.channel_sw[ch] << 6),
|
||||
ALMAZNY_LEGACY_SPIT_WR
|
||||
)
|
||||
delay(100 * us)
|
||||
|
||||
@kernel
|
||||
def _update_all_registers(self):
|
||||
for i in range(4):
|
||||
self._update_register(i)
|
||||
|
||||
|
||||
class AlmaznyChannel:
|
||||
"""
|
||||
One Almazny channel
|
||||
Almazny is a mezzanine for the Quad PLL RF source Mirny that exposes and
|
||||
controls the frequency-doubled outputs.
|
||||
This driver requires Almazny hardware revision v1.2 or later
|
||||
and Mirny CPLD gateware v0.3 or later.
|
||||
Use :class:`artiq.coredevice.almazny.AlmaznyLegacy` for Almazny hardware v1.1 and earlier.
|
||||
|
||||
:param host_mirny: Mirny CPLD device name
|
||||
:param channel: channel index (0-3)
|
||||
"""
|
||||
|
||||
def __init__(self, dmgr, host_mirny, channel):
|
||||
self.channel = channel
|
||||
self.mirny_cpld = dmgr.get(host_mirny)
|
||||
|
||||
@portable
|
||||
def to_mu(self, att, enable, led):
|
||||
"""
|
||||
Convert an attenuation in dB, RF switch state and LED state to machine
|
||||
units.
|
||||
|
||||
:param att: attenuator setting in dB (0-31.5)
|
||||
:param enable: RF switch state (bool)
|
||||
:param led: LED state (bool)
|
||||
:return: channel setting in machine units
|
||||
"""
|
||||
mu = int32(round(att * 2.))
|
||||
if mu >= 64 or mu < 0:
|
||||
raise ValueError("Attenuation out of range")
|
||||
# unfortunate hardware design: bit reverse
|
||||
mu = ((mu & 0x15) << 1) | ((mu >> 1) & 0x15)
|
||||
mu = ((mu & 0x03) << 4) | (mu & 0x0c) | ((mu >> 4) & 0x03)
|
||||
if enable:
|
||||
mu |= 1 << 6
|
||||
if led:
|
||||
mu |= 1 << 7
|
||||
return mu
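# Illustrative check (not part of this diff): the two swap steps above reverse
# the six attenuator bits, so the 0.5 dB code 0b000001 ends up as 0b100000
# before the RF switch and LED bits are OR-ed in.
raw = 0b000001
raw = ((raw & 0x15) << 1) | ((raw >> 1) & 0x15)
raw = ((raw & 0x03) << 4) | (raw & 0x0c) | ((raw >> 4) & 0x03)
assert raw == 0b100000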
@kernel
|
||||
def set_mu(self, mu):
|
||||
"""
|
||||
Set channel state (machine units).
|
||||
|
||||
:param mu: channel state in machine units.
|
||||
"""
|
||||
self.mirny_cpld.write_ext(
|
||||
addr=0xc + self.channel, length=8, data=mu, ext_div=32)
|
||||
|
||||
@kernel
|
||||
def set(self, att, enable, led=False):
|
||||
"""
|
||||
Set attenuation, RF switch, and LED state (SI units).
|
||||
|
||||
:param att: attenuator setting in dB (0-31.5)
|
||||
:param enable: RF switch state (bool)
|
||||
:param led: LED state (bool)
|
||||
"""
|
||||
self.set_mu(self.to_mu(att, enable, led))
|
|
@ -1,79 +0,0 @@
|
|||
from artiq.language.core import kernel, portable, delay
|
||||
from artiq.language.units import us, ms
|
||||
from artiq.coredevice.shiftreg import ShiftReg
|
||||
|
||||
|
||||
@portable
|
||||
def to_mu(att):
|
||||
return round(att*2.0) ^ 0x3f
|
||||
|
||||
@portable
|
||||
def from_mu(att_mu):
|
||||
return 0.5*(att_mu ^ 0x3f)
|
||||
|
||||
|
||||
class BaseModAtt:
|
||||
def __init__(self, dmgr, rst_n, clk, le, mosi, miso):
|
||||
self.rst_n = dmgr.get(rst_n)
|
||||
self.shift_reg = ShiftReg(dmgr,
|
||||
clk=clk, ser=mosi, latch=le, ser_in=miso, n=8*4)
|
||||
|
||||
@kernel
|
||||
def reset(self):
|
||||
# HMC's incompetence in digital design and interfaces means that
|
||||
# the HMC542 needs a level low on RST_N and then a rising edge
|
||||
# on Latch Enable. Their "latch" isn't a latch but a DFF.
|
||||
# Of course, it also powers up with a random attenuation, and
|
||||
# that cannot be fixed with simple pull-ups/pull-downs.
|
||||
self.rst_n.off()
|
||||
self.shift_reg.latch.off()
|
||||
delay(1*us)
|
||||
self.shift_reg.latch.on()
|
||||
delay(1*us)
|
||||
self.shift_reg.latch.off()
|
||||
self.rst_n.on()
|
||||
delay(1*us)
|
||||
|
||||
@kernel
|
||||
def set_mu(self, att0, att1, att2, att3):
|
||||
"""
|
||||
Sets the four attenuators on BaseMod.
|
||||
The values are in half decibels, between 0 (no attenuation)
|
||||
and 63 (31.5dB attenuation).
|
||||
"""
|
||||
word = (
|
||||
(att0 << 2) |
|
||||
(att1 << 10) |
|
||||
(att2 << 18) |
|
||||
(att3 << 26)
|
||||
)
|
||||
self.shift_reg.set(word)
|
||||
|
||||
@kernel
|
||||
def get_mu(self):
|
||||
"""
|
||||
Retrieves the current settings of the four attenuators on BaseMod.
|
||||
"""
|
||||
word = self.shift_reg.get()
|
||||
att0 = (word >> 2) & 0x3f
|
||||
att1 = (word >> 10) & 0x3f
|
||||
att2 = (word >> 18) & 0x3f
|
||||
att3 = (word >> 26) & 0x3f
|
||||
return att0, att1, att2, att3
|
||||
|
||||
@kernel
|
||||
def set(self, att0, att1, att2, att3):
|
||||
"""
|
||||
Sets the four attenuators on BaseMod.
|
||||
The values are in decibels.
|
||||
"""
|
||||
self.set_mu(to_mu(att0), to_mu(att1), to_mu(att2), to_mu(att3))
|
||||
|
||||
@kernel
|
||||
def get(self):
|
||||
"""
|
||||
Retrieves the current settings of the four attenuators on BaseMod.
|
||||
The values are in decibels.
|
||||
"""
|
||||
att0, att1, att2, att3 = self.get_mu()
|
||||
return from_mu(att0), from_mu(att1), from_mu(att2), from_mu(att3)
|
|
@ -2,11 +2,11 @@ from artiq.language.core import *
|
|||
from artiq.language.types import *
|
||||
|
||||
|
||||
@syscall(flags={"nounwind", "nowrite"})
|
||||
@syscall(flags={"nounwind"})
|
||||
def cache_get(key: TStr) -> TList(TInt32):
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
@syscall(flags={"nowrite"})
|
||||
@syscall
|
||||
def cache_put(key: TStr, value: TList(TInt32)) -> TNone:
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
import sys
|
||||
import socket
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def set_keepalive(sock, after_idle, interval, max_fails):
|
||||
if sys.platform.startswith("linux"):
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle)
|
||||
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
|
||||
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
|
||||
elif sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
|
||||
# setting max_fails is not supported, typically ends up being 5 or 10
|
||||
# depending on Windows version
|
||||
sock.ioctl(socket.SIO_KEEPALIVE_VALS,
|
||||
(1, after_idle * 1000, interval * 1000))
|
||||
else:
|
||||
logger.warning("TCP keepalive not supported on platform '%s', ignored",
|
||||
sys.platform)
|
||||
|
||||
|
||||
def initialize_connection(host, port):
|
||||
sock = socket.create_connection((host, port))
|
||||
set_keepalive(sock, 10, 10, 3)
|
||||
logger.debug("connected to %s:%d", host, port)
|
||||
return sock
|
|
@ -102,15 +102,15 @@ def decode_dump(data):
|
|||
# messages are big endian
|
||||
parts = struct.unpack(endian + "IQbbb", data[:15])
|
||||
(sent_bytes, total_byte_count,
|
||||
error_occured, log_channel, dds_onehot_sel) = parts
|
||||
error_occurred, log_channel, dds_onehot_sel) = parts
|
||||
|
||||
expected_len = sent_bytes + 15
|
||||
if expected_len != len(data):
|
||||
raise ValueError("analyzer dump has incorrect length "
|
||||
"(got {}, expected {})".format(
|
||||
len(data), expected_len))
|
||||
if error_occured:
|
||||
logger.warning("error occured within the analyzer, "
|
||||
if error_occurred:
|
||||
logger.warning("error occurred within the analyzer, "
|
||||
"data may be corrupted")
|
||||
if total_byte_count > sent_bytes:
|
||||
logger.info("analyzer ring buffer has wrapped %d times",
|
||||
|
|
|
@ -8,9 +8,8 @@ from fractions import Fraction
|
|||
from collections import namedtuple
|
||||
|
||||
from artiq.coredevice import exceptions
|
||||
from artiq.coredevice.comm import initialize_connection
|
||||
from artiq import __version__ as software_version
|
||||
|
||||
from sipyco.keepalive import create_connection
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -24,6 +23,8 @@ class Request(Enum):
|
|||
RPCReply = 7
|
||||
RPCException = 8
|
||||
|
||||
SubkernelUpload = 9
|
||||
|
||||
|
||||
class Reply(Enum):
|
||||
SystemInfo = 2
|
||||
|
@ -66,13 +67,13 @@ def _receive_list(kernel, embedding_map):
|
|||
tag = chr(kernel._read_int8())
|
||||
if tag == "b":
|
||||
buffer = kernel._read(length)
|
||||
return list(buffer)
|
||||
return list(struct.unpack(kernel.endian + "%s?" % length, buffer))
|
||||
elif tag == "i":
|
||||
buffer = kernel._read(4 * length)
|
||||
return list(struct.unpack(kernel.endian + "%sl" % length, buffer))
|
||||
elif tag == "I":
|
||||
buffer = kernel._read(8 * length)
|
||||
return list(struct.unpack(kernel.endian + "%sq" % length, buffer))
|
||||
return list(numpy.ndarray((length, ), kernel.endian + 'i8', buffer))
|
||||
elif tag == "f":
|
||||
buffer = kernel._read(8 * length)
|
||||
return list(struct.unpack(kernel.endian + "%sd" % length, buffer))
|
||||
|
@ -96,7 +97,7 @@ def _receive_array(kernel, embedding_map):
|
|||
length = numpy.prod(shape)
|
||||
if tag == "b":
|
||||
buffer = kernel._read(length)
|
||||
elems = numpy.ndarray((length, ), 'B', buffer)
|
||||
elems = numpy.ndarray((length, ), '?', buffer)
|
||||
elif tag == "i":
|
||||
buffer = kernel._read(4 * length)
|
||||
elems = numpy.ndarray((length, ), kernel.endian + 'i4', buffer)
|
||||
|
@ -171,6 +172,16 @@ class CommKernelDummy:
|
|||
pass
|
||||
|
||||
|
||||
def incompatible_versions(v1, v2):
|
||||
if v1.endswith(".beta") or v2.endswith(".beta"):
|
||||
# Beta branches may introduce breaking changes. Check version strictly.
|
||||
return v1 != v2
|
||||
else:
|
||||
# On stable branches, runtime/software protocol backward compatibility is kept.
|
||||
# Runtime and software with the same major version number are compatible.
|
||||
return v1.split(".", maxsplit=1)[0] != v2.split(".", maxsplit=1)[0]
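# Examples (not part of this diff) of the rule above, using hypothetical
# version strings: stable branches compare only the major number, beta
# branches compare the full string.
assert not incompatible_versions("7.8100.a0e1314", "7.8102.59b52b5")  # same major
assert incompatible_versions("7.8100.a0e1314", "8.0")                 # major differs
assert incompatible_versions("8.0+dummy.beta", "8.0+other.beta")      # beta: strict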
class CommKernel:
|
||||
warned_of_mismatch = False
|
||||
|
||||
|
@ -185,7 +196,7 @@ class CommKernel:
|
|||
def open(self):
|
||||
if hasattr(self, "socket"):
|
||||
return
|
||||
self.socket = initialize_connection(self.host, self.port)
|
||||
self.socket = create_connection(self.host, self.port)
|
||||
self.socket.sendall(b"ARTIQ coredev\n")
|
||||
endian = self._read(1)
|
||||
if endian == b"e":
|
||||
|
@ -199,6 +210,7 @@ class CommKernel:
|
|||
self.unpack_float64 = struct.Struct(self.endian + "d").unpack
|
||||
|
||||
self.pack_header = struct.Struct(self.endian + "lB").pack
|
||||
self.pack_int8 = struct.Struct(self.endian + "B").pack
|
||||
self.pack_int32 = struct.Struct(self.endian + "l").pack
|
||||
self.pack_int64 = struct.Struct(self.endian + "q").pack
|
||||
self.pack_float64 = struct.Struct(self.endian + "d").pack
|
||||
|
@ -313,7 +325,7 @@ class CommKernel:
|
|||
self._write(chunk)
|
||||
|
||||
def _write_int8(self, value):
|
||||
self._write(value)
|
||||
self._write(self.pack_int8(value))
|
||||
|
||||
def _write_int32(self, value):
|
||||
self._write(self.pack_int32(value))
|
||||
|
@ -347,7 +359,7 @@ class CommKernel:
|
|||
runtime_id = self._read(4)
|
||||
if runtime_id == b"AROR":
|
||||
gateware_version = self._read_string().split(";")[0]
|
||||
if gateware_version != software_version and not self.warned_of_mismatch:
|
||||
if not self.warned_of_mismatch and incompatible_versions(gateware_version, software_version):
|
||||
logger.warning("Mismatch between gateware (%s) "
|
||||
"and software (%s) versions",
|
||||
gateware_version, software_version)
|
||||
|
@ -373,6 +385,19 @@ class CommKernel:
|
|||
else:
|
||||
self._read_expect(Reply.LoadCompleted)
|
||||
|
||||
def upload_subkernel(self, kernel_library, id, destination):
|
||||
self._write_header(Request.SubkernelUpload)
|
||||
self._write_int32(id)
|
||||
self._write_int8(destination)
|
||||
self._write_bytes(kernel_library)
|
||||
self._flush()
|
||||
|
||||
self._read_header()
|
||||
if self._read_type == Reply.LoadFailed:
|
||||
raise LoadError(self._read_string())
|
||||
else:
|
||||
self._read_expect(Reply.LoadCompleted)
|
||||
|
||||
def run(self):
|
||||
self._write_empty(Request.RunKernel)
|
||||
self._flush()
|
||||
|
@ -409,6 +434,9 @@ class CommKernel:
|
|||
self._skip_rpc_value(tags)
|
||||
elif tag == "r":
|
||||
self._skip_rpc_value(tags)
|
||||
elif tag == "a":
|
||||
_ndims = tags.pop(0)
|
||||
self._skip_rpc_value(tags)
|
||||
else:
|
||||
pass
|
||||
|
||||
|
@ -437,12 +465,12 @@ class CommKernel:
|
|||
self._write_bool(value)
|
||||
elif tag == "i":
|
||||
check(isinstance(value, (int, numpy.int32)) and
|
||||
(-2**31 < value < 2**31-1),
|
||||
(-2**31 <= value < 2**31),
|
||||
lambda: "32-bit int")
|
||||
self._write_int32(value)
|
||||
elif tag == "I":
|
||||
check(isinstance(value, (int, numpy.int32, numpy.int64)) and
|
||||
(-2**63 < value < 2**63-1),
|
||||
(-2**63 <= value < 2**63),
|
||||
lambda: "64-bit int")
|
||||
self._write_int64(value)
|
||||
elif tag == "f":
|
||||
|
@ -451,8 +479,8 @@ class CommKernel:
|
|||
self._write_float64(value)
|
||||
elif tag == "F":
|
||||
check(isinstance(value, Fraction) and
|
||||
(-2**63 < value.numerator < 2**63-1) and
|
||||
(-2**63 < value.denominator < 2**63-1),
|
||||
(-2**63 <= value.numerator < 2**63) and
|
||||
(-2**63 <= value.denominator < 2**63),
|
||||
lambda: "64-bit Fraction")
|
||||
self._write_int64(value.numerator)
|
||||
self._write_int64(value.denominator)
|
||||
|
@ -476,11 +504,19 @@ class CommKernel:
|
|||
if tag_element == "b":
|
||||
self._write(bytes(value))
|
||||
elif tag_element == "i":
|
||||
self._write(struct.pack(self.endian + "%sl" %
|
||||
len(value), *value))
|
||||
try:
|
||||
self._write(struct.pack(self.endian + "%sl" % len(value), *value))
|
||||
except struct.error:
|
||||
raise RPCReturnValueError(
|
||||
"type mismatch: cannot serialize {value} as {type}".format(
|
||||
value=repr(value), type="32-bit integer list"))
|
||||
elif tag_element == "I":
|
||||
self._write(struct.pack(self.endian + "%sq" %
|
||||
len(value), *value))
|
||||
try:
|
||||
self._write(struct.pack(self.endian + "%sq" % len(value), *value))
|
||||
except struct.error:
|
||||
raise RPCReturnValueError(
|
||||
"type mismatch: cannot serialize {value} as {type}".format(
|
||||
value=repr(value), type="64-bit integer list"))
|
||||
elif tag_element == "f":
|
||||
self._write(struct.pack(self.endian + "%sd" %
|
||||
len(value), *value))
|
||||
|
@ -555,14 +591,6 @@ class CommKernel:
|
|||
|
||||
try:
|
||||
result = service(*args, **kwargs)
|
||||
logger.debug("rpc service: %d %r %r = %r",
|
||||
service_id, args, kwargs, result)
|
||||
|
||||
self._write_header(Request.RPCReply)
|
||||
self._write_bytes(return_tags)
|
||||
self._send_rpc_value(bytearray(return_tags),
|
||||
result, result, service)
|
||||
self._flush()
|
||||
except RPCReturnValueError as exn:
|
||||
raise
|
||||
except Exception as exn:
|
||||
|
@ -571,29 +599,33 @@ class CommKernel:
|
|||
|
||||
self._write_header(Request.RPCException)
|
||||
|
||||
# Note: instead of sending strings, we send object ID
|
||||
# This is to avoid the need for allocation on the device side
|
||||
# This is a special case: this only applies to exceptions
|
||||
if hasattr(exn, "artiq_core_exception"):
|
||||
exn = exn.artiq_core_exception
|
||||
self._write_string(exn.name)
|
||||
self._write_string(self._truncate_message(exn.message))
|
||||
self._write_int32(embedding_map.store_str(exn.name))
|
||||
self._write_int32(embedding_map.store_str(self._truncate_message(exn.message)))
|
||||
for index in range(3):
|
||||
self._write_int64(exn.param[index])
|
||||
|
||||
filename, line, column, function = exn.traceback[-1]
|
||||
self._write_string(filename)
|
||||
self._write_int32(embedding_map.store_str(filename))
|
||||
self._write_int32(line)
|
||||
self._write_int32(column)
|
||||
self._write_string(function)
|
||||
self._write_int32(embedding_map.store_str(function))
|
||||
else:
|
||||
exn_type = type(exn)
|
||||
if exn_type in (ZeroDivisionError, ValueError, IndexError, RuntimeError) or \
|
||||
hasattr(exn, "artiq_builtin"):
|
||||
self._write_string("0:{}".format(exn_type.__name__))
|
||||
name = "0:{}".format(exn_type.__name__)
|
||||
else:
|
||||
exn_id = embedding_map.store_object(exn_type)
|
||||
self._write_string("{}:{}.{}".format(exn_id,
|
||||
exn_type.__module__,
|
||||
exn_type.__qualname__))
|
||||
self._write_string(self._truncate_message(str(exn)))
|
||||
name = "{}:{}.{}".format(exn_id,
|
||||
exn_type.__module__,
|
||||
exn_type.__qualname__)
|
||||
self._write_int32(embedding_map.store_str(name))
|
||||
self._write_int32(embedding_map.store_str(self._truncate_message(str(exn))))
|
||||
for index in range(3):
|
||||
self._write_int64(0)
|
||||
|
||||
|
@ -604,37 +636,93 @@ class CommKernel:
|
|||
((filename, line, function, _), ) = tb
|
||||
else:
|
||||
assert False
|
||||
self._write_string(filename)
|
||||
self._write_int32(embedding_map.store_str(filename))
|
||||
self._write_int32(line)
|
||||
self._write_int32(-1) # column not known
|
||||
self._write_string(function)
|
||||
self._write_int32(embedding_map.store_str(function))
|
||||
self._flush()
|
||||
else:
|
||||
logger.debug("rpc service: %d %r %r = %r",
|
||||
service_id, args, kwargs, result)
|
||||
self._write_header(Request.RPCReply)
|
||||
self._write_bytes(return_tags)
|
||||
self._send_rpc_value(bytearray(return_tags),
|
||||
result, result, service)
|
||||
self._flush()
|
||||
|
||||
def _serve_exception(self, embedding_map, symbolizer, demangler):
|
||||
name = self._read_string()
|
||||
message = self._read_string()
|
||||
params = [self._read_int64() for _ in range(3)]
|
||||
exception_count = self._read_int32()
|
||||
nested_exceptions = []
|
||||
|
||||
filename = self._read_string()
|
||||
line = self._read_int32()
|
||||
column = self._read_int32()
|
||||
function = self._read_string()
|
||||
def read_exception_string():
|
||||
# note: if length == -1, the following int32 is the object key
|
||||
length = self._read_int32()
|
||||
if length == -1:
|
||||
return embedding_map.retrieve_str(self._read_int32())
|
||||
else:
|
||||
return self._read(length).decode("utf-8")
|
||||
|
||||
backtrace = [self._read_int32() for _ in range(self._read_int32())]
|
||||
for _ in range(exception_count):
|
||||
name = embedding_map.retrieve_str(self._read_int32())
|
||||
message = read_exception_string()
|
||||
params = [self._read_int64() for _ in range(3)]
|
||||
|
||||
traceback = list(reversed(symbolizer(backtrace))) + \
|
||||
[(filename, line, column, *demangler([function]), None)]
|
||||
core_exn = exceptions.CoreException(name, message, params, traceback)
|
||||
filename = read_exception_string()
|
||||
line = self._read_int32()
|
||||
column = self._read_int32()
|
||||
function = read_exception_string()
|
||||
nested_exceptions.append([name, message, params,
|
||||
filename, line, column, function])
|
||||
|
||||
demangled_names = demangler([ex[6] for ex in nested_exceptions])
|
||||
for i in range(exception_count):
|
||||
nested_exceptions[i][6] = demangled_names[i]
|
||||
|
||||
exception_info = []
|
||||
for _ in range(exception_count):
|
||||
sp = self._read_int32()
|
||||
initial_backtrace = self._read_int32()
|
||||
current_backtrace = self._read_int32()
|
||||
exception_info.append((sp, initial_backtrace, current_backtrace))
|
||||
|
||||
backtrace = []
|
||||
stack_pointers = []
|
||||
for _ in range(self._read_int32()):
|
||||
backtrace.append(self._read_int32())
|
||||
stack_pointers.append(self._read_int32())
|
||||
|
||||
self._process_async_error()
|
||||
|
||||
traceback = list(symbolizer(backtrace))
|
||||
core_exn = exceptions.CoreException(nested_exceptions, exception_info,
|
||||
traceback, stack_pointers)
|
||||
|
||||
if core_exn.id == 0:
|
||||
python_exn_type = getattr(exceptions, core_exn.name.split('.')[-1])
|
||||
else:
|
||||
python_exn_type = embedding_map.retrieve_object(core_exn.id)
|
||||
|
||||
python_exn = python_exn_type(message.format(*params))
|
||||
try:
|
||||
python_exn = python_exn_type(
|
||||
nested_exceptions[-1][1].format(*nested_exceptions[0][2]))
|
||||
except Exception as ex:
|
||||
python_exn = RuntimeError(
|
||||
f"Exception type={python_exn_type}, which couldn't be "
|
||||
f"reconstructed ({ex})"
|
||||
)
|
||||
python_exn.artiq_core_exception = core_exn
|
||||
raise python_exn
|
||||
|
||||
def _process_async_error(self):
|
||||
errors = self._read_int8()
|
||||
if errors > 0:
|
||||
map_name = lambda y, z: [f"{y}(s)"] if z else []
|
||||
errors = map_name("collision", errors & 2 ** 0) + \
|
||||
map_name("busy error", errors & 2 ** 1) + \
|
||||
map_name("sequence error", errors & 2 ** 2)
|
||||
logger.warning(f"{(', '.join(errors[:-1]) + ' and ') if len(errors) > 1 else ''}{errors[-1]} "
|
||||
f"reported during kernel execution")
def serve(self, embedding_map, symbolizer, demangler):
|
||||
while True:
|
||||
self._read_header()
|
||||
|
@ -646,4 +734,5 @@ class CommKernel:
|
|||
raise exceptions.ClockFailure
|
||||
else:
|
||||
self._read_expect(Reply.KernelFinished)
|
||||
self._process_async_error()
|
||||
return
|
||||
|
|
|
@ -2,8 +2,7 @@ from enum import Enum
|
|||
import logging
|
||||
import struct
|
||||
|
||||
from artiq.coredevice.comm import initialize_connection
|
||||
|
||||
from sipyco.keepalive import create_connection
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -20,11 +19,6 @@ class Request(Enum):
|
|||
ConfigRemove = 14
|
||||
ConfigErase = 15
|
||||
|
||||
StartProfiler = 9
|
||||
StopProfiler = 10
|
||||
GetProfile = 11
|
||||
|
||||
Hotswap = 4
|
||||
Reboot = 5
|
||||
|
||||
DebugAllocator = 8
|
||||
|
@ -39,8 +33,6 @@ class Reply(Enum):
|
|||
|
||||
ConfigData = 7
|
||||
|
||||
Profile = 5
|
||||
|
||||
RebootImminent = 3
|
||||
|
||||
|
||||
|
@ -61,7 +53,7 @@ class CommMgmt:
|
|||
def open(self):
|
||||
if hasattr(self, "socket"):
|
||||
return
|
||||
self.socket = initialize_connection(self.host, self.port)
|
||||
self.socket = create_connection(self.host, self.port)
|
||||
self.socket.sendall(b"ARTIQ management\n")
|
||||
endian = self._read(1)
|
||||
if endian == b"e":
|
||||
|
@ -118,9 +110,10 @@ class CommMgmt:
|
|||
return ty
|
||||
|
||||
def _read_expect(self, ty):
|
||||
if self._read_header() != ty:
|
||||
header = self._read_header()
|
||||
if header != ty:
|
||||
raise IOError("Incorrect reply from device: {} (expected {})".
|
||||
format(self._read_type, ty))
|
||||
format(header, ty))
|
||||
|
||||
def _read_int32(self):
|
||||
(value, ) = struct.unpack(self.endian + "l", self._read(4))
|
||||
|
@ -167,7 +160,12 @@ class CommMgmt:
|
|||
def config_read(self, key):
|
||||
self._write_header(Request.ConfigRead)
|
||||
self._write_string(key)
|
||||
self._read_expect(Reply.ConfigData)
|
||||
ty = self._read_header()
|
||||
if ty == Reply.Error:
|
||||
raise IOError("Device failed to read config. The key may not exist.")
|
||||
elif ty != Reply.ConfigData:
|
||||
raise IOError("Incorrect reply from device: {} (expected {})".
|
||||
format(ty, Reply.ConfigData))
|
||||
return self._read_string()
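# Usage sketch (illustrative, not part of this diff): with the change above a
# missing key surfaces as IOError rather than an unexpected-reply failure.
# Host address and the default management port are assumptions.
mgmt = CommMgmt("192.168.1.70")
try:
    idle = mgmt.config_read("idle_kernel")
except IOError:
    idle = None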
def config_write(self, key, value):
|
||||
|
@ -176,7 +174,7 @@ class CommMgmt:
|
|||
self._write_bytes(value)
|
||||
ty = self._read_header()
|
||||
if ty == Reply.Error:
|
||||
raise IOError("Flash storage is full")
|
||||
raise IOError("Device failed to write config. More information may be available in the log.")
|
||||
elif ty != Reply.Success:
|
||||
raise IOError("Incorrect reply from device: {} (expected {})".
|
||||
format(ty, Reply.Success))
|
||||
|
@ -190,45 +188,6 @@ class CommMgmt:
|
|||
self._write_header(Request.ConfigErase)
|
||||
self._read_expect(Reply.Success)
|
||||
|
||||
def start_profiler(self, interval, edges_size, hits_size):
|
||||
self._write_header(Request.StartProfiler)
|
||||
self._write_int32(interval)
|
||||
self._write_int32(edges_size)
|
||||
self._write_int32(hits_size)
|
||||
self._read_expect(Reply.Success)
|
||||
|
||||
def stop_profiler(self):
|
||||
self._write_header(Request.StopProfiler)
|
||||
self._read_expect(Reply.Success)
|
||||
|
||||
def stop_profiler(self):
|
||||
self._write_header(Request.StopProfiler)
|
||||
self._read_expect(Reply.Success)
|
||||
|
||||
def get_profile(self):
|
||||
self._write_header(Request.GetProfile)
|
||||
self._read_expect(Reply.Profile)
|
||||
|
||||
hits = {}
|
||||
for _ in range(self._read_int32()):
|
||||
addr = self._read_int32()
|
||||
count = self._read_int32()
|
||||
hits[addr] = count
|
||||
|
||||
edges = {}
|
||||
for _ in range(self._read_int32()):
|
||||
caller = self._read_int32()
|
||||
callee = self._read_int32()
|
||||
count = self._read_int32()
|
||||
edges[(caller, callee)] = count
|
||||
|
||||
return hits, edges
|
||||
|
||||
def hotswap(self, firmware):
|
||||
self._write_header(Request.Hotswap)
|
||||
self._write_bytes(firmware)
|
||||
self._read_expect(Reply.RebootImminent)
|
||||
|
||||
def reboot(self):
|
||||
self._write_header(Request.Reboot)
|
||||
self._read_expect(Reply.RebootImminent)
|
||||
|
|
|
@ -3,6 +3,7 @@ import logging
|
|||
import struct
|
||||
from enum import Enum
|
||||
|
||||
from sipyco.keepalive import async_open_connection
|
||||
|
||||
__all__ = ["TTLProbe", "TTLOverride", "CommMonInj"]
|
||||
|
||||
|
@ -28,17 +29,16 @@ class CommMonInj:
|
|||
self.disconnect_cb = disconnect_cb
|
||||
|
||||
async def connect(self, host, port=1383):
|
||||
self._reader, self._writer = await asyncio.open_connection(host, port)
|
||||
self._reader, self._writer = await async_open_connection(
|
||||
host,
|
||||
port,
|
||||
after_idle=1,
|
||||
interval=1,
|
||||
max_fails=3,
|
||||
)
|
||||
|
||||
try:
|
||||
self._writer.write(b"ARTIQ moninj\n")
|
||||
# get device endian
|
||||
endian = await self._reader.read(1)
|
||||
if endian == b"e":
|
||||
self.endian = "<"
|
||||
elif endian == b"E":
|
||||
self.endian = ">"
|
||||
else:
|
||||
raise IOError("Incorrect reply from device: expected e/E.")
|
||||
self._receive_task = asyncio.ensure_future(self._receive_cr())
|
||||
except:
|
||||
self._writer.close()
|
||||
|
@ -46,6 +46,9 @@ class CommMonInj:
|
|||
del self._writer
|
||||
raise
|
||||
|
||||
def wait_terminate(self):
|
||||
return self._receive_task
|
||||
|
||||
async def close(self):
|
||||
self.disconnect_cb = None
|
||||
try:
|
||||
|
@ -60,19 +63,19 @@ class CommMonInj:
|
|||
del self._writer
|
||||
|
||||
def monitor_probe(self, enable, channel, probe):
|
||||
packet = struct.pack(self.endian + "bblb", 0, enable, channel, probe)
|
||||
packet = struct.pack("<bblb", 0, enable, channel, probe)
|
||||
self._writer.write(packet)
|
||||
|
||||
def monitor_injection(self, enable, channel, overrd):
|
||||
packet = struct.pack(self.endian + "bblb", 3, enable, channel, overrd)
|
||||
packet = struct.pack("<bblb", 3, enable, channel, overrd)
|
||||
self._writer.write(packet)
|
||||
|
||||
def inject(self, channel, override, value):
|
||||
packet = struct.pack(self.endian + "blbb", 1, channel, override, value)
|
||||
packet = struct.pack("<blbb", 1, channel, override, value)
|
||||
self._writer.write(packet)
|
||||
|
||||
def get_injection_status(self, channel, override):
|
||||
packet = struct.pack(self.endian + "blb", 2, channel, override)
|
||||
packet = struct.pack("<blb", 2, channel, override)
|
||||
self._writer.write(packet)
|
||||
|
||||
async def _receive_cr(self):
|
||||
|
@ -82,17 +85,19 @@ class CommMonInj:
|
|||
if not ty:
|
||||
return
|
||||
if ty == b"\x00":
|
||||
payload = await self._reader.read(9)
|
||||
channel, probe, value = struct.unpack(
|
||||
self.endian + "lbl", payload)
|
||||
payload = await self._reader.readexactly(13)
|
||||
channel, probe, value = struct.unpack("<lbq", payload)
|
||||
self.monitor_cb(channel, probe, value)
|
||||
elif ty == b"\x01":
|
||||
payload = await self._reader.read(6)
|
||||
channel, override, value = struct.unpack(
|
||||
self.endian + "lbb", payload)
|
||||
payload = await self._reader.readexactly(6)
|
||||
channel, override, value = struct.unpack("<lbb", payload)
|
||||
self.injection_status_cb(channel, override, value)
|
||||
else:
|
||||
raise ValueError("Unknown packet type", ty)
|
||||
except asyncio.CancelledError:
|
||||
raise
|
||||
except:
|
||||
logger.error("Moninj connection terminating with exception", exc_info=True)
|
||||
finally:
|
||||
if self.disconnect_cb is not None:
|
||||
self.disconnect_cb()
|
||||
|
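For reference, a minimal host-side sketch of the fixed little-endian framing used above; the two decode helpers are hypothetical, not part of the driver:

import struct

def decode_monitor_payload(payload):
    # type 0x00 (monitor): channel int32, probe int8, value int64 -> 13 bytes
    channel, probe, value = struct.unpack("<lbq", payload)
    return channel, probe, value

def decode_injection_status_payload(payload):
    # type 0x01 (injection status): channel int32, override int8, value int8 -> 6 bytes
    channel, override, value = struct.unpack("<lbb", payload)
    return channel, override, value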
|
|
@ -1,5 +1,7 @@
|
|||
import os, sys
|
||||
import numpy
|
||||
from inspect import getfullargspec
|
||||
from functools import wraps
|
||||
|
||||
from pythonparser import diagnostic
|
||||
|
||||
|
@ -11,7 +13,7 @@ from artiq.language.units import *
|
|||
|
||||
from artiq.compiler.module import Module
|
||||
from artiq.compiler.embedding import Stitcher
|
||||
from artiq.compiler.targets import OR1KTarget, CortexA9Target
|
||||
from artiq.compiler.targets import RV32IMATarget, RV32GTarget, CortexA9Target
|
||||
|
||||
from artiq.coredevice.comm_kernel import CommKernel, CommKernelDummy
|
||||
# Import for side effects (creating the exception classes).
|
||||
|
@ -52,6 +54,17 @@ def rtio_get_counter() -> TInt64:
|
|||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
||||
def get_target_cls(target):
|
||||
if target == "rv32g":
|
||||
return RV32GTarget
|
||||
elif target == "rv32ima":
|
||||
return RV32IMATarget
|
||||
elif target == "cortexa9":
|
||||
return CortexA9Target
|
||||
else:
|
||||
raise ValueError("Unsupported target")
|
||||
|
||||
|
||||
class Core:
|
||||
"""Core device driver.
|
||||
|
||||
|
@ -65,79 +78,164 @@ class Core:
|
|||
:param ref_multiplier: ratio between the RTIO fine timestamp frequency
|
||||
and the RTIO coarse timestamp frequency (e.g. SERDES multiplication
|
||||
factor).
|
||||
:param analyzer_proxy: name of the core device analyzer proxy to trigger
|
||||
(optional).
|
||||
:param analyze_at_run_end: automatically trigger the core device analyzer
|
||||
proxy after the Experiment's run stage finishes.
|
||||
"""
|
||||
|
||||
kernel_invariants = {
|
||||
"core", "ref_period", "coarse_ref_period", "ref_multiplier",
|
||||
}
|
||||
|
||||
def __init__(self, dmgr, host, ref_period, ref_multiplier=8, target="or1k"):
|
||||
def __init__(self, dmgr,
|
||||
host, ref_period,
|
||||
analyzer_proxy=None, analyze_at_run_end=False,
|
||||
ref_multiplier=8,
|
||||
target="rv32g", satellite_cpu_targets={}):
|
||||
self.ref_period = ref_period
|
||||
self.ref_multiplier = ref_multiplier
|
||||
if target == "or1k":
|
||||
self.target_cls = OR1KTarget
|
||||
elif target == "cortexa9":
|
||||
self.target_cls = CortexA9Target
|
||||
else:
|
||||
raise ValueError("Unsupported target")
|
||||
self.satellite_cpu_targets = satellite_cpu_targets
|
||||
self.target_cls = get_target_cls(target)
|
||||
self.coarse_ref_period = ref_period*ref_multiplier
|
||||
if host is None:
|
||||
self.comm = CommKernelDummy()
|
||||
else:
|
||||
self.comm = CommKernel(host)
|
||||
self.analyzer_proxy_name = analyzer_proxy
|
||||
self.analyze_at_run_end = analyze_at_run_end
|
||||
|
||||
self.first_run = True
|
||||
self.dmgr = dmgr
|
||||
self.core = self
|
||||
self.comm.core = self
|
||||
self.analyzer_proxy = None
|
||||
|
||||
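A hedged sketch of how the new constructor arguments could appear in a device database entry; host address, target names and proxy name below are illustrative, not taken from this diff:

# illustrative device_db entry; adjust values for the actual system
device_db = {
    "core": {
        "type": "local",
        "module": "artiq.coredevice.core",
        "class": "Core",
        "arguments": {
            "host": "192.168.1.70",
            "ref_period": 1e-9,
            "target": "rv32g",
            "satellite_cpu_targets": {1: "rv32g"},
            "analyzer_proxy": "core_analyzer",
            "analyze_at_run_end": False,
        },
    },
}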
def notify_run_end(self):
|
||||
if self.analyze_at_run_end:
|
||||
self.trigger_analyzer_proxy()
|
||||
|
||||
def close(self):
|
||||
self.comm.close()
|
||||
|
||||
def compile(self, function, args, kwargs, set_result=None,
|
||||
attribute_writeback=True, print_as_rpc=True):
|
||||
attribute_writeback=True, print_as_rpc=True,
|
||||
target=None, destination=0, subkernel_arg_types=[]):
|
||||
try:
|
||||
engine = _DiagnosticEngine(all_errors_are_fatal=True)
|
||||
|
||||
stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr,
|
||||
print_as_rpc=print_as_rpc)
|
||||
print_as_rpc=print_as_rpc,
|
||||
destination=destination, subkernel_arg_types=subkernel_arg_types)
|
||||
stitcher.stitch_call(function, args, kwargs, set_result)
|
||||
stitcher.finalize()
|
||||
|
||||
module = Module(stitcher,
|
||||
ref_period=self.ref_period,
|
||||
attribute_writeback=attribute_writeback)
|
||||
target = self.target_cls()
|
||||
target = target if target is not None else self.target_cls()
|
||||
|
||||
library = target.compile_and_link([module])
|
||||
stripped_library = target.strip(library)
|
||||
|
||||
return stitcher.embedding_map, stripped_library, \
|
||||
lambda addresses: target.symbolize(library, addresses), \
|
||||
lambda symbols: target.demangle(symbols)
|
||||
lambda symbols: target.demangle(symbols), \
|
||||
module.subkernel_arg_types
|
||||
except diagnostic.Error as error:
|
||||
raise CompileError(error.diagnostic) from error
|
||||
|
||||
def _run_compiled(self, kernel_library, embedding_map, symbolizer, demangler):
|
||||
if self.first_run:
|
||||
self.comm.check_system_info()
|
||||
self.first_run = False
|
||||
self.comm.load(kernel_library)
|
||||
self.comm.run()
|
||||
self.comm.serve(embedding_map, symbolizer, demangler)
|
||||
|
||||
def run(self, function, args, kwargs):
|
||||
result = None
|
||||
@rpc(flags={"async"})
|
||||
def set_result(new_result):
|
||||
nonlocal result
|
||||
result = new_result
|
||||
|
||||
embedding_map, kernel_library, symbolizer, demangler = \
|
||||
embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
|
||||
self.compile(function, args, kwargs, set_result)
|
||||
|
||||
if self.first_run:
|
||||
self.comm.check_system_info()
|
||||
self.first_run = False
|
||||
|
||||
self.comm.load(kernel_library)
|
||||
self.comm.run()
|
||||
self.comm.serve(embedding_map, symbolizer, demangler)
|
||||
|
||||
self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
|
||||
self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
|
||||
return result
|
||||
|
||||
def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types):
|
||||
# pass self to subkernels (if applicable)
|
||||
# assuming the first argument is self
|
||||
subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function)
|
||||
self_arg = []
|
||||
if len(subkernel_args[0]) > 0:
|
||||
if subkernel_args[0][0] == 'self':
|
||||
self_arg = args[:1]
|
||||
destination = subkernel_fn.artiq_embedded.destination
|
||||
destination_tgt = self.satellite_cpu_targets[destination]
|
||||
target = get_target_cls(destination_tgt)(subkernel_id=sid)
|
||||
object_map, kernel_library, _, _, _ = \
|
||||
self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False,
|
||||
print_as_rpc=False, target=target, destination=destination,
|
||||
subkernel_arg_types=subkernel_arg_types.get(sid, []))
|
||||
if object_map.has_rpc_or_subkernel():
|
||||
raise ValueError("Subkernel must not use RPC or subkernels in other destinations")
|
||||
return destination, kernel_library
|
||||
|
||||
def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types):
|
||||
for sid, subkernel_fn in embedding_map.subkernels().items():
|
||||
destination, kernel_library = \
|
||||
self.compile_subkernel(sid, subkernel_fn, embedding_map,
|
||||
args, subkernel_arg_types)
|
||||
self.comm.upload_subkernel(kernel_library, sid, destination)
|
||||
|
||||
def precompile(self, function, *args, **kwargs):
|
||||
"""Precompile a kernel and return a callable that executes it on the core device
|
||||
at a later time.
|
||||
|
||||
Arguments to the kernel are set at compilation time and passed to this function,
|
||||
as additional positional and keyword arguments.
|
||||
The returned callable accepts no arguments.
|
||||
|
||||
Precompiled kernels may use RPCs and subkernels.
|
||||
|
||||
Object attributes at the beginning of a precompiled kernel execution have the
|
||||
values they had at precompilation time. If up-to-date values are required,
|
||||
use RPC to read them.
|
||||
Similarly, modified values are not written back, and explicit RPC should be used
|
||||
to modify host objects.
|
||||
Carefully review the source code of drivers calls used in precompiled kernels, as
|
||||
they may rely on host object attributes being transfered between kernel calls.
|
||||
Examples include code used to control DDS phase, and Urukul RF switch control
|
||||
via the CPLD register.
|
||||
|
||||
The return value of the callable is the return value of the kernel, if any.
|
||||
|
||||
The callable may be called several times.
|
||||
"""
|
||||
if not hasattr(function, "artiq_embedded"):
|
||||
raise ValueError("Argument is not a kernel")
|
||||
|
||||
result = None
|
||||
@rpc(flags={"async"})
|
||||
def set_result(new_result):
|
||||
nonlocal result
|
||||
result = new_result
|
||||
|
||||
embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
|
||||
self.compile(function, args, kwargs, set_result, attribute_writeback=False)
|
||||
self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
|
||||
|
||||
@wraps(function)
|
||||
def run_precompiled():
|
||||
nonlocal result
|
||||
self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
|
||||
return result
|
||||
|
||||
return run_precompiled
|
||||
|
||||
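A minimal usage sketch of the method above, assuming an experiment with a kernel method `self.pulse_train` taking one argument (names and the argument value are illustrative):

# inside an experiment's run(), on the host; 'pulse_train' is illustrative
precompiled = self.core.precompile(self.pulse_train, 100)
# ...later, execute the already-compiled kernel, possibly several times:
precompiled()
precompiled()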
@portable
|
||||
def seconds_to_mu(self, seconds):
|
||||
"""Convert seconds to the corresponding number of machine units
|
||||
|
@ -204,3 +302,23 @@ class Core:
|
|||
min_now = rtio_get_counter() + 125000
|
||||
if now_mu() < min_now:
|
||||
at_mu(min_now)
|
||||
|
||||
def trigger_analyzer_proxy(self):
|
||||
"""Causes the core analyzer proxy to retrieve a dump from the device,
|
||||
and distribute it to all connected clients (typically dashboards).
|
||||
|
||||
Returns only after the dump has been retrieved from the device.
|
||||
|
||||
Raises IOError if no analyzer proxy has been configured, or if the
|
||||
analyzer proxy fails. In the latter case, more details may be
|
||||
available in the proxy log.
|
||||
"""
|
||||
if self.analyzer_proxy is None:
|
||||
if self.analyzer_proxy_name is not None:
|
||||
self.analyzer_proxy = self.dmgr.get(self.analyzer_proxy_name)
|
||||
if self.analyzer_proxy is None:
|
||||
raise IOError("No analyzer proxy configured")
|
||||
else:
|
||||
success = self.analyzer_proxy.trigger()
|
||||
if not success:
|
||||
raise IOError("Analyzer proxy reported failure")
|
||||
|
|
|
@ -19,16 +19,24 @@
|
|||
},
|
||||
"min_artiq_version": {
|
||||
"type": "string",
|
||||
"description": "Minimum required ARTIQ version"
|
||||
"description": "Minimum required ARTIQ version",
|
||||
"default": "0"
|
||||
},
|
||||
"hw_rev": {
|
||||
"type": "string",
|
||||
"description": "Hardware revision"
|
||||
},
|
||||
"base": {
|
||||
"type": "string",
|
||||
"enum": ["use_drtio_role", "standalone", "master", "satellite"],
|
||||
"description": "Deprecated, use drtio_role instead",
|
||||
"default": "use_drtio_role"
|
||||
},
|
||||
"drtio_role": {
|
||||
"type": "string",
|
||||
"enum": ["standalone", "master", "satellite"],
|
||||
"description": "SoC base; value depends on intended system topology"
|
||||
"description": "Role that this device takes in a DRTIO network; 'standalone' means no DRTIO",
|
||||
"default": "standalone"
|
||||
},
|
||||
"ext_ref_frequency": {
|
||||
"type": "number",
|
||||
|
@ -64,6 +72,13 @@
|
|||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"sed_lanes": {
|
||||
"type": "number",
|
||||
"minimum": 1,
|
||||
"maximum": 32,
|
||||
"default": 8,
|
||||
"description": "Number of FIFOs in the SED, must be a power of 2"
|
||||
},
|
||||
"peripherals": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
|
@ -71,6 +86,26 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"if": {
|
||||
"properties": {
|
||||
"target": { "const": "kasli" },
|
||||
"hw_rev": {
|
||||
"not": {
|
||||
"oneOf": [
|
||||
{ "const": "v1.0" },
|
||||
{ "const": "v1.1" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"enable_sata_drtio": {
|
||||
"const": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["target", "variant", "hw_rev", "base", "peripherals"],
|
||||
"additionalProperties": false,
|
||||
|
||||
|
@ -95,7 +130,7 @@
|
|||
},
|
||||
"hw_rev": {
|
||||
"type": "string",
|
||||
"enum": ["v1.0"]
|
||||
"enum": ["v1.0", "v1.1"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -107,7 +142,7 @@
|
|||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": ["dio", "urukul", "novogorny", "sampler", "suservo", "zotino", "grabber", "mirny", "fastino", "phaser"]
|
||||
"enum": ["dio", "dio_spi", "urukul", "novogorny", "sampler", "suservo", "zotino", "grabber", "mirny", "fastino", "phaser", "hvamp", "shuttler"]
|
||||
},
|
||||
"board": {
|
||||
"type": "string"
|
||||
|
@ -143,15 +178,101 @@
|
|||
},
|
||||
"bank_direction_low": {
|
||||
"type": "string",
|
||||
"enum": ["input", "output"]
|
||||
"enum": ["input", "output", "clkgen"]
|
||||
},
|
||||
"bank_direction_high": {
|
||||
"type": "string",
|
||||
"enum": ["input", "output"]
|
||||
"enum": ["input", "output", "clkgen"]
|
||||
}
|
||||
},
|
||||
"required": ["ports", "bank_direction_low", "bank_direction_high"]
|
||||
}
|
||||
}, {
|
||||
"title": "DIO_SPI",
|
||||
"if": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"const": "dio_spi"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
},
|
||||
"minItems": 1,
|
||||
"maxItems": 1
|
||||
},
|
||||
"spi": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"default": "dio_spi"
|
||||
},
|
||||
"clk": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 7
|
||||
},
|
||||
"mosi": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 7
|
||||
},
|
||||
"miso": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 7
|
||||
},
|
||||
"cs": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 7
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": ["clk"]
|
||||
},
|
||||
"minItems": 1
|
||||
},
|
||||
"ttl": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"default": "ttl"
|
||||
},
|
||||
"pin": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 7
|
||||
},
|
||||
"direction": {
|
||||
"type": "string",
|
||||
"enum": ["input", "output"]
|
||||
},
|
||||
"edge_counter": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
}
|
||||
},
|
||||
"required": ["pin", "direction"]
|
||||
},
|
||||
"default": []
|
||||
}
|
||||
},
|
||||
"required": ["ports", "spi"]
|
||||
}
|
||||
}, {
|
||||
"title": "Urukul",
|
||||
"if": {
|
||||
|
@ -175,24 +296,31 @@
|
|||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
},
|
||||
"clk_sel": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
"clk_div": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
"pll_n": {
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
},
|
||||
"clk_sel": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
"clk_div": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3,
|
||||
"default": 0
|
||||
},
|
||||
"pll_n": {
|
||||
"type": "integer"
|
||||
},
|
||||
"pll_vco": {
|
||||
"pll_en": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 1,
|
||||
"default": 1
|
||||
},
|
||||
"pll_vco": {
|
||||
"type": "integer"
|
||||
},
|
||||
"dds": {
|
||||
|
@ -266,6 +394,11 @@
|
|||
"minItems": 2,
|
||||
"maxItems": 2
|
||||
},
|
||||
"sampler_hw_rev": {
|
||||
"type": "string",
|
||||
"pattern": "^v[0-9]+\\.[0-9]+",
|
||||
"default": "v2.2"
|
||||
},
|
||||
"urukul0_ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
|
@ -282,20 +415,26 @@
|
|||
"minItems": 2,
|
||||
"maxItems": 2
|
||||
},
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
},
|
||||
"clk_sel": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
"pll_n": {
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
},
|
||||
"clk_sel": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
"pll_n": {
|
||||
"type": "integer",
|
||||
"default": 32
|
||||
},
|
||||
"pll_vco": {
|
||||
"pll_en": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 1,
|
||||
"default": 1
|
||||
},
|
||||
"pll_vco": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
|
@ -364,17 +503,34 @@
|
|||
"minItems": 1,
|
||||
"maxItems": 1
|
||||
},
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"exclusiveMinimum": 0,
|
||||
"refclk": {
|
||||
"type": "number",
|
||||
"exclusiveMinimum": 0,
|
||||
"default": 100e6
|
||||
},
|
||||
"clk_sel": {
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3,
|
||||
},
|
||||
"clk_sel": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "integer",
|
||||
"minimum": 0,
|
||||
"maximum": 3
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"enum": ["xo", "mmcx", "sma"]
|
||||
}
|
||||
],
|
||||
"default": 0
|
||||
}
|
||||
},
|
||||
"almazny": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"almazny_hw_rev": {
|
||||
"type": "string",
|
||||
"pattern": "^v[0-9]+\\.[0-9]+",
|
||||
"default": "v1.2"
|
||||
}
|
||||
},
|
||||
"required": ["ports"]
|
||||
}
|
||||
|
@ -423,6 +579,58 @@
|
|||
},
|
||||
"minItems": 1,
|
||||
"maxItems": 1
|
||||
},
|
||||
"mode": {
|
||||
"type": "string",
|
||||
"enum": ["base", "miqro"],
|
||||
"default": "base"
|
||||
}
|
||||
},
|
||||
"required": ["ports"]
|
||||
}
|
||||
}, {
|
||||
"title": "HVAmp",
|
||||
"if": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"const": "hvamp"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
},
|
||||
"minItems": 1,
|
||||
"maxItems": 1
|
||||
}
|
||||
},
|
||||
"required": ["ports"]
|
||||
}
|
||||
},{
|
||||
"title": "Shuttler",
|
||||
"if": {
|
||||
"properties": {
|
||||
"type": {
|
||||
"const": "shuttler"
|
||||
}
|
||||
}
|
||||
},
|
||||
"then": {
|
||||
"properties": {
|
||||
"ports": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "integer"
|
||||
},
|
||||
"minItems": 1,
|
||||
"maxItems": 2
|
||||
},
|
||||
"drtio_destination": {
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": ["ports"]
|
||||
|
|
|
@ -110,7 +110,7 @@ class DAC34H84:
|
|||
syncsel_mixercd = 0b1001 # sif_sync and register write
|
||||
syncsel_nco = 0b1000 # sif_sync
|
||||
syncsel_fifo_input = 0b10 # external lvds istr
|
||||
sif_sync = 1
|
||||
sif_sync = 0
|
||||
|
||||
syncsel_fifoin = 0b0010 # istr
|
||||
syncsel_fifoout = 0b0100 # ostr
|
||||
|
@ -178,7 +178,8 @@ class DAC34H84:
|
|||
(self.collisiongone_ena << 12) | (self.sif4_ena << 7) |
|
||||
(self.mixer_ena << 6) | (self.mixer_gain << 5) |
|
||||
(self.nco_ena << 4) | (self.revbus << 3) | (self.twos << 1))
|
||||
mmap.append((0x03 << 16) | (self.coarse_dac << 12) | (self.sif_txenable << 0))
|
||||
mmap.append((0x03 << 16) | (self.coarse_dac << 12) |
|
||||
(self.sif_txenable << 0))
|
||||
mmap.append(
|
||||
(0x07 << 16) |
|
||||
(self.mask_alarm_from_zerochk << 15) | (1 << 14) |
|
||||
|
@ -200,7 +201,7 @@ class DAC34H84:
|
|||
mmap.append(
|
||||
(0x0d << 16) |
|
||||
(self.cmix_fs8 << 15) | (self.cmix_fs4 << 14) |
|
||||
(self.cmix_fs2 << 12) | (self.cmix_nfs4 << 11) |
|
||||
(self.cmix_fs2 << 13) | (self.cmix_nfs4 << 12) |
|
||||
(self.qmc_gainb << 0))
|
||||
mmap.append((0x0e << 16) | (self.qmc_gainc << 0))
|
||||
mmap.append(
|
||||
|
|
|
@ -6,7 +6,7 @@ alone could achieve.
|
|||
"""
|
||||
|
||||
from artiq.language.core import syscall, kernel
|
||||
from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple
|
||||
from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple, TBool
|
||||
from artiq.coredevice.exceptions import DMAError
|
||||
|
||||
from numpy import int64
|
||||
|
@ -17,7 +17,7 @@ def dma_record_start(name: TStr) -> TNone:
|
|||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
@syscall
|
||||
def dma_record_stop(duration: TInt64) -> TNone:
|
||||
def dma_record_stop(duration: TInt64, enable_ddma: TBool) -> TNone:
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
@syscall
|
||||
|
@ -25,11 +25,11 @@ def dma_erase(name: TStr) -> TNone:
|
|||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
@syscall
|
||||
def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32]):
|
||||
def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32, TBool]):
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
@syscall
|
||||
def dma_playback(timestamp: TInt64, ptr: TInt32) -> TNone:
|
||||
def dma_playback(timestamp: TInt64, ptr: TInt32, enable_ddma: TBool) -> TNone:
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
||||
|
@ -47,6 +47,7 @@ class DMARecordContextManager:
|
|||
def __init__(self):
|
||||
self.name = ""
|
||||
self.saved_now_mu = int64(0)
|
||||
self.enable_ddma = False
|
||||
|
||||
@kernel
|
||||
def __enter__(self):
|
||||
|
@ -56,7 +57,7 @@ class DMARecordContextManager:
|
|||
|
||||
@kernel
|
||||
def __exit__(self, type, value, traceback):
|
||||
dma_record_stop(now_mu()) # see above
|
||||
dma_record_stop(now_mu(), self.enable_ddma) # see above
|
||||
at_mu(self.saved_now_mu)
|
||||
|
||||
|
||||
|
@ -74,12 +75,20 @@ class CoreDMA:
|
|||
self.epoch = 0
|
||||
|
||||
@kernel
|
||||
def record(self, name):
|
||||
def record(self, name, enable_ddma=False):
|
||||
"""Returns a context manager that will record a DMA trace called ``name``.
|
||||
Any previously recorded trace with the same name is overwritten.
|
||||
The trace will persist across kernel switches."""
|
||||
The trace will persist across kernel switches.
|
||||
|
||||
In DRTIO context, distributed DMA can be toggled with ``enable_ddma``.
|
||||
Enabling it allows running DMA on satellites, rather than sending all
|
||||
events from the master.
|
||||
|
||||
Keeping it disabled may improve performance in some scenarios,
e.g. when there are many small satellite buffers."""
|
||||
self.epoch += 1
|
||||
self.recorder.name = name
|
||||
self.recorder.enable_ddma = enable_ddma
|
||||
return self.recorder
|
||||
|
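A sketch of recording and replaying a trace with distributed DMA enabled, assuming an experiment that has `self.core_dma` and `self.ttl0` devices (names and timings illustrative):

# inside an EnvExperiment; device names and timings are illustrative
@kernel
def record_and_play(self):
    with self.core_dma.record("pulses", enable_ddma=True):
        for _ in range(100):
            self.ttl0.pulse(100*ns)
            delay(100*ns)
    handle = self.core_dma.get_handle("pulses")
    self.core.break_realtime()
    self.core_dma.playback_handle(handle)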
||||
@kernel
|
||||
|
@ -92,24 +101,24 @@ class CoreDMA:
|
|||
def playback(self, name):
|
||||
"""Replays a previously recorded DMA trace. This function blocks until
|
||||
the entire trace is submitted to the RTIO FIFOs."""
|
||||
(advance_mu, ptr) = dma_retrieve(name)
|
||||
dma_playback(now_mu(), ptr)
|
||||
(advance_mu, ptr, uses_ddma) = dma_retrieve(name)
|
||||
dma_playback(now_mu(), ptr, uses_ddma)
|
||||
delay_mu(advance_mu)
|
||||
|
||||
@kernel
|
||||
def get_handle(self, name):
|
||||
"""Returns a handle to a previously recorded DMA trace. The returned handle
|
||||
is only valid until the next call to :meth:`record` or :meth:`erase`."""
|
||||
(advance_mu, ptr) = dma_retrieve(name)
|
||||
return (self.epoch, advance_mu, ptr)
|
||||
(advance_mu, ptr, uses_ddma) = dma_retrieve(name)
|
||||
return (self.epoch, advance_mu, ptr, uses_ddma)
|
||||
|
||||
@kernel
|
||||
def playback_handle(self, handle):
|
||||
"""Replays a handle obtained with :meth:`get_handle`. Using this function
|
||||
is much faster than :meth:`playback` for replaying a set of traces repeatedly,
|
||||
but shifts the overhead of managing the handles onto the programmer."""
|
||||
(epoch, advance_mu, ptr) = handle
|
||||
(epoch, advance_mu, ptr, uses_ddma) = handle
|
||||
if self.epoch != epoch:
|
||||
raise DMAError("Invalid handle")
|
||||
dma_playback(now_mu(), ptr)
|
||||
dma_playback(now_mu(), ptr, uses_ddma)
|
||||
delay_mu(advance_mu)
|
||||
|
|
|
@ -91,6 +91,10 @@ class EdgeCounter:
|
|||
self.channel = channel
|
||||
self.counter_max = (1 << (gateware_width - 1)) - 1
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def gate_rising(self, duration):
|
||||
"""Count rising edges for the given duration and request the total at
|
||||
|
|
|
@ -16,50 +16,94 @@ AssertionError = builtins.AssertionError
|
|||
|
||||
class CoreException:
|
||||
"""Information about an exception raised or passed through the core device."""
|
||||
def __init__(self, exceptions, exception_info, traceback, stack_pointers):
|
||||
self.exceptions = exceptions
|
||||
self.exception_info = exception_info
|
||||
self.traceback = list(traceback)
|
||||
self.stack_pointers = stack_pointers
|
||||
|
||||
def __init__(self, name, message, params, traceback):
|
||||
first_exception = exceptions[0]
|
||||
name = first_exception[0]
|
||||
if ':' in name:
|
||||
exn_id, self.name = name.split(':', 2)
|
||||
self.id = int(exn_id)
|
||||
else:
|
||||
self.id, self.name = 0, name
|
||||
self.message, self.params = message, params
|
||||
self.traceback = list(traceback)
|
||||
self.message = first_exception[1]
|
||||
self.params = first_exception[2]
|
||||
|
||||
def append_backtrace(self, record, inlined=False):
|
||||
filename, line, column, function, address = record
|
||||
stub_globals = {"__name__": filename, "__loader__": source_loader}
|
||||
source_line = linecache.getline(filename, line, stub_globals)
|
||||
indentation = re.search(r"^\s*", source_line).end()
|
||||
|
||||
if address is None:
|
||||
formatted_address = ""
|
||||
elif inlined:
|
||||
formatted_address = " (inlined)"
|
||||
else:
|
||||
formatted_address = " (RA=+0x{:x})".format(address)
|
||||
|
||||
filename = filename.replace(artiq_dir, "<artiq>")
|
||||
lines = []
|
||||
if column == -1:
|
||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||
lines.append(" File \"{file}\", line {line}, in {function}{address}".
|
||||
format(file=filename, line=line, function=function,
|
||||
address=formatted_address))
|
||||
else:
|
||||
lines.append(" {}^".format(" " * (column - indentation)))
|
||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||
lines.append(" File \"{file}\", line {line}, column {column},"
|
||||
" in {function}{address}".
|
||||
format(file=filename, line=line, column=column + 1,
|
||||
function=function, address=formatted_address))
|
||||
return lines
|
||||
|
||||
def single_traceback(self, exception_index):
|
||||
# note that we insert in reversed order
|
||||
lines = []
|
||||
last_sp = 0
|
||||
start_backtrace_index = self.exception_info[exception_index][1]
|
||||
zipped = list(zip(self.traceback[start_backtrace_index:],
|
||||
self.stack_pointers[start_backtrace_index:]))
|
||||
exception = self.exceptions[exception_index]
|
||||
name = exception[0]
|
||||
message = exception[1]
|
||||
params = exception[2]
|
||||
if ':' in name:
|
||||
exn_id, name = name.split(':', 2)
|
||||
exn_id = int(exn_id)
|
||||
else:
|
||||
exn_id = 0
|
||||
lines.append("{}({}): {}".format(name, exn_id, message.format(*params)))
|
||||
zipped.append(((exception[3], exception[4], exception[5], exception[6],
|
||||
None, []), None))
|
||||
|
||||
for ((filename, line, column, function, address, inlined), sp) in zipped:
|
||||
# backtrace of nested exceptions may be discontinuous
|
||||
# but the stack pointer must increase monotonically
|
||||
if sp is not None and sp <= last_sp:
|
||||
continue
|
||||
last_sp = sp
|
||||
|
||||
for record in reversed(inlined):
|
||||
lines += self.append_backtrace(record, True)
|
||||
lines += self.append_backtrace((filename, line, column, function,
|
||||
address))
|
||||
|
||||
lines.append("Traceback (most recent call first):")
|
||||
|
||||
return "\n".join(reversed(lines))
|
||||
|
||||
def __str__(self):
|
||||
lines = []
|
||||
lines.append("Core Device Traceback (most recent call last):")
|
||||
last_address = 0
|
||||
for (filename, line, column, function, address) in self.traceback:
|
||||
stub_globals = {"__name__": filename, "__loader__": source_loader}
|
||||
source_line = linecache.getline(filename, line, stub_globals)
|
||||
indentation = re.search(r"^\s*", source_line).end()
|
||||
|
||||
if address is None:
|
||||
formatted_address = ""
|
||||
elif address == last_address:
|
||||
formatted_address = " (inlined)"
|
||||
else:
|
||||
formatted_address = " (RA=+0x{:x})".format(address)
|
||||
last_address = address
|
||||
|
||||
filename = filename.replace(artiq_dir, "<artiq>")
|
||||
if column == -1:
|
||||
lines.append(" File \"{file}\", line {line}, in {function}{address}".
|
||||
format(file=filename, line=line, function=function,
|
||||
address=formatted_address))
|
||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||
else:
|
||||
lines.append(" File \"{file}\", line {line}, column {column},"
|
||||
" in {function}{address}".
|
||||
format(file=filename, line=line, column=column + 1,
|
||||
function=function, address=formatted_address))
|
||||
lines.append(" {}".format(source_line.strip() if source_line else "<unknown>"))
|
||||
lines.append(" {}^".format(" " * (column - indentation)))
|
||||
|
||||
lines.append("{}({}): {}".format(self.name, self.id,
|
||||
self.message.format(*self.params)))
|
||||
return "\n".join(lines)
|
||||
tracebacks = [self.single_traceback(i) for i in range(len(self.exceptions))]
|
||||
traceback_str = ('\n\nDuring handling of the above exception, ' +
|
||||
'another exception occurred:\n\n').join(tracebacks)
|
||||
return 'Core Device Traceback:\n' +\
|
||||
traceback_str +\
|
||||
'\n\nEnd of Core Device Traceback\n'
|
||||
|
||||
|
||||
class InternalError(Exception):
|
||||
|
@ -104,6 +148,13 @@ class DMAError(Exception):
|
|||
artiq_builtin = True
|
||||
|
||||
|
||||
class SubkernelError(Exception):
|
||||
"""Raised when an operation regarding a subkernel is invalid
|
||||
or cannot be completed.
|
||||
"""
|
||||
artiq_builtin = True
|
||||
|
||||
|
||||
class ClockFailure(Exception):
|
||||
"""Raised when RTIO PLL has lost lock."""
|
||||
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
"""RTIO driver for the Fastino 32channel, 16 bit, 2.5 MS/s per channel,
|
||||
streaming DAC.
|
||||
"""
|
||||
from numpy import int32, int64
|
||||
|
||||
from artiq.language.core import kernel, portable, delay
|
||||
from artiq.language.core import kernel, portable, delay, delay_mu
|
||||
from artiq.coredevice.rtio import (rtio_output, rtio_output_wide,
|
||||
rtio_input_data)
|
||||
from artiq.language.units import us
|
||||
from artiq.language.units import ns
|
||||
from artiq.language.types import TInt32, TList
|
||||
|
||||
|
||||
|
@ -20,7 +21,7 @@ class Fastino:
|
|||
DAC updates synchronized to a frame edge.
|
||||
|
||||
The `log2_width=0` RTIO layout uses one DAC channel per RTIO address and a
|
||||
dense RTIO address space. The RTIO words are narrow. (32 bit) and
|
||||
dense RTIO address space. The RTIO words are narrow (32 bit) and
|
||||
few-channel updates are efficient. There is the least amount of DAC state
|
||||
tracking in kernels, at the cost of more DMA and RTIO data.
|
||||
The setting here and in the RTIO PHY (gateware) must match.
|
||||
|
@ -40,24 +41,52 @@ class Fastino:
|
|||
:param log2_width: Width of DAC channel group (logarithm base 2).
|
||||
Value must match the corresponding value in the RTIO PHY (gateware).
|
||||
"""
|
||||
kernel_invariants = {"core", "channel", "width"}
|
||||
kernel_invariants = {"core", "channel", "width", "t_frame"}
|
||||
|
||||
def __init__(self, dmgr, channel, core_device="core", log2_width=0):
|
||||
self.channel = channel << 8
|
||||
self.core = dmgr.get(core_device)
|
||||
self.width = 1 << log2_width
|
||||
# frame duration in mu (14 words each 7 clock cycles each 4 ns)
|
||||
# self.core.seconds_to_mu(14*7*4*ns) # unfortunately this may round wrong
|
||||
assert self.core.ref_period == 1*ns
|
||||
self.t_frame = int64(14*7*4)
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
"""Initialize the device.
|
||||
|
||||
This clears reset, unsets DAC_CLR, enables AFE_PWR,
|
||||
clears error counters, then enables error counting
|
||||
* disables RESET, DAC_CLR, enables AFE_PWR
|
||||
* clears error counters, enables error counting
|
||||
* turns LEDs off
|
||||
* clears `hold` and `continuous` on all channels
|
||||
* clears and resets interpolators to unit rate change on all
channels
|
||||
|
||||
It does not change the currently set channel voltages and does not reset the PLLs or clock
|
||||
domains.
|
||||
|
||||
Note: On Fastino gateware before v0.2 this may lead to 0 voltage being emitted
|
||||
transiently.
|
||||
"""
|
||||
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=1)
|
||||
delay(1*us)
|
||||
delay_mu(self.t_frame)
|
||||
self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=0)
|
||||
delay(1*us)
|
||||
delay_mu(self.t_frame)
|
||||
self.set_continuous(0)
|
||||
delay_mu(self.t_frame)
|
||||
self.stage_cic(1)
|
||||
delay_mu(self.t_frame)
|
||||
self.apply_cic(0xffffffff)
|
||||
delay_mu(self.t_frame)
|
||||
self.set_leds(0)
|
||||
delay_mu(self.t_frame)
|
||||
self.set_hold(0)
|
||||
delay_mu(self.t_frame)
|
||||
|
||||
@kernel
|
||||
def write(self, addr, data):
|
||||
|
@ -77,8 +106,9 @@ class Fastino:
|
|||
:param addr: Address to read from.
|
||||
:return: The data read.
|
||||
"""
|
||||
rtio_output(self.channel | addr | 0x80)
|
||||
return rtio_input_data(self.channel >> 8)
|
||||
raise NotImplementedError
|
||||
# rtio_output(self.channel | addr | 0x80)
|
||||
# return rtio_input_data(self.channel >> 8)
|
||||
|
||||
@kernel
|
||||
def set_dac_mu(self, dac, data):
|
||||
|
@ -112,7 +142,7 @@ class Fastino:
|
|||
:param voltage: Voltage in SI Volts.
|
||||
:return: DAC data word in machine units, 16 bit integer.
|
||||
"""
|
||||
data = int(round((0x8000/10.)*voltage)) + 0x8000
|
||||
data = int32(round((0x8000/10.)*voltage)) + int32(0x8000)
|
||||
if data < 0 or data > 0xffff:
|
||||
raise ValueError("DAC voltage out of bounds")
|
||||
return data
|
||||
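A quick host-side check of the scaling above (a sketch, not part of the driver): full scale is ±10 V mapped onto an unsigned 16-bit word centred at 0x8000.

# reference re-implementation for checking the conversion, host-side only
def voltage_to_mu_reference(voltage):
    data = int(round((0x8000/10.)*voltage)) + 0x8000
    assert 0 <= data <= 0xffff, "DAC voltage out of bounds"
    return data

assert voltage_to_mu_reference(0.0) == 0x8000
assert voltage_to_mu_reference(5.0) == 0xc000
assert voltage_to_mu_reference(-10.0) == 0x0000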
|
@ -129,7 +159,7 @@ class Fastino:
|
|||
v = self.voltage_to_mu(voltage[i])
|
||||
if i & 1:
|
||||
v = data[i // 2] | (v << 16)
|
||||
data[i // 2] = v
|
||||
data[i // 2] = int32(v)
|
||||
|
||||
@kernel
|
||||
def set_dac(self, dac, voltage):
|
||||
|
@ -190,3 +220,85 @@ class Fastino:
|
|||
green LED.
|
||||
"""
|
||||
self.write(0x23, leds)
|
||||
|
||||
@kernel
|
||||
def set_continuous(self, channel_mask):
|
||||
"""Enable continuous DAC updates on channels regardless of new data
|
||||
being submitted.
|
||||
"""
|
||||
self.write(0x25, channel_mask)
|
||||
|
||||
@kernel
|
||||
def stage_cic_mu(self, rate_mantissa, rate_exponent, gain_exponent):
|
||||
"""Stage machine unit CIC interpolator configuration.
|
||||
"""
|
||||
if rate_mantissa < 0 or rate_mantissa >= 1 << 6:
|
||||
raise ValueError("rate_mantissa out of bounds")
|
||||
if rate_exponent < 0 or rate_exponent >= 1 << 4:
|
||||
raise ValueError("rate_exponent out of bounds")
|
||||
if gain_exponent < 0 or gain_exponent >= 1 << 6:
|
||||
raise ValueError("gain_exponent out of bounds")
|
||||
config = rate_mantissa | (rate_exponent << 6) | (gain_exponent << 10)
|
||||
self.write(0x26, config)
|
||||
|
||||
@kernel
|
||||
def stage_cic(self, rate) -> TInt32:
|
||||
"""Compute and stage interpolator configuration.
|
||||
|
||||
This method approximates the desired interpolation rate using a 10 bit
|
||||
floating point representation (6 bit mantissa, 4 bit exponent) and
|
||||
then determines an optimal interpolation gain compensation exponent
|
||||
to avoid clipping. Gains for rates that are powers of two are accurately
|
||||
compensated. Other rates lead to overall less than unity gain (but more
|
||||
than 0.5 gain).
|
||||
|
||||
The overall gain including gain compensation is
|
||||
`actual_rate**order/2**ceil(log2(actual_rate**order))`
|
||||
where `order = 3`.
|
||||
|
||||
Returns the actual interpolation rate.
|
||||
"""
|
||||
if rate <= 0 or rate > 1 << 16:
|
||||
raise ValueError("rate out of bounds")
|
||||
rate_mantissa = rate
|
||||
rate_exponent = 0
|
||||
while rate_mantissa > 1 << 6:
|
||||
rate_exponent += 1
|
||||
rate_mantissa >>= 1
|
||||
order = 3
|
||||
gain = 1
|
||||
for i in range(order):
|
||||
gain *= rate_mantissa
|
||||
gain_exponent = 0
|
||||
while gain > 1 << gain_exponent:
|
||||
gain_exponent += 1
|
||||
gain_exponent += order*rate_exponent
|
||||
assert gain_exponent <= order*16
|
||||
self.stage_cic_mu(rate_mantissa - 1, rate_exponent, gain_exponent)
|
||||
return rate_mantissa << rate_exponent
|
||||
|
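A host-side sketch reproducing the staging arithmetic above for a rate of 7 (order = 3), to make the gain-compensation step concrete:

# host-side walk-through of stage_cic() for rate = 7
rate = 7
rate_mantissa, rate_exponent = rate, 0
while rate_mantissa > 1 << 6:
    rate_exponent += 1
    rate_mantissa >>= 1
gain = rate_mantissa**3           # order = 3
gain_exponent = 0
while gain > 1 << gain_exponent:
    gain_exponent += 1
gain_exponent += 3*rate_exponent
# rate_mantissa = 7, rate_exponent = 0, gain_exponent = 9:
# the staged write is (6, 0, 9), the achieved rate is 7 and the
# overall gain is 343/512, roughly 0.67.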
||||
@kernel
|
||||
def apply_cic(self, channel_mask):
|
||||
"""Apply the staged interpolator configuration on the specified channels.
|
||||
|
||||
Each Fastino channel starting with gateware v0.2 includes a fourth order
|
||||
(cubic) CIC interpolator with variable rate change and variable output
|
||||
gain compensation (see :meth:`stage_cic`).
|
||||
|
||||
Fastino gateware before v0.2 does not include the interpolators and the
|
||||
methods affecting the CICs should not be used.
|
||||
|
||||
Channels using non-unity interpolation rate should have
|
||||
continuous DAC updates enabled (see :meth:`set_continuous`) unless
|
||||
their output is supposed to be constant.
|
||||
|
||||
This method resets and settles the affected interpolators. There will be
|
||||
no output updates for the next `order = 3` input samples.
|
||||
Affected channels will only accept one input sample per input sample
|
||||
period. This method synchronizes the input sample period to the current
|
||||
frame on the affected channels.
|
||||
|
||||
If application of new interpolator settings results in a change of the
|
||||
overall gain, there will be a corresponding output step.
|
||||
"""
|
||||
self.write(0x27, channel_mask)
|
||||
|
|
|
@ -25,6 +25,10 @@ class Grabber:
|
|||
# ROI engine outputs for one video frame.
|
||||
self.sentinel = int32(int64(2**count_width))
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel_base, **kwargs):
|
||||
return [(channel_base, "ROI coordinates"), (channel_base + 1, "ROI mask")]
|
||||
|
||||
@kernel
|
||||
def setup_roi(self, n, x0, y0, x1, y1):
|
||||
"""
|
||||
|
|
|
@ -33,6 +33,11 @@ def i2c_read(busno: TInt32, ack: TBool) -> TInt32:
|
|||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
||||
@syscall(flags={"nounwind", "nowrite"})
|
||||
def i2c_switch_select(busno: TInt32, address: TInt32, mask: TInt32) -> TNone:
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
||||
@kernel
|
||||
def i2c_poll(busno, busaddr):
|
||||
"""Poll I2C device at address.
|
||||
|
@ -137,8 +142,10 @@ def i2c_read_many(busno, busaddr, addr, data):
|
|||
i2c_stop(busno)
|
||||
|
||||
|
||||
class PCA9548:
|
||||
"""Driver for the PCA9548 I2C bus switch.
|
||||
class I2CSwitch:
|
||||
"""Driver for the I2C bus switch.
|
||||
|
||||
PCA954X (or other) type detection is done by the CPU during I2C init.
|
||||
|
||||
I2C transactions are not real-time, and are performed by the CPU without
|
||||
involving RTIO.
|
||||
|
@ -151,25 +158,19 @@ class PCA9548:
|
|||
self.busno = busno
|
||||
self.address = address
|
||||
|
||||
@kernel
|
||||
def select(self, mask):
|
||||
"""Enable/disable channels.
|
||||
|
||||
:param mask: Bit mask of enabled channels
|
||||
"""
|
||||
i2c_write_byte(self.busno, self.address, mask)
|
||||
|
||||
@kernel
|
||||
def set(self, channel):
|
||||
"""Enable one channel.
|
||||
|
||||
:param channel: channel number (0-7)
|
||||
"""
|
||||
self.select(1 << channel)
|
||||
i2c_switch_select(self.busno, self.address >> 1, 1 << channel)
|
||||
|
||||
@kernel
|
||||
def readback(self):
|
||||
return i2c_read_byte(self.busno, self.address)
|
||||
def unset(self):
|
||||
"""Disable output of the I2C switch.
|
||||
"""
|
||||
i2c_switch_select(self.busno, self.address >> 1, 0)
|
||||
|
||||
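A usage sketch for the renamed switch driver, assuming a device `self.i2c_switch0` of this class is present in the experiment (device name illustrative):

# inside an EnvExperiment; 'i2c_switch0' is an illustrative device name
@kernel
def select_segment(self):
    self.i2c_switch0.set(3)    # route I2C to downstream channel 3 only
    # ...perform I2C transactions on the selected segment here...
    self.i2c_switch0.unset()   # disconnect all downstream channels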
|
||||
class TCA6424A:
|
||||
|
@ -207,3 +208,46 @@ class TCA6424A:
|
|||
|
||||
self._write24(0x8c, 0) # set all directions to output
|
||||
self._write24(0x84, outputs_le) # set levels
|
||||
|
||||
class PCF8574A:
|
||||
"""Driver for the PCF8574 I2C remote 8-bit I/O expander.
|
||||
|
||||
I2C transactions are not real-time, and are performed by the CPU without
|
||||
involving RTIO.
|
||||
"""
|
||||
def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.busno = busno
|
||||
self.address = address
|
||||
|
||||
@kernel
|
||||
def set(self, data):
|
||||
"""Drive data on the quasi-bidirectional pins.
|
||||
|
||||
:param data: Pin data. High bits are weakly driven high
|
||||
(and thus inputs), low bits are strongly driven low.
|
||||
"""
|
||||
i2c_start(self.busno)
|
||||
try:
|
||||
if not i2c_write(self.busno, self.address):
|
||||
raise I2CError("PCF8574A failed to ack address")
|
||||
if not i2c_write(self.busno, data):
|
||||
raise I2CError("PCF8574A failed to ack data")
|
||||
finally:
|
||||
i2c_stop(self.busno)
|
||||
|
||||
@kernel
|
||||
def get(self):
|
||||
"""Retrieve quasi-bidirectional pin input data.
|
||||
|
||||
:return: Pin data
|
||||
"""
|
||||
i2c_start(self.busno)
|
||||
ret = 0
|
||||
try:
|
||||
if not i2c_write(self.busno, self.address | 1):
|
||||
raise I2CError("PCF8574A failed to ack address")
|
||||
ret = i2c_read(self.busno, False)
|
||||
finally:
|
||||
i2c_stop(self.busno)
|
||||
return ret
|
||||
|
|
|
@ -32,4 +32,7 @@ def load(description_path):
|
|||
global validator
|
||||
validator.validate(result)
|
||||
|
||||
if result["base"] != "use_drtio_role":
|
||||
result["drtio_role"] = result["base"]
|
||||
|
||||
return result
|
||||
|
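A minimal system description sketch using the new ``drtio_role`` key; the values below are illustrative and not validated here, and the deprecated ``base`` key is kept at its ``use_drtio_role`` default so the mapping above leaves ``drtio_role`` untouched:

# illustrative description dictionary, not taken from any real system
example_description = {
    "target": "kasli",
    "variant": "example",
    "hw_rev": "v2.0",
    "base": "use_drtio_role",
    "drtio_role": "standalone",
    "peripherals": [
        {"type": "dio_spi", "ports": [0],
         "spi": [{"name": "dio_spi0", "clk": 0, "mosi": 1, "miso": 2, "cs": [3]}]},
    ],
}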
|
|
@ -37,13 +37,17 @@ class KasliEEPROM:
|
|||
@kernel
|
||||
def select(self):
|
||||
mask = 1 << self.port
|
||||
self.sw0.select(mask)
|
||||
self.sw1.select(mask >> 8)
|
||||
if self.port < 8:
|
||||
self.sw0.set(self.port)
|
||||
self.sw1.unset()
|
||||
else:
|
||||
self.sw0.unset()
|
||||
self.sw1.set(self.port - 8)
|
||||
|
||||
@kernel
|
||||
def deselect(self):
|
||||
self.sw0.select(0)
|
||||
self.sw1.select(0)
|
||||
self.sw0.unset()
|
||||
self.sw1.unset()
|
||||
|
||||
@kernel
|
||||
def write_i32(self, addr, value):
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
"""RTIO driver for Mirny (4 channel GHz PLLs)
|
||||
"""
|
||||
|
||||
from artiq.language.core import kernel, delay
|
||||
from artiq.language.core import kernel, delay, portable
|
||||
from artiq.language.units import us
|
||||
|
||||
from numpy import int32
|
||||
|
@ -40,9 +40,8 @@ class Mirny:
|
|||
:param refclk: Reference clock (SMA, MMCX or on-board 100 MHz oscillator)
|
||||
frequency in Hz
|
||||
:param clk_sel: Reference clock selection.
|
||||
valid options are: "XO" - onboard crystal oscillator
|
||||
"SMA" - front-panel SMA connector
|
||||
"MMCX" - internal MMCX connector
|
||||
Valid options are: "XO" - onboard crystal oscillator;
|
||||
"SMA" - front-panel SMA connector; "MMCX" - internal MMCX connector.
|
||||
Passing an integer writes it as ``clk_sel`` in the CPLD's register 1.
|
||||
The effect depends on the hardware revision.
|
||||
:param core_device: Core device name (default: "core")
|
||||
|
@ -123,6 +122,18 @@ class Mirny:
|
|||
self.write_reg(1, (self.clk_sel << 4))
|
||||
delay(1000 * us)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def att_to_mu(self, att):
|
||||
"""Convert an attenuation setting in dB to machine units.
|
||||
|
||||
:param att: Attenuation setting in dB.
|
||||
:return: Digital attenuation setting.
|
||||
"""
|
||||
code = int32(255) - int32(round(att * 8))
|
||||
if code < 0 or code > 255:
|
||||
raise ValueError("Invalid Mirny attenuation!")
|
||||
return code
|
||||
|
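A quick check of the conversion above (host-side sketch): attenuation is programmed in 0.125 dB steps, with 0 dB mapping to code 255.

# reference re-implementation for checking the conversion, host-side only
def att_to_mu_reference(att):
    code = 255 - int(round(att*8))
    assert 0 <= code <= 255, "Invalid Mirny attenuation!"
    return code

assert att_to_mu_reference(0.0) == 255
assert att_to_mu_reference(10.0) == 175
assert att_to_mu_reference(31.5) == 3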
||||
@kernel
|
||||
def set_att_mu(self, channel, att):
|
||||
"""Set digital step attenuator in machine units.
|
||||
|
@ -133,11 +144,26 @@ class Mirny:
|
|||
self.bus.write(((channel | 8) << 25) | (att << 16))
|
||||
|
||||
@kernel
|
||||
def write_ext(self, addr, length, data):
|
||||
def set_att(self, channel, att):
|
||||
"""Set digital step attenuator in SI units.
|
||||
|
||||
This method will write the attenuator settings of the selected channel.
|
||||
|
||||
.. seealso:: :meth:`set_att_mu`
|
||||
|
||||
:param channel: Attenuator channel (0-3).
|
||||
:param att: Attenuation setting in dB. Higher value is more
|
||||
attenuation. Minimum attenuation is 0*dB, maximum attenuation is
|
||||
31.5*dB.
|
||||
"""
|
||||
self.set_att_mu(channel, self.att_to_mu(att))
|
||||
|
||||
@kernel
|
||||
def write_ext(self, addr, length, data, ext_div=SPIT_WR):
|
||||
"""Perform SPI write to a prefixed address"""
|
||||
self.bus.set_config_mu(SPI_CONFIG, 8, SPIT_WR, SPI_CS)
|
||||
self.bus.write(addr << 25)
|
||||
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, length, SPIT_WR, SPI_CS)
|
||||
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, length, ext_div, SPI_CS)
|
||||
if length < 32:
|
||||
data <<= 32 - length
|
||||
self.bus.write(data)
|
||||
|
|
|
@ -1,47 +0,0 @@
|
|||
from artiq.experiment import kernel
|
||||
from artiq.coredevice.i2c import (
|
||||
i2c_start, i2c_write, i2c_read, i2c_stop, I2CError)
|
||||
|
||||
|
||||
class PCF8574A:
|
||||
"""Driver for the PCF8574 I2C remote 8-bit I/O expander.
|
||||
|
||||
I2C transactions not real-time, and are performed by the CPU without
|
||||
involving RTIO.
|
||||
"""
|
||||
def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.busno = busno
|
||||
self.address = address
|
||||
|
||||
@kernel
|
||||
def set(self, data):
|
||||
"""Drive data on the quasi-bidirectional pins.
|
||||
|
||||
:param data: Pin data. High bits are weakly driven high
|
||||
(and thus inputs), low bits are strongly driven low.
|
||||
"""
|
||||
i2c_start(self.busno)
|
||||
try:
|
||||
if not i2c_write(self.busno, self.address):
|
||||
raise I2CError("PCF8574A failed to ack address")
|
||||
if not i2c_write(self.busno, data):
|
||||
raise I2CError("PCF8574A failed to ack data")
|
||||
finally:
|
||||
i2c_stop(self.busno)
|
||||
|
||||
@kernel
|
||||
def get(self):
|
||||
"""Retrieve quasi-bidirectional pin input data.
|
||||
|
||||
:return: Pin data
|
||||
"""
|
||||
i2c_start(self.busno)
|
||||
ret = 0
|
||||
try:
|
||||
if not i2c_write(self.busno, self.address | 1):
|
||||
raise I2CError("PCF8574A failed to ack address")
|
||||
ret = i2c_read(self.busno, False)
|
||||
finally:
|
||||
i2c_stop(self.busno)
|
||||
return ret
|
|
@ -1,77 +0,0 @@
|
|||
from .spr import mtspr, mfspr
|
||||
from artiq.language.core import kernel
|
||||
|
||||
|
||||
_MAX_SPRS_PER_GRP_BITS = 11
|
||||
_SPRGROUP_PC = 7 << _MAX_SPRS_PER_GRP_BITS
|
||||
_SPR_PCMR_CP = 0x00000001 # Counter present
|
||||
_SPR_PCMR_CISM = 0x00000004 # Count in supervisor mode
|
||||
_SPR_PCMR_CIUM = 0x00000008 # Count in user mode
|
||||
_SPR_PCMR_LA = 0x00000010 # Load access event
|
||||
_SPR_PCMR_SA = 0x00000020 # Store access event
|
||||
_SPR_PCMR_IF = 0x00000040 # Instruction fetch event
|
||||
_SPR_PCMR_DCM = 0x00000080 # Data cache miss event
|
||||
_SPR_PCMR_ICM = 0x00000100 # Insn cache miss event
|
||||
_SPR_PCMR_IFS = 0x00000200 # Insn fetch stall event
|
||||
_SPR_PCMR_LSUS = 0x00000400 # LSU stall event
|
||||
_SPR_PCMR_BS = 0x00000800 # Branch stall event
|
||||
_SPR_PCMR_DTLBM = 0x00001000 # DTLB miss event
|
||||
_SPR_PCMR_ITLBM = 0x00002000 # ITLB miss event
|
||||
_SPR_PCMR_DDS = 0x00004000 # Data dependency stall event
|
||||
_SPR_PCMR_WPE = 0x03ff8000 # Watchpoint events
|
||||
|
||||
|
||||
@kernel(flags={"nowrite", "nounwind"})
|
||||
def _PCCR(n):
|
||||
return _SPRGROUP_PC + n
|
||||
|
||||
|
||||
@kernel(flags={"nowrite", "nounwind"})
|
||||
def _PCMR(n):
|
||||
return _SPRGROUP_PC + 8 + n
|
||||
|
||||
|
||||
class CorePCU:
|
||||
"""Core device performance counter unit (PCU) access"""
|
||||
def __init__(self, dmgr, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
|
||||
@kernel
|
||||
def start(self):
|
||||
"""
|
||||
Configure and clear the kernel CPU performance counters.
|
||||
|
||||
The eight counters are configured to count the following events:
|
||||
* Load or store
|
||||
* Instruction fetch
|
||||
* Data cache miss
|
||||
* Instruction cache miss
|
||||
* Instruction fetch stall
|
||||
* Load-store-unit stall
|
||||
* Branch stall
|
||||
* Data dependency stall
|
||||
"""
|
||||
for i in range(8):
|
||||
if not mfspr(_PCMR(i)) & _SPR_PCMR_CP:
|
||||
raise ValueError("counter not present")
|
||||
mtspr(_PCMR(i), 0)
|
||||
mtspr(_PCCR(i), 0)
|
||||
mtspr(_PCMR(0), _SPR_PCMR_CISM | _SPR_PCMR_LA | _SPR_PCMR_SA)
|
||||
mtspr(_PCMR(1), _SPR_PCMR_CISM | _SPR_PCMR_IF)
|
||||
mtspr(_PCMR(2), _SPR_PCMR_CISM | _SPR_PCMR_DCM)
|
||||
mtspr(_PCMR(3), _SPR_PCMR_CISM | _SPR_PCMR_ICM)
|
||||
mtspr(_PCMR(4), _SPR_PCMR_CISM | _SPR_PCMR_IFS)
|
||||
mtspr(_PCMR(5), _SPR_PCMR_CISM | _SPR_PCMR_LSUS)
|
||||
mtspr(_PCMR(6), _SPR_PCMR_CISM | _SPR_PCMR_BS)
|
||||
mtspr(_PCMR(7), _SPR_PCMR_CISM | _SPR_PCMR_DDS)
|
||||
|
||||
@kernel
|
||||
def get(self, r):
|
||||
"""
|
||||
Read the performance counters and store the counts in the
|
||||
array provided.
|
||||
|
||||
:param list[int] r: array to store the counter values
|
||||
"""
|
||||
for i in range(8):
|
||||
r[i] = mfspr(_PCCR(i))
|
|
@ -1,12 +1,18 @@
|
|||
from numpy import int32, int64
|
||||
|
||||
from artiq.language.core import kernel, delay_mu, delay
|
||||
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||
from artiq.language.units import us, ns, ms, MHz, dB
|
||||
from artiq.coredevice.rtio import rtio_output, rtio_input_data, rtio_input_timestamp
|
||||
from artiq.language.units import us, ns, ms, MHz
|
||||
from artiq.language.types import TInt32
|
||||
from artiq.coredevice.dac34h84 import DAC34H84
|
||||
from artiq.coredevice.trf372017 import TRF372017
|
||||
|
||||
|
||||
PHASER_BOARD_ID = 19
|
||||
|
||||
PHASER_GW_BASE = 1
|
||||
PHASER_GW_MIQRO = 2
|
||||
|
||||
PHASER_ADDR_BOARD_ID = 0x00
|
||||
PHASER_ADDR_HW_REV = 0x01
|
||||
PHASER_ADDR_GW_REV = 0x02
|
||||
|
@ -38,6 +44,20 @@ PHASER_ADDR_DUC1_P = 0x26
|
|||
PHASER_ADDR_DAC1_DATA = 0x28
|
||||
PHASER_ADDR_DAC1_TEST = 0x2c
|
||||
|
||||
# servo registers
|
||||
PHASER_ADDR_SERVO_CFG0 = 0x30
|
||||
PHASER_ADDR_SERVO_CFG1 = 0x31
|
||||
|
||||
# 0x32 - 0x71 servo coefficients + offset data
|
||||
PHASER_ADDR_SERVO_DATA_BASE = 0x32
|
||||
|
||||
# 0x72 - 0x78 Miqro channel profile/window memories
|
||||
PHASER_ADDR_MIQRO_MEM_ADDR = 0x72
|
||||
PHASER_ADDR_MIQRO_MEM_DATA = 0x74
|
||||
|
||||
# Miqro profile memory select
|
||||
PHASER_MIQRO_SEL_PROFILE = 1 << 14
|
||||
|
||||
PHASER_SEL_DAC = 1 << 0
|
||||
PHASER_SEL_TRF0 = 1 << 1
|
||||
PHASER_SEL_TRF1 = 1 << 2
|
||||
|
@ -56,6 +76,11 @@ PHASER_DAC_SEL_TEST = 1
|
|||
|
||||
PHASER_HW_REV_VARIANT = 1 << 4
|
||||
|
||||
SERVO_COEFF_WIDTH = 16
|
||||
SERVO_DATA_WIDTH = 16
|
||||
SERVO_COEFF_SHIFT = 14
|
||||
SERVO_T_CYCLE = (32+12+192+24+4)*ns # Must match gateware ADC parameters
|
||||
|
||||
|
||||
class Phaser:
|
||||
"""Phaser 4-channel, 16-bit, 1 GS/s DAC coredevice driver.
|
||||
|
@ -63,6 +88,26 @@ class Phaser:
|
|||
Phaser contains a 4 channel, 1 GS/s DAC chip with integrated upconversion,
|
||||
quadrature modulation compensation and interpolation features.
|
||||
|
||||
The coredevice RTIO PHY and the Phaser gateware come in different modes
|
||||
that have different features. Phaser mode and coredevice PHY mode are both
|
||||
selected at their respective gateware compile-time and need to match.
|
||||
|
||||
=============== ============== ===================================
|
||||
Phaser gateware Coredevice PHY Features per :class:`PhaserChannel`
|
||||
=============== ============== ===================================
|
||||
Base <= v0.5 Base Base (5 :class:`PhaserOscillator`)
|
||||
Base >= v0.6 Base Base + Servo
|
||||
Miqro >= v0.6 Miqro :class:`Miqro`
|
||||
=============== ============== ===================================
|
||||
|
||||
The coredevice driver (this class and :class:`PhaserChannel`) exposes
|
||||
the superset of all functionality regardless of the Coredevice RTIO PHY
|
||||
or Phaser gateware modes. This is to evade type unification limitations.
|
||||
Features absent in Coredevice PHY/Phaser gateware will not work and
|
||||
should not be accessed.
|
||||
|
||||
**Base mode**
|
||||
|
||||
The coredevice produces 2 IQ (in-phase and quadrature) data streams with 25
|
||||
MS/s and 14 bit per quadrature. Each data stream supports 5 independent
|
||||
numerically controlled IQ oscillators (NCOs, DDSs with 32 bit frequency, 16
|
||||
|
@ -84,12 +129,22 @@ class Phaser:
|
|||
LVDS bus operating at 1 Gb/s per pin pair and processed in the DAC (Texas
|
||||
Instruments DAC34H84). On the DAC 2x interpolation, sinx/x compensation,
|
||||
quadrature modulator compensation, fine and coarse mixing as well as group
|
||||
delay capabilities are available.
|
||||
delay capabilities are available. If desired, these features may be
|
||||
configured via the `dac` dictionary.
|
||||
|
||||
The latency/group delay from the RTIO events setting
|
||||
:class:`PhaserOscillator` or :class:`PhaserChannel` DUC parameters all the
|
||||
way to the DAC outputs is deterministic. This enables deterministic
|
||||
absolute phase with respect to other RTIO input and output events.
|
||||
absolute phase with respect to other RTIO input and output events
|
||||
(see `get_next_frame_mu()`).
|
||||
|
||||
**Miqro mode**
|
||||
|
||||
See :class:`Miqro`
|
||||
|
||||
Here the DAC operates in 4x interpolation.
|
||||
|
||||
**Analog flow**
|
||||
|
||||
The four analog DAC outputs are passed through anti-aliasing filters.
|
||||
|
||||
|
@ -108,6 +163,33 @@ class Phaser:
|
|||
configured through a shared SPI bus that is accessed and controlled via
|
||||
FPGA registers.
|
||||
|
||||
**Servo**
|
||||
|
||||
Each phaser output channel features a servo to control the RF output amplitude
|
||||
using feedback from an ADC. The servo consists of a first order IIR (infinite
|
||||
impulse response) filter fed by the ADC and a multiplier that scales the I
|
||||
and Q datastreams from the DUC by the IIR output. The IIR state is updated at
|
||||
the 3.788 MHz ADC sampling rate.
|
||||
|
||||
Each channel IIR features 4 profiles, each consisting of the [b0, b1, a1] filter
|
||||
coefficients as well as an output offset. The coefficients and offset can be
|
||||
set for each profile individually and the profiles each have their own ``y0``,
|
||||
``y1`` output registers (the ``x0``, ``x1`` inputs are shared). To avoid
|
||||
transient effects, care should be taken to not update the coefficents in the
|
||||
currently selected profile.
|
||||
|
||||
The servo can be en- or disabled for each channel. When disabled, the servo
|
||||
output multiplier is simply bypassed and the datastream reaches the DAC unscaled.
|
||||
|
||||
The IIR output can be put on hold for each channel. In hold mode, the filter
|
||||
still ingests samples and updates its input ``x0`` and ``x1`` registers, but
|
||||
does not update the ``y0``, ``y1`` output registers.
|
||||
|
||||
After power-up the servo is disabled, in profile 0, with coefficients [0, 0, 0]
|
||||
and hold is enabled. If older gateware without the servo is loaded onto the
|
||||
Phaser FPGA, the device simply behaves as if the servo is disabled and none of
|
||||
the servo functions have any effect.
|
||||
|
||||
.. note:: Various register settings of the DAC and the quadrature
|
||||
upconverters are available to be modified through the `dac`, `trf0`,
|
||||
`trf1` dictionaries. These can be set through the device database
|
||||
|
@ -147,7 +229,7 @@ class Phaser:
|
|||
"dac_mmap"}
|
||||
|
||||
def __init__(self, dmgr, channel_base, miso_delay=1, tune_fifo_offset=True,
|
||||
clk_sel=0, sync_dly=0, dac=None, trf0=None, trf1=None,
|
||||
clk_sel=0, sync_dly=0, dac=None, trf0=None, trf1=None, gw_rev=PHASER_GW_BASE,
|
||||
core_device="core"):
|
||||
self.channel_base = channel_base
|
||||
self.core = dmgr.get(core_device)
|
||||
|
@ -157,15 +239,29 @@ class Phaser:
|
|||
# self.core.seconds_to_mu(10*8*4*ns) # unfortunately this returns 319
|
||||
assert self.core.ref_period == 1*ns
|
||||
self.t_frame = 10*8*4
|
||||
self.frame_tstamp = int64(0)
|
||||
self.clk_sel = clk_sel
|
||||
self.tune_fifo_offset = tune_fifo_offset
|
||||
self.sync_dly = sync_dly
|
||||
self.gw_rev = gw_rev # verified in init()
|
||||
|
||||
self.dac_mmap = DAC34H84(dac).get_mmap()
|
||||
|
||||
self.channel = [PhaserChannel(self, ch, trf)
|
||||
for ch, trf in enumerate([trf0, trf1])]
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel_base, gw_rev=PHASER_GW_BASE, **kwargs):
|
||||
if gw_rev == PHASER_GW_MIQRO:
|
||||
return [(channel_base, "base"), (channel_base + 1, "ch0"), (channel_base + 2, "ch1")]
|
||||
elif gw_rev == PHASER_GW_BASE:
|
||||
return [(channel_base, "base"),
|
||||
(channel_base + 1, "ch0 frequency"),
|
||||
(channel_base + 2, "ch0 phase amplitude"),
|
||||
(channel_base + 3, "ch1 frequency"),
|
||||
(channel_base + 4, "ch1 phase amplitude")]
|
||||
raise ValueError("invalid gw_rev `{}`".format(gw_rev))
|
||||
|
||||
@kernel
|
||||
def init(self, debug=False):
|
||||
"""Initialize the board.
|
||||
|
@ -184,6 +280,10 @@ class Phaser:
|
|||
is_baseband = hw_rev & PHASER_HW_REV_VARIANT
|
||||
|
||||
gw_rev = self.read8(PHASER_ADDR_GW_REV)
|
||||
if debug:
|
||||
print("gw_rev:", self.gw_rev)
|
||||
self.core.break_realtime()
|
||||
assert gw_rev == self.gw_rev
|
||||
delay(.1*ms) # slack
|
||||
|
||||
# allow a few errors during startup and alignment since boot
|
||||
|
@ -191,6 +291,12 @@ class Phaser:
|
|||
raise ValueError("large number of frame CRC errors")
|
||||
delay(.1*ms) # slack
|
||||
|
||||
# determine the origin for frame-aligned timestamps
|
||||
self.measure_frame_timestamp()
|
||||
if self.frame_tstamp < 0:
|
||||
raise ValueError("frame timestamp measurement timed out")
|
||||
delay(.1*ms)
|
||||
|
||||
# reset
|
||||
self.set_cfg(dac_resetb=0, dac_sleep=1, dac_txena=0,
|
||||
trf0_ps=1, trf1_ps=1,
|
||||
|
@ -223,7 +329,9 @@ class Phaser:
|
|||
|
||||
for data in self.dac_mmap:
|
||||
self.dac_write(data >> 16, data)
|
||||
delay(40*us)
|
||||
delay(120*us)
|
||||
self.dac_sync()
|
||||
delay(40*us)
|
||||
|
||||
# pll_ndivsync_ena disable
|
||||
config18 = self.dac_read(0x18)
|
||||
|
@ -254,7 +362,7 @@ class Phaser:
|
|||
if self.tune_fifo_offset:
|
||||
fifo_offset = self.dac_tune_fifo_offset()
|
||||
if debug:
|
||||
print(fifo_offset)
|
||||
print("fifo_offset:", fifo_offset)
|
||||
self.core.break_realtime()
|
||||
|
||||
# self.dac_write(0x20, 0x0000) # stop fifo sync
|
||||
|
@ -266,12 +374,22 @@ class Phaser:
|
|||
delay(.1*ms) # slack
|
||||
if alarms & ~0x0040: # ignore PLL alarms (see DS)
|
||||
if debug:
|
||||
print(alarms)
|
||||
print("alarms:", alarms)
|
||||
self.core.break_realtime()
|
||||
# ignore alarms
|
||||
else:
|
||||
raise ValueError("DAC alarm")
|
||||
|
||||
# avoid malformed output for: mixer_ena=1, nco_ena=0 after power up
|
||||
self.dac_write(self.dac_mmap[2] >> 16, self.dac_mmap[2] | (1 << 4))
|
||||
delay(40*us)
|
||||
self.dac_sync()
|
||||
delay(100*us)
|
||||
self.dac_write(self.dac_mmap[2] >> 16, self.dac_mmap[2])
|
||||
delay(40*us)
|
||||
self.dac_sync()
|
||||
delay(100*us)
|
||||
|
||||
# power up trfs, release att reset
|
||||
self.set_cfg(clk_sel=self.clk_sel, dac_txena=0)
|
||||
|
||||
|
@ -282,31 +400,40 @@ class Phaser:
|
|||
if channel.get_att_mu() != 0x5a:
|
||||
raise ValueError("attenuator test failed")
|
||||
delay(.1*ms)
|
||||
channel.set_att_mu(0x00) # minimum attenuation
|
||||
channel.set_att_mu(0x00) # maximum attenuation
|
||||
|
||||
# test oscillators and DUC
|
||||
for i in range(len(channel.oscillator)):
|
||||
oscillator = channel.oscillator[i]
|
||||
asf = 0
|
||||
if i == 0:
|
||||
asf = 0x7fff
|
||||
# 6pi/4 phase
|
||||
oscillator.set_amplitude_phase_mu(asf=asf, pow=0xc000, clr=1)
|
||||
channel.set_servo(profile=0, enable=0, hold=1)
|
||||
|
||||
if self.gw_rev == PHASER_GW_BASE:
|
||||
# test oscillators and DUC
|
||||
for i in range(len(channel.oscillator)):
|
||||
oscillator = channel.oscillator[i]
|
||||
asf = 0
|
||||
if i == 0:
|
||||
asf = 0x7fff
|
||||
# 6pi/4 phase
|
||||
oscillator.set_amplitude_phase_mu(asf=asf, pow=0xc000, clr=1)
|
||||
delay(1*us)
|
||||
# 3pi/4
|
||||
channel.set_duc_phase_mu(0x6000)
|
||||
channel.set_duc_cfg(select=0, clr=1)
|
||||
self.duc_stb()
|
||||
delay(.1*ms) # settle link, pipeline and impulse response
|
||||
data = channel.get_dac_data()
|
||||
delay(1*us)
|
||||
# 3pi/4
|
||||
channel.set_duc_phase_mu(0x6000)
|
||||
channel.set_duc_cfg(select=0, clr=1)
|
||||
self.duc_stb()
|
||||
delay(.1*ms) # settle link, pipeline and impulse response
|
||||
data = channel.get_dac_data()
|
||||
delay(.1*ms)
|
||||
sqrt2 = 0x5a81 # 0x7fff/sqrt(2)
|
||||
data_i = data & 0xffff
|
||||
data_q = (data >> 16) & 0xffff
|
||||
# allow ripple
|
||||
if (data_i < sqrt2 - 30 or data_i > sqrt2 or
|
||||
abs(data_i - data_q) > 2):
|
||||
raise ValueError("DUC+oscillator phase/amplitude test failed")
|
||||
channel.oscillator[0].set_amplitude_phase_mu(asf=0, pow=0xc000,
|
||||
clr=1)
|
||||
delay(.1*ms)
|
||||
sqrt2 = 0x5a81 # 0x7fff/sqrt(2)
|
||||
data_i = data & 0xffff
|
||||
data_q = (data >> 16) & 0xffff
|
||||
# allow ripple
|
||||
if (data_i < sqrt2 - 30 or data_i > sqrt2 or
|
||||
abs(data_i - data_q) > 2):
|
||||
raise ValueError("DUC+oscillator phase/amplitude test failed")
|
||||
|
||||
if self.gw_rev == PHASER_GW_MIQRO:
|
||||
channel.miqro.reset()
|
||||
|
||||
if is_baseband:
|
||||
continue
|
||||
|
@ -318,6 +445,7 @@ class Phaser:
|
|||
delay(.2*ms)
|
||||
for data in channel.trf_mmap:
|
||||
channel.trf_write(data)
|
||||
channel.cal_trf_vco()
|
||||
|
||||
delay(2*ms) # lock
|
||||
if not (self.get_sta() & (PHASER_STA_TRF0_LD << ch)):
|
||||
|
@ -326,6 +454,7 @@ class Phaser:
|
|||
if channel.trf_read(0) & 0x1000:
|
||||
raise ValueError("TRF R_SAT_ERR")
|
||||
delay(.1*ms)
|
||||
channel.en_trf_out()
|
||||
|
||||
# enable dac tx
|
||||
self.set_cfg(clk_sel=self.clk_sel)
|
||||
|
@ -351,6 +480,12 @@ class Phaser:
|
|||
response = rtio_input_data(self.channel_base)
|
||||
return response >> self.miso_delay
|
||||
|
||||
@kernel
|
||||
def write16(self, addr, data: TInt32):
|
||||
"""Write 16 bit to a sequence of FPGA registers."""
|
||||
self.write8(addr, data >> 8)
|
||||
self.write8(addr + 1, data)
|
||||
|
||||
@kernel
|
||||
def write32(self, addr, data: TInt32):
|
||||
"""Write 32 bit to a sequence of FPGA registers."""
|
||||
|
@ -429,8 +564,7 @@ class Phaser:
|
|||
* :const:`PHASER_STA_TRF1_LD`: Quadrature upconverter 1 lock detect
|
||||
* :const:`PHASER_STA_TERM0`: ADC channel 0 termination indicator
|
||||
* :const:`PHASER_STA_TERM1`: ADC channel 1 termination indicator
|
||||
* :const:`PHASER_STA_SPI_IDLE`: SPI machine is idle and data registers
|
||||
can be read/written
|
||||
* :const:`PHASER_STA_SPI_IDLE`: SPI machine is idle and data registers can be read/written
|
||||
|
||||
:return: Status register
|
||||
"""
|
||||
|
@ -445,6 +579,27 @@ class Phaser:
|
|||
"""
|
||||
return self.read8(PHASER_ADDR_CRC_ERR)
|
||||
|
||||
@kernel
|
||||
def measure_frame_timestamp(self):
|
||||
"""Measure the timestamp of an arbitrary frame and store it in `self.frame_tstamp`.
|
||||
|
||||
To be used as reference for aligning updates to the FastLink frames.
|
||||
See `get_next_frame_mu()`.
|
||||
"""
|
||||
rtio_output(self.channel_base << 8, 0) # read any register
|
||||
self.frame_tstamp = rtio_input_timestamp(now_mu() + 4 * self.t_frame, self.channel_base)
|
||||
delay(100 * us)
|
||||
|
||||
@kernel
|
||||
def get_next_frame_mu(self):
|
||||
"""Return the timestamp of the frame strictly after `now_mu()`.
|
||||
|
||||
Register updates (DUC, DAC, TRF, etc.) scheduled at this timestamp and multiples
|
||||
of `self.t_frame` later will have deterministic latency to output.
|
||||
"""
|
||||
n = int64((now_mu() - self.frame_tstamp) / self.t_frame)
|
||||
return self.frame_tstamp + (n + 1) * self.t_frame
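
For illustration, a kernel fragment (assuming an initialized Phaser bound to
``self.phaser0``) that aligns a DUC update to the next frame so its latency to the
DAC output is deterministic: ::

    # sketch: frame-aligned DUC phase update
    at_mu(self.phaser0.get_next_frame_mu())
    self.phaser0.channel[0].set_duc_phase_mu(0x4000)   # pi/2
    self.phaser0.channel[0].set_duc_cfg(select=0)
    self.phaser0.duc_stb()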
|
||||
|
||||
@kernel
|
||||
def set_sync_dly(self, dly):
|
||||
"""Set SYNC delay.
|
||||
|
@ -549,6 +704,48 @@ class Phaser:
|
|||
"""
|
||||
return self.dac_read(0x06, div=257) >> 8
|
||||
|
||||
@kernel
|
||||
def dac_sync(self):
|
||||
"""Trigger DAC synchronisation for both output channels.
|
||||
|
||||
The DAC sif_sync is de-asserted, then asserted. The synchronisation is
|
||||
triggered on assertion.
|
||||
|
||||
By default, the fine-mixer (NCO) and QMC are synchronised. This
|
||||
includes applying the latest register settings.
|
||||
|
||||
The synchronisation sources may be configured through the `syncsel_x`
|
||||
fields in the `dac` configuration dictionary (see `__init__()`).
|
||||
|
||||
.. note:: Synchronising the NCO clears the phase-accumulator
|
||||
"""
|
||||
config1f = self.dac_read(0x1f)
|
||||
delay(.4*ms)
|
||||
self.dac_write(0x1f, config1f & ~int32(1 << 1))
|
||||
self.dac_write(0x1f, config1f | (1 << 1))
|
||||
|
||||
@kernel
|
||||
def set_dac_cmix(self, fs_8_step):
|
||||
"""Set the DAC coarse mixer frequency for both channels
|
||||
|
||||
Use of the coarse mixer requires the DAC mixer to be enabled. The mixer
|
||||
can be configured via the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
The selected coarse mixer frequency becomes active without explicit
|
||||
synchronisation.
|
||||
|
||||
:param fs_8_step: coarse mixer frequency shift in 125 MHz steps. This
|
||||
should be an integer between -3 and 4 (inclusive).
|
||||
"""
|
||||
# values recommended in data-sheet
|
||||
# 0 1 2 3 4 -3 -2 -1
|
||||
vals = [0b0000, 0b1000, 0b0100, 0b1100, 0b0010, 0b1010, 0b0001, 0b1110]
|
||||
cmix = vals[fs_8_step%8]
|
||||
config0d = self.dac_read(0x0d)
|
||||
delay(.1*ms)
|
||||
self.dac_write(0x0d, (config0d & ~(0b1111 << 12)) | (cmix << 12))
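
For example, a +250 MHz shift (two 125 MHz steps) would be requested as shown
below; this sketch assumes the mixer was enabled through the ``dac`` dictionary
and an initialized Phaser bound to ``self.phaser0``: ::

    self.phaser0.set_dac_cmix(2)   # +2 * 125 MHz = +250 MHz coarse mixer shift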
|
||||
|
||||
@kernel
|
||||
def get_dac_alarms(self):
|
||||
"""Read the DAC alarm flags.
|
||||
|
@ -566,7 +763,7 @@ class Phaser:
|
|||
def dac_iotest(self, pattern) -> TInt32:
|
||||
"""Performs a DAC IO test according to the datasheet.
|
||||
|
||||
:param patterm: List of four int32 containing the pattern
|
||||
:param pattern: List of four int32 containing the pattern
|
||||
:return: Bit error mask (16 bits)
|
||||
"""
|
||||
if len(pattern) != 4:
|
||||
|
@ -643,6 +840,8 @@ class Phaser:
|
|||
if good & (1 << o):
|
||||
sum += o
|
||||
count += 1
|
||||
if count == 0:
|
||||
raise ValueError("no good fifo offset")
|
||||
best = ((sum // count) + offset) % 8
|
||||
self.dac_write(0x09, (config9 & 0x1fff) | (best << 13))
|
||||
return best
|
||||
|
@ -653,18 +852,21 @@ class PhaserChannel:
|
|||
|
||||
A Phaser channel contains:
|
||||
|
||||
* multiple oscillators (in the coredevice phy),
|
||||
* multiple :class:`PhaserOscillator` (in the coredevice phy),
|
||||
* an interpolation chain and digital upconverter (DUC) on Phaser,
|
||||
* a :class:`Miqro` instance on Phaser,
|
||||
* several channel-specific settings in the DAC:
|
||||
|
||||
* quadrature modulation compensation QMC
|
||||
* numerically controlled oscillator NCO or coarse mixer CMIX,
|
||||
* the analog quadrature upconverter (in the Phaser-Upconverter hardware
|
||||
variant), and
|
||||
|
||||
* the analog quadrature upconverter (in the Phaser-Upconverter hardware variant), and
|
||||
* a digitally controlled step attenuator.
|
||||
|
||||
Attributes:
|
||||
|
||||
* :attr:`oscillator`: List of five :class:`PhaserOscillator`.
|
||||
* :attr:`miqro`: A :class:`Miqro`.
|
||||
|
||||
.. note:: The amplitude sum of the oscillators must be less than one to
|
||||
avoid clipping or overflow. If any of the DDS or DUC frequencies are
|
||||
|
@ -677,6 +879,8 @@ class PhaserChannel:
|
|||
changes in oscillator parameters, the overshoot can lead to clipping
|
||||
or overflow after the interpolation. Either band-limit any changes
|
||||
in the oscillator parameters or back off the amplitude sufficiently.
|
||||
Miqro is not affected by this. But both the oscillators and Miqro can
|
||||
be affected by intrinsic overshoot of the interpolator on the DAC.
|
||||
"""
|
||||
kernel_invariants = {"index", "phaser", "trf_mmap"}
|
||||
|
||||
|
@ -684,7 +888,9 @@ class PhaserChannel:
|
|||
self.phaser = phaser
|
||||
self.index = index
|
||||
self.trf_mmap = TRF372017(trf).get_mmap()
|
||||
|
||||
self.oscillator = [PhaserOscillator(self, osc) for osc in range(5)]
|
||||
self.miqro = Miqro(self)
|
||||
|
||||
@kernel
|
||||
def get_dac_data(self) -> TInt32:
|
||||
|
@ -761,6 +967,12 @@ class PhaserChannel:
|
|||
def set_nco_frequency_mu(self, ftw):
|
||||
"""Set the NCO frequency.
|
||||
|
||||
This method stages the new NCO frequency, but does not apply it.
|
||||
|
||||
Use of the DAC-NCO requires the DAC mixer and NCO to be enabled. These
|
||||
can be configured via the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
:param ftw: NCO frequency tuning word (32 bit)
|
||||
"""
|
||||
self.phaser.dac_write(0x15 + (self.index << 1), ftw >> 16)
|
||||
|
@ -770,6 +982,12 @@ class PhaserChannel:
|
|||
def set_nco_frequency(self, frequency):
|
||||
"""Set the NCO frequency in SI units.
|
||||
|
||||
This method stages the new NCO frequency, but does not apply it.
|
||||
|
||||
Use of the DAC-NCO requires the DAC mixer and NCO to be enabled. These
|
||||
can be configured via the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
:param frequency: NCO frequency in Hz (passband from -400 MHz
|
||||
to 400 MHz, wrapping around at +- 500 MHz)
|
||||
"""
|
||||
|
@ -780,6 +998,16 @@ class PhaserChannel:
|
|||
def set_nco_phase_mu(self, pow):
|
||||
"""Set the NCO phase offset.
|
||||
|
||||
By default, the new NCO phase applies on completion of the SPI
|
||||
transfer. This also causes a staged NCO frequency to be applied.
|
||||
Different triggers for applying NCO settings may be configured through
|
||||
the `syncsel_mixerxx` fields in the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
Use of the DAC-NCO requires the DAC mixer and NCO to be enabled. These
|
||||
can be configured via the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
:param pow: NCO phase offset word (16 bit)
|
||||
"""
|
||||
self.phaser.dac_write(0x12 + self.index, pow)
|
||||
|
@ -788,10 +1016,20 @@ class PhaserChannel:
|
|||
def set_nco_phase(self, phase):
|
||||
"""Set the NCO phase in SI units.
|
||||
|
||||
By default, the new NCO phase applies on completion of the SPI
|
||||
transfer. This also causes a staged NCO frequency to be applied.
|
||||
Different triggers for applying NCO settings may be configured through
|
||||
the `syncsel_mixerxx` fields in the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
Use of the DAC-NCO requires the DAC mixer and NCO to be enabled. These
|
||||
can be configured via the `dac` configuration dictionary (see
|
||||
`__init__()`).
|
||||
|
||||
:param phase: NCO phase in turns
|
||||
"""
|
||||
pow = int32(round(phase*(1 << 16)))
|
||||
self.set_duc_phase_mu(pow)
|
||||
self.set_nco_phase_mu(pow)
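
For example (sketch, assuming an initialized Phaser bound to ``self.phaser0``),
an NCO retune can be staged and then applied explicitly: ::

    ch = self.phaser0.channel[0]
    ch.set_nco_frequency(-200*MHz)   # staged only, not yet active
    self.phaser0.dac_sync()          # apply; this clears the NCO phase accumulator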
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, data):
|
||||
|
@ -886,12 +1124,165 @@ class PhaserChannel:
|
|||
return self.trf_write(0x00000008 | (cnt_mux_sel << 27),
|
||||
readback=True)
|
||||
|
||||
@kernel
|
||||
def cal_trf_vco(self):
|
||||
"""Start calibration of the upconverter (hardware variant) VCO.
|
||||
|
||||
TRF outputs should be disabled during VCO calibration.
|
||||
"""
|
||||
self.trf_write(self.trf_mmap[1] | (1 << 31))
|
||||
|
||||
@kernel
|
||||
def en_trf_out(self, rf=1, lo=0):
|
||||
"""Enable the rf/lo outputs of the upconverter (hardware variant).
|
||||
|
||||
:param rf: 1 to enable RF output, 0 to disable
|
||||
:param lo: 1 to enable LO output, 0 to disable
|
||||
"""
|
||||
data = self.trf_read(0xc)
|
||||
delay(0.1 * ms)
|
||||
# set RF and LO output bits
|
||||
data = data | (1 << 12) | (1 << 13) | (1 << 14)
|
||||
# clear to enable output
|
||||
if rf == 1:
|
||||
data = data ^ (1 << 14)
|
||||
if lo == 1:
|
||||
data = data ^ ((1 << 12) | (1 << 13))
|
||||
self.trf_write(data)
|
||||
|
||||
@kernel
|
||||
def set_servo(self, profile=0, enable=0, hold=0):
|
||||
"""Set the servo configuration.
|
||||
|
||||
:param enable: 1 to enable servo, 0 to disable servo (default). If disabled,
|
||||
the servo is bypassed and hold is enforced since the control loop is broken.
|
||||
:param hold: 1 to hold the servo IIR filter output constant, 0 for normal operation.
|
||||
:param profile: Profile index to select for channel. (0 to 3)
|
||||
"""
|
||||
if (profile < 0) or (profile > 3):
|
||||
raise ValueError("invalid profile index")
|
||||
addr = PHASER_ADDR_SERVO_CFG0 + self.index
|
||||
# enforce hold if the servo is disabled
|
||||
data = (profile << 2) | (((hold | ~enable) & 1) << 1) | (enable & 1)
|
||||
self.phaser.write8(addr, data)
|
||||
|
||||
@kernel
|
||||
def set_iir_mu(self, profile, b0, b1, a1, offset):
|
||||
"""Load a servo profile consiting of the three filter coefficients and an output offset.
|
||||
|
||||
Avoid setting the IIR parameters of the currently active profile.
|
||||
|
||||
The recurrence relation is (all data signed and MSB aligned):
|
||||
|
||||
.. math::
|
||||
a_0 y_n = a_1 y_{n - 1} + b_0 x_n + b_1 x_{n - 1} + o
|
||||
|
||||
Where:
|
||||
|
||||
* :math:`y_n` and :math:`y_{n-1}` are the current and previous
|
||||
filter outputs, clipped to :math:`[0, 1[`.
|
||||
* :math:`x_n` and :math:`x_{n-1}` are the current and previous
|
||||
filter inputs in :math:`[-1, 1[`.
|
||||
* :math:`o` is the offset
|
||||
* :math:`a_0` is the normalization factor :math:`2^{14}`
|
||||
* :math:`a_1` is the feedback gain
|
||||
* :math:`b_0` and :math:`b_1` are the feedforward gains for the two
|
||||
delays
|
||||
|
||||
.. seealso:: :meth:`set_iir`
|
||||
|
||||
:param profile: Profile to set (0 to 3)
|
||||
:param b0: b0 filter coefficient (16 bit signed)
|
||||
:param b1: b1 filter coefficient (16 bit signed)
|
||||
:param a1: a1 filter coefficient (16 bit signed)
|
||||
:param offset: Output offset (16 bit signed)
|
||||
"""
|
||||
if (profile < 0) or (profile > 3):
|
||||
raise ValueError("invalid profile index")
|
||||
# 32 byte-sized data registers per channel and 8 (2 bytes * (3 coefficients + 1 offset)) registers per profile
|
||||
addr = PHASER_ADDR_SERVO_DATA_BASE + (8 * profile) + (self.index * 32)
|
||||
for data in [b0, b1, a1, offset]:
|
||||
self.phaser.write16(addr, data)
|
||||
addr += 2
|
||||
|
||||
@kernel
|
||||
def set_iir(self, profile, kp, ki=0., g=0., x_offset=0., y_offset=0.):
|
||||
"""Set servo profile IIR coefficients.
|
||||
|
||||
Avoid setting the IIR parameters of the currently active profile.
|
||||
|
||||
Gains are given in units of output full scale per input full scale.
|
||||
|
||||
.. note:: Due to inherent constraints of the fixed point datatypes and IIR
|
||||
filters, the ``x_offset`` (setpoint) resolution depends on the selected
|
||||
gains. Low ``ki`` gains will lead to a low ``x_offset`` resolution.
|
||||
|
||||
The transfer function is (up to time discretization and
|
||||
coefficient quantization errors):
|
||||
|
||||
.. math::
|
||||
H(s) = k_p + \\frac{k_i}{s + \\frac{k_i}{g}}
|
||||
|
||||
Where:
|
||||
* :math:`s = \\sigma + i\\omega` is the complex frequency
|
||||
* :math:`k_p` is the proportional gain
|
||||
* :math:`k_i` is the integrator gain
|
||||
* :math:`g` is the integrator gain limit
|
||||
|
||||
:param profile: Profile number (0-3)
|
||||
:param kp: Proportional gain. This is usually negative (closed
|
||||
loop, positive ADC voltage, positive setpoint). When 0, this
|
||||
implements a pure I controller.
|
||||
:param ki: Integrator gain (rad/s). Equivalent to the gain at 1 Hz.
|
||||
When 0 (the default) this implements a pure P controller.
|
||||
Same sign as ``kp``.
|
||||
:param g: Integrator gain limit (1). When 0 (the default) the
|
||||
integrator gain limit is infinite. Same sign as ``ki``.
|
||||
:param x_offset: IIR input offset. Used as the negative
|
||||
setpoint when stabilizing to a desired input setpoint. Will
|
||||
be converted to an equivalent output offset and added to y_offset.
|
||||
:param y_offset: IIR output offset.
|
||||
"""
|
||||
NORM = 1 << SERVO_COEFF_SHIFT
|
||||
COEFF_MAX = 1 << SERVO_COEFF_WIDTH - 1
|
||||
DATA_MAX = 1 << SERVO_DATA_WIDTH - 1
|
||||
|
||||
kp *= NORM
|
||||
if ki == 0.:
|
||||
# pure P
|
||||
a1 = 0
|
||||
b1 = 0
|
||||
b0 = int(round(kp))
|
||||
else:
|
||||
# I or PI
|
||||
ki *= NORM*SERVO_T_CYCLE/2.
|
||||
if g == 0.:
|
||||
c = 1.
|
||||
a1 = NORM
|
||||
else:
|
||||
c = 1./(1. + ki/(g*NORM))
|
||||
a1 = int(round((2.*c - 1.)*NORM))
|
||||
b0 = int(round(kp + ki*c))
|
||||
b1 = int(round(kp + (ki - 2.*kp)*c))
|
||||
if b1 == -b0:
|
||||
raise ValueError("low integrator gain and/or gain limit")
|
||||
|
||||
if (b0 >= COEFF_MAX or b0 < -COEFF_MAX or
|
||||
b1 >= COEFF_MAX or b1 < -COEFF_MAX):
|
||||
raise ValueError("high gains")
|
||||
|
||||
forward_gain = (b0 + b1) * (1 << SERVO_DATA_WIDTH - 1 - SERVO_COEFF_SHIFT)
|
||||
effective_offset = int(round(DATA_MAX * y_offset + forward_gain * x_offset))
|
||||
|
||||
self.set_iir_mu(profile, b0, b1, a1, effective_offset)
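
As a quick host-side check of the quantization (pure P case, using the
:math:`a_0 = 2^{14}` normalization from :meth:`set_iir_mu`): ::

    kp = -0.5
    b0 = int(round(kp * (1 << 14)))   # -8192; b1 = a1 = 0 for a pure P controller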
|
||||
|
||||
|
||||
|
||||
class PhaserOscillator:
|
||||
"""Phaser IQ channel oscillator (NCO/DDS).
|
||||
|
||||
.. note:: Latencies between oscillators within a channel and between
|
||||
oscillator paramters (amplitude and phase/frequency) are deterministic
|
||||
oscillator parameters (amplitude and phase/frequency) are deterministic
|
||||
(with respect to the 25 MS/s sample clock) but not matched.
|
||||
"""
|
||||
kernel_invariants = {"channel", "base_addr"}
|
||||
|
@ -943,3 +1334,305 @@ class PhaserOscillator:
|
|||
raise ValueError("amplitude out of bounds")
|
||||
pow = int32(round(phase*(1 << 16)))
|
||||
self.set_amplitude_phase_mu(asf, pow, clr)
|
||||
|
||||
|
||||
class Miqro:
|
||||
"""
|
||||
Miqro pulse generator.
|
||||
|
||||
A Miqro instance represents one RF output. The DSP components are fully
|
||||
contained in the Phaser gateware. The output is generated with
|
||||
the following data flow:
|
||||
|
||||
**Oscillators**
|
||||
|
||||
* There are n_osc = 16 oscillators with oscillator IDs 0..n_osc-1.
|
||||
* Each oscillator outputs one tone at any given time
|
||||
|
||||
* I/Q (quadrature, a.k.a. complex) 2x16 bit signed data
|
||||
at tau = 4 ns sample intervals, 250 MS/s, Nyquist 125 MHz, bandwidth 200 MHz
|
||||
(from f = -100..+100 MHz, taking into account the interpolation anti-aliasing
|
||||
filters in subsequent interpolators),
|
||||
* 32 bit frequency (f) resolution (~ 1/16 Hz),
|
||||
* 16 bit unsigned amplitude (a) resolution
|
||||
* 16 bit phase offset (p) resolution
|
||||
|
||||
* The output phase p' of each oscillator at time t (boot/reset/initialization of the
|
||||
device at t=0) is then p' = f*t + p (mod 1 turn) where f and p are the (currently
|
||||
active) profile frequency and phase offset.
|
||||
* Note: The terms "phase coherent" and "phase tracking" are defined to refer to this
|
||||
choice of oscillator output phase p'. Note that the phase offset p is not relative
|
||||
to (i.e. not accumulated on top of) the previous phase/profile/oscillator history.
|
||||
It is "absolute" in the sense that frequency f and phase offset p fully determine
|
||||
oscillator output phase p' at time t. This is unlike typical DDS behavior.
|
||||
* Frequency, phase, and amplitude of each oscillator are configurable by selecting one of
|
||||
n_profile = 32 profiles 0..n_profile-1. This selection is fast and can be done for
|
||||
each pulse. The phase coherence defined above is guaranteed for each
|
||||
profile individually.
|
||||
* Note: one profile per oscillator (usually profile index 0) should be reserved
|
||||
for the NOP (no operation, identity) profile, usually with zero amplitude.
|
||||
* Data for each profile for each oscillator can be configured
|
||||
individually. Storing profile data should be considered "expensive".
|
||||
* Note: The annotation that some operation is "expensive" does not mean it is
|
||||
impossible, just that it may take a significant amount of time and
|
||||
resources to execute such that it may be impractical when used often or
|
||||
during fast pulse sequences. They are intended for use in calibration and
|
||||
initialization.
|
||||
|
||||
**Summation**
|
||||
|
||||
* The oscillator outputs are added together (wrapping addition).
|
||||
* The user must ensure that the sum of oscillators outputs does not exceed the
|
||||
data range. In general that means that the sum of the amplitudes must not
|
||||
exceed one.
|
||||
|
||||
**Shaper**
|
||||
|
||||
* The summed complex output stream is then multiplied with the complex-valued
|
||||
output of a triggerable shaper.
|
||||
* Triggering the shaper corresponds to passing a pulse from all oscillators to
|
||||
the RF output.
|
||||
* Selected profiles become active simultaneously (on the same output sample) when
|
||||
triggering the shaper with the first shaper output sample.
|
||||
* The shaper reads (replays) window samples from a memory of size n_window = 1 << 10.
|
||||
* The window memory can be segmented by choosing different start indices
|
||||
to support different windows.
|
||||
* Each window memory segment starts with a header determining segment
|
||||
length and interpolation parameters.
|
||||
* The window samples are interpolated by a factor (rate change) between 1 and
|
||||
r = 1 << 12.
|
||||
* The interpolation order is constant, linear, quadratic, or cubic. This
|
||||
corresponds to interpolation modes from rectangular window (1st order CIC or
|
||||
zero-order hold) to Parzen window (4th order CIC or cubic spline).
|
||||
* This results in support for single shot pulse lengths (envelope support) between
|
||||
tau and a bit more than r * n_window * tau = (1 << (12 + 10)) tau ~ 17 ms.
|
||||
* Windows can be configured to be head-less and/or tail-less, meaning, they
|
||||
do not feed zero-amplitude samples into the shaper before and after
|
||||
each window respectively. This is used to implement pulses with arbitrary
|
||||
length or CW output.
|
||||
|
||||
**Overall properties**
|
||||
|
||||
* The DAC may upconvert the signal by applying a frequency offset f1 with
|
||||
phase p1.
|
||||
* In the Upconverter Phaser variant, the analog quadrature upconverter
|
||||
applies another frequency of f2 and phase p2.
|
||||
* The resulting phase of the signal from one oscillator at the SMA output is
|
||||
(f + f1 + f2)*t + p + s(t - t0) + p1 + p2 (mod 1 turn)
|
||||
where s(t - t0) is the phase of the interpolated
|
||||
shaper output, and t0 is the trigger time (fiducial of the shaper).
|
||||
Unsurprisingly the frequency is the derivative of the phase.
|
||||
* Group delays between pulse parameter updates are matched across oscillators,
|
||||
shapers, and channels.
|
||||
* The minimum time to change profiles and phase offsets is ~128 ns (estimate, TBC).
|
||||
This is the minimum pulse interval.
|
||||
The sustained pulse rate of the RTIO PHY/Fastlink is one pulse per Fastlink frame
|
||||
(may be increased, TBC).
|
||||
"""
|
||||
|
||||
def __init__(self, channel):
|
||||
self.channel = channel
|
||||
self.base_addr = (self.channel.phaser.channel_base + 1 +
|
||||
self.channel.index) << 8
|
||||
|
||||
@kernel
|
||||
def reset(self):
|
||||
"""Establish no-output profiles and no-output window and execute them.
|
||||
|
||||
This establishes the first profile (index 0) on all oscillators as zero
|
||||
amplitude, creates a trivial window (one sample with zero amplitude,
|
||||
minimal interpolation), and executes a corresponding pulse.
|
||||
"""
|
||||
for osc in range(16):
|
||||
self.set_profile_mu(osc, profile=0, ftw=0, asf=0)
|
||||
delay(20*us)
|
||||
self.set_window_mu(start=0, iq=[0], order=0)
|
||||
self.pulse(window=0, profiles=[0])
|
||||
|
||||
@kernel
|
||||
def set_profile_mu(self, oscillator, profile, ftw, asf, pow_=0):
|
||||
"""Store an oscillator profile (machine units).
|
||||
|
||||
:param oscillator: Oscillator index (0 to 15)
|
||||
:param profile: Profile index (0 to 31)
|
||||
:param ftw: Frequency tuning word (32 bit signed integer on a 250 MHz clock)
|
||||
:param asf: Amplitude scale factor (16 bit unsigned integer)
|
||||
:param pow_: Phase offset word (16 bit integer)
|
||||
"""
|
||||
if oscillator >= 16:
|
||||
raise ValueError("invalid oscillator index")
|
||||
if profile >= 32:
|
||||
raise ValueError("invalid profile index")
|
||||
self.channel.phaser.write16(PHASER_ADDR_MIQRO_MEM_ADDR,
|
||||
(self.channel.index << 15) | PHASER_MIQRO_SEL_PROFILE |
|
||||
(oscillator << 6) | (profile << 1))
|
||||
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA, ftw)
|
||||
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA,
|
||||
(asf & 0xffff) | (pow_ << 16))
|
||||
|
||||
@kernel
|
||||
def set_profile(self, oscillator, profile, frequency, amplitude, phase=0.):
|
||||
"""Store an oscillator profile.
|
||||
|
||||
:param oscillator: Oscillator index (0 to 15)
|
||||
:param profile: Profile index (0 to 31)
|
||||
:param frequency: Frequency in Hz (passband -100 to 100 MHz).
|
||||
Interpreted in the Nyquist sense, i.e. aliased.
|
||||
:param amplitude: Amplitude in units of full scale (0. to 1.)
|
||||
:param phase: Phase in turns. See :class:`Miqro` for a definition of
|
||||
phase in this context.
|
||||
:return: The quantized 32 bit frequency tuning word
|
||||
"""
|
||||
ftw = int32(round(frequency*((1 << 30)/(62.5*MHz))))
|
||||
asf = int32(round(amplitude*0xffff))
|
||||
if asf < 0 or asf > 0xffff:
|
||||
raise ValueError("amplitude out of bounds")
|
||||
pow_ = int32(round(phase*(1 << 16)))
|
||||
self.set_profile_mu(oscillator, profile, ftw, asf, pow_)
|
||||
return ftw
|
||||
|
||||
@kernel
|
||||
def set_window_mu(self, start, iq, rate=1, shift=0, order=3, head=1, tail=1):
|
||||
"""Store a window segment (machine units)
|
||||
|
||||
:param start: Window start address (0 to 0x3ff)
|
||||
:param iq: List of IQ window samples. Each window sample is an integer
|
||||
containing the signed I part in the 16 LSB and the signed Q part in
|
||||
the 16 MSB. The maximum window length is 0x3fe. The user must
|
||||
ensure that this window does not overlap with other windows in the
|
||||
memory.
|
||||
:param rate: Interpolation rate change (1 to 1 << 12)
|
||||
:param shift: Interpolator amplitude gain compensation in powers of 2 (0 to 63)
|
||||
:param order: Interpolation order from 0 (corresponding to
|
||||
constant/rectangular window/zero-order-hold/1st order CIC interpolation)
|
||||
to 3 (corresponding to cubic/Parzen window/4th order CIC interpolation)
|
||||
:param head: Update the interpolator settings and clear its state at the start
|
||||
of the window. This also implies starting the envelope from zero.
|
||||
:param tail: Feed zeros into the interpolator after the window samples.
|
||||
In the absence of further pulses this will return the output envelope
|
||||
to zero with the chosen interpolation.
|
||||
:return: Next available window memory address after this segment.
|
||||
"""
|
||||
if start >= 1 << 10:
|
||||
raise ValueError("start out of bounds")
|
||||
if len(iq) >= 1 << 10:
|
||||
raise ValueError("window length out of bounds")
|
||||
if rate < 1 or rate > 1 << 12:
|
||||
raise ValueError("rate out of bounds")
|
||||
if shift > 0x3f:
|
||||
raise ValueError("shift out of bounds")
|
||||
if order > 3:
|
||||
raise ValueError("order out of bounds")
|
||||
self.channel.phaser.write16(PHASER_ADDR_MIQRO_MEM_ADDR,
|
||||
(self.channel.index << 15) | start)
|
||||
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA,
|
||||
(len(iq) & 0x3ff) |
|
||||
((rate - 1) << 10) |
|
||||
(shift << 22) |
|
||||
(order << 28) |
|
||||
((head & 1) << 30) |
|
||||
((tail & 1) << 31)
|
||||
)
|
||||
for iqi in iq:
|
||||
self.channel.phaser.write32(PHASER_ADDR_MIQRO_MEM_DATA, iqi)
|
||||
delay(20*us) # slack for long windows
|
||||
return (start + 1 + len(iq)) & 0x3ff
|
||||
|
||||
@kernel
|
||||
def set_window(self, start, iq, period=4*ns, order=3, head=1, tail=1):
|
||||
"""Store a window segment
|
||||
|
||||
:param start: Window start address (0 to 0x3ff)
|
||||
:param iq: List of IQ window samples. Each window sample is a pair of
|
||||
two float numbers -1 to 1, one for each I and Q in units of full scale.
|
||||
The maximum window length is 0x3fe. The user must ensure that this window
|
||||
does not overlap with other windows in the memory.
|
||||
:param period: Desired window sample period in SI units (4*ns to (4 << 12)*ns).
|
||||
:param order: Interpolation order from 0 (corresponding to
|
||||
constant/zero-order-hold/1st order CIC interpolation) to 3 (corresponding
|
||||
to cubic/Parzen/4th order CIC interpolation)
|
||||
:param head: Update the interpolator settings and clear its state at the start
|
||||
of the window. This also implies starting the envelope from zero.
|
||||
:param tail: Feed zeros into the interpolator after the window samples.
|
||||
In the absence of further pulses this will return the output envelope
|
||||
to zero with the chosen interpolation.
|
||||
:return: Actual sample period in SI units
|
||||
"""
|
||||
rate = int32(round(period/(4*ns)))
|
||||
gain = 1.
|
||||
for _ in range(order):
|
||||
gain *= rate
|
||||
shift = 0
|
||||
while gain >= 2.:
|
||||
shift += 1
|
||||
gain *= .5
|
||||
scale = ((1 << 15) - 1)/gain
|
||||
iq_mu = [
|
||||
(int32(round(iqi[0]*scale)) & 0xffff) |
|
||||
(int32(round(iqi[1]*scale)) << 16)
|
||||
for iqi in iq
|
||||
]
|
||||
self.set_window_mu(start, iq_mu, rate, shift, order, head, tail)
|
||||
return (len(iq) + order)*rate*4*ns
|
||||
|
||||
@kernel
|
||||
def encode(self, window, profiles, data):
|
||||
"""Encode window and profile selection
|
||||
|
||||
:param window: Window start address (0 to 0x3ff)
|
||||
:param profiles: List of profile indices for the oscillators. Maximum
|
||||
length 16. Unused oscillators will be set to profile 0.
|
||||
:param data: List of integers to store the encoded data words into.
|
||||
Unused entries will remain untouched. Must contain at least three
|
||||
elements if all oscillators are used and should be initialized to
|
||||
zeros.
|
||||
:return: Number of words from `data` used.
|
||||
"""
|
||||
if len(profiles) > 16:
|
||||
raise ValueError("too many oscillators")
|
||||
if window > 0x3ff:
|
||||
raise ValueError("window start out of bounds")
|
||||
data[0] = window
|
||||
word = 0
|
||||
idx = 10
|
||||
for profile in profiles:
|
||||
if profile > 0x1f:
|
||||
raise ValueError("profile out of bounds")
|
||||
if idx > 32 - 5:
|
||||
word += 1
|
||||
idx = 0
|
||||
data[word] |= profile << idx
|
||||
idx += 5
|
||||
return word + 1
|
||||
|
||||
@kernel
|
||||
def pulse_mu(self, data):
|
||||
"""Emit a pulse (encoded)
|
||||
|
||||
The pulse fiducial timing resolution is 4 ns.
|
||||
|
||||
:param data: List of up to 3 words containing an encoded MIQRO pulse as
|
||||
returned by :meth:`encode`.
|
||||
"""
|
||||
word = len(data)
|
||||
delay_mu(-8*word) # back shift to align
|
||||
while word > 0:
|
||||
word -= 1
|
||||
delay_mu(8)
|
||||
# final write sets pulse stb
|
||||
rtio_output(self.base_addr + word, data[word])
|
||||
|
||||
@kernel
|
||||
def pulse(self, window, profiles):
|
||||
"""Emit a pulse
|
||||
|
||||
This encodes the window and profiles (see :meth:`encode`) and emits them
|
||||
(see :meth:`pulse_mu`).
|
||||
|
||||
:param window: Window start address (0 to 0x3ff)
|
||||
:param profiles: List of profile indices for the oscillators. Maximum
|
||||
length 16. Unused oscillators will select profile 0.
|
||||
"""
|
||||
data = [0, 0, 0]
|
||||
words = self.encode(window, profiles, data)
|
||||
self.pulse_mu(data[:words])
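
Putting the pieces together, a hedged end-to-end sketch (device name, frequency,
amplitude and window contents are illustrative only, and the device database entry
is assumed to select the Miqro gateware): store a profile, store a short window,
then trigger a frame-aligned pulse: ::

    from artiq.experiment import *


    class MiqroPulseSketch(EnvExperiment):
        """Hypothetical Miqro usage example."""
        def build(self):
            self.setattr_device("core")
            self.setattr_device("phaser0")          # assumed Phaser with Miqro gateware
            # 32 window samples at half amplitude, I only
            self.win = [(0.5, 0.) for _ in range(32)]

        @kernel
        def run(self):
            self.core.reset()
            self.phaser0.init()
            mq = self.phaser0.channel[0].miqro
            delay(1*ms)
            # oscillator 0, profile 1: 10 MHz tone at half amplitude
            mq.set_profile(0, profile=1, frequency=10*MHz, amplitude=0.5)
            delay(100*us)
            # store the window at address 0, stretched to ~1 us per sample
            mq.set_window(start=0x000, iq=self.win, period=1*us, order=3)
            delay(1*ms)
            # trigger the pulse aligned to a FastLink frame for deterministic latency
            at_mu(self.phaser0.get_next_frame_mu())
            mq.pulse(window=0x000, profiles=[1])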
|
||||
|
|
|
@ -1,92 +0,0 @@
|
|||
from collections import defaultdict
|
||||
import subprocess
|
||||
|
||||
|
||||
class Symbolizer:
|
||||
def __init__(self, binary, triple, demangle=True):
|
||||
cmdline = [
|
||||
triple + "-addr2line", "--exe=" + binary,
|
||||
"--addresses", "--functions", "--inlines"
|
||||
]
|
||||
if demangle:
|
||||
cmdline.append("--demangle=rust")
|
||||
self._addr2line = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
||||
universal_newlines=True)
|
||||
|
||||
def symbolize(self, addr):
|
||||
self._addr2line.stdin.write("0x{:08x}\n0\n".format(addr))
|
||||
self._addr2line.stdin.flush()
|
||||
self._addr2line.stdout.readline() # 0x[addr]
|
||||
|
||||
result = []
|
||||
while True:
|
||||
function = self._addr2line.stdout.readline().rstrip()
|
||||
|
||||
# check for end marker
|
||||
if function == "0x00000000": # 0x00000000
|
||||
self._addr2line.stdout.readline() # ??
|
||||
self._addr2line.stdout.readline() # ??:0
|
||||
return result
|
||||
|
||||
file, line = self._addr2line.stdout.readline().rstrip().split(":")
|
||||
|
||||
result.append((function, file, line, addr))
|
||||
|
||||
|
||||
class CallgrindWriter:
|
||||
def __init__(self, output, binary, triple, compression=True, demangle=True):
|
||||
self._output = output
|
||||
self._binary = binary
|
||||
self._current = defaultdict(lambda: None)
|
||||
self._ids = defaultdict(lambda: {})
|
||||
self._compression = compression
|
||||
self._symbolizer = Symbolizer(binary, triple, demangle=demangle)
|
||||
|
||||
def _write(self, fmt, *args, **kwargs):
|
||||
self._output.write(fmt.format(*args, **kwargs))
|
||||
self._output.write("\n")
|
||||
|
||||
def _spec(self, spec, value):
|
||||
if self._current[spec] == value:
|
||||
return
|
||||
self._current[spec] = value
|
||||
|
||||
if not self._compression or value == "??":
|
||||
self._write("{}={}", spec, value)
|
||||
return
|
||||
|
||||
spec_ids = self._ids[spec]
|
||||
if value in spec_ids:
|
||||
self._write("{}=({})", spec, spec_ids[value])
|
||||
else:
|
||||
spec_ids[value] = len(spec_ids) + 1
|
||||
self._write("{}=({}) {}", spec, spec_ids[value], value)
|
||||
|
||||
def header(self):
|
||||
self._write("# callgrind format")
|
||||
self._write("version: 1")
|
||||
self._write("creator: ARTIQ")
|
||||
self._write("positions: instr line")
|
||||
self._write("events: Hits")
|
||||
self._write("")
|
||||
self._spec("ob", self._binary)
|
||||
self._spec("cob", self._binary)
|
||||
|
||||
def hit(self, addr, count):
|
||||
for function, file, line, addr in self._symbolizer.symbolize(addr):
|
||||
self._spec("fl", file)
|
||||
self._spec("fn", function)
|
||||
self._write("0x{:08x} {} {}", addr, line, count)
|
||||
|
||||
def edge(self, caller, callee, count):
|
||||
edges = self._symbolizer.symbolize(callee) + self._symbolizer.symbolize(caller)
|
||||
for (callee, caller) in zip(edges, edges[1:]):
|
||||
function, file, line, addr = callee
|
||||
self._spec("cfl", file)
|
||||
self._spec("cfn", function)
|
||||
self._write("calls={} 0x{:08x} {}", count, addr, line)
|
||||
|
||||
function, file, line, addr = caller
|
||||
self._spec("fl", file)
|
||||
self._spec("fn", function)
|
||||
self._write("0x{:08x} {} {}", addr, line, count)
|
|
@ -15,24 +15,26 @@ SPI_CS_PGIA = 1 # separate SPI bus, CS used as RCLK
|
|||
|
||||
|
||||
@portable
|
||||
def adc_mu_to_volt(data, gain=0):
|
||||
def adc_mu_to_volt(data, gain=0, corrected_fs=True):
|
||||
"""Convert ADC data in machine units to Volts.
|
||||
|
||||
:param data: 16 bit signed ADC word
|
||||
:param gain: PGIA gain setting (0: 1, ..., 3: 1000)
|
||||
:param corrected_fs: use corrected ADC FS reference.
|
||||
Should be True for Sampler hardware revisions after v2.1, False for v2.1 and earlier.
|
||||
:return: Voltage in Volts
|
||||
"""
|
||||
if gain == 0:
|
||||
volt_per_lsb = 20./(1 << 16)
|
||||
volt_per_lsb = 20.48 / (1 << 16) if corrected_fs else 20. / (1 << 16)
|
||||
elif gain == 1:
|
||||
volt_per_lsb = 2./(1 << 16)
|
||||
volt_per_lsb = 2.048 / (1 << 16) if corrected_fs else 2. / (1 << 16)
|
||||
elif gain == 2:
|
||||
volt_per_lsb = .2/(1 << 16)
|
||||
volt_per_lsb = .2048 / (1 << 16) if corrected_fs else .2 / (1 << 16)
|
||||
elif gain == 3:
|
||||
volt_per_lsb = .02/(1 << 16)
|
||||
volt_per_lsb = 0.02048 / (1 << 16) if corrected_fs else .02 / (1 << 16)
|
||||
else:
|
||||
raise ValueError("invalid gain")
|
||||
return data*volt_per_lsb
|
||||
return data * volt_per_lsb
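
A host-side sanity check of the conversion (``adc_mu_to_volt`` is ``@portable``, so
it can be called outside a kernel); the values are chosen for illustration: ::

    from artiq.coredevice.sampler import adc_mu_to_volt

    print(adc_mu_to_volt(16384, gain=0))                      # 5.12 V (corrected 20.48 V span)
    print(adc_mu_to_volt(16384, gain=0, corrected_fs=False))  # 5.0 V (legacy 20 V span, v2.1 and earlier)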
|
||||
|
||||
|
||||
class Sampler:
|
||||
|
@ -48,12 +50,13 @@ class Sampler:
|
|||
:param gains: Initial value for PGIA gains shift register
|
||||
(default: 0x0000). Knowledge of this state is not transferred
|
||||
between experiments.
|
||||
:param hw_rev: Sampler's hardware revision string (default 'v2.2')
|
||||
:param core_device: Core device name
|
||||
"""
|
||||
kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div"}
|
||||
kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div", "corrected_fs"}
|
||||
|
||||
def __init__(self, dmgr, spi_adc_device, spi_pgia_device, cnv_device,
|
||||
div=8, gains=0x0000, core_device="core"):
|
||||
div=8, gains=0x0000, hw_rev="v2.2", core_device="core"):
|
||||
self.bus_adc = dmgr.get(spi_adc_device)
|
||||
self.bus_adc.update_xfer_duration_mu(div, 32)
|
||||
self.bus_pgia = dmgr.get(spi_pgia_device)
|
||||
|
@ -62,6 +65,11 @@ class Sampler:
|
|||
self.cnv = dmgr.get(cnv_device)
|
||||
self.div = div
|
||||
self.gains = gains
|
||||
self.corrected_fs = self.use_corrected_fs(hw_rev)
|
||||
|
||||
@staticmethod
|
||||
def use_corrected_fs(hw_rev):
|
||||
return hw_rev != "v2.1"
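
In the device database this appears as an extra constructor argument; a sketch for
a legacy board follows (device and bus names are placeholders): ::

    device_db["sampler0"] = {
        "type": "local",
        "module": "artiq.coredevice.sampler",
        "class": "Sampler",
        "arguments": {
            "spi_adc_device": "spi_sampler0_adc",     # placeholder names
            "spi_pgia_device": "spi_sampler0_pgia",
            "cnv_device": "ttl_sampler0_cnv",
            "hw_rev": "v2.1",                         # selects the uncorrected 20 V full scale
        }
    }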
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
|
@ -144,4 +152,4 @@ class Sampler:
|
|||
for i in range(n):
|
||||
channel = i + 8 - len(data)
|
||||
gain = (self.gains >> (channel*2)) & 0b11
|
||||
data[i] = adc_mu_to_volt(adc_data[i], gain)
|
||||
data[i] = adc_mu_to_volt(adc_data[i], gain, self.corrected_fs)
|
||||
|
|
|
@ -1,372 +0,0 @@
|
|||
"""
|
||||
Driver for the Smart Arbitrary Waveform Generator (SAWG) on RTIO.
|
||||
|
||||
The SAWG is an "improved DDS" built in gateware and interfacing to
|
||||
high-speed DACs.
|
||||
|
||||
Output event replacement is supported except on the configuration channel.
|
||||
"""
|
||||
|
||||
|
||||
from artiq.language.types import TInt32, TFloat
|
||||
from numpy import int32, int64
|
||||
from artiq.language.core import kernel
|
||||
from artiq.coredevice.spline import Spline
|
||||
from artiq.coredevice.rtio import rtio_output
|
||||
|
||||
|
||||
# sawg.Config addresses
|
||||
_SAWG_DIV = 0
|
||||
_SAWG_CLR = 1
|
||||
_SAWG_IQ_EN = 2
|
||||
# _SAWF_PAD = 3 # reserved
|
||||
_SAWG_OUT_MIN = 4
|
||||
_SAWG_OUT_MAX = 5
|
||||
_SAWG_DUC_MIN = 6
|
||||
_SAWG_DUC_MAX = 7
|
||||
|
||||
|
||||
class Config:
|
||||
"""SAWG configuration.
|
||||
|
||||
Exposes the configurable quantities of a single SAWG channel.
|
||||
|
||||
Access to the configuration registers for a SAWG channel can not
|
||||
be concurrent. There must be at least :attr:`_rtio_interval` machine
|
||||
units of delay between accesses. Replacement is not supported and will be
|
||||
lead to an ``RTIOCollision`` as this is likely a programming error.
|
||||
All methods therefore advance the timeline by the duration of one
|
||||
configuration register transfer.
|
||||
|
||||
:param channel: RTIO channel number of the channel.
|
||||
:param core: Core device.
|
||||
"""
|
||||
kernel_invariants = {"channel", "core", "_out_scale", "_duc_scale",
|
||||
"_rtio_interval"}
|
||||
|
||||
def __init__(self, channel, core, cordic_gain=1.):
|
||||
self.channel = channel
|
||||
self.core = core
|
||||
# normalized DAC output
|
||||
self._out_scale = (1 << 15) - 1.
|
||||
# normalized DAC output including DUC cordic gain
|
||||
self._duc_scale = self._out_scale/cordic_gain
|
||||
# configuration channel access interval
|
||||
self._rtio_interval = int64(3*self.core.ref_multiplier)
|
||||
|
||||
@kernel
|
||||
def set_div(self, div: TInt32, n: TInt32=0):
|
||||
"""Set the spline evolution divider and current counter value.
|
||||
|
||||
The divider and the spline evolution are synchronized across all
|
||||
spline channels within a SAWG channel. The DDS/DUC phase accumulators
|
||||
always evolve at full speed.
|
||||
|
||||
.. note:: The spline evolution divider has not been tested extensively
|
||||
and is currently considered a technological preview only.
|
||||
|
||||
:param div: Spline evolution divider, such that
|
||||
``t_sawg_spline/t_rtio_coarse = div + 1``. Default: ``0``.
|
||||
:param n: Current value of the counter. Default: ``0``.
|
||||
"""
|
||||
rtio_output((self.channel << 8) | _SAWG_DIV, div | (n << 16))
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_clr(self, clr0: TInt32, clr1: TInt32, clr2: TInt32):
|
||||
"""Set the accumulator clear mode for the three phase accumulators.
|
||||
|
||||
When the ``clr`` bit for a given DDS/DUC phase accumulator is
|
||||
set, that phase accumulator will be cleared with every phase offset
|
||||
RTIO command and the output phase of the DDS/DUC will be
|
||||
exactly the phase RTIO value ("absolute phase update mode").
|
||||
|
||||
.. math::
|
||||
q^\prime(t) = p^\prime + (t - t^\prime) f^\prime
|
||||
|
||||
In turn, when the bit is cleared, the phase RTIO channels
|
||||
determine a phase offset to the current (carrier-) value of the
|
||||
DDS/DUC phase accumulator. This "relative phase update mode" is
|
||||
sometimes also called “continuous phase mode”.
|
||||
|
||||
.. math::
|
||||
q^\prime(t) = q(t^\prime) + (p^\prime - p) +
|
||||
(t - t^\prime) f^\prime
|
||||
|
||||
Where:
|
||||
|
||||
* :math:`q`, :math:`q^\prime`: old/new phase accumulator
|
||||
* :math:`p`, :math:`p^\prime`: old/new phase offset
|
||||
* :math:`f^\prime`: new frequency
|
||||
* :math:`t^\prime`: timestamp of setting new :math:`p`, :math:`f`
|
||||
* :math:`t`: running time
|
||||
|
||||
:param clr0: Auto-clear phase accumulator of the ``phase0``/
|
||||
``frequency0`` DUC. Default: ``True``
|
||||
:param clr1: Auto-clear phase accumulator of the ``phase1``/
|
||||
``frequency1`` DDS. Default: ``True``
|
||||
:param clr2: Auto-clear phase accumulator of the ``phase2``/
|
||||
``frequency2`` DDS. Default: ``True``
|
||||
"""
|
||||
rtio_output((self.channel << 8) | _SAWG_CLR, clr0 |
|
||||
(clr1 << 1) | (clr2 << 2))
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_iq_en(self, i_enable: TInt32, q_enable: TInt32):
|
||||
"""Enable I/Q data on this DAC channel.
|
||||
|
||||
Every pair of SAWG channels forms a buddy pair.
|
||||
The ``iq_en`` configuration controls which DDS data is emitted to the
|
||||
DACs.
|
||||
|
||||
Refer to the documentation of :class:`SAWG` for a mathematical
|
||||
description of ``i_enable`` and ``q_enable``.
|
||||
|
||||
.. note:: Quadrature data from the buddy channel is currently
|
||||
a technological preview only. The data is ignored in the SAWG
|
||||
gateware and not added to the DAC output.
|
||||
This is equivalent to the ``q_enable`` switch always being ``0``.
|
||||
|
||||
:param i_enable: Controls adding the in-phase
|
||||
DUC-DDS data of *this* SAWG channel to *this* DAC channel.
|
||||
Default: ``1``.
|
||||
:param q_enable: controls adding the quadrature
|
||||
DUC-DDS data of this SAWG's *buddy* channel to *this* DAC
|
||||
channel. Default: ``0``.
|
||||
"""
|
||||
rtio_output((self.channel << 8) | _SAWG_IQ_EN, i_enable |
|
||||
(q_enable << 1))
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_duc_max_mu(self, limit: TInt32):
|
||||
"""Set the digital up-converter (DUC) I and Q data summing junctions
|
||||
upper limit. In machine units.
|
||||
|
||||
The default limits are chosen to reach maximum and minimum DAC output
|
||||
amplitude.
|
||||
|
||||
For a description of the limiter functions in normalized units see:
|
||||
|
||||
.. seealso:: :meth:`set_duc_max`
|
||||
"""
|
||||
rtio_output((self.channel << 8) | _SAWG_DUC_MAX, limit)
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_duc_min_mu(self, limit: TInt32):
|
||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
||||
rtio_output((self.channel << 8) | _SAWG_DUC_MIN, limit)
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_out_max_mu(self, limit: TInt32):
|
||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
||||
rtio_output((self.channel << 8) | _SAWG_OUT_MAX, limit)
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_out_min_mu(self, limit: TInt32):
|
||||
""".. seealso:: :meth:`set_duc_max_mu`"""
|
||||
rtio_output((self.channel << 8) | _SAWG_OUT_MIN, limit)
|
||||
delay_mu(self._rtio_interval)
|
||||
|
||||
@kernel
|
||||
def set_duc_max(self, limit: TFloat):
|
||||
"""Set the digital up-converter (DUC) I and Q data summing junctions
|
||||
upper limit.
|
||||
|
||||
Each of the three summing junctions has a saturating adder with
|
||||
configurable upper and lower limits. The three summing junctions are:
|
||||
|
||||
* At the in-phase input to the ``phase0``/``frequency0`` fast DUC,
|
||||
after the anti-aliasing FIR filter.
|
||||
* At the quadrature input to the ``phase0``/``frequency0``
|
||||
fast DUC, after the anti-aliasing FIR filter. The in-phase and
|
||||
quadrature data paths both use the same limits.
|
||||
* Before the DAC, where the following three data streams
|
||||
are added together:
|
||||
|
||||
* the output of the ``offset`` spline,
|
||||
* (optionally, depending on ``i_enable``) the in-phase output
|
||||
of the ``phase0``/``frequency0`` fast DUC, and
|
||||
* (optionally, depending on ``q_enable``) the quadrature
|
||||
output of the ``phase0``/``frequency0`` fast DUC of the
|
||||
buddy channel.
|
||||
|
||||
Refer to the documentation of :class:`SAWG` for a mathematical
|
||||
description of the summing junctions.
|
||||
|
||||
:param limit: Limit value ``[-1, 1]``. The output of the limiter will
|
||||
never exceed this limit. The default limits are the full range
|
||||
``[-1, 1]``.
|
||||
|
||||
.. seealso::
|
||||
* :meth:`set_duc_max`: Upper limit of the in-phase and quadrature
|
||||
inputs to the DUC.
|
||||
* :meth:`set_duc_min`: Lower limit of the in-phase and quadrature
|
||||
inputs to the DUC.
|
||||
* :meth:`set_out_max`: Upper limit of the DAC output.
|
||||
* :meth:`set_out_min`: Lower limit of the DAC output.
|
||||
"""
|
||||
self.set_duc_max_mu(int32(round(limit*self._duc_scale)))
|
||||
|
||||
@kernel
|
||||
def set_duc_min(self, limit: TFloat):
|
||||
""".. seealso:: :meth:`set_duc_max`"""
|
||||
self.set_duc_min_mu(int32(round(limit*self._duc_scale)))
|
||||
|
||||
@kernel
|
||||
def set_out_max(self, limit: TFloat):
|
||||
""".. seealso:: :meth:`set_duc_max`"""
|
||||
self.set_out_max_mu(int32(round(limit*self._out_scale)))
|
||||
|
||||
@kernel
|
||||
def set_out_min(self, limit: TFloat):
|
||||
""".. seealso:: :meth:`set_duc_max`"""
|
||||
self.set_out_min_mu(int32(round(limit*self._out_scale)))
|
||||
|
||||
|
||||
class SAWG:
|
||||
"""Smart arbitrary waveform generator channel.
|
||||
The channel is parametrized as: ::
|
||||
|
||||
oscillators = exp(2j*pi*(frequency0*t + phase0))*(
|
||||
amplitude1*exp(2j*pi*(frequency1*t + phase1)) +
|
||||
amplitude2*exp(2j*pi*(frequency2*t + phase2)))
|
||||
|
||||
output = (offset +
|
||||
i_enable*Re(oscillators) +
|
||||
q_enable*Im(buddy_oscillators))
|
||||
|
||||
This parametrization can be viewed as two complex (quadrature) oscillators
|
||||
(``frequency1``/``phase1`` and ``frequency2``/``phase2``) that are
|
||||
executing and sampling at the coarse RTIO frequency. They can represent
|
||||
frequencies within the first Nyquist zone from ``-f_rtio_coarse/2`` to
|
||||
``f_rtio_coarse/2``.
|
||||
|
||||
.. note:: The coarse RTIO frequency ``f_rtio_coarse`` is the inverse of
|
||||
``ref_period*multiplier``. Both are arguments of the ``Core`` device,
|
||||
specified in the device database ``device_db.py``.
|
||||
|
||||
The sum of their outputs is then interpolated by a factor of
|
||||
:attr:`parallelism` (2, 4, 8 depending on the bitstream) using a
|
||||
finite-impulse-response (FIR) anti-aliasing filter (more accurately
|
||||
a half-band filter).
|
||||
|
||||
The filter is followed by a configurable saturating limiter.
|
||||
|
||||
After the limiter, the data is shifted in frequency using a complex
|
||||
digital up-converter (DUC, ``frequency0``/``phase0``) running at
|
||||
:attr:`parallelism` times the coarse RTIO frequency. The first Nyquist
|
||||
zone of the DUC extends from ``-f_rtio_coarse*parallelism/2`` to
|
||||
``f_rtio_coarse*parallelism/2``. Other Nyquist zones are usable depending
|
||||
on the interpolation/modulation options configured in the DAC.
|
||||
|
||||
The real/in-phase data after digital up-conversion can be offset using
|
||||
another spline interpolator ``offset``.
|
||||
|
||||
The ``i_enable``/``q_enable`` switches enable emission of quadrature
|
||||
signals for later analog quadrature mixing distinguishing upper and lower
|
||||
sidebands and thus doubling the bandwidth. They can also be used to emit
|
||||
four-tone signals.
|
||||
|
||||
.. note:: Quadrature data from the buddy channel is currently
|
||||
ignored in the SAWG gateware and not added to the DAC output.
|
||||
This is equivalent to the ``q_enable`` switch always being ``0``.
|
||||
|
||||
The configuration channel and the nine
|
||||
:class:`artiq.coredevice.spline.Spline` interpolators are accessible as
|
||||
attributes:
|
||||
|
||||
* :attr:`config`: :class:`Config`
|
||||
* :attr:`offset`, :attr:`amplitude1`, :attr:`amplitude2`: in units
|
||||
of full scale
|
||||
* :attr:`phase0`, :attr:`phase1`, :attr:`phase2`: in units of turns
|
||||
* :attr:`frequency0`, :attr:`frequency1`, :attr:`frequency2`: in units
|
||||
of Hz
|
||||
|
||||
.. note:: The latencies (pipeline depths) of the nine data channels (i.e.
|
||||
all except :attr:`config`) are matched. Equivalent channels (e.g.
|
||||
:attr:`phase1` and :attr:`phase2`) are exactly matched. Channels of
|
||||
different type or functionality (e.g. :attr:`offset` vs
|
||||
:attr:`amplitude1`, DDS vs DUC, :attr:`phase0` vs :attr:`phase1`) are
|
||||
only matched to within one coarse RTIO cycle.
|
||||
|
||||
:param channel_base: RTIO channel number of the first channel (amplitude).
|
||||
The configuration channel and frequency/phase/amplitude channels are
|
||||
then assumed to be successive channels.
|
||||
:param parallelism: Number of output samples per coarse RTIO clock cycle.
|
||||
:param core_device: Name of the core device that this SAWG is on.
|
||||
"""
|
||||
kernel_invariants = {"channel_base", "core", "parallelism",
|
||||
"amplitude1", "frequency1", "phase1",
|
||||
"amplitude2", "frequency2", "phase2",
|
||||
"frequency0", "phase0", "offset"}
|
||||
|
||||
def __init__(self, dmgr, channel_base, parallelism, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.channel_base = channel_base
|
||||
self.parallelism = parallelism
|
||||
width = 16
|
||||
time_width = 16
|
||||
cordic_gain = 1.646760258057163 # Cordic(width=16, guard=None).gain
|
||||
head_room = 1.001
|
||||
self.config = Config(channel_base, self.core, cordic_gain)
|
||||
self.offset = Spline(width, time_width, channel_base + 1,
|
||||
self.core, 2.*head_room)
|
||||
self.amplitude1 = Spline(width, time_width, channel_base + 2,
|
||||
self.core, 2*head_room*cordic_gain**2)
|
||||
self.frequency1 = Spline(3*width, time_width, channel_base + 3,
|
||||
self.core, 1/self.core.coarse_ref_period)
|
||||
self.phase1 = Spline(width, time_width, channel_base + 4,
|
||||
self.core, 1.)
|
||||
self.amplitude2 = Spline(width, time_width, channel_base + 5,
|
||||
self.core, 2*head_room*cordic_gain**2)
|
||||
self.frequency2 = Spline(3*width, time_width, channel_base + 6,
|
||||
self.core, 1/self.core.coarse_ref_period)
|
||||
self.phase2 = Spline(width, time_width, channel_base + 7,
|
||||
self.core, 1.)
|
||||
self.frequency0 = Spline(2*width, time_width, channel_base + 8,
|
||||
self.core,
|
||||
parallelism/self.core.coarse_ref_period)
|
||||
self.phase0 = Spline(width, time_width, channel_base + 9,
|
||||
self.core, 1.)
|
||||
|
||||
@kernel
|
||||
def reset(self):
|
||||
"""Re-establish initial conditions.
|
||||
|
||||
This clears all spline interpolators, accumulators and configuration
|
||||
settings.
|
||||
|
||||
This method advances the timeline by the time required to perform all
|
||||
7 writes to the configuration channel, plus 9 coarse RTIO cycles.
|
||||
"""
|
||||
self.config.set_div(0, 0)
|
||||
self.config.set_clr(1, 1, 1)
|
||||
self.config.set_iq_en(1, 0)
|
||||
self.config.set_duc_min(-1.)
|
||||
self.config.set_duc_max(1.)
|
||||
self.config.set_out_min(-1.)
|
||||
self.config.set_out_max(1.)
|
||||
self.frequency0.set_mu(0)
|
||||
coarse_cycle = int64(self.core.ref_multiplier)
|
||||
delay_mu(coarse_cycle)
|
||||
self.frequency1.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.frequency2.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.phase0.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.phase1.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.phase2.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.amplitude1.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.amplitude2.set_mu(0)
|
||||
delay_mu(coarse_cycle)
|
||||
self.offset.set_mu(0)
|
||||
delay_mu(coarse_cycle)
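# Illustrative usage sketch (not part of the driver): a minimal experiment
# driving one SAWG channel after reset(). The experiment scaffolding and the
# "sawg0" device name are assumptions; only reset() and the Spline.set()
# calls come from the drivers in this changeset.
from artiq.experiment import *


class SAWGSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("sawg0")           # hypothetical SAWG device

    @kernel
    def run(self):
        self.core.reset()
        self.sawg0.reset()                     # clear splines, accumulators, config
        delay(10*us)
        self.sawg0.frequency0.set(10*MHz)      # DUC frequency
        self.sawg0.frequency1.set(1*MHz)       # oscillator 1 frequency
        self.sawg0.amplitude1.set(.5)          # half full-scale amplitude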
|
|
@ -1,54 +0,0 @@
|
|||
from artiq.language.core import kernel, delay
|
||||
from artiq.language.units import us
|
||||
|
||||
|
||||
class ShiftReg:
|
||||
"""Driver for shift registers/latch combos connected to TTLs"""
|
||||
kernel_invariants = {"dt", "n"}
|
||||
|
||||
def __init__(self, dmgr, clk, ser, latch, n=32, dt=10*us, ser_in=None):
|
||||
self.core = dmgr.get("core")
|
||||
self.clk = dmgr.get(clk)
|
||||
self.ser = dmgr.get(ser)
|
||||
self.latch = dmgr.get(latch)
|
||||
self.n = n
|
||||
self.dt = dt
|
||||
if ser_in is not None:
|
||||
self.ser_in = dmgr.get(ser_in)
|
||||
|
||||
@kernel
|
||||
def set(self, data):
|
||||
"""Sets the values of the latch outputs. This does not
|
||||
advance the timeline and the waveform is generated before
|
||||
`now`."""
|
||||
delay(-2*(self.n + 1)*self.dt)
|
||||
for i in range(self.n):
|
||||
if (data >> (self.n-i-1)) & 1 == 0:
|
||||
self.ser.off()
|
||||
else:
|
||||
self.ser.on()
|
||||
self.clk.off()
|
||||
delay(self.dt)
|
||||
self.clk.on()
|
||||
delay(self.dt)
|
||||
self.clk.off()
|
||||
self.latch.on()
|
||||
delay(self.dt)
|
||||
self.latch.off()
|
||||
delay(self.dt)
|
||||
|
||||
@kernel
|
||||
def get(self):
|
||||
delay(-2*(self.n + 1)*self.dt)
|
||||
data = 0
|
||||
for i in range(self.n):
|
||||
data <<= 1
|
||||
self.ser_in.sample_input()
|
||||
if self.ser_in.sample_get():
|
||||
data |= 1
|
||||
delay(self.dt)
|
||||
self.clk.on()
|
||||
delay(self.dt)
|
||||
self.clk.off()
|
||||
delay(self.dt)
|
||||
return data
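# Illustrative usage sketch (not part of the driver): writing a 32-bit word
# to latched shift-register outputs. The "shift_reg" device name is an
# assumption; note that set() rewinds the timeline by 2*(n + 1)*dt, so the
# kernel needs enough slack before calling it.
from artiq.experiment import *


class ShiftRegSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shift_reg")       # hypothetical ShiftReg device

    @kernel
    def run(self):
        self.core.reset()
        delay(1*ms)                            # slack for the negative delay in set()
        self.shift_reg.set(0xDEADBEEF)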
|
|
@ -0,0 +1,623 @@
|
|||
from artiq.language.core import *
|
||||
from artiq.language.types import *
|
||||
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||
from artiq.coredevice import spi2 as spi
|
||||
from artiq.language.units import us
|
||||
|
||||
|
||||
@portable
|
||||
def shuttler_volt_to_mu(volt):
|
||||
"""Return the equivalent DAC code. Valid input range is from -10 to
|
||||
10 - LSB.
|
||||
"""
|
||||
return round((1 << 16) * (volt / 20.0)) & 0xffff
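# Worked examples of the mapping above (host-side arithmetic only):
#   shuttler_volt_to_mu(0.0)   == 0x0000
#   shuttler_volt_to_mu(5.0)   == 0x4000   (a quarter of the 20 V span)
#   shuttler_volt_to_mu(-10.0) == 0x8000   (negative full-scale)
#   shuttler_volt_to_mu(10.0 - 20.0/(1 << 16)) == 0x7fff   (+10 V - 1 LSB)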
|
||||
|
||||
|
||||
class Config:
|
||||
"""Shuttler configuration registers interface.
|
||||
|
||||
The configuration registers control waveform phase auto-clear, and pre-DAC
|
||||
gain & offset values for calibration with ADC on the Shuttler AFE card.
|
||||
|
||||
To find the calibrated DAC code, the Shuttler Core first multiplies the
|
||||
output data with pre-DAC gain, then adds the offset.
|
||||
|
||||
.. note::
    The calibrated DAC code saturates at 0x7fff (positive) and 0x8000
    (negative).
|
||||
|
||||
:param channel: RTIO channel number of this interface.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariants = {
|
||||
"core", "channel", "target_base", "target_read",
|
||||
"target_gain", "target_offset", "target_clr"
|
||||
}
|
||||
|
||||
def __init__(self, dmgr, channel, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.channel = channel
|
||||
self.target_base = channel << 8
|
||||
self.target_read = 1 << 6
|
||||
self.target_gain = 0 * (1 << 4)
|
||||
self.target_offset = 1 * (1 << 4)
|
||||
self.target_clr = 1 * (1 << 5)
|
||||
|
||||
@kernel
|
||||
def set_clr(self, clr):
|
||||
"""Set/Unset waveform phase clear bits.
|
||||
|
||||
Each bit corresponds to a Shuttler waveform generator core. Setting a
|
||||
clear bit forces the Shuttler Core to clear the phase accumulator on
|
||||
waveform trigger (See :class:`Trigger` for the trigger method).
|
||||
Otherwise, the phase accumulator increments from its original value.
|
||||
|
||||
:param clr: Waveform phase clear bits. The MSB corresponds to Channel
|
||||
15, LSB corresponds to Channel 0.
|
||||
"""
|
||||
rtio_output(self.target_base | self.target_clr, clr)
|
||||
|
||||
@kernel
|
||||
def set_gain(self, channel, gain):
|
||||
"""Set the 16-bits pre-DAC gain register of a Shuttler Core channel.
|
||||
|
||||
The `gain` parameter represents the decimal portion of the gain
|
||||
factor. The MSB represents 0.5 and the sign bit. Hence, the valid
|
||||
total gain value (1 +/- 0.gain) ranges from 0.5 to 1.5 - LSB.
|
||||
|
||||
:param channel: Shuttler Core channel to be configured.
|
||||
:param gain: Shuttler Core channel gain.
|
||||
"""
|
||||
rtio_output(self.target_base | self.target_gain | channel, gain)
|
||||
|
||||
@kernel
|
||||
def get_gain(self, channel):
|
||||
"""Return the pre-DAC gain value of a Shuttler Core channel.
|
||||
|
||||
:param channel: The Shuttler Core channel.
|
||||
:return: Pre-DAC gain value. See :meth:`set_gain`.
|
||||
"""
|
||||
rtio_output(self.target_base | self.target_gain |
|
||||
self.target_read | channel, 0)
|
||||
return rtio_input_data(self.channel)
|
||||
|
||||
@kernel
|
||||
def set_offset(self, channel, offset):
|
||||
"""Set the 16-bits pre-DAC offset register of a Shuttler Core channel.
|
||||
|
||||
.. seealso::
|
||||
:meth:`shuttler_volt_to_mu`
|
||||
|
||||
:param channel: Shuttler Core channel to be configured.
|
||||
:param offset: Shuttler Core channel offset.
|
||||
"""
|
||||
rtio_output(self.target_base | self.target_offset | channel, offset)
|
||||
|
||||
@kernel
|
||||
def get_offset(self, channel):
|
||||
"""Return the pre-DAC offset value of a Shuttler Core channel.
|
||||
|
||||
:param channel: The Shuttler Core channel.
|
||||
:return: Pre-DAC offset value. See :meth:`set_offset`.
|
||||
"""
|
||||
rtio_output(self.target_base | self.target_offset |
|
||||
self.target_read | channel, 0)
|
||||
return rtio_input_data(self.channel)
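# Illustrative usage sketch (not part of the driver): trimming one channel's
# pre-DAC calibration by hand. The "shuttler0_config" device name, the values
# written and the artiq.coredevice.shuttler import path are assumptions.
from artiq.experiment import *
from artiq.coredevice.shuttler import shuttler_volt_to_mu


class ShuttlerTrimSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_config")    # hypothetical Config device

    @kernel
    def run(self):
        self.core.reset()
        self.shuttler0_config.set_gain(0, 0)       # unity gain on channel 0
        delay(10*us)                               # avoid same-timestamp writes
        self.shuttler0_config.set_offset(0, shuttler_volt_to_mu(-0.02))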
|
||||
|
||||
|
||||
class DCBias:
|
||||
"""Shuttler Core cubic DC-bias spline.
|
||||
|
||||
A Shuttler channel can generate a waveform `w(t)` that is the sum of a
|
||||
cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
|
||||
spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where
|
||||
|
||||
.. math::
|
||||
w(t) = a(t) + b(t) * cos(c(t))
|
||||
|
||||
And `t` corresponds to time in seconds.
|
||||
This class controls the cubic spline `a(t)`, in which
|
||||
|
||||
.. math::
|
||||
a(t) = p_0 + p_1t + \\frac{p_2t^2}{2} + \\frac{p_3t^3}{6}
|
||||
|
||||
And `a(t)` is in Volt.
|
||||
|
||||
:param channel: RTIO channel number of this DC-bias spline interface.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariants = {"core", "channel", "target_o"}
|
||||
|
||||
def __init__(self, dmgr, channel, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.channel = channel
|
||||
self.target_o = channel << 8
|
||||
|
||||
@kernel
|
||||
def set_waveform(self, a0: TInt32, a1: TInt32, a2: TInt64, a3: TInt64):
|
||||
"""Set the DC-bias spline waveform.
|
||||
|
||||
Given `a(t)` as defined in :class:`DCBias`, the coefficients should be
|
||||
configured by the following formulae.
|
||||
|
||||
.. math::
|
||||
T &= 8*10^{-9}
|
||||
|
||||
a_0 &= p_0
|
||||
|
||||
a_1 &= p_1T + \\frac{p_2T^2}{2} + \\frac{p_3T^3}{6}
|
||||
|
||||
a_2 &= p_2T^2 + p_3T^3
|
||||
|
||||
a_3 &= p_3T^3
|
||||
|
||||
:math:`a_0`, :math:`a_1`, :math:`a_2` and :math:`a_3` are 16, 32, 48
|
||||
and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
|
||||
machine unit conversion.
|
||||
|
||||
Note: The waveform is not updated to the Shuttler Core until
|
||||
triggered. See :class:`Trigger` for the update triggering mechanism.
|
||||
|
||||
:param a0: The :math:`a_0` coefficient in machine unit.
|
||||
:param a1: The :math:`a_1` coefficient in machine unit.
|
||||
:param a2: The :math:`a_2` coefficient in machine unit.
|
||||
:param a3: The :math:`a_3` coefficient in machine unit.
|
||||
"""
|
||||
coef_words = [
|
||||
a0,
|
||||
a1,
|
||||
a1 >> 16,
|
||||
a2 & 0xFFFF,
|
||||
(a2 >> 16) & 0xFFFF,
|
||||
(a2 >> 32) & 0xFFFF,
|
||||
a3 & 0xFFFF,
|
||||
(a3 >> 16) & 0xFFFF,
|
||||
(a3 >> 32) & 0xFFFF,
|
||||
]
|
||||
|
||||
for i in range(len(coef_words)):
|
||||
rtio_output(self.target_o | i, coef_words[i])
|
||||
delay_mu(int64(self.core.ref_multiplier))
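# Illustrative usage sketch (not part of the driver): staging a constant
# DC bias on channel 0 and committing it with a trigger, mirroring what
# ADC.calibrate() below does internally. Device names and the
# artiq.coredevice.shuttler import path are assumptions.
from artiq.experiment import *
from artiq.coredevice.shuttler import shuttler_volt_to_mu


class DCBiasSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_dcbias0")   # hypothetical DCBias, channel 0
        self.setattr_device("shuttler0_trigger")   # hypothetical Trigger interface

    @kernel
    def run(self):
        self.core.reset()
        # Constant 1.5 V: only a0 is non-zero, higher-order terms stay at 0.
        self.shuttler0_dcbias0.set_waveform(shuttler_volt_to_mu(1.5), 0, 0, 0)
        delay(10*us)
        # Commit the pending coefficients: bit 0 selects channel 0.
        self.shuttler0_trigger.trigger(0b1)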
|
||||
|
||||
|
||||
class DDS:
|
||||
"""Shuttler Core DDS spline.
|
||||
|
||||
A Shuttler channel can generate a waveform `w(t)` that is the sum of a
|
||||
cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
|
||||
spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where
|
||||
|
||||
.. math::
|
||||
w(t) = a(t) + b(t) * cos(c(t))
|
||||
|
||||
And `t` corresponds to time in seconds.
|
||||
This class controls the cubic spline `b(t)` and quadratic spline `c(t)`,
|
||||
in which
|
||||
|
||||
.. math::
|
||||
b(t) &= g * (q_0 + q_1t + \\frac{q_2t^2}{2} + \\frac{q_3t^3}{6})
|
||||
|
||||
c(t) &= r_0 + r_1t + \\frac{r_2t^2}{2}
|
||||
|
||||
And `b(t)` is in Volt, `c(t)` is in number of turns. Note that `b(t)`
|
||||
carries a constant gain of :math:`g=1.64676` (the CORDIC gain).
|
||||
|
||||
:param channel: RTIO channel number of this DDS spline interface.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariants = {"core", "channel", "target_o"}
|
||||
|
||||
def __init__(self, dmgr, channel, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.channel = channel
|
||||
self.target_o = channel << 8
|
||||
|
||||
@kernel
|
||||
def set_waveform(self, b0: TInt32, b1: TInt32, b2: TInt64, b3: TInt64,
|
||||
c0: TInt32, c1: TInt32, c2: TInt32):
|
||||
"""Set the DDS spline waveform.
|
||||
|
||||
Given `b(t)` and `c(t)` as defined in :class:`DDS`, the coefficients
|
||||
should be configured by the following formulae.
|
||||
|
||||
.. math::
|
||||
T &= 8*10^{-9}
|
||||
|
||||
b_0 &= q_0
|
||||
|
||||
b_1 &= q_1T + \\frac{q_2T^2}{2} + \\frac{q_3T^3}{6}
|
||||
|
||||
b_2 &= q_2T^2 + q_3T^3
|
||||
|
||||
b_3 &= q_3T^3
|
||||
|
||||
c_0 &= r_0
|
||||
|
||||
c_1 &= r_1T + \\frac{r_2T^2}{2}
|
||||
|
||||
c_2 &= r_2T^2
|
||||
|
||||
:math:`b_0`, :math:`b_1`, :math:`b_2` and :math:`b_3` are 16, 32, 48
|
||||
and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
|
||||
machine unit conversion. :math:`c_0`, :math:`c_1` and :math:`c_2` are
|
||||
16, 32 and 32 bits in width respectively.
|
||||
|
||||
Note: The waveform is not updated to the Shuttler Core until
|
||||
triggered. See :class:`Trigger` for the update triggering mechanism.
|
||||
|
||||
:param b0: The :math:`b_0` coefficient in machine unit.
|
||||
:param b1: The :math:`b_1` coefficient in machine unit.
|
||||
:param b2: The :math:`b_2` coefficient in machine unit.
|
||||
:param b3: The :math:`b_3` coefficient in machine unit.
|
||||
:param c0: The :math:`c_0` coefficient in machine unit.
|
||||
:param c1: The :math:`c_1` coefficient in machine unit.
|
||||
:param c2: The :math:`c_2` coefficient in machine unit.
|
||||
"""
|
||||
coef_words = [
|
||||
b0,
|
||||
b1,
|
||||
b1 >> 16,
|
||||
b2 & 0xFFFF,
|
||||
(b2 >> 16) & 0xFFFF,
|
||||
(b2 >> 32) & 0xFFFF,
|
||||
b3 & 0xFFFF,
|
||||
(b3 >> 16) & 0xFFFF,
|
||||
(b3 >> 32) & 0xFFFF,
|
||||
c0,
|
||||
c1,
|
||||
c1 >> 16,
|
||||
c2,
|
||||
c2 >> 16,
|
||||
]
|
||||
|
||||
for i in range(len(coef_words)):
|
||||
rtio_output(self.target_o | i, coef_words[i])
|
||||
delay_mu(int64(self.core.ref_multiplier))
|
||||
|
||||
|
||||
class Trigger:
|
||||
"""Shuttler Core spline coefficients update trigger.
|
||||
|
||||
:param channel: RTIO channel number of the trigger interface.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariants = {"core", "channel", "target_o"}
|
||||
|
||||
def __init__(self, dmgr, channel, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.channel = channel
|
||||
self.target_o = channel << 8
|
||||
|
||||
@kernel
|
||||
def trigger(self, trig_out):
|
||||
"""Triggers coefficient update of (a) Shuttler Core channel(s).
|
||||
|
||||
Each bit corresponds to a Shuttler waveform generator core. Setting
|
||||
`trig_out` bits commits the pending coefficient update (from
|
||||
`set_waveform` in :class:`DCBias` and :class:`DDS`) to the Shuttler Core
|
||||
synchronously.
|
||||
|
||||
:param trig_out: Coefficient update trigger bits. The MSB corresponds
|
||||
to Channel 15, LSB corresponds to Channel 0.
|
||||
"""
|
||||
rtio_output(self.target_o, trig_out)
|
||||
|
||||
|
||||
RELAY_SPI_CONFIG = (0*spi.SPI_OFFLINE | 1*spi.SPI_END |
|
||||
0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
|
||||
0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE |
|
||||
0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
|
||||
|
||||
ADC_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END |
|
||||
0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
|
||||
1*spi.SPI_CLK_POLARITY | 1*spi.SPI_CLK_PHASE |
|
||||
0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
|
||||
|
||||
# SPI clock write and read dividers
|
||||
# CS should assert at least 9.5 ns after clk pulse
|
||||
SPIT_RELAY_WR = 4
|
||||
# 25 ns high/low pulse hold (limiting for write)
|
||||
SPIT_ADC_WR = 4
|
||||
SPIT_ADC_RD = 16
|
||||
|
||||
# SPI CS line
|
||||
CS_RELAY = 1 << 0
|
||||
CS_LED = 1 << 1
|
||||
CS_ADC = 1 << 0
|
||||
|
||||
# Referenced AD4115 registers
|
||||
_AD4115_REG_STATUS = 0x00
|
||||
_AD4115_REG_ADCMODE = 0x01
|
||||
_AD4115_REG_DATA = 0x04
|
||||
_AD4115_REG_ID = 0x07
|
||||
_AD4115_REG_CH0 = 0x10
|
||||
_AD4115_REG_SETUPCON0 = 0x20
|
||||
|
||||
|
||||
class Relay:
|
||||
"""Shuttler AFE relay switches.
|
||||
|
||||
It controls the AFE relay switches and the LEDs. Switch a relay on to
enable the corresponding AFE output, and off to disable it. The LEDs
indicate the relay status.
|
||||
|
||||
.. note::
|
||||
The relay does not disable ADC measurements. Voltage of any channels
|
||||
can still be read by the ADC even after switching off the relays.
|
||||
|
||||
:param spi_device: SPI bus device name.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariant = {"core", "bus"}
|
||||
|
||||
def __init__(self, dmgr, spi_device, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.bus = dmgr.get(spi_device)
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
"""Initialize SPI device.
|
||||
|
||||
Configures the SPI bus to 16-bits, write-only, simultaneous relay
|
||||
switches and LED control.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
RELAY_SPI_CONFIG, 16, SPIT_RELAY_WR, CS_RELAY | CS_LED)
|
||||
|
||||
@kernel
|
||||
def enable(self, en: TInt32):
|
||||
"""Enable/Disable relay switches of corresponding channels.
|
||||
|
||||
Each bit corresponds to the relay switch of a channel. Asserting a bit
turns on the corresponding relay switch; deasserting the same bit turns
it off.
|
||||
|
||||
:param en: Switch enable bits. The MSB corresponds to Channel 15, LSB
|
||||
corresponds to Channel 0.
|
||||
"""
|
||||
self.bus.write(en << 16)
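# Illustrative usage sketch (not part of the driver): enabling the AFE
# outputs of channels 0 and 1. The "shuttler0_relay" device name is an
# assumption.
from artiq.experiment import *


class RelaySketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_relay")     # hypothetical Relay device

    @kernel
    def run(self):
        self.core.reset()
        self.shuttler0_relay.init()
        delay(10*us)
        self.shuttler0_relay.enable(0b11)          # channels 0 and 1 on, rest off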
|
||||
|
||||
|
||||
class ADC:
|
||||
"""Shuttler AFE ADC (AD4115) driver.
|
||||
|
||||
:param spi_device: SPI bus device name.
|
||||
:param core_device: Core device name.
|
||||
"""
|
||||
kernel_invariant = {"core", "bus"}
|
||||
|
||||
def __init__(self, dmgr, spi_device, core_device="core"):
|
||||
self.core = dmgr.get(core_device)
|
||||
self.bus = dmgr.get(spi_device)
|
||||
|
||||
@kernel
|
||||
def read_id(self) -> TInt32:
|
||||
"""Read the product ID of the ADC.
|
||||
|
||||
The expected return value is 0x38DX, the 4 LSbs are don't cares.
|
||||
|
||||
:return: The read-back product ID.
|
||||
"""
|
||||
return self.read16(_AD4115_REG_ID)
|
||||
|
||||
@kernel
|
||||
def reset(self):
|
||||
"""AD4115 reset procedure.
|
||||
|
||||
This performs a write operation of 96 serial clock cycles with DIN
|
||||
held at high. It resets the entire device, including the register
|
||||
contents.
|
||||
|
||||
.. note::
|
||||
The datasheet only requires 64 cycles, but reasserting `CS_n` right
|
||||
after the transfer appears to interrupt the start-up sequence.
|
||||
"""
|
||||
self.bus.set_config_mu(ADC_SPI_CONFIG, 32, SPIT_ADC_WR, CS_ADC)
|
||||
self.bus.write(0xffffffff)
|
||||
self.bus.write(0xffffffff)
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
|
||||
self.bus.write(0xffffffff)
|
||||
|
||||
@kernel
|
||||
def read8(self, addr: TInt32) -> TInt32:
|
||||
"""Read from 8 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:return: Read-back register content.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||
16, SPIT_ADC_RD, CS_ADC)
|
||||
self.bus.write((addr | 0x40) << 24)
|
||||
return self.bus.read() & 0xff
|
||||
|
||||
@kernel
|
||||
def read16(self, addr: TInt32) -> TInt32:
|
||||
"""Read from 16 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:return: Read-back register content.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||
24, SPIT_ADC_RD, CS_ADC)
|
||||
self.bus.write((addr | 0x40) << 24)
|
||||
return self.bus.read() & 0xffff
|
||||
|
||||
@kernel
|
||||
def read24(self, addr: TInt32) -> TInt32:
|
||||
"""Read from 24 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:return: Read-back register content.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||
32, SPIT_ADC_RD, CS_ADC)
|
||||
self.bus.write((addr | 0x40) << 24)
|
||||
return self.bus.read() & 0xffffff
|
||||
|
||||
@kernel
|
||||
def write8(self, addr: TInt32, data: TInt32):
|
||||
"""Write to 8 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:param data: Data to be written.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END, 16, SPIT_ADC_WR, CS_ADC)
|
||||
self.bus.write(addr << 24 | (data & 0xff) << 16)
|
||||
|
||||
@kernel
|
||||
def write16(self, addr: TInt32, data: TInt32):
|
||||
"""Write to 16 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:param data: Data to be written.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END, 24, SPIT_ADC_WR, CS_ADC)
|
||||
self.bus.write(addr << 24 | (data & 0xffff) << 8)
|
||||
|
||||
@kernel
|
||||
def write24(self, addr: TInt32, data: TInt32):
|
||||
"""Write to 24 bit register.
|
||||
|
||||
:param addr: Register address.
|
||||
:param data: Data to be written.
|
||||
"""
|
||||
self.bus.set_config_mu(
|
||||
ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
|
||||
self.bus.write(addr << 24 | (data & 0xffffff))
|
||||
|
||||
@kernel
|
||||
def read_ch(self, channel: TInt32) -> TFloat:
|
||||
"""Sample a Shuttler channel on the AFE.
|
||||
|
||||
It performs a single conversion using profile 0 and setup 0, on the
|
||||
selected channel. The sample is then recovered and converted to volt.
|
||||
|
||||
:param channel: Shuttler channel to be sampled.
|
||||
:return: Voltage sample in volt.
|
||||
"""
|
||||
# Always configure Profile 0 for single conversion
|
||||
self.write16(_AD4115_REG_CH0, 0x8000 | ((channel * 2 + 1) << 4))
|
||||
self.write16(_AD4115_REG_SETUPCON0, 0x1300)
|
||||
self.single_conversion()
|
||||
|
||||
delay(100*us)
|
||||
adc_code = self.read24(_AD4115_REG_DATA)
|
||||
return ((adc_code / (1 << 23)) - 1) * 2.5 / 0.1
|
||||
|
||||
@kernel
|
||||
def single_conversion(self):
|
||||
"""Place the ADC in single conversion mode.
|
||||
|
||||
The ADC returns to standby mode after the conversion is complete.
|
||||
"""
|
||||
self.write16(_AD4115_REG_ADCMODE, 0x8010)
|
||||
|
||||
@kernel
|
||||
def standby(self):
|
||||
"""Place the ADC in standby mode and disables power down the clock.
|
||||
|
||||
The ADC can be returned to single conversion mode by calling
|
||||
:meth:`single_conversion`.
|
||||
"""
|
||||
# Selecting internal XO (0b00) also disables clock during standby
|
||||
self.write16(_AD4115_REG_ADCMODE, 0x8020)
|
||||
|
||||
@kernel
|
||||
def power_down(self):
|
||||
"""Place the ADC in power-down mode.
|
||||
|
||||
The ADC must be reset before returning to other modes.
|
||||
|
||||
.. note::
|
||||
The AD4115 datasheet suggests placing the ADC in standby mode
|
||||
before power-down. This is to prevent accidental entry into the
|
||||
power-down mode.
|
||||
|
||||
.. seealso::
|
||||
:meth:`standby`
|
||||
|
||||
:meth:`power_up`
|
||||
|
||||
"""
|
||||
self.write16(_AD4115_REG_ADCMODE, 0x8030)
|
||||
|
||||
@kernel
|
||||
def power_up(self):
|
||||
"""Exit the ADC power-down mode.
|
||||
|
||||
The ADC should be in power-down mode before calling this method.
|
||||
|
||||
.. seealso::
|
||||
:meth:`power_down`
|
||||
"""
|
||||
self.reset()
|
||||
# Although the datasheet claims 500 us reset wait time, only waiting
|
||||
# for ~500 us can result in DOUT pin stuck in high
|
||||
delay(2500*us)
|
||||
|
||||
@kernel
|
||||
def calibrate(self, volts, trigger, config, samples=[-5.0, 0.0, 5.0]):
|
||||
"""Calibrate the Shuttler waveform generator using the ADC on the AFE.
|
||||
|
||||
It finds the average slope and offset over the given sample voltages,
and compensates for them by writing the pre-DAC gain and offset
registers of the configuration interface.
|
||||
|
||||
.. note::
|
||||
If the pre-calibration slope rate < 1, the calibration procedure
|
||||
will introduce a pre-DAC gain compensation. However, this may
|
||||
saturate the pre-DAC voltage code. (See :class:`Config` notes).
|
||||
Shuttler cannot cover the entire +/- 10 V range in this case.
|
||||
|
||||
.. seealso::
|
||||
:meth:`Config.set_gain`
|
||||
|
||||
:meth:`Config.set_offset`
|
||||
|
||||
:param volts: A list of all 16 cubic DC-bias splines.
|
||||
(See :class:`DCBias`)
|
||||
:param trigger: The Shuttler spline coefficient update trigger.
|
||||
:param config: The Shuttler Core configuration registers.
|
||||
:param samples: A list of sample voltages for calibration. There must
|
||||
be at least 2 samples to perform slope rate calculation.
|
||||
"""
|
||||
assert len(volts) == 16
|
||||
assert len(samples) > 1
|
||||
|
||||
measurements = [0.0] * len(samples)
|
||||
|
||||
for ch in range(16):
|
||||
# Find the average slope rate and offset
|
||||
for i in range(len(samples)):
|
||||
self.core.break_realtime()
|
||||
volts[ch].set_waveform(
|
||||
shuttler_volt_to_mu(samples[i]), 0, 0, 0)
|
||||
trigger.trigger(1 << ch)
|
||||
measurements[i] = self.read_ch(ch)
|
||||
|
||||
# Find the average output slope
|
||||
slope_sum = 0.0
|
||||
for i in range(len(samples) - 1):
|
||||
slope_sum += (measurements[i+1] - measurements[i])/(samples[i+1] - samples[i])
|
||||
slope_avg = slope_sum / (len(samples) - 1)
|
||||
|
||||
gain_code = int32(1 / slope_avg * (2 ** 16)) & 0xffff
|
||||
|
||||
# Scale the measurements by 1/slope, find average offset
|
||||
offset_sum = 0.0
|
||||
for i in range(len(samples)):
|
||||
offset_sum += (measurements[i] / slope_avg) - samples[i]
|
||||
offset_avg = offset_sum / len(samples)
|
||||
|
||||
offset_code = shuttler_volt_to_mu(-offset_avg)
|
||||
|
||||
self.core.break_realtime()
|
||||
config.set_gain(ch, gain_code)
|
||||
|
||||
delay_mu(int64(self.core.ref_multiplier))
|
||||
config.set_offset(ch, offset_code)
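# Illustrative usage sketch (not part of the driver): running the AFE-assisted
# calibration over all 16 channels. All device names and the
# "shuttler0_dcbias{i}" naming pattern are assumptions.
from artiq.experiment import *


class ShuttlerCalibrate(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("shuttler0_config")
        self.setattr_device("shuttler0_trigger")
        self.setattr_device("shuttler0_adc")
        self.dcbias = [self.get_device("shuttler0_dcbias{}".format(i))
                       for i in range(16)]

    @kernel
    def run(self):
        self.core.reset()
        # Uses the default samples [-5.0, 0.0, 5.0] V on every channel.
        self.shuttler0_adc.calibrate(self.dcbias, self.shuttler0_trigger,
                                     self.shuttler0_config)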
|
|
@ -72,6 +72,10 @@ class SPIMaster:
|
|||
self.channel = channel
|
||||
self.update_xfer_duration_mu(div, length)
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@portable
|
||||
def frequency_to_div(self, f):
|
||||
"""Convert a SPI clock frequency to the closest SPI clock divider."""
|
||||
|
@ -273,9 +277,8 @@ class NRTSPIMaster:
|
|||
def set_config_mu(self, flags=0, length=8, div=6, cs=1):
|
||||
"""Set the ``config`` register.
|
||||
|
||||
Note that the non-realtime SPI cores are usually clocked by the system
|
||||
clock and not the RTIO clock. In many cases, the SPI configuration is
|
||||
already set by the firmware and you do not need to call this method.
|
||||
In many cases, the SPI configuration is already set by the firmware
|
||||
and you do not need to call this method.
|
||||
"""
|
||||
spi_set_config(self.busno, flags, length, div, cs)
|
||||
|
||||
|
|
|
@ -1,228 +0,0 @@
|
|||
from numpy import int32, int64
|
||||
from artiq.language.core import kernel, portable, delay
|
||||
from artiq.coredevice.rtio import rtio_output, rtio_output_wide
|
||||
from artiq.language.types import TInt32, TInt64, TFloat
|
||||
|
||||
|
||||
class Spline:
|
||||
r"""Spline interpolating RTIO channel.
|
||||
|
||||
One knot of a polynomial basis spline (B-spline) :math:`u(t)`
|
||||
is defined by the coefficients :math:`u_n` up to order :math:`n = k`.
|
||||
If the coefficients are evaluated starting at time :math:`t_0`,
the output :math:`u(t)` for :math:`t \ge t_0` is:
|
||||
|
||||
.. math::
|
||||
u(t) &= \sum_{n=0}^k \frac{u_n}{n!} (t - t_0)^n \\
|
||||
&= u_0 + u_1 (t - t_0) + \frac{u_2}{2} (t - t_0)^2 + \dots
|
||||
|
||||
This class contains multiple methods to convert spline knot data from SI
|
||||
to machine units and multiple methods that set the current spline
|
||||
coefficient data. None of these advance the timeline. The :meth:`smooth`
|
||||
method is the only method that advances the timeline.
|
||||
|
||||
:param width: Width in bits of the quantity that this spline controls
|
||||
:param time_width: Width in bits of the time counter of this spline
|
||||
:param channel: RTIO channel number
|
||||
:param core_device: Core device that this spline is attached to
|
||||
:param scale: Scale for conversion between machine units and physical
|
||||
units; to be given as the "full scale physical value".
|
||||
"""
|
||||
|
||||
kernel_invariants = {"channel", "core", "scale", "width",
|
||||
"time_width", "time_scale"}
|
||||
|
||||
def __init__(self, width, time_width, channel, core_device, scale=1.):
|
||||
self.core = core_device
|
||||
self.channel = channel
|
||||
self.width = width
|
||||
self.scale = float((int64(1) << width) / scale)
|
||||
self.time_width = time_width
|
||||
self.time_scale = float((1 << time_width) *
|
||||
core_device.coarse_ref_period)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def to_mu(self, value: TFloat) -> TInt32:
|
||||
"""Convert floating point ``value`` from physical units to 32 bit
|
||||
integer machine units."""
|
||||
return int32(round(value*self.scale))
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def from_mu(self, value: TInt32) -> TFloat:
|
||||
"""Convert 32 bit integer ``value`` from machine units to floating
|
||||
point physical units."""
|
||||
return value/self.scale
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def to_mu64(self, value: TFloat) -> TInt64:
|
||||
"""Convert floating point ``value`` from physical units to 64 bit
|
||||
integer machine units."""
|
||||
return int64(round(value*self.scale))
|
||||
|
||||
@kernel
|
||||
def set_mu(self, value: TInt32):
|
||||
"""Set spline value (machine units).
|
||||
|
||||
:param value: Spline value in integer machine units.
|
||||
"""
|
||||
rtio_output(self.channel << 8, value)
|
||||
|
||||
@kernel(flags={"fast-math"})
|
||||
def set(self, value: TFloat):
|
||||
"""Set spline value.
|
||||
|
||||
:param value: Spline value relative to full-scale.
|
||||
"""
|
||||
if self.width > 32:
|
||||
l = [int32(0)] * 2
|
||||
self.pack_coeff_mu([self.to_mu64(value)], l)
|
||||
rtio_output_wide(self.channel << 8, l)
|
||||
else:
|
||||
rtio_output(self.channel << 8, self.to_mu(value))
|
||||
|
||||
@kernel
|
||||
def set_coeff_mu(self, value): # TList(TInt32)
|
||||
"""Set spline raw values.
|
||||
|
||||
:param value: Spline packed raw values.
|
||||
"""
|
||||
rtio_output_wide(self.channel << 8, value)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def pack_coeff_mu(self, coeff, packed): # TList(TInt64), TList(TInt32)
|
||||
"""Pack coefficients into RTIO data
|
||||
|
||||
:param coeff: TList(TInt64) list of machine units spline coefficients.
|
||||
Lowest (zeroth) order first. The coefficient list is zero-extended
|
||||
by the RTIO gateware.
|
||||
:param packed: TList(TInt32) list for packed RTIO data. Must be
|
||||
pre-allocated. Length in bits is
|
||||
``n*width + (n - 1)*n//2*time_width``
|
||||
"""
|
||||
pos = 0
|
||||
for i in range(len(coeff)):
|
||||
wi = self.width + i*self.time_width
|
||||
ci = coeff[i]
|
||||
while wi != 0:
|
||||
j = pos//32
|
||||
used = pos - 32*j
|
||||
avail = 32 - used
|
||||
if avail > wi:
|
||||
avail = wi
|
||||
cij = int32(ci)
|
||||
if avail != 32:
|
||||
cij &= (1 << avail) - 1
|
||||
packed[j] |= cij << used
|
||||
ci >>= avail
|
||||
wi -= avail
|
||||
pos += avail
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def coeff_to_mu(self, coeff, coeff64): # TList(TFloat), TList(TInt64)
|
||||
"""Convert a floating point list of coefficients into a 64 bit
|
||||
integer (preallocated).
|
||||
|
||||
:param coeff: TList(TFloat) list of coefficients in physical units.
|
||||
:param coeff64: TList(TInt64) preallocated list of coefficients in
|
||||
machine units.
|
||||
"""
|
||||
for i in range(len(coeff)):
|
||||
vi = coeff[i] * self.scale
|
||||
for j in range(i):
|
||||
vi *= self.time_scale
|
||||
ci = int64(round(vi))
|
||||
coeff64[i] = ci
|
||||
# artiq.wavesynth.coefficients.discrete_compensate:
|
||||
if i == 2:
|
||||
coeff64[1] += ci >> self.time_width + 1
|
||||
elif i == 3:
|
||||
coeff64[2] += ci >> self.time_width
|
||||
coeff64[1] += ci // 6 >> 2*self.time_width
|
||||
|
||||
def coeff_as_packed_mu(self, coeff64):
|
||||
"""Pack 64 bit integer machine units coefficients into 32 bit integer
|
||||
RTIO data list.
|
||||
|
||||
This is a host-only method that can be used to generate packed
|
||||
spline coefficient data to be frozen into kernels at compile time.
|
||||
"""
|
||||
n = len(coeff64)
|
||||
width = n*self.width + (n - 1)*n//2*self.time_width
|
||||
packed = [int32(0)] * ((width + 31)//32)
|
||||
self.pack_coeff_mu(coeff64, packed)
|
||||
return packed
|
||||
|
||||
def coeff_as_packed(self, coeff):
|
||||
"""Convert floating point spline coefficients into 32 bit integer
|
||||
packed data.
|
||||
|
||||
This is a host-only method that can be used to generate packed
|
||||
spline coefficient data to be frozen into kernels at compile time.
|
||||
"""
|
||||
coeff64 = [int64(0)] * len(coeff)
|
||||
self.coeff_to_mu(coeff, coeff64)
|
||||
return self.coeff_as_packed_mu(coeff64)
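    # Illustrative host-side precompute path (the "sawg0" device name is an
    # assumption, not part of this driver):
    #   in build():  self.packed = self.sawg0.amplitude1.coeff_as_packed([.5, 1e3])
    #   in a kernel: self.sawg0.amplitude1.set_coeff_mu(self.packed)
    # The float-to-mu conversion and bit packing happen once on the host;
    # the kernel only replays the precomputed 32-bit words.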
|
||||
|
||||
@kernel(flags={"fast-math"})
|
||||
def set_coeff(self, coeff): # TList(TFloat)
|
||||
"""Set spline coefficients.
|
||||
|
||||
Missing coefficients (high order) are zero-extended by the RTIO
gateware.

If more coefficients are supplied than the gateware supports, the extra
coefficients are ignored.
|
||||
|
||||
:param coeff: List of floating point spline coefficients,
|
||||
lowest order (constant) coefficient first. Units are the
|
||||
unit of this spline's value times increasing powers of 1/s.
|
||||
"""
|
||||
n = len(coeff)
|
||||
coeff64 = [int64(0)] * n
|
||||
self.coeff_to_mu(coeff, coeff64)
|
||||
width = n*self.width + (n - 1)*n//2*self.time_width
|
||||
packed = [int32(0)] * ((width + 31)//32)
|
||||
self.pack_coeff_mu(coeff64, packed)
|
||||
self.set_coeff_mu(packed)
|
||||
|
||||
@kernel(flags={"fast-math"})
|
||||
def smooth(self, start: TFloat, stop: TFloat, duration: TFloat,
|
||||
order: TInt32):
|
||||
"""Initiate an interpolated value change.
|
||||
|
||||
For zeroth order (step) interpolation, the step is at
|
||||
``start + duration/2``.
|
||||
|
||||
First order interpolation corresponds to a linear value ramp from
|
||||
``start`` to ``stop`` over ``duration``.
|
||||
|
||||
The third order interpolation is constrained to have zero first
|
||||
order derivative at both `start` and `stop`.
|
||||
|
||||
For first order and third order interpolation (linear and cubic)
|
||||
the interpolator needs to be stopped explicitly at the stop time
|
||||
(e.g. by setting spline coefficient data or starting a new
|
||||
:meth:`smooth` interpolation).
|
||||
|
||||
This method advances the timeline by ``duration``.
|
||||
|
||||
:param start: Initial value of the change. In physical units.
|
||||
:param stop: Final value of the change. In physical units.
|
||||
:param duration: Duration of the interpolation. In physical units.
|
||||
:param order: Order of the interpolation. Only 0, 1,
|
||||
and 3 are valid: step, linear, cubic.
|
||||
"""
|
||||
if order == 0:
|
||||
delay(duration/2.)
|
||||
self.set_coeff([stop])
|
||||
delay(duration/2.)
|
||||
elif order == 1:
|
||||
self.set_coeff([start, (stop - start)/duration])
|
||||
delay(duration)
|
||||
elif order == 3:
|
||||
v2 = 6.*(stop - start)/(duration*duration)
|
||||
self.set_coeff([start, 0., v2, -2.*v2/duration])
|
||||
delay(duration)
|
||||
else:
|
||||
raise ValueError("Invalid interpolation order. "
|
||||
"Supported orders are: 0, 1, 3.")
|
|
@ -1,12 +0,0 @@
|
|||
from artiq.language.core import syscall
|
||||
from artiq.language.types import TInt32, TNone
|
||||
|
||||
|
||||
@syscall(flags={"nounwind", "nowrite"})
|
||||
def mfspr(spr: TInt32) -> TInt32:
|
||||
raise NotImplementedError("syscall not simulated")
|
||||
|
||||
|
||||
@syscall(flags={"nowrite", "nowrite"})
|
||||
def mtspr(spr: TInt32, value: TInt32) -> TNone:
|
||||
raise NotImplementedError("syscall not simulated")
|
|
@ -23,12 +23,12 @@ def y_mu_to_full_scale(y):
|
|||
|
||||
|
||||
@portable
|
||||
def adc_mu_to_volts(x, gain):
|
||||
def adc_mu_to_volts(x, gain, corrected_fs=True):
|
||||
"""Convert servo ADC data from machine units to Volt."""
|
||||
val = (x >> 1) & 0xffff
|
||||
mask = 1 << 15
|
||||
val = -(val & mask) + (val & ~mask)
|
||||
return sampler.adc_mu_to_volt(val, gain)
|
||||
return sampler.adc_mu_to_volt(val, gain, corrected_fs)
|
||||
|
||||
|
||||
class SUServo:
|
||||
|
@ -57,38 +57,38 @@ class SUServo:
|
|||
|
||||
:param channel: RTIO channel number
|
||||
:param pgia_device: Name of the Sampler PGIA gain setting SPI bus
|
||||
:param cpld0_device: Name of the first Urukul CPLD SPI bus
|
||||
:param cpld1_device: Name of the second Urukul CPLD SPI bus
|
||||
:param dds0_device: Name of the AD9910 device for the DDS on the first
|
||||
Urukul
|
||||
:param dds1_device: Name of the AD9910 device for the DDS on the second
|
||||
Urukul
|
||||
:param cpld_devices: Names of the Urukul CPLD SPI buses
|
||||
:param dds_devices: Names of the AD9910 devices
|
||||
:param gains: Initial value for PGIA gains shift register
|
||||
(default: 0x0000). Knowledge of this state is not transferred
|
||||
between experiments.
|
||||
:param sampler_hw_rev: Sampler's revision string
|
||||
:param core_device: Core device name
|
||||
"""
|
||||
kernel_invariants = {"channel", "core", "pgia", "cpld0", "cpld1",
|
||||
"dds0", "dds1", "ref_period_mu"}
|
||||
kernel_invariants = {"channel", "core", "pgia", "cplds", "ddses",
|
||||
"ref_period_mu", "corrected_fs"}
|
||||
|
||||
def __init__(self, dmgr, channel, pgia_device,
|
||||
cpld0_device, cpld1_device,
|
||||
dds0_device, dds1_device,
|
||||
gains=0x0000, core_device="core"):
|
||||
cpld_devices, dds_devices,
|
||||
gains=0x0000, sampler_hw_rev="v2.2", core_device="core"):
|
||||
|
||||
self.core = dmgr.get(core_device)
|
||||
self.pgia = dmgr.get(pgia_device)
|
||||
self.pgia.update_xfer_duration_mu(div=4, length=16)
|
||||
self.dds0 = dmgr.get(dds0_device)
|
||||
self.dds1 = dmgr.get(dds1_device)
|
||||
self.cpld0 = dmgr.get(cpld0_device)
|
||||
self.cpld1 = dmgr.get(cpld1_device)
|
||||
assert len(dds_devices) == len(cpld_devices)
|
||||
self.ddses = [dmgr.get(dds) for dds in dds_devices]
|
||||
self.cplds = [dmgr.get(cpld) for cpld in cpld_devices]
|
||||
self.channel = channel
|
||||
self.gains = gains
|
||||
self.ref_period_mu = self.core.seconds_to_mu(
|
||||
self.core.coarse_ref_period)
|
||||
self.corrected_fs = sampler.Sampler.use_corrected_fs(sampler_hw_rev)
|
||||
assert self.ref_period_mu == self.core.ref_multiplier
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def init(self):
|
||||
"""Initialize the servo, Sampler and both Urukuls.
|
||||
|
@ -109,17 +109,15 @@ class SUServo:
|
|||
sampler.SPI_CONFIG | spi.SPI_END,
|
||||
16, 4, sampler.SPI_CS_PGIA)
|
||||
|
||||
self.cpld0.init(blind=True)
|
||||
cfg0 = self.cpld0.cfg_reg
|
||||
self.cpld0.cfg_write(cfg0 | (0xf << urukul.CFG_MASK_NU))
|
||||
self.dds0.init(blind=True)
|
||||
self.cpld0.cfg_write(cfg0)
|
||||
for i in range(len(self.cplds)):
|
||||
cpld = self.cplds[i]
|
||||
dds = self.ddses[i]
|
||||
|
||||
self.cpld1.init(blind=True)
|
||||
cfg1 = self.cpld1.cfg_reg
|
||||
self.cpld1.cfg_write(cfg1 | (0xf << urukul.CFG_MASK_NU))
|
||||
self.dds1.init(blind=True)
|
||||
self.cpld1.cfg_write(cfg1)
|
||||
cpld.init(blind=True)
|
||||
prev_cpld_cfg = cpld.cfg_reg
|
||||
cpld.cfg_write(prev_cpld_cfg | (0xf << urukul.CFG_MASK_NU))
|
||||
dds.init(blind=True)
|
||||
cpld.cfg_write(prev_cpld_cfg)
|
||||
|
||||
@kernel
|
||||
def write(self, addr, value):
|
||||
|
@ -242,7 +240,7 @@ class SUServo:
|
|||
"""
|
||||
val = self.get_adc_mu(channel)
|
||||
gain = (self.gains >> (channel*2)) & 0b11
|
||||
return adc_mu_to_volts(val, gain)
|
||||
return adc_mu_to_volts(val, gain, self.corrected_fs)
|
||||
|
||||
|
||||
class Channel:
|
||||
|
@ -257,9 +255,15 @@ class Channel:
|
|||
self.servo = dmgr.get(servo_device)
|
||||
self.core = self.servo.core
|
||||
self.channel = channel
|
||||
# FIXME: this assumes the mem channel is right after the control
|
||||
# channels
|
||||
self.servo_channel = self.channel + 8 - self.servo.channel
|
||||
# This assumes the mem channel is right after the control channels
|
||||
# Make sure this is always the case in eem.py
|
||||
self.servo_channel = (self.channel + 4 * len(self.servo.cplds) -
|
||||
self.servo.channel)
|
||||
self.dds = self.servo.ddses[self.servo_channel // 4]
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def set(self, en_out, en_iir=0, profile=0):
|
||||
|
@ -311,12 +315,8 @@ class Channel:
|
|||
see :meth:`dds_offset_to_mu`
|
||||
:param phase: DDS phase in turns
|
||||
"""
|
||||
if self.servo_channel < 4:
|
||||
dds = self.servo.dds0
|
||||
else:
|
||||
dds = self.servo.dds1
|
||||
ftw = dds.frequency_to_ftw(frequency)
|
||||
pow_ = dds.turns_to_pow(phase)
|
||||
ftw = self.dds.frequency_to_ftw(frequency)
|
||||
pow_ = self.dds.turns_to_pow(phase)
|
||||
offs = self.dds_offset_to_mu(offset)
|
||||
self.set_dds_mu(profile, ftw, offs, pow_)
|
||||
|
||||
|
|
|
@ -4,20 +4,21 @@ class TRF372017:
|
|||
For possible values, documentation, and explanation, see the datasheet.
|
||||
https://www.ti.com/lit/gpn/trf372017
|
||||
"""
|
||||
rdiv = 21 # 13b
|
||||
rdiv = 2 # 13b - highest valid f_PFD
|
||||
ref_inv = 0
|
||||
neg_vco = 1
|
||||
icp = 0 # 1.94 mA, 5b
|
||||
icp_double = 0
|
||||
cal_clk_sel = 12 # /16, 4b
|
||||
cal_clk_sel = 0b1110 # div64, 4b
|
||||
|
||||
ndiv = 420 # 16b
|
||||
pll_div_sel = 0 # /1, 2b
|
||||
prsc_sel = 1 # 8/9
|
||||
# default f_vco is 2.875 GHz
|
||||
nint = 23 # 16b - lowest value suitable for fractional & integer mode
|
||||
pll_div_sel = 0b01 # div2, 2b
|
||||
prsc_sel = 0 # 4/5
|
||||
vco_sel = 2 # 2b
|
||||
vcosel_mode = 0
|
||||
cal_acc = 0b00 # 2b
|
||||
en_cal = 1
|
||||
en_cal = 0 # leave at 0 - calibration is performed in `Phaser.init()`
|
||||
|
||||
nfrac = 0 # 25b
|
||||
|
||||
|
@ -27,9 +28,9 @@ class TRF372017:
|
|||
pwd_vcomux = 0
|
||||
pwd_div124 = 0
|
||||
pwd_presc = 0
|
||||
pwd_out_buff = 1
|
||||
pwd_lo_div = 1
|
||||
pwd_tx_div = 0
|
||||
pwd_out_buff = 1 # leave at 1 - only enable outputs after calibration
|
||||
pwd_lo_div = 1 # leave at 1 - only enable outputs after calibration
|
||||
pwd_tx_div = 1 # leave at 1 - only enable outputs after calibration
|
||||
pwd_bb_vcm = 0
|
||||
pwd_dc_off = 0
|
||||
en_extvco = 0
|
||||
|
@ -59,8 +60,8 @@ class TRF372017:
|
|||
ioff = 0x80 # 8b
|
||||
qoff = 0x80 # 8b
|
||||
vref_sel = 4 # 0.85 V, 3b
|
||||
tx_div_sel = 1 # div2, 2b
|
||||
lo_div_sel = 3 # div8, 2b
|
||||
tx_div_sel = 0 # div1, 2b
|
||||
lo_div_sel = 0 # div1, 2b
|
||||
tx_div_bias = 1 # 37.5 µA, 2b
|
||||
lo_div_bias = 2 # 50 µA, 2b
|
||||
|
||||
|
@ -84,6 +85,7 @@ class TRF372017:
|
|||
setattr(self, key, value)
|
||||
|
||||
def get_mmap(self):
|
||||
"""Memory map for TRF372017"""
|
||||
mmap = []
|
||||
mmap.append(
|
||||
0x9 |
|
||||
|
@ -92,9 +94,10 @@ class TRF372017:
|
|||
(self.cal_clk_sel << 27))
|
||||
mmap.append(
|
||||
0xa |
|
||||
(self.ndiv << 5) | (self.pll_div_sel << 21) | (self.prsc_sel << 23) |
|
||||
(self.vco_sel << 26) | (self.vcosel_mode << 28) |
|
||||
(self.cal_acc << 29) | (self.en_cal << 31))
|
||||
(self.nint << 5) | (self.pll_div_sel << 21) |
|
||||
(self.prsc_sel << 23) | (self.vco_sel << 26) |
|
||||
(self.vcosel_mode << 28) | (self.cal_acc << 29) |
|
||||
(self.en_cal << 31))
|
||||
mmap.append(0xb | (self.nfrac << 5))
|
||||
mmap.append(
|
||||
0xc |
|
||||
|
|
|
@ -36,6 +36,10 @@ class TTLOut:
|
|||
self.channel = channel
|
||||
self.target_o = channel << 8
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def output(self):
|
||||
pass
|
||||
|
@ -128,6 +132,10 @@ class TTLInOut:
|
|||
self.target_sens = (channel << 8) + 2
|
||||
self.target_sample = (channel << 8) + 3
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@kernel
|
||||
def set_oe(self, oe):
|
||||
rtio_output(self.target_oe, 1 if oe else 0)
|
||||
|
@ -465,6 +473,10 @@ class TTLClockGen:
|
|||
|
||||
self.acc_width = numpy.int64(acc_width)
|
||||
|
||||
@staticmethod
|
||||
def get_rtio_channels(channel, **kwargs):
|
||||
return [(channel, None)]
|
||||
|
||||
@portable
|
||||
def frequency_to_ftw(self, frequency):
|
||||
"""Returns the frequency tuning word corresponding to the given
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
from numpy import int32, int64
|
||||
|
||||
from artiq.language.core import kernel, delay, portable, at_mu, now_mu
|
||||
from artiq.language.units import us, ms
|
||||
|
||||
from numpy import int32, int64
|
||||
from artiq.language.types import TInt32, TFloat, TBool
|
||||
|
||||
from artiq.coredevice import spi2 as spi
|
||||
|
||||
|
||||
SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END |
|
||||
0*spi.SPI_INPUT | 1*spi.SPI_CS_POLARITY |
|
||||
0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE |
|
||||
0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
|
||||
SPI_CONFIG = (0 * spi.SPI_OFFLINE | 0 * spi.SPI_END |
|
||||
0 * spi.SPI_INPUT | 1 * spi.SPI_CS_POLARITY |
|
||||
0 * spi.SPI_CLK_POLARITY | 0 * spi.SPI_CLK_PHASE |
|
||||
0 * spi.SPI_LSB_FIRST | 0 * spi.SPI_HALF_DUPLEX)
|
||||
|
||||
# SPI clock write and read dividers
|
||||
SPIT_CFG_WR = 2
|
||||
|
@ -52,6 +52,9 @@ CS_DDS_CH1 = 5
|
|||
CS_DDS_CH2 = 6
|
||||
CS_DDS_CH3 = 7
|
||||
|
||||
# Default profile
|
||||
DEFAULT_PROFILE = 7
|
||||
|
||||
|
||||
@portable
|
||||
def urukul_cfg(rf_sw, led, profile, io_update, mask_nu,
|
||||
|
@ -105,7 +108,7 @@ class _RegIOUpdate:
|
|||
self.cpld = cpld
|
||||
|
||||
@kernel
|
||||
def pulse(self, t):
|
||||
def pulse(self, t: TFloat):
|
||||
cfg = self.cpld.cfg_reg
|
||||
self.cpld.cfg_write(cfg | (1 << CFG_IO_UPDATE))
|
||||
delay(t)
|
||||
|
@ -117,7 +120,7 @@ class _DummySync:
|
|||
self.cpld = cpld
|
||||
|
||||
@kernel
|
||||
def set_mu(self, ftw):
|
||||
def set_mu(self, ftw: TInt32):
|
||||
pass
|
||||
|
||||
|
||||
|
@ -188,7 +191,7 @@ class CPLD:
|
|||
assert sync_div is None
|
||||
sync_div = 0
|
||||
|
||||
self.cfg_reg = urukul_cfg(rf_sw=rf_sw, led=0, profile=0,
|
||||
self.cfg_reg = urukul_cfg(rf_sw=rf_sw, led=0, profile=DEFAULT_PROFILE,
|
||||
io_update=0, mask_nu=0, clk_sel=clk_sel,
|
||||
sync_sel=sync_sel,
|
||||
rst=0, io_rst=0, clk_div=clk_div)
|
||||
|
@ -196,12 +199,12 @@ class CPLD:
|
|||
self.sync_div = sync_div
|
||||
|
||||
@kernel
|
||||
def cfg_write(self, cfg):
|
||||
def cfg_write(self, cfg: TInt32):
|
||||
"""Write to the configuration register.
|
||||
|
||||
See :func:`urukul_cfg` for possible flags.
|
||||
|
||||
:param data: 24 bit data to be written. Will be stored at
|
||||
:param cfg: 24 bit data to be written. Will be stored at
|
||||
:attr:`cfg_reg`.
|
||||
"""
|
||||
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 24,
|
||||
|
@ -210,7 +213,7 @@ class CPLD:
|
|||
self.cfg_reg = cfg
|
||||
|
||||
@kernel
|
||||
def sta_read(self):
|
||||
def sta_read(self) -> TInt32:
|
||||
"""Read the status register.
|
||||
|
||||
Use any of the following functions to extract values:
|
||||
|
@ -229,7 +232,7 @@ class CPLD:
|
|||
return self.bus.read()
|
||||
|
||||
@kernel
|
||||
def init(self, blind=False):
|
||||
def init(self, blind: TBool = False):
|
||||
"""Initialize and detect Urukul.
|
||||
|
||||
Resets the DDS I/O interface and verifies correct CPLD gateware
|
||||
|
@ -247,12 +250,12 @@ class CPLD:
|
|||
proto_rev = urukul_sta_proto_rev(self.sta_read())
|
||||
if proto_rev != STA_PROTO_REV_MATCH:
|
||||
raise ValueError("Urukul proto_rev mismatch")
|
||||
delay(100*us) # reset, slack
|
||||
delay(100 * us) # reset, slack
|
||||
self.cfg_write(cfg)
|
||||
if self.sync_div:
|
||||
at_mu(now_mu() & ~0xf) # align to RTIO/2
|
||||
self.set_sync_div(self.sync_div) # 125 MHz/2 = 1 GHz/16
|
||||
delay(1*ms) # DDS wake up
|
||||
delay(1 * ms) # DDS wake up
|
||||
|
||||
@kernel
|
||||
def io_rst(self):
|
||||
|
@ -261,7 +264,7 @@ class CPLD:
|
|||
self.cfg_write(self.cfg_reg & ~(1 << CFG_IO_RST))
|
||||
|
||||
@kernel
|
||||
def cfg_sw(self, channel, on):
|
||||
def cfg_sw(self, channel: TInt32, on: TBool):
|
||||
"""Configure the RF switches through the configuration register.
|
||||
|
||||
These values are logically OR-ed with the LVDS lines on EEM1.
|
||||
|
@ -277,19 +280,41 @@ class CPLD:
|
|||
self.cfg_write(c)
|
||||
|
||||
@kernel
|
||||
def cfg_switches(self, state):
|
||||
def cfg_switches(self, state: TInt32):
|
||||
"""Configure all four RF switches through the configuration register.
|
||||
|
||||
:param state: RF switch state as a 4 bit integer.
|
||||
"""
|
||||
self.cfg_write((self.cfg_reg & ~0xf) | state)
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def mu_to_att(self, att_mu: TInt32) -> TFloat:
|
||||
"""Convert a digital attenuation setting to dB.
|
||||
|
||||
:param att_mu: Digital attenuation setting.
|
||||
:return: Attenuation setting in dB.
|
||||
"""
|
||||
return (255 - (att_mu & 0xff)) / 8
|
||||
|
||||
@portable(flags={"fast-math"})
|
||||
def att_to_mu(self, att: TFloat) -> TInt32:
|
||||
"""Convert an attenuation setting in dB to machine units.
|
||||
|
||||
:param att: Attenuation setting in dB.
|
||||
:return: Digital attenuation setting.
|
||||
"""
|
||||
code = int32(255) - int32(round(att * 8))
|
||||
if code < 0 or code > 255:
|
||||
raise ValueError("Invalid urukul.CPLD attenuation!")
|
||||
return code
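    # Worked examples of the two conversions above (host-side arithmetic):
    #   att_to_mu(10.0) -> 255 - round(8 * 10.0) = 175
    #   mu_to_att(175)  -> (255 - 175) / 8       = 10.0 dB
    #   att_to_mu(31.5) -> 3    (maximum attenuation)
    #   att_to_mu(0.0)  -> 255  (minimum attenuation)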
|
||||
|
||||
@kernel
|
||||
def set_att_mu(self, channel, att):
|
||||
def set_att_mu(self, channel: TInt32, att: TInt32):
|
||||
"""Set digital step attenuator in machine units.
|
||||
|
||||
This method will also write the attenuator settings of the three other channels. Use
|
||||
:meth:`get_att_mu` to retrieve the hardware state set in previous experiments.
|
||||
This method will also write the attenuator settings of the three
|
||||
other channels. Use :meth:`get_att_mu` to retrieve the hardware
|
||||
state set in previous experiments.
|
||||
|
||||
:param channel: Attenuator channel (0-3).
|
||||
:param att: 8-bit digital attenuation setting:
|
||||
|
@ -300,7 +325,7 @@ class CPLD:
|
|||
self.set_all_att_mu(a)
|
||||
|
||||
@kernel
|
||||
def set_all_att_mu(self, att_reg):
|
||||
def set_all_att_mu(self, att_reg: TInt32):
|
||||
"""Set all four digital step attenuators (in machine units).
|
||||
|
||||
.. seealso:: :meth:`set_att_mu`
|
||||
|
@ -313,7 +338,7 @@ class CPLD:
|
|||
self.att_reg = att_reg
|
||||
|
||||
@kernel
|
||||
def set_att(self, channel, att):
|
||||
def set_att(self, channel: TInt32, att: TFloat):
|
||||
"""Set digital step attenuator in SI units.
|
||||
|
||||
This method will write the attenuator settings of all four channels.
|
||||
|
@ -325,16 +350,16 @@ class CPLD:
|
|||
attenuation. Minimum attenuation is 0*dB, maximum attenuation is
|
||||
31.5*dB.
|
||||
"""
|
||||
code = 255 - int32(round(att*8))
|
||||
if code < 0 or code > 255:
|
||||
raise ValueError("Invalid urukul.CPLD attenuation!")
|
||||
self.set_att_mu(channel, code)
|
||||
self.set_att_mu(channel, self.att_to_mu(att))
|
||||
|
||||
@kernel
|
||||
def get_att_mu(self):
|
||||
def get_att_mu(self) -> TInt32:
|
||||
"""Return the digital step attenuator settings in machine units.
|
||||
|
||||
The result is stored and will be used in future calls of :meth:`set_att_mu`.
|
||||
The result is stored and will be used in future calls of
|
||||
:meth:`set_att_mu` and :meth:`set_att`.
|
||||
|
||||
.. seealso:: :meth:`get_channel_att_mu`
|
||||
|
||||
:return: 32 bit attenuator settings
|
||||
"""
|
||||
|
@ -343,13 +368,41 @@ class CPLD:
|
|||
self.bus.write(0) # shift in zeros, shift out current value
|
||||
self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 32,
|
||||
SPIT_ATT_WR, CS_ATT)
|
||||
delay(10*us)
|
||||
delay(10 * us)
|
||||
self.att_reg = self.bus.read()
|
||||
self.bus.write(self.att_reg) # shift in current value again and latch
|
||||
return self.att_reg
|
||||
|
||||
@kernel
|
||||
def set_sync_div(self, div):
|
||||
def get_channel_att_mu(self, channel: TInt32) -> TInt32:
|
||||
"""Get digital step attenuator value for a channel in machine units.
|
||||
|
||||
The result is stored and will be used in future calls of
|
||||
:meth:`set_att_mu` and :meth:`set_att`.
|
||||
|
||||
.. seealso:: :meth:`get_att_mu`
|
||||
|
||||
:param channel: Attenuator channel (0-3).
|
||||
:return: 8-bit digital attenuation setting:
|
||||
255 minimum attenuation, 0 maximum attenuation (31.5 dB)
|
||||
"""
|
||||
return int32((self.get_att_mu() >> (channel * 8)) & 0xff)
|
||||
|
||||
@kernel
|
||||
def get_channel_att(self, channel: TInt32) -> TFloat:
|
||||
"""Get digital step attenuator value for a channel in SI units.
|
||||
|
||||
.. seealso:: :meth:`get_channel_att_mu`
|
||||
|
||||
:param channel: Attenuator channel (0-3).
|
||||
:return: Attenuation setting in dB. Higher value is more
|
||||
attenuation. Minimum attenuation is 0*dB, maximum attenuation is
|
||||
31.5*dB.
|
||||
"""
|
||||
return self.mu_to_att(self.get_channel_att_mu(channel))
|
||||
|
||||
@kernel
|
||||
def set_sync_div(self, div: TInt32):
|
||||
"""Set the SYNC_IN AD9910 pulse generator frequency
|
||||
and align it to the current RTIO timestamp.
|
||||
|
||||
|
@ -361,12 +414,12 @@ class CPLD:
|
|||
Minimum division ratio is 2. Maximum division ratio is 16.
|
||||
"""
|
||||
ftw_max = 1 << 4
|
||||
ftw = ftw_max//div
|
||||
assert ftw*div == ftw_max
|
||||
ftw = ftw_max // div
|
||||
assert ftw * div == ftw_max
|
||||
self.sync.set_mu(ftw)
|
||||
|
||||
@kernel
|
||||
def set_profile(self, profile):
|
||||
def set_profile(self, profile: TInt32):
|
||||
"""Set the PROFILE pins.
|
||||
|
||||
The PROFILE pins are common to all four DDS channels.
|
||||
|
|
|
@ -27,15 +27,15 @@ class Zotino(AD53xx):
|
|||
:param clr_device: CLR RTIO TTLOut channel name.
|
||||
:param div_write: SPI clock divider for write operations (default: 4,
|
||||
50MHz max SPI clock)
|
||||
:param div_read: SPI clock divider for read operations (default: 8, not
|
||||
optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO
|
||||
valid)
|
||||
:param div_read: SPI clock divider for read operations (default: 16, not
|
||||
optimized for speed; datasheet says t22: 25ns min SCLK edge to SDO
|
||||
valid, and suggests the SPI speed for reads should be <=20 MHz)
|
||||
:param vref: DAC reference voltage (default: 5.)
|
||||
:param core_device: Core device name (default: "core")
|
||||
"""
|
||||
|
||||
def __init__(self, dmgr, spi_device, ldac_device=None, clr_device=None,
|
||||
div_write=4, div_read=8, vref=5., core="core"):
|
||||
div_write=4, div_read=16, vref=5., core="core"):
|
||||
AD53xx.__init__(self, dmgr=dmgr, spi_device=spi_device,
|
||||
ldac_device=ldac_device, clr_device=clr_device,
|
||||
chip_select=_SPI_CS_DAC, div_write=div_write,
|
||||
|
|
|
@ -45,6 +45,7 @@ class AppletsCCBDock(applets.AppletsDock):
|
|||
self.ccbp_group_action.setMenu(ccbp_group_menu)
|
||||
self.table.addAction(self.ccbp_group_action)
|
||||
self.table.itemSelectionChanged.connect(self.update_group_ccbp_menu)
|
||||
self.update_group_ccbp_menu()
|
||||
|
||||
ccbp_global_menu = QtWidgets.QMenu()
|
||||
actiongroup = QtWidgets.QActionGroup(self.table)
|
||||
|
|
|
@ -3,8 +3,9 @@ import logging
|
|||
|
||||
import numpy as np
|
||||
from PyQt5 import QtCore, QtWidgets
|
||||
from sipyco import pyon
|
||||
|
||||
from artiq.tools import short_format
|
||||
from artiq.tools import scale_from_metadata, short_format, exc_to_warning
|
||||
from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel
|
||||
from artiq.gui.models import DictSyncTreeSepModel
|
||||
from artiq.gui.scientific_spinbox import ScientificSpinBox
|
||||
|
@ -13,73 +14,152 @@ from artiq.gui.scientific_spinbox import ScientificSpinBox
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Editor(QtWidgets.QDialog):
    def __init__(self, parent, dataset_ctl, key, value):
async def rename(key, new_key, value, metadata, persist, dataset_ctl):
    if key != new_key:
        await dataset_ctl.delete(key)
    await dataset_ctl.set(new_key, value, metadata=metadata, persist=persist)


class CreateEditDialog(QtWidgets.QDialog):
    def __init__(self, parent, dataset_ctl, key=None, value=None, metadata=None, persist=False):
        QtWidgets.QDialog.__init__(self, parent=parent)
        self.dataset_ctl = dataset_ctl
        self.key = key
        self.initial_type = type(value)

        self.setWindowTitle("Edit dataset")
        self.setWindowTitle("Create dataset" if key is None else "Edit dataset")
        grid = QtWidgets.QGridLayout()
        grid.setRowMinimumHeight(1, 40)
        grid.setColumnMinimumWidth(2, 60)
        self.setLayout(grid)

        grid.addWidget(QtWidgets.QLabel("Name:"), 0, 0)
        grid.addWidget(QtWidgets.QLabel(key), 0, 1)
        self.name_widget = QtWidgets.QLineEdit()
        grid.addWidget(self.name_widget, 0, 1)

        grid.addWidget(QtWidgets.QLabel("Value:"), 1, 0)
        grid.addWidget(self.get_edit_widget(value), 1, 1)
        self.value_widget = QtWidgets.QLineEdit()
        self.value_widget.setPlaceholderText('PYON (Python)')
        grid.addWidget(self.value_widget, 1, 1)
        self.data_type = QtWidgets.QLabel("data type")
        grid.addWidget(self.data_type, 1, 2)
        self.value_widget.textChanged.connect(self.dtype)

        buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        grid.setRowStretch(2, 1)
        grid.addWidget(buttons, 3, 0, 1, 2)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        grid.addWidget(QtWidgets.QLabel("Unit:"), 2, 0)
        self.unit_widget = QtWidgets.QLineEdit()
        grid.addWidget(self.unit_widget, 2, 1)

        grid.addWidget(QtWidgets.QLabel("Scale:"), 3, 0)
        self.scale_widget = QtWidgets.QLineEdit()
        grid.addWidget(self.scale_widget, 3, 1)

        grid.addWidget(QtWidgets.QLabel("Precision:"), 4, 0)
        self.precision_widget = QtWidgets.QLineEdit()
        grid.addWidget(self.precision_widget, 4, 1)

        grid.addWidget(QtWidgets.QLabel("Persist:"), 5, 0)
        self.box_widget = QtWidgets.QCheckBox()
        grid.addWidget(self.box_widget, 5, 1)

        self.ok = QtWidgets.QPushButton('&Ok')
        self.ok.setEnabled(False)
        self.cancel = QtWidgets.QPushButton('&Cancel')
        self.buttons = QtWidgets.QDialogButtonBox(self)
        self.buttons.addButton(
            self.ok, QtWidgets.QDialogButtonBox.AcceptRole)
        self.buttons.addButton(
            self.cancel, QtWidgets.QDialogButtonBox.RejectRole)
        grid.setRowStretch(6, 1)
        grid.addWidget(self.buttons, 7, 0, 1, 3, alignment=QtCore.Qt.AlignHCenter)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

        self.key = key
        self.name_widget.setText(key)

        value_edit_string = self.value_to_edit_string(value)
        if metadata is not None:
            scale = scale_from_metadata(metadata)
            t = value.dtype if value is np.ndarray else type(value)
            if scale != 1 and np.issubdtype(t, np.number):
                # degenerates to float type
                value_edit_string = self.value_to_edit_string(
                    np.float64(value / scale))
            self.unit_widget.setText(metadata.get('unit', ''))
            self.scale_widget.setText(str(metadata.get('scale', '')))
            self.precision_widget.setText(str(metadata.get('precision', '')))

        self.value_widget.setText(value_edit_string)
        self.box_widget.setChecked(persist)

    def accept(self):
        value = self.initial_type(self.get_edit_widget_value())
        asyncio.ensure_future(self.dataset_ctl.set(self.key, value))
        key = self.name_widget.text()
        value = self.value_widget.text()
        persist = self.box_widget.isChecked()
        unit = self.unit_widget.text()
        scale = self.scale_widget.text()
        precision = self.precision_widget.text()
        metadata = {}
        if unit != "":
            metadata['unit'] = unit
        if scale != "":
            metadata['scale'] = float(scale)
        if precision != "":
            metadata['precision'] = int(precision)
        scale = scale_from_metadata(metadata)
        value = self.parse_edit_string(value)
        t = value.dtype if value is np.ndarray else type(value)
        if scale != 1 and np.issubdtype(t, np.number):
            # degenerates to float type
            value = np.float64(value * scale)
        if self.key and self.key != key:
            asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, self.dataset_ctl)))
        else:
            asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(key, value, metadata=metadata, persist=persist)))
        self.key = key
        QtWidgets.QDialog.accept(self)

    def get_edit_widget(self, initial_value):
        raise NotImplementedError

    def dtype(self):
        txt = self.value_widget.text()
        try:
            result = self.parse_edit_string(txt)
            # ensure only pyon compatible types are permissable
            pyon.encode(result)
        except:
            pixmap = self.style().standardPixmap(
                QtWidgets.QStyle.SP_MessageBoxWarning)
            self.data_type.setPixmap(pixmap)
            self.ok.setEnabled(False)
        else:
            self.data_type.setText(type(result).__name__)
            self.ok.setEnabled(True)

    def get_edit_widget_value(self):
        raise NotImplementedError

    @staticmethod
    def parse_edit_string(s):
        if s == "":
            raise TypeError
        _eval_dict = {
            "__builtins__": {},
            "array": np.array,
            "null": np.nan,
            "inf": np.inf
        }
        for t_ in pyon._numpy_scalar:
            _eval_dict[t_] = eval("np.{}".format(t_), {"np": np})
        return eval(s, _eval_dict, {})
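For illustration only (not part of the diff): a minimal sketch of how the free-form "Value" field is interpreted by parse_edit_string() above, and how the unit/scale metadata entered in the dialog relates to the stored value. The sample strings are assumptions; the parsing rules are the ones defined in the method.

# Illustrative sketch, assuming the surrounding module imports
# (numpy as np, sipyco pyon) are available.
for text in ["42", "3.14", "array([1, 2, 3])", "inf"]:
    v = CreateEditDialog.parse_edit_string(text)
    print(text, "->", type(v).__name__)

# With metadata {"unit": "MHz", "scale": 1e6}, the dialog displays value/scale
# and accept() stores np.float64(displayed * scale): entering "10" with a
# 1e6 scale persists 1e7, i.e. 10 MHz expressed in Hz.
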

class NumberEditor(Editor):
    def get_edit_widget(self, initial_value):
        self.edit_widget = ScientificSpinBox()
        self.edit_widget.setDecimals(13)
        self.edit_widget.setPrecision()
        self.edit_widget.setRelativeStep()
        self.edit_widget.setValue(float(initial_value))
        return self.edit_widget

    def get_edit_widget_value(self):
        return self.edit_widget.value()


class BoolEditor(Editor):
    def get_edit_widget(self, initial_value):
        self.edit_widget = QtWidgets.QCheckBox()
        self.edit_widget.setChecked(bool(initial_value))
        return self.edit_widget

    def get_edit_widget_value(self):
        return self.edit_widget.isChecked()


class StringEditor(Editor):
    def get_edit_widget(self, initial_value):
        self.edit_widget = QtWidgets.QLineEdit()
        self.edit_widget.setText(initial_value)
        return self.edit_widget

    def get_edit_widget_value(self):
        return self.edit_widget.text()

    @staticmethod
    def value_to_edit_string(v):
        t = type(v)
        r = ""
        if isinstance(v, np.generic):
            r += t.__name__
            r += "("
            r += repr(v)
            r += ")"
        elif v is None:
            return r
        else:
            r += repr(v)
        return r


class Model(DictSyncTreeSepModel):

@@ -92,13 +172,13 @@ class Model(DictSyncTreeSepModel):
        if column == 1:
            return "Y" if v[0] else "N"
        elif column == 2:
            return short_format(v[1])
            return short_format(v[1], v[2])
        else:
            raise ValueError


class DatasetsDock(QtWidgets.QDockWidget):
    def __init__(self, datasets_sub, dataset_ctl):
    def __init__(self, dataset_sub, dataset_ctl):
        QtWidgets.QDockWidget.__init__(self, "Datasets")
        self.setObjectName("Datasets")
        self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -120,6 +200,11 @@ class DatasetsDock(QtWidgets.QDockWidget):
        grid.addWidget(self.table, 1, 0)

        self.table.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
        create_action = QtWidgets.QAction("New dataset", self.table)
        create_action.triggered.connect(self.create_clicked)
        create_action.setShortcut("CTRL+N")
        create_action.setShortcutContext(QtCore.Qt.WidgetShortcut)
        self.table.addAction(create_action)
        edit_action = QtWidgets.QAction("Edit dataset", self.table)
        edit_action.triggered.connect(self.edit_clicked)
        edit_action.setShortcut("RETURN")

@@ -133,7 +218,7 @@ class DatasetsDock(QtWidgets.QDockWidget):
        self.table.addAction(delete_action)

        self.table_model = Model(dict())
        datasets_sub.add_setmodel_callback(self.set_model)
        dataset_sub.add_setmodel_callback(self.set_model)

    def _search_datasets(self):
        if hasattr(self, "table_model_filter"):

@@ -146,25 +231,17 @@ class DatasetsDock(QtWidgets.QDockWidget):
        self.table_model_filter.setSourceModel(self.table_model)
        self.table.setModel(self.table_model_filter)

    def create_clicked(self):
        CreateEditDialog(self, self.dataset_ctl).open()

    def edit_clicked(self):
        idx = self.table.selectedIndexes()
        if idx:
            idx = self.table_model_filter.mapToSource(idx[0])
            key = self.table_model.index_to_key(idx)
            if key is not None:
                persist, value = self.table_model.backing_store[key]
                t = type(value)
                if np.issubdtype(t, np.number):
                    dialog_cls = NumberEditor
                elif np.issubdtype(t, np.bool_):
                    dialog_cls = BoolEditor
                elif np.issubdtype(t, np.unicode_):
                    dialog_cls = StringEditor
                else:
                    logger.error("Cannot edit dataset %s: "
                                 "type %s is not supported", key, t)
                    return
                dialog_cls(self, self.dataset_ctl, key, value).open()
                persist, value, metadata = self.table_model.backing_store[key]
                CreateEditDialog(self, self.dataset_ctl, key, value, metadata, persist).open()

    def delete_clicked(self):
        idx = self.table.selectedIndexes()

@@ -11,7 +11,9 @@ from sipyco import pyon

from artiq.gui.entries import procdesc_to_entry, ScanEntry
from artiq.gui.fuzzy_select import FuzzySelectWidget
from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
from artiq.gui.tools import (LayoutWidget, WheelFilter,
                             log_level_to_name, get_open_file_name)
from artiq.tools import parse_devarg_override, unparse_devarg_override


logger = logging.getLogger(__name__)

@@ -23,15 +25,6 @@ logger = logging.getLogger(__name__)
# 2. file:<class name>@<file name>


class _WheelFilter(QtCore.QObject):
    def eventFilter(self, obj, event):
        if (event.type() == QtCore.QEvent.Wheel and
                event.modifiers() != QtCore.Qt.NoModifier):
            event.ignore()
            return True
        return False


class _ArgumentEditor(QtWidgets.QTreeWidget):
    def __init__(self, manager, dock, expurl):
        self.manager = manager

@@ -55,7 +48,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
        self.setStyleSheet("QTreeWidget {background: " +
                           self.palette().midlight().color().name() + " ;}")

        self.viewport().installEventFilter(_WheelFilter(self.viewport()))
        self.viewport().installEventFilter(WheelFilter(self.viewport(), True))

        self._groups = dict()
        self._arg_to_widgets = dict()

@@ -159,12 +152,29 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
        self._groups[name] = group
        return group

    def update_argument(self, name, argument):
        widgets = self._arg_to_widgets[name]

        # Qt needs a setItemWidget() to handle layout correctly,
        # simply replacing the entry inside the LayoutWidget
        # results in a bug.

        widgets["entry"].deleteLater()
        widgets["entry"] = procdesc_to_entry(argument["desc"])(argument)
        widgets["disable_other_scans"].setVisible(
            isinstance(widgets["entry"], ScanEntry))
        widgets["fix_layout"].deleteLater()
        widgets["fix_layout"] = LayoutWidget()
        widgets["fix_layout"].addWidget(widgets["entry"])
        self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
        self.updateGeometries()

    def _recompute_argument_clicked(self, name):
        asyncio.ensure_future(self._recompute_argument(name))

    async def _recompute_argument(self, name):
        try:
            expdesc = await self.manager.compute_expdesc(self.expurl)
            expdesc, _ = await self.manager.compute_expdesc(self.expurl)
        except:
            logger.error("Could not recompute argument '%s' of '%s'",
                         name, self.expurl, exc_info=True)

@@ -175,22 +185,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
        state = procdesc_to_entry(procdesc).default_state(procdesc)
        argument["desc"] = procdesc
        argument["state"] = state

        # Qt needs a setItemWidget() to handle layout correctly,
        # simply replacing the entry inside the LayoutWidget
        # results in a bug.

        widgets = self._arg_to_widgets[name]

        widgets["entry"].deleteLater()
        widgets["entry"] = procdesc_to_entry(procdesc)(argument)
        widgets["disable_other_scans"].setVisible(
            isinstance(widgets["entry"], ScanEntry))
        widgets["fix_layout"].deleteLater()
        widgets["fix_layout"] = LayoutWidget()
        widgets["fix_layout"].addWidget(widgets["entry"])
        self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
        self.updateGeometries()
        self.update_argument(name, argument)

    def _disable_other_scans(self, current_name):
        for name, widgets in self._arg_to_widgets.items():

@@ -216,6 +211,15 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
            pass
        self.verticalScrollBar().setValue(state["scroll"])

    # Hooks that allow user-supplied argument editors to react to imminent user
    # actions. Here, we always keep the manager-stored submission arguments
    # up-to-date, so no further action is required.
    def about_to_submit(self):
        pass

    def about_to_close(self):
        pass


log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]

@@ -241,7 +245,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
        self.manager = manager
        self.expurl = expurl

        self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
        editor_class = self.manager.get_argument_editor_class(expurl)
        self.argeditor = editor_class(self.manager, self, self.expurl)
        self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
        self.layout.setRowStretch(0, 1)

@@ -258,7 +263,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
            datetime.setDate(QtCore.QDate.currentDate())
        else:
            datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch(
                scheduling["due_date"]*1000))
                int(scheduling["due_date"]*1000)))
        datetime_en.setChecked(scheduling["due_date"] is not None)

        def update_datetime(dt):

@@ -301,7 +306,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
        flush = self.flush
        flush.setToolTip("Flush the pipeline (of current- and higher-priority "
                         "experiments) before starting the experiment")
        self.layout.addWidget(flush, 2, 2, 1, 2)
        self.layout.addWidget(flush, 2, 2)

        flush.setChecked(scheduling["flush"])

@@ -309,6 +314,20 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
            scheduling["flush"] = bool(checked)
        flush.stateChanged.connect(update_flush)

        devarg_override = QtWidgets.QComboBox()
        devarg_override.setEditable(True)
        devarg_override.lineEdit().setPlaceholderText("Override device arguments")
        devarg_override.lineEdit().setClearButtonEnabled(True)
        devarg_override.insertItem(0, "core:analyze_at_run_end=True")
        self.layout.addWidget(devarg_override, 2, 3)

        devarg_override.setCurrentText(options["devarg_override"])

        def update_devarg_override(text):
            options["devarg_override"] = text
        devarg_override.editTextChanged.connect(update_devarg_override)
        self.devarg_override = devarg_override
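The override string offered here ("core:analyze_at_run_end=True") is the same syntax that parse_devarg_override()/unparse_devarg_override() from artiq.tools handle when submit() builds the expid further down. A sketch of the round trip; the exact return shape of the helpers is an assumption inferred from how this diff uses them, not shown in the diff itself:

# Hypothetical round trip (shape of the parsed dict is assumed).
overrides = parse_devarg_override("core:analyze_at_run_end=True")
# presumably {"core": {"analyze_at_run_end": True}}
print(unparse_devarg_override(overrides))  # back to the original string
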

        log_level = QtWidgets.QComboBox()
        log_level.addItems(log_levels)
        log_level.setCurrentIndex(1)

@@ -329,6 +348,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
        if "repo_rev" in options:
            repo_rev = QtWidgets.QLineEdit()
            repo_rev.setPlaceholderText("current")
            repo_rev.setClearButtonEnabled(True)
            repo_rev_label = QtWidgets.QLabel("Revision:")
            repo_rev_label.setToolTip("Experiment repository revision "
                                      "(commit ID) to use")

@@ -369,6 +389,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
        self.hdf5_load_directory = os.path.expanduser("~")

    def submit_clicked(self):
        self.argeditor.about_to_submit()
        try:
            self.manager.submit(self.expurl)
        except:

@@ -391,7 +412,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):

    async def _recompute_arguments_task(self, overrides=dict()):
        try:
            expdesc = await self.manager.compute_expdesc(self.expurl)
            expdesc, ui_name = await self.manager.compute_expdesc(self.expurl)
        except:
            logger.error("Could not recompute experiment description of '%s'",
                         self.expurl, exc_info=True)

@@ -404,12 +425,13 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
                arginfo[k][0]["default"].insert(0, v)
            else:
                arginfo[k][0]["default"] = v
        self.manager.initialize_submission_arguments(self.expurl, arginfo)
        self.manager.initialize_submission_arguments(self.expurl, arginfo, ui_name)

        argeditor_state = self.argeditor.save_state()
        self.argeditor.deleteLater()

        self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
        editor_class = self.manager.get_argument_editor_class(self.expurl)
        self.argeditor = editor_class(self.manager, self, self.expurl)
        self.argeditor.restore_state(argeditor_state)
        self.layout.addWidget(self.argeditor, 0, 0, 1, 5)

@@ -422,7 +444,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):

    async def _recompute_sched_options_task(self):
        try:
            expdesc = await self.manager.compute_expdesc(self.expurl)
            expdesc, _ = await self.manager.compute_expdesc(self.expurl)
        except:
            logger.error("Could not recompute experiment description of '%s'",
                         self.expurl, exc_info=True)

@@ -459,6 +481,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
            return

        try:
            if "devarg_override" in expid:
                self.devarg_override.setCurrentText(
                    unparse_devarg_override(expid["devarg_override"]))
            self.log_level.setCurrentIndex(log_levels.index(
                log_level_to_name(expid["log_level"])))
            if ("repo_rev" in expid and

@@ -473,6 +498,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
        await self._recompute_arguments_task(arguments)

    def closeEvent(self, event):
        self.argeditor.about_to_close()
        self.sigClosed.emit()
        QtWidgets.QMdiSubWindow.closeEvent(self, event)

@@ -544,7 +570,13 @@ class _QuickOpenDialog(QtWidgets.QDialog):


class ExperimentManager:
    def __init__(self, main_window,
    #: Global registry for custom argument editor classes, indexed by the experiment
    #: `argument_ui` string; can be populated by dashboard plugins such as ndscan.
    #: If no handler for a requested UI name is found, the default built-in argument
    #: editor will be used.
    argument_ui_classes = dict()

    def __init__(self, main_window, dataset_sub,
                 explist_sub, schedule_sub,
                 schedule_ctl, experiment_db_ctl):
        self.main_window = main_window

@@ -555,7 +587,10 @@ class ExperimentManager:
        self.submission_scheduling = dict()
        self.submission_options = dict()
        self.submission_arguments = dict()
        self.argument_ui_names = dict()

        self.datasets = dict()
        dataset_sub.add_setmodel_callback(self.set_dataset_model)
        self.explist = dict()
        explist_sub.add_setmodel_callback(self.set_explist_model)
        self.schedule = dict()

@@ -570,6 +605,9 @@ class ExperimentManager:
        quick_open_shortcut.setContext(QtCore.Qt.ApplicationShortcut)
        quick_open_shortcut.activated.connect(self.show_quick_open)

    def set_dataset_model(self, model):
        self.datasets = model

    def set_explist_model(self, model):
        self.explist = model.backing_store

@@ -586,6 +624,17 @@ class ExperimentManager:
        else:
            raise ValueError("Malformed experiment URL")

    def get_argument_editor_class(self, expurl):
        ui_name = self.argument_ui_names.get(expurl, None)
        if not ui_name and expurl[:5] == "repo:":
            ui_name = self.explist.get(expurl[5:], {}).get("argument_ui", None)
        if ui_name:
            result = self.argument_ui_classes.get(ui_name, None)
            if result:
                return result
            logger.warning("Ignoring unknown argument UI '%s'", ui_name)
        return _ArgumentEditor
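A sketch of how a dashboard plugin could use the argument_ui mechanism introduced above. The class name and UI string below are made up for illustration; only argument_ui_classes, the about_to_* hooks and the _ArgumentEditor base come from this diff:

# Hypothetical plugin code; "my_custom_ui" and MyArgumentEditor are examples.
class MyArgumentEditor(_ArgumentEditor):
    def about_to_submit(self):
        # push any locally cached edits into the manager-stored arguments
        pass

    def about_to_close(self):
        pass

# Register under the UI name that experiments advertise via "argument_ui";
# get_argument_editor_class() falls back to the default editor for unknown names.
ExperimentManager.argument_ui_classes["my_custom_ui"] = MyArgumentEditor
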

    def get_submission_scheduling(self, expurl):
        if expurl in self.submission_scheduling:
            return self.submission_scheduling[expurl]

@@ -608,14 +657,15 @@ class ExperimentManager:
        else:
            # mutated by _ExperimentDock
            options = {
                "log_level": logging.WARNING
                "log_level": logging.WARNING,
                "devarg_override": ""
            }
            if expurl[:5] == "repo:":
                options["repo_rev"] = None
            self.submission_options[expurl] = options
        return options

    def initialize_submission_arguments(self, expurl, arginfo):
    def initialize_submission_arguments(self, expurl, arginfo, ui_name):
        arguments = OrderedDict()
        for name, (procdesc, group, tooltip) in arginfo.items():
            state = procdesc_to_entry(procdesc).default_state(procdesc)

@@ -626,7 +676,22 @@ class ExperimentManager:
                "state": state, # mutated by entries
            }
        self.submission_arguments[expurl] = arguments
        self.argument_ui_names[expurl] = ui_name
        return arguments

    def set_argument_value(self, expurl, name, value):
        try:
            argument = self.submission_arguments[expurl][name]
            if argument["desc"]["ty"] == "Scannable":
                ty = value["ty"]
                argument["state"]["selected"] = ty
                argument["state"][ty] = value
            else:
                argument["state"] = value
            if expurl in self.open_experiments.keys():
                self.open_experiments[expurl].argeditor.update_argument(name, argument)
        except:
            logger.warn("Failed to set value for argument \"{}\" in experiment: {}.".format(name, expurl), exc_info=1)

    def get_submission_arguments(self, expurl):
        if expurl in self.submission_arguments:

@@ -635,9 +700,9 @@ class ExperimentManager:
        if expurl[:5] != "repo:":
            raise ValueError("Submission arguments must be preinitialized "
                             "when not using repository")
        arginfo = self.explist[expurl[5:]]["arginfo"]
        arguments = self.initialize_submission_arguments(expurl, arginfo)
        return arguments
        class_desc = self.explist[expurl[5:]]
        return self.initialize_submission_arguments(expurl,
            class_desc["arginfo"], class_desc.get("argument_ui", None))

    def open_experiment(self, expurl):
        if expurl in self.open_experiments:

@@ -688,7 +753,14 @@ class ExperimentManager:
            entry_cls = procdesc_to_entry(argument["desc"])
            argument_values[name] = entry_cls.state_to_value(argument["state"])

        try:
            devarg_override = parse_devarg_override(options["devarg_override"])
        except:
            logger.error("Failed to parse device argument overrides for %s", expurl)
            return

        expid = {
            "devarg_override": devarg_override,
            "log_level": options["log_level"],
            "file": file,
            "class_name": class_name,

@@ -726,7 +798,7 @@ class ExperimentManager:
        else:
            repo_match = "repo_rev" not in expid
        if (repo_match and
                expid["file"] == file and
                ("file" in expid and expid["file"] == file) and
                expid["class_name"] == class_name):
            rids.append(rid)
        asyncio.ensure_future(self._request_term_multiple(rids))

@@ -739,13 +811,15 @@ class ExperimentManager:
            revision = None
        description = await self.experiment_db_ctl.examine(
            file, use_repository, revision)
        return description[class_name]
        class_desc = description[class_name]
        return class_desc, class_desc.get("argument_ui", None)

    async def open_file(self, file):
        description = await self.experiment_db_ctl.examine(file, False)
        for class_name, class_desc in description.items():
            expurl = "file:{}@{}".format(class_name, file)
            self.initialize_submission_arguments(expurl, class_desc["arginfo"])
            self.initialize_submission_arguments(expurl, class_desc["arginfo"],
                class_desc.get("argument_ui", None))
            if expurl in self.open_experiments:
                self.open_experiments[expurl].close()
            self.open_experiment(expurl)

@@ -758,6 +832,7 @@ class ExperimentManager:
            "options": self.submission_options,
            "arguments": self.submission_arguments,
            "docks": self.dock_states,
            "argument_uis": self.argument_ui_names,
            "open_docks": set(self.open_experiments.keys())
        }

@@ -768,6 +843,7 @@ class ExperimentManager:
        self.submission_scheduling = state["scheduling"]
        self.submission_options = state["options"]
        self.submission_arguments = state["arguments"]
        self.argument_ui_names = state.get("argument_uis", {})
        for expurl in state["open_docks"]:
            self.open_experiment(expurl)

@@ -159,7 +159,7 @@ class WaitingPanel(LayoutWidget):
class ExplorerDock(QtWidgets.QDockWidget):
    def __init__(self, exp_manager, d_shortcuts,
                 explist_sub, explist_status_sub,
                 schedule_ctl, experiment_db_ctl):
                 schedule_ctl, experiment_db_ctl, device_db_ctl):
        QtWidgets.QDockWidget.__init__(self, "Explorer")
        self.setObjectName("Explorer")
        self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -251,6 +251,12 @@ class ExplorerDock(QtWidgets.QDockWidget):
        scan_repository_action.triggered.connect(scan_repository)
        self.el.addAction(scan_repository_action)

        scan_ddb_action = QtWidgets.QAction("Scan device database", self.el)
        def scan_ddb():
            asyncio.ensure_future(device_db_ctl.scan())
        scan_ddb_action.triggered.connect(scan_ddb)
        self.el.addAction(scan_ddb_action)

        self.current_directory = ""
        open_file_action = QtWidgets.QAction("Open file outside repository",
                                             self.el)

@@ -1,5 +1,6 @@
import asyncio
import logging
import textwrap
from collections import namedtuple

from PyQt5 import QtCore, QtWidgets, QtGui

@@ -7,12 +8,27 @@ from PyQt5 import QtCore, QtWidgets, QtGui
from sipyco.sync_struct import Subscriber

from artiq.coredevice.comm_moninj import *
from artiq.coredevice.ad9910 import (
    _AD9910_REG_PROFILE0, _AD9910_REG_PROFILE7,
    _AD9910_REG_FTW, _AD9910_REG_CFR1
)
from artiq.coredevice.ad9912_reg import AD9912_POW1, AD9912_SER_CONF
from artiq.gui.tools import LayoutWidget
from artiq.gui.flowlayout import FlowLayout


logger = logging.getLogger(__name__)

class _CancellableLineEdit(QtWidgets.QLineEdit):
    def escapePressedConnect(self, cb):
        self.esc_cb = cb

    def keyPressEvent(self, event):
        key = event.key()
        if key == QtCore.Qt.Key_Escape:
            self.esc_cb(event)
        QtWidgets.QLineEdit.keyPressEvent(self, event)


class _TTLWidget(QtWidgets.QFrame):
    def __init__(self, dm, channel, force_out, title):

@@ -168,15 +184,172 @@ class _SimpleDisplayWidget(QtWidgets.QFrame):
        raise NotImplementedError


class _DDSWidget(_SimpleDisplayWidget):
    def __init__(self, dm, bus_channel, channel, title):
class _DDSModel:
    def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0):
        self.cpld = cpld
        self.cur_frequency = 0
        self.cur_reg = 0
        self.dds_type = dds_type
        self.is_urukul = dds_type in ["AD9910", "AD9912"]

        if dds_type == "AD9914":
            self.ftw_per_hz = 2**32 / ref_clk
        else:
            if dds_type == "AD9910":
                max_freq = 1 << 32
                clk_mult = [4, 1, 2, 4]
            elif dds_type == "AD9912":  # AD9912
                max_freq = 1 << 48
                clk_mult = [1, 1, 2, 4]
            else:
                raise NotImplementedError
            sysclk = ref_clk / clk_mult[clk_div] * pll
            self.ftw_per_hz = 1 / sysclk * max_freq

    def monitor_update(self, probe, value):
        if self.dds_type == "AD9912":
            value = value << 16
        self.cur_frequency = self._ftw_to_freq(value)

    def _ftw_to_freq(self, ftw):
        return ftw / self.ftw_per_hz
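A worked example of the frequency/FTW conversion that _DDSModel encapsulates; the numbers are illustrative, the formulas are exactly the ones above:

# AD9910 behind an Urukul CPLD: 125 MHz reference, PLL multiplier 32, clk_div=0
# -> sysclk = 125 MHz / 4 * 32 = 1 GHz, ftw_per_hz = 2**32 / 1 GHz.
model = _DDSModel("AD9910", ref_clk=125e6, cpld="urukul0_cpld", pll=32, clk_div=0)
ftw = round(100e6 * model.ftw_per_hz)   # tuning word for 100 MHz
model.monitor_update(0, ftw)
print(model.cur_frequency)              # ~1e8 Hz, displayed as 100.0000000 MHz
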

class _DDSWidget(QtWidgets.QFrame):
    def __init__(self, dm, title, bus_channel=0, channel=0, dds_model=None):
        self.dm = dm
        self.bus_channel = bus_channel
        self.channel = channel
        self.dds_name = title
        self.cur_frequency = 0
        _SimpleDisplayWidget.__init__(self, title)
        self.dds_model = dds_model

        QtWidgets.QFrame.__init__(self)

        self.setFrameShape(QtWidgets.QFrame.Box)
        self.setFrameShadow(QtWidgets.QFrame.Raised)

        grid = QtWidgets.QGridLayout()
        grid.setContentsMargins(0, 0, 0, 0)
        grid.setHorizontalSpacing(0)
        grid.setVerticalSpacing(0)
        self.setLayout(grid)
        label = QtWidgets.QLabel(title)
        label.setAlignment(QtCore.Qt.AlignCenter)
        grid.addWidget(label, 1, 1)

        # FREQ DATA/EDIT FIELD
        self.data_stack = QtWidgets.QStackedWidget()

        # page 1: display data
        grid_disp = LayoutWidget()
        grid_disp.layout.setContentsMargins(0, 0, 0, 0)
        grid_disp.layout.setHorizontalSpacing(0)
        grid_disp.layout.setVerticalSpacing(0)

        self.value_label = QtWidgets.QLabel()
        self.value_label.setAlignment(QtCore.Qt.AlignCenter)
        grid_disp.addWidget(self.value_label, 0, 1, 1, 2)

        unit = QtWidgets.QLabel("MHz")
        unit.setAlignment(QtCore.Qt.AlignCenter)
        grid_disp.addWidget(unit, 0, 3, 1, 1)

        self.data_stack.addWidget(grid_disp)

        # page 2: edit data
        grid_edit = LayoutWidget()
        grid_edit.layout.setContentsMargins(0, 0, 0, 0)
        grid_edit.layout.setHorizontalSpacing(0)
        grid_edit.layout.setVerticalSpacing(0)

        self.value_edit = _CancellableLineEdit(self)
        self.value_edit.setAlignment(QtCore.Qt.AlignRight)
        grid_edit.addWidget(self.value_edit, 0, 1, 1, 2)
        unit = QtWidgets.QLabel("MHz")
        unit.setAlignment(QtCore.Qt.AlignCenter)
        grid_edit.addWidget(unit, 0, 3, 1, 1)
        self.data_stack.addWidget(grid_edit)

        grid.addWidget(self.data_stack, 2, 1)

        # BUTTONS
        self.button_stack = QtWidgets.QStackedWidget()

        # page 1: SET button
        set_grid = LayoutWidget()

        set_btn = QtWidgets.QToolButton()
        set_btn.setText("Set")
        set_btn.setToolTip("Set frequency")
        set_grid.addWidget(set_btn, 0, 1, 1, 1)

        # for urukuls also allow switching off RF
        if self.dds_model.is_urukul:
            off_btn = QtWidgets.QToolButton()
            off_btn.setText("Off")
            off_btn.setToolTip("Switch off the output")
            set_grid.addWidget(off_btn, 0, 2, 1, 1)

        self.button_stack.addWidget(set_grid)

        # page 2: apply/cancel buttons
        apply_grid = LayoutWidget()
        apply = QtWidgets.QToolButton()
        apply.setText("Apply")
        apply.setToolTip("Apply changes")
        apply_grid.addWidget(apply, 0, 1, 1, 1)
        cancel = QtWidgets.QToolButton()
        cancel.setText("Cancel")
        cancel.setToolTip("Cancel changes")
        apply_grid.addWidget(cancel, 0, 2, 1, 1)
        self.button_stack.addWidget(apply_grid)
        grid.addWidget(self.button_stack, 3, 1)

        grid.setRowStretch(1, 1)
        grid.setRowStretch(2, 1)
        grid.setRowStretch(3, 1)

        set_btn.clicked.connect(self.set_clicked)
        apply.clicked.connect(self.apply_changes)
        if self.dds_model.is_urukul:
            off_btn.clicked.connect(self.off_clicked)
            off_btn.setToolTip(textwrap.dedent(
                """Note: If TTL RTIO sw for the channel is switched high,
                this button will not disable the channel.
                Use the TTL override instead."""))
        self.value_edit.returnPressed.connect(lambda: self.apply_changes(None))
        self.value_edit.escapePressedConnect(self.cancel_changes)
        cancel.clicked.connect(self.cancel_changes)

        self.refresh_display()

    def set_clicked(self, set):
        self.data_stack.setCurrentIndex(1)
        self.button_stack.setCurrentIndex(1)
        self.value_edit.setText("{:.7f}"
                                .format(self.cur_frequency/1e6))
        self.value_edit.setFocus()
        self.value_edit.selectAll()

    def off_clicked(self, set):
        self.dm.dds_channel_toggle(self.dds_name, self.dds_model, sw=False)

    def apply_changes(self, apply):
        self.data_stack.setCurrentIndex(0)
        self.button_stack.setCurrentIndex(0)
        frequency = float(self.value_edit.text())*1e6
        self.dm.dds_set_frequency(self.dds_name, self.dds_model, frequency)

    def cancel_changes(self, cancel):
        self.data_stack.setCurrentIndex(0)
        self.button_stack.setCurrentIndex(0)

    def refresh_display(self):
        self.value.setText("<font size=\"4\">{:.7f}</font><font size=\"2\"> MHz</font>"
        self.cur_frequency = self.dds_model.cur_frequency
        self.value_label.setText("<font size=\"4\">{:.7f}</font>"
                                 .format(self.cur_frequency/1e6))
        self.value_edit.setText("{:.7f}"
                                .format(self.cur_frequency/1e6))

    def sort_key(self):

@@ -202,51 +375,74 @@ _WidgetDesc = namedtuple("_WidgetDesc", "uid comment cls arguments")


def setup_from_ddb(ddb):
    core_addr = None
    mi_addr = None
    mi_port = None
    dds_sysclk = None
    description = set()

    for k, v in ddb.items():
        comment = None
        if "comment" in v:
            comment = v["comment"]
        try:
            if isinstance(v, dict) and v["type"] == "local":
                if k == "core":
                    core_addr = v["arguments"]["host"]
                elif v["module"] == "artiq.coredevice.ttl":
                    channel = v["arguments"]["channel"]
                    force_out = v["class"] == "TTLOut"
                    widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
                    description.add(widget)
                elif (v["module"] == "artiq.coredevice.ad9914"
                        and v["class"] == "AD9914"):
                    bus_channel = v["arguments"]["bus_channel"]
                    channel = v["arguments"]["channel"]
                    dds_sysclk = v["arguments"]["sysclk"]
                    widget = _WidgetDesc(k, comment, _DDSWidget, (bus_channel, channel, k))
                    description.add(widget)
                elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53XX")
                        or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
                    spi_device = v["arguments"]["spi_device"]
                    spi_device = ddb[spi_device]
                    while isinstance(spi_device, str):
                        spi_device = ddb[spi_device]
                    spi_channel = spi_device["arguments"]["channel"]
                    for channel in range(32):
                        widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
            if isinstance(v, dict):
                comment = v.get("comment")
                if v["type"] == "local":
                    if v["module"] == "artiq.coredevice.ttl":
                        if "ttl_urukul" in k:
                            continue
                        channel = v["arguments"]["channel"]
                        force_out = v["class"] == "TTLOut"
                        widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
                        description.add(widget)
                    elif (v["module"] == "artiq.coredevice.ad9914"
                            and v["class"] == "AD9914"):
                        bus_channel = v["arguments"]["bus_channel"]
                        channel = v["arguments"]["channel"]
                        dds_sysclk = v["arguments"]["sysclk"]
                        model = _DDSModel(v["class"], dds_sysclk)
                        widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
                        description.add(widget)
                    elif (v["module"] == "artiq.coredevice.ad9910"
                            and v["class"] == "AD9910") or \
                         (v["module"] == "artiq.coredevice.ad9912"
                            and v["class"] == "AD9912"):
                        channel = v["arguments"]["chip_select"] - 4
                        if channel < 0:
                            continue
                        dds_cpld = v["arguments"]["cpld_device"]
                        spi_dev = ddb[dds_cpld]["arguments"]["spi_device"]
                        bus_channel = ddb[spi_dev]["arguments"]["channel"]
                        pll = v["arguments"]["pll_n"]
                        refclk = ddb[dds_cpld]["arguments"]["refclk"]
                        clk_div = v["arguments"].get("clk_div", 0)
                        model = _DDSModel( v["class"], refclk, dds_cpld, pll, clk_div)
                        widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
                        description.add(widget)
                    elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx")
                            or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
                        spi_device = v["arguments"]["spi_device"]
                        spi_device = ddb[spi_device]
                        while isinstance(spi_device, str):
                            spi_device = ddb[spi_device]
                        spi_channel = spi_device["arguments"]["channel"]
                        for channel in range(32):
                            widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
                            description.add(widget)
                elif v["type"] == "controller" and k == "core_moninj":
                    mi_addr = v["host"]
                    mi_port = v.get("port_proxy", 1383)
        except KeyError:
            pass
    return core_addr, dds_sysclk, description
    return mi_addr, mi_port, description
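For reference, a minimal sketch of the device_db shape that the new setup_from_ddb() consumes to discover an Urukul channel and the moninj proxy. The key names are illustrative; the fields accessed are the ones read above, and the example assumes the post-change version of the function:

# Hypothetical, trimmed device database: just enough for setup_from_ddb().
ddb = {
    "core_moninj": {"type": "controller", "host": "::1", "port_proxy": 1383},
    "spi_urukul0": {"type": "local", "module": "artiq.coredevice.spi2",
                    "class": "SPIMaster", "arguments": {"channel": 0}},
    "urukul0_cpld": {"type": "local", "module": "artiq.coredevice.urukul",
                     "class": "CPLD",
                     "arguments": {"spi_device": "spi_urukul0", "refclk": 125e6}},
    "urukul0_ch0": {"type": "local", "module": "artiq.coredevice.ad9910",
                    "class": "AD9910",
                    "arguments": {"pll_n": 32, "chip_select": 4,
                                  "cpld_device": "urukul0_cpld"}},
}
mi_addr, mi_port, description = setup_from_ddb(ddb)
# mi_addr == "::1", mi_port == 1383; description holds one _WidgetDesc whose
# arguments are (key, bus_channel=0, channel=0, _DDSModel instance).
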

class _DeviceManager:
    def __init__(self):
        self.core_addr = None
        self.reconnect_core = asyncio.Event()
        self.core_connection = None
        self.core_connector_task = asyncio.ensure_future(self.core_connector())
    def __init__(self, schedule_ctl):
        self.mi_addr = None
        self.mi_port = None
        self.reconnect_mi = asyncio.Event()
        self.mi_connection = None
        self.mi_connector_task = asyncio.ensure_future(self.mi_connector())

        self.schedule_ctl = schedule_ctl

        self.ddb = dict()
        self.description = set()

@@ -265,13 +461,12 @@ class _DeviceManager:
        return ddb

    def notify(self, mod):
        core_addr, dds_sysclk, description = setup_from_ddb(self.ddb)
        mi_addr, mi_port, description = setup_from_ddb(self.ddb)

        if core_addr != self.core_addr:
            self.core_addr = core_addr
            self.reconnect_core.set()

        self.dds_sysclk = dds_sysclk
        if (mi_addr, mi_port) != (self.mi_addr, self.mi_port):
            self.mi_addr = mi_addr
            self.mi_port = mi_port
            self.reconnect_mi.set()

        for to_remove in self.description - description:
            widget = self.widgets_by_uid[to_remove.uid]

@@ -319,44 +514,172 @@ class _DeviceManager:
        self.description = description

    def ttl_set_mode(self, channel, mode):
        if self.core_connection is not None:
        if self.mi_connection is not None:
            widget = self.ttl_widgets[channel]
            if mode == "0":
                widget.cur_override = True
                widget.cur_level = False
                self.core_connection.inject(channel, TTLOverride.level.value, 0)
                self.core_connection.inject(channel, TTLOverride.oe.value, 1)
                self.core_connection.inject(channel, TTLOverride.en.value, 1)
                self.mi_connection.inject(channel, TTLOverride.level.value, 0)
                self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
                self.mi_connection.inject(channel, TTLOverride.en.value, 1)
            elif mode == "1":
                widget.cur_override = True
                widget.cur_level = True
                self.core_connection.inject(channel, TTLOverride.level.value, 1)
                self.core_connection.inject(channel, TTLOverride.oe.value, 1)
                self.core_connection.inject(channel, TTLOverride.en.value, 1)
                self.mi_connection.inject(channel, TTLOverride.level.value, 1)
                self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
                self.mi_connection.inject(channel, TTLOverride.en.value, 1)
            elif mode == "exp":
                widget.cur_override = False
                self.core_connection.inject(channel, TTLOverride.en.value, 0)
                self.mi_connection.inject(channel, TTLOverride.en.value, 0)
            else:
                raise ValueError
            # override state may have changed
            widget.refresh_display()

    async def _submit_by_content(self, content, class_name, title):
        expid = {
            "log_level": logging.WARNING,
            "content": content,
            "class_name": class_name,
            "arguments": {}
        }
        scheduling = {
            "pipeline_name": "main",
            "priority": 0,
            "due_date": None,
            "flush": False
        }
        rid = await self.schedule_ctl.submit(
            scheduling["pipeline_name"],
            expid,
            scheduling["priority"], scheduling["due_date"],
            scheduling["flush"])
        logger.info("Submitted '%s', RID is %d", title, rid)

    def _dds_faux_injection(self, dds_channel, dds_model, action, title, log_msg):
        # create kernel and fill it in and send-by-content

        # initialize CPLD (if applicable)
        if dds_model.is_urukul:
            # urukuls need CPLD init and switch to on
            cpld_dev = """self.setattr_device("core_cache")
                self.setattr_device("{}")""".format(dds_model.cpld)

            # `sta`/`rf_sw`` variables are guaranteed for urukuls
            # so {action} can use it
            # if there's no RF enabled, CPLD may have not been initialized
            # but if there is, it has been initialised - no need to do again
            cpld_init = """delay(15*ms)
                was_init = self.core_cache.get("_{cpld}_init")
                sta = self.{cpld}.sta_read()
                rf_sw = urukul_sta_rf_sw(sta)
                if rf_sw == 0 and len(was_init) == 0:
                    delay(15*ms)
                    self.{cpld}.init()
                    self.core_cache.put("_{cpld}_init", [1])
            """.format(cpld=dds_model.cpld)
        else:
            cpld_dev = ""
            cpld_init = ""

        # AD9912/9910: init channel (if uninitialized)
        if dds_model.dds_type == "AD9912":
            # 0xFF before init, 0x99 after
            channel_init = """
                if self.{dds_channel}.read({cfgreg}, length=1) == 0xFF:
                    delay(10*ms)
                    self.{dds_channel}.init()
            """.format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
        elif dds_model.dds_type == "AD9910":
            # -1 before init, 2 after
            channel_init = """
                if self.{dds_channel}.read32({cfgreg}) == -1:
                    delay(10*ms)
                    self.{dds_channel}.init()
            """.format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
        else:
            channel_init = "self.{dds_channel}.init()".format(dds_channel=dds_channel)

        dds_exp = textwrap.dedent("""
        from artiq.experiment import *
        from artiq.coredevice.urukul import *

        class {title}(EnvExperiment):
            def build(self):
                self.setattr_device("core")
                self.setattr_device("{dds_channel}")
                {cpld_dev}

            @kernel
            def run(self):
                self.core.break_realtime()
                {cpld_init}
                delay(10*ms)
                {channel_init}
                delay(15*ms)
                {action}
        """.format(title=title, action=action,
                   dds_channel=dds_channel,
                   cpld_dev=cpld_dev, cpld_init=cpld_init,
                   channel_init=channel_init))
        asyncio.ensure_future(
            self._submit_by_content(
                dds_exp,
                title,
                log_msg))

    def dds_set_frequency(self, dds_channel, dds_model, freq):
        action = "self.{ch}.set({freq})".format(
            freq=freq, ch=dds_channel)
        if dds_model.is_urukul:
            action += """
                ch_no = self.{ch}.chip_select - 4
                self.{cpld}.cfg_switches(rf_sw | 1 << ch_no)
            """.format(ch=dds_channel, cpld=dds_model.cpld)
        self._dds_faux_injection(
            dds_channel,
            dds_model,
            action,
            "SetDDS",
            "Set DDS {} {}MHz".format(dds_channel, freq/1e6))

    def dds_channel_toggle(self, dds_channel, dds_model, sw=True):
        # urukul only
        if sw:
            switch = "| 1 << ch_no"
        else:
            switch = "& ~(1 << ch_no)"
        action = """
            ch_no = self.{dds_channel}.chip_select - 4
            self.{cpld}.cfg_switches(rf_sw {switch})
        """.format(
            dds_channel=dds_channel,
            cpld=dds_model.cpld,
            switch=switch
        )
        self._dds_faux_injection(
            dds_channel,
            dds_model,
            action,
            "ToggleDDS",
            "Toggle DDS {} {}".format(dds_channel, "on" if sw else "off"))
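To make the faux-injection path concrete: for a hypothetical AD9910 channel urukul0_ch0 behind urukul0_cpld, dds_set_frequency(..., 1e8) produces an action string and _dds_faux_injection() renders roughly the following throwaway experiment, which _submit_by_content() then schedules. Device names are examples and the rendering is approximate (the real template substitutes the numeric register address for the cfgreg placeholder):

# Approximate rendered kernel for dds_channel="urukul0_ch0",
# dds_model.cpld="urukul0_cpld", action="self.urukul0_ch0.set(100000000.0)...".
from artiq.experiment import *
from artiq.coredevice.urukul import *

class SetDDS(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("urukul0_ch0")
        self.setattr_device("core_cache")
        self.setattr_device("urukul0_cpld")

    @kernel
    def run(self):
        self.core.break_realtime()
        delay(15*ms)
        was_init = self.core_cache.get("_urukul0_cpld_init")
        sta = self.urukul0_cpld.sta_read()
        rf_sw = urukul_sta_rf_sw(sta)
        if rf_sw == 0 and len(was_init) == 0:
            delay(15*ms)
            self.urukul0_cpld.init()
            self.core_cache.put("_urukul0_cpld_init", [1])
        delay(10*ms)
        if self.urukul0_ch0.read32(0x00) == -1:  # cfgreg value substituted here
            delay(10*ms)
            self.urukul0_ch0.init()
        delay(15*ms)
        self.urukul0_ch0.set(100000000.0)
        ch_no = self.urukul0_ch0.chip_select - 4
        self.urukul0_cpld.cfg_switches(rf_sw | 1 << ch_no)
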

    def setup_ttl_monitoring(self, enable, channel):
        if self.core_connection is not None:
            self.core_connection.monitor_probe(enable, channel, TTLProbe.level.value)
            self.core_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
            self.core_connection.monitor_injection(enable, channel, TTLOverride.en.value)
            self.core_connection.monitor_injection(enable, channel, TTLOverride.level.value)
        if self.mi_connection is not None:
            self.mi_connection.monitor_probe(enable, channel, TTLProbe.level.value)
            self.mi_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
            self.mi_connection.monitor_injection(enable, channel, TTLOverride.en.value)
            self.mi_connection.monitor_injection(enable, channel, TTLOverride.level.value)
            if enable:
                self.core_connection.get_injection_status(channel, TTLOverride.en.value)
                self.mi_connection.get_injection_status(channel, TTLOverride.en.value)

    def setup_dds_monitoring(self, enable, bus_channel, channel):
        if self.core_connection is not None:
            self.core_connection.monitor_probe(enable, bus_channel, channel)
        if self.mi_connection is not None:
            self.mi_connection.monitor_probe(enable, bus_channel, channel)

    def setup_dac_monitoring(self, enable, spi_channel, channel):
        if self.core_connection is not None:
            self.core_connection.monitor_probe(enable, spi_channel, channel)
        if self.mi_connection is not None:
            self.mi_connection.monitor_probe(enable, spi_channel, channel)

    def monitor_cb(self, channel, probe, value):
        if channel in self.ttl_widgets:

@@ -366,11 +689,11 @@ class _DeviceManager:
            elif probe == TTLProbe.oe.value:
                widget.cur_oe = bool(value)
            widget.refresh_display()
        if (channel, probe) in self.dds_widgets:
        elif (channel, probe) in self.dds_widgets:
            widget = self.dds_widgets[(channel, probe)]
            widget.cur_frequency = value*self.dds_sysclk/2**32
            widget.dds_model.monitor_update(probe, value)
            widget.refresh_display()
        if (channel, probe) in self.dac_widgets:
        elif (channel, probe) in self.dac_widgets:
            widget = self.dac_widgets[(channel, probe)]
            widget.cur_value = value
            widget.refresh_display()

@@ -385,29 +708,31 @@ class _DeviceManager:
        widget.refresh_display()

    def disconnect_cb(self):
        logger.error("lost connection to core device moninj")
        self.reconnect_core.set()
        logger.error("lost connection to moninj")
        self.reconnect_mi.set()

    async def core_connector(self):
    async def mi_connector(self):
        while True:
            await self.reconnect_core.wait()
            self.reconnect_core.clear()
            if self.core_connection is not None:
                await self.core_connection.close()
                self.core_connection = None
            new_core_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
            await self.reconnect_mi.wait()
            self.reconnect_mi.clear()
            if self.mi_connection is not None:
                await self.mi_connection.close()
                self.mi_connection = None
            new_mi_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
                                           self.disconnect_cb)
            try:
                await new_core_connection.connect(self.core_addr, 1383)
                await new_mi_connection.connect(self.mi_addr, self.mi_port)
            except asyncio.CancelledError:
                logger.info("cancelled connection to core device moninj")
                logger.info("cancelled connection to moninj")
                break
            except:
                logger.error("failed to connect to core device moninj", exc_info=True)
                logger.error("failed to connect to moninj. Is aqctl_moninj_proxy running?", exc_info=True)
                await asyncio.sleep(10.)
                self.reconnect_core.set()
                self.reconnect_mi.set()
            else:
                self.core_connection = new_core_connection
                logger.info("ARTIQ dashboard connected to moninj (%s)",
                            self.mi_addr)
                self.mi_connection = new_mi_connection
                for ttl_channel in self.ttl_widgets.keys():
                    self.setup_ttl_monitoring(True, ttl_channel)
                for bus_channel, channel in self.dds_widgets.keys():

@@ -416,13 +741,13 @@ class _DeviceManager:
                    self.setup_dac_monitoring(True, spi_channel, channel)

    async def close(self):
        self.core_connector_task.cancel()
        self.mi_connector_task.cancel()
        try:
            await asyncio.wait_for(self.core_connector_task, None)
            await asyncio.wait_for(self.mi_connector_task, None)
        except asyncio.CancelledError:
            pass
        if self.core_connection is not None:
            await self.core_connection.close()
        if self.mi_connection is not None:
            await self.mi_connection.close()


class _MonInjDock(QtWidgets.QDockWidget):

@@ -448,12 +773,12 @@ class _MonInjDock(QtWidgets.QDockWidget):


class MonInj:
    def __init__(self):
    def __init__(self, schedule_ctl):
        self.ttl_dock = _MonInjDock("TTL")
        self.dds_dock = _MonInjDock("DDS")
        self.dac_dock = _MonInjDock("DAC")

        self.dm = _DeviceManager()
        self.dm = _DeviceManager(schedule_ctl)
        self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(
            self.dm.ttl_widgets.values())
        self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(

@@ -48,7 +48,7 @@ class Model(DictSyncModel):
        else:
            return "Outside repo."
    elif column == 6:
        return v["expid"]["file"]
        return v["expid"].get("file", "<none>")
    elif column == 7:
        if v["expid"]["class_name"] is None:
            return ""

@@ -7,7 +7,11 @@ device_db = {
        "type": "local",
        "module": "artiq.coredevice.core",
        "class": "Core",
        "arguments": {"host": core_addr, "ref_period": 1e-9}
        "arguments": {
            "host": core_addr,
            "ref_period": 1e-9,
            "analyzer_proxy": "core_analyzer"
        }
    },
    "core_log": {
        "type": "controller",

@@ -15,6 +19,20 @@ device_db = {
        "port": 1068,
        "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
    },
    "core_moninj": {
        "type": "controller",
        "host": "::1",
        "port_proxy": 1383,
        "port": 1384,
        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
    },
    "core_analyzer": {
        "type": "controller",
        "host": "::1",
        "port_proxy": 1385,
        "port": 1386,
        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
    },
    "core_cache": {
        "type": "local",
        "module": "artiq.coredevice.cache",

@@ -29,13 +47,13 @@ device_db = {
    "i2c_switch0": {
        "type": "local",
        "module": "artiq.coredevice.i2c",
        "class": "PCA9548",
        "class": "I2CSwitch",
        "arguments": {"address": 0xe0}
    },
    "i2c_switch1": {
        "type": "local",
        "module": "artiq.coredevice.i2c",
        "class": "PCA9548",
        "class": "I2CSwitch",
        "arguments": {"address": 0xe2}
    },
}

@@ -5,7 +5,11 @@ device_db = {
        "type": "local",
        "module": "artiq.coredevice.core",
        "class": "Core",
        "arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
        "arguments": {
            "host": core_addr,
            "ref_period": 1e-9,
            "analyzer_proxy": "core_analyzer"
        }
    },
    "core_log": {
        "type": "controller",

@@ -13,6 +17,20 @@ device_db = {
        "port": 1068,
        "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
    },
    "core_moninj": {
        "type": "controller",
        "host": "::1",
        "port_proxy": 1383,
        "port": 1384,
        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
    },
    "core_analyzer": {
        "type": "controller",
        "host": "::1",
        "port_proxy": 1385,
        "port": 1386,
        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
    },
    "core_cache": {
        "type": "local",
        "module": "artiq.coredevice.cache",
|
|
@ -1,177 +0,0 @@
|
|||
core_addr = "192.168.1.70"
|
||||
|
||||
device_db = {
|
||||
"core": {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.core",
|
||||
"class": "Core",
|
||||
"arguments": {"host": core_addr, "ref_period": 1/(8*150e6)}
|
||||
},
|
||||
"core_log": {
|
||||
"type": "controller",
|
||||
"host": "::1",
|
||||
"port": 1068,
|
||||
"command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
|
||||
},
|
||||
"core_cache": {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.cache",
|
||||
"class": "CoreCache"
|
||||
},
|
||||
"core_dma": {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.dma",
|
||||
"class": "CoreDMA"
|
||||
},
|
||||
}
|
||||
|
||||
device_db.update(
|
||||
spi_urukul0={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.spi2",
|
||||
"class": "SPIMaster",
|
||||
"arguments": {"channel": 0}
|
||||
},
|
||||
ttl_urukul0_io_update={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": 1}
|
||||
},
|
||||
ttl_urukul0_sw0={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": 2}
|
||||
},
|
||||
ttl_urukul0_sw1={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": 3}
|
||||
},
|
||||
ttl_urukul0_sw2={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": 4}
|
||||
},
|
||||
ttl_urukul0_sw3={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": 5}
|
||||
},
|
||||
urukul0_cpld={
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.urukul",
|
||||
"class": "CPLD",
|
||||
"arguments": {
|
||||
"spi_device": "spi_urukul0",
|
||||
"io_update_device": "ttl_urukul0_io_update",
|
||||
"refclk": 150e6,
|
||||
"clk_sel": 2
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
for i in range(4):
|
||||
device_db["urukul0_ch" + str(i)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ad9910",
|
||||
"class": "AD9910",
|
||||
"arguments": {
|
||||
"pll_n": 16, # 600MHz sample rate
|
||||
"pll_vco": 2,
|
||||
"chip_select": 4 + i,
|
||||
"cpld_device": "urukul0_cpld",
|
||||
"sw_device": "ttl_urukul0_sw" + str(i)
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
artiq_route routing.bin init
|
||||
artiq_route routing.bin set 0 0
|
||||
artiq_route routing.bin set 1 1 0
|
||||
artiq_route routing.bin set 2 1 1 0
|
||||
artiq_route routing.bin set 3 2 0
|
||||
artiq_route routing.bin set 4 2 1 0
|
||||
artiq_coremgmt -D kasli config write -f routing_table routing.bin
|
||||
"""
|
||||
|
||||
for sayma in range(2):
|
||||
amc_base = 0x010000 + sayma*0x020000
|
||||
rtm_base = 0x020000 + sayma*0x020000
|
||||
for i in range(4):
|
||||
device_db["led" + str(4*sayma+i)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": amc_base + i}
|
||||
}
|
||||
for i in range(2):
|
||||
device_db["ttl_mcx" + str(2*sayma+i)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLInOut",
|
||||
"arguments": {"channel": amc_base + 4 + i}
|
||||
}
|
||||
for i in range(8):
|
||||
device_db["sawg" + str(8*sayma+i)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.sawg",
|
||||
"class": "SAWG",
|
||||
"arguments": {"channel_base": amc_base + 6 + i*10, "parallelism": 4}
|
||||
}
|
||||
for basemod in range(2):
|
||||
for i in range(4):
|
||||
device_db["sawg_sw" + str(8*sayma+4*basemod+i)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + i}
|
||||
}
|
||||
att_idx = 2*sayma + basemod
|
||||
device_db["basemod_att_rst_n"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + 4}
|
||||
}
|
||||
device_db["basemod_att_clk"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + 5}
|
||||
}
|
||||
device_db["basemod_att_le"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + 6}
|
||||
}
|
||||
device_db["basemod_att_mosi"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + 7}
|
||||
}
|
||||
device_db["basemod_att_miso"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.ttl",
|
||||
"class": "TTLInOut",
|
||||
"arguments": {"channel": rtm_base + basemod*9 + 8}
|
||||
}
|
||||
device_db["basemod_att"+str(att_idx)] = {
|
||||
"type": "local",
|
||||
"module": "artiq.coredevice.basemod_att",
|
||||
"class": "BaseModAtt",
|
||||
"arguments": {
|
||||
"rst_n": "basemod_att_rst_n"+str(att_idx),
|
||||
"clk": "basemod_att_clk"+str(att_idx),
|
||||
"le": "basemod_att_le"+str(att_idx),
|
||||
"mosi": "basemod_att_mosi"+str(att_idx),
|
||||
"miso": "basemod_att_miso"+str(att_idx),
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,25 +0,0 @@
from artiq.experiment import *


class BaseMod(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.basemods = [self.get_device("basemod_att0"), self.get_device("basemod_att1")]
        self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(8)]

    @kernel
    def run(self):
        self.core.reset()
        for basemod in self.basemods:
            self.core.break_realtime()
            delay(10*ms)
            basemod.reset()
            delay(10*ms)
            basemod.set(0.0, 0.0, 0.0, 0.0)
            delay(10*ms)
            print(basemod.get_mu())

        self.core.break_realtime()
        for rfsw in self.rfsws:
            rfsw.on()
            delay(1*ms)
@@ -1,37 +0,0 @@
from artiq.experiment import *


class Sines2Sayma(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.sawgs = [self.get_device("sawg"+str(i)) for i in range(16)]

    @kernel
    def drtio_is_up(self):
        for i in range(5):
            if not self.core.get_rtio_destination_status(i):
                return False
        return True

    @kernel
    def run(self):
        while True:
            print("waiting for DRTIO ready...")
            while not self.drtio_is_up():
                pass
            print("OK")

            self.core.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.amplitude1.set(.4)
                # Do not use a sub-multiple of oscilloscope sample rates.
                sawg.frequency0.set(9*MHz)

            while self.drtio_is_up():
                pass
@@ -1,89 +0,0 @@
from artiq.experiment import *


class SinesUrukulSayma(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("urukul0_cpld")

        # Urukul clock output syntonized to the RTIO clock.
        # Can be used as HMC830 reference on Sayma RTM.
        # When using this reference, Sayma must be recalibrated every time Urukul
        # is rebooted, as Urukul is not synchronized to the Kasli.
        self.urukul_hmc_ref = self.get_device("urukul0_ch3")

        # Urukul measurement channels - compare with SAWG outputs.
        # When testing sync, do not reboot Urukul, as it is not
        # synchronized to the Kasli.
        self.urukul_meas = [self.get_device("urukul0_ch" + str(i)) for i in range(3)]
        # The same waveform is output on all first 4 SAWG channels (first DAC).
        self.sawgs = [self.get_device("sawg"+str(i)) for i in range(4)]
        self.basemod = self.get_device("basemod_att0")
        self.rfsws = [self.get_device("sawg_sw"+str(i)) for i in range(4)]


    # DRTIO destinations:
    # 0: local
    # 1: Sayma AMC
    # 2: Sayma RTM
    @kernel
    def drtio_is_up(self):
        for i in range(3):
            if not self.core.get_rtio_destination_status(i):
                return False
        return True

    @kernel
    def run(self):
        f = 9*MHz
        dds_ftw = self.urukul_meas[0].frequency_to_ftw(f)
        sawg_ftw = self.sawgs[0].frequency0.to_mu(f)
        if dds_ftw != sawg_ftw:
            print("DDS and SAWG FTWs do not match:", dds_ftw, sawg_ftw)
            return

        self.core.reset()
        self.urukul0_cpld.init()

        delay(1*ms)
        self.urukul_hmc_ref.init()
        self.urukul_hmc_ref.set_mu(0x40000000, asf=self.urukul_hmc_ref.amplitude_to_asf(0.6))
        self.urukul_hmc_ref.set_att(6.)
        self.urukul_hmc_ref.sw.on()

        for urukul_ch in self.urukul_meas:
            delay(1*ms)
            urukul_ch.init()
            urukul_ch.set_mu(dds_ftw, asf=urukul_ch.amplitude_to_asf(0.5))
            urukul_ch.set_att(6.)
            urukul_ch.sw.on()

        while True:
            print("waiting for DRTIO ready...")
            while not self.drtio_is_up():
                pass
            print("OK")

            self.core.reset()

            delay(10*ms)
            self.basemod.reset()
            delay(10*ms)
            self.basemod.set(3.0, 3.0, 3.0, 3.0)
            delay(10*ms)
            for rfsw in self.rfsws:
                delay(1*ms)
                rfsw.on()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.reset()

            for sawg in self.sawgs:
                delay(1*ms)
                sawg.amplitude1.set(.4)
                sawg.frequency0.set_mu(sawg_ftw)
                sawg.phase0.set_mu(sawg_ftw*now_mu() >> 17)

            while self.drtio_is_up():
                pass
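The FTW comparison at the top of run() only passes when the Urukul DDS and the SAWG quantize the 9 MHz setpoint to the same machine-unit frequency word, which in turn requires both to derive it from the same sample rate. A rough illustration (the 1 GS/s clock and 32-bit frequency word are assumptions made for this sketch only, not values taken from the experiment):

# Illustration only; the common 1 GS/s sample clock and 32-bit FTW are assumed.
f = 9e6
fs = 1e9
ftw = round(f / fs * 2**32)
print(ftw)  # both devices would have to agree on this single word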
@@ -0,0 +1,18 @@
{
    "target": "kasli",
    "variant": "shuttlerdemo",
    "hw_rev": "v2.0",
    "drtio_role": "master",
    "peripherals": [
        {
            "type": "shuttler",
            "ports": [0]
        },
        {
            "type": "dio",
            "ports": [1],
            "bank_direction_low": "input",
            "bank_direction_high": "output"
        }
    ]
}
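This system description places a Shuttler card on EEM port 0 and an input/output DIO card on port 1 of a DRTIO-master Kasli v2.0. A short host-side sketch for inspecting such a description before generating a device database (the file name shuttlerdemo.json is an assumption for illustration):

# Host-side sketch; "shuttlerdemo.json" is an assumed file name for the description above.
import json

with open("shuttlerdemo.json") as f:
    desc = json.load(f)
print(desc["variant"], "-", desc["drtio_role"])
for p in desc["peripherals"]:
    print(p["type"], "on EEM port(s)", p["ports"])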
@@ -0,0 +1,330 @@
from artiq.experiment import *
from artiq.coredevice.shuttler import shuttler_volt_to_mu

DAC_Fs_MHZ = 125
CORDIC_GAIN = 1.64676

@portable
def shuttler_phase_offset(offset_degree):
    return round(offset_degree / 360 * (2 ** 16))

@portable
def shuttler_freq_mu(freq_mhz):
    return round(float(2) ** 32 / DAC_Fs_MHZ * freq_mhz)

@portable
def shuttler_chirp_rate_mu(freq_mhz_per_us):
    return round(float(2) ** 32 * freq_mhz_per_us / (DAC_Fs_MHZ ** 2))

@portable
def shuttler_freq_sweep(start_f_MHz, end_f_MHz, time_us):
    return shuttler_chirp_rate_mu((end_f_MHz - start_f_MHz)/(time_us))

@portable
def shuttler_volt_amp_mu(volt):
    return shuttler_volt_to_mu(volt)

@portable
def shuttler_volt_damp_mu(volt_per_us):
    return round(float(2) ** 32 * (volt_per_us / 20) / DAC_Fs_MHZ)

@portable
def shuttler_volt_ddamp_mu(volt_per_us_square):
    return round(float(2) ** 48 * (volt_per_us_square / 20) * 2 / (DAC_Fs_MHZ ** 2))

@portable
def shuttler_volt_dddamp_mu(volt_per_us_cube):
    return round(float(2) ** 48 * (volt_per_us_cube / 20) * 6 / (DAC_Fs_MHZ ** 3))

@portable
def shuttler_dds_amp_mu(volt):
    return shuttler_volt_amp_mu(volt / CORDIC_GAIN)

@portable
def shuttler_dds_damp_mu(volt_per_us):
    return shuttler_volt_damp_mu(volt_per_us / CORDIC_GAIN)

@portable
def shuttler_dds_ddamp_mu(volt_per_us_square):
    return shuttler_volt_ddamp_mu(volt_per_us_square / CORDIC_GAIN)

@portable
def shuttler_dds_dddamp_mu(volt_per_us_cube):
    return shuttler_volt_dddamp_mu(volt_per_us_cube / CORDIC_GAIN)
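
# Worked examples of the conversion helpers above, evaluated directly from
# their definitions with DAC_Fs_MHZ = 125 (rounding as in the code):
#   shuttler_freq_mu(0.05)       == round(2**32 / 125 * 0.05) == 1717987  (50 kHz tone)
#   shuttler_phase_offset(180.0) == round(180 / 360 * 2**16)  == 32768    (half turn)
#   shuttler_dds_amp_mu(1.0)     == shuttler_volt_to_mu(1.0 / CORDIC_GAIN)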

class Shuttler(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("core_dma")
        self.setattr_device("scheduler")
        self.shuttler0_leds = [ self.get_device("shuttler0_led{}".format(i)) for i in range(2) ]
        self.setattr_device("shuttler0_config")
        self.setattr_device("shuttler0_trigger")
        self.shuttler0_dcbias = [ self.get_device("shuttler0_dcbias{}".format(i)) for i in range(16) ]
        self.shuttler0_dds = [ self.get_device("shuttler0_dds{}".format(i)) for i in range(16) ]
        self.setattr_device("shuttler0_relay")
        self.setattr_device("shuttler0_adc")

    @kernel
    def record(self):
        with self.core_dma.record("example_waveform"):
            self.example_waveform()

    @kernel
    def init(self):
        self.led()
        self.relay_init()
        self.adc_init()
        self.shuttler_reset()

    @kernel
    def run(self):
        self.core.reset()
        self.core.break_realtime()
        self.init()

        self.record()
        example_waveform_handle = self.core_dma.get_handle("example_waveform")

        print("Example Waveforms are on OUT0 and OUT1")
        self.core.break_realtime()
        while not(self.scheduler.check_termination()):
            delay(1*s)
            self.core_dma.playback_handle(example_waveform_handle)

    @kernel
    def shuttler_reset(self):
        for i in range(16):
            self.shuttler_channel_reset(i)
            # Leave slack between channels to avoid RTIO underflow
            delay(50*us)

    @kernel
    def shuttler_channel_reset(self, ch):
        self.shuttler0_dcbias[ch].set_waveform(
            a0=0,
            a1=0,
            a2=0,
            a3=0,
        )
        self.shuttler0_dds[ch].set_waveform(
            b0=0,
            b1=0,
            b2=0,
            b3=0,
            c0=0,
            c1=0,
            c2=0,
        )
        self.shuttler0_trigger.trigger(1 << ch)

    @kernel
    def example_waveform(self):
        # Equation of the output waveform:
        #   w(t_us) = a(t_us) + b(t_us) * cos(c(t_us))
        # Step 1:
        #   Enable the output relay of OUT0 and OUT1
        # Step 2: Cosine wave frequency sweep from 10kHz to 50kHz in 500us
        #   OUT0: b(t_us) = 1
        #         c(t_us) = 2 * pi * (0.08 * t_us ^ 2 + 0.01 * t_us)
        #   OUT1: b(t_us) = 1
        #         c(t_us) = 2 * pi * (0.05 * t_us)
        # Step 3 (after 500us): Cosine wave with 180 degree phase offset
        #   OUT0: b(t_us) = 1
        #         c(t_us) = 2 * pi * (0.05 * t_us) + pi
        #   OUT1: b(t_us) = 1
        #         c(t_us) = 2 * pi * (0.05 * t_us)
        # Step 4 (after 500us): Cosine wave with amplitude envelope
        #   OUT0: b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
        #         c(t_us) = 2 * pi * (0.05 * t_us)
        #   OUT1: b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
        #         c(t_us) = 0
        # Step 5 (after 500us): Sawtooth wave modulated with 50kHz cosine wave
        #   OUT0: a(t_us) = 0.01 * t_us - 5
        #         b(t_us) = 1
        #         c(t_us) = 2 * pi * (0.05 * t_us)
        #   OUT1: a(t_us) = 0.01 * t_us - 5
        # Step 6 (after 1000us): A combination of the previous waveforms
        #   OUT0: a(t_us) = 0.01 * t_us - 5
        #         b(t_us) = -0.0001367187 * t_us ^ 2 + 0.06835937 * t_us
        #         c(t_us) = 2 * pi * (0.08 * t_us ^ 2 + 0.01 * t_us)
        # Step 7 (after 500us): Mirrored waveform of Step 6
        #   OUT0: a(t_us) = 2.5 + -0.01 * (1000 ^ 2) * t_us
        #         b(t_us) = 0.0001367187 * t_us ^ 2 - 0.06835937 * t_us
        #         c(t_us) = 2 * pi * (-0.08 * t_us ^ 2 + 0.05 * t_us) + pi
        # Step 8 (after 500us):
        #   Disable the output relay of OUT0 and OUT1
        #   Reset OUT0 and OUT1

        ## Step 1 ##
        self.shuttler0_relay.enable(0b11)

        ## Step 2 ##
        start_f_MHz = 0.01
        end_f_MHz = 0.05
        duration_us = 500
        # OUT0 and OUT1 have their frequency and phase aligned at 500us
        self.shuttler0_dds[0].set_waveform(
            b0=shuttler_dds_amp_mu(1.0),
            b1=0,
            b2=0,
            b3=0,
            c0=0,
            c1=shuttler_freq_mu(start_f_MHz),
            c2=shuttler_freq_sweep(start_f_MHz, end_f_MHz, duration_us),
        )
        self.shuttler0_dds[1].set_waveform(
            b0=shuttler_dds_amp_mu(1.0),
            b1=0,
            b2=0,
            b3=0,
            c0=0,
            c1=shuttler_freq_mu(end_f_MHz),
            c2=0,
        )
        self.shuttler0_trigger.trigger(0b11)
        delay(500*us)

        ## Step 3 ##
        # OUT0 and OUT1 have a 180 degree phase difference
        self.shuttler0_dds[0].set_waveform(
            b0=shuttler_dds_amp_mu(1.0),
            b1=0,
            b2=0,
            b3=0,
            c0=shuttler_phase_offset(180.0),
            c1=shuttler_freq_mu(end_f_MHz),
            c2=0,
        )
        # The phase and output setting of OUT1 are retained
        # if the channel is not triggered or its config is not cleared
        self.shuttler0_trigger.trigger(0b1)
        delay(500*us)

        ## Step 4 ##
        # b(0) = 0, b(250) = 8.545, b(500) = 0
        self.shuttler0_dds[0].set_waveform(
            b0=0,
            b1=shuttler_dds_damp_mu(0.06835937),
            b2=shuttler_dds_ddamp_mu(-0.0001367187),
            b3=0,
            c0=0,
            c1=shuttler_freq_mu(end_f_MHz),
            c2=0,
        )
        self.shuttler0_dds[1].set_waveform(
            b0=0,
            b1=shuttler_dds_damp_mu(0.06835937),
            b2=shuttler_dds_ddamp_mu(-0.0001367187),
            b3=0,
            c0=0,
            c1=0,
            c2=0,
        )
        self.shuttler0_trigger.trigger(0b11)
        delay(500*us)

        ## Step 5 ##
        self.shuttler0_dcbias[0].set_waveform(
            a0=shuttler_volt_amp_mu(-5.0),
            a1=int32(shuttler_volt_damp_mu(0.01)),
            a2=0,
            a3=0,
        )
        self.shuttler0_dds[0].set_waveform(
            b0=shuttler_dds_amp_mu(1.0),
            b1=0,
            b2=0,
            b3=0,
            c0=0,
            c1=shuttler_freq_mu(end_f_MHz),
            c2=0,
        )
        self.shuttler0_dcbias[1].set_waveform(
            a0=shuttler_volt_amp_mu(-5.0),
            a1=int32(shuttler_volt_damp_mu(0.01)),
            a2=0,
            a3=0,
        )
        self.shuttler0_dds[1].set_waveform(
            b0=0,
            b1=0,
            b2=0,
            b3=0,
            c0=0,
            c1=0,
            c2=0,
        )
        self.shuttler0_trigger.trigger(0b11)
        delay(1000*us)

        ## Step 6 ##
        self.shuttler0_dcbias[0].set_waveform(
            a0=shuttler_volt_amp_mu(-2.5),
            a1=int32(shuttler_volt_damp_mu(0.01)),
            a2=0,
            a3=0,
        )
        self.shuttler0_dds[0].set_waveform(
            b0=0,
            b1=shuttler_dds_damp_mu(0.06835937),
            b2=shuttler_dds_ddamp_mu(-0.0001367187),
            b3=0,
            c0=0,
            c1=shuttler_freq_mu(start_f_MHz),
            c2=shuttler_freq_sweep(start_f_MHz, end_f_MHz, duration_us),
        )
        self.shuttler0_trigger.trigger(0b1)
        self.shuttler_channel_reset(1)
        delay(500*us)

        ## Step 7 ##
        self.shuttler0_dcbias[0].set_waveform(
            a0=shuttler_volt_amp_mu(2.5),
            a1=int32(shuttler_volt_damp_mu(-0.01)),
            a2=0,
            a3=0,
        )
        self.shuttler0_dds[0].set_waveform(
            b0=0,
            b1=shuttler_dds_damp_mu(-0.06835937),
            b2=shuttler_dds_ddamp_mu(0.0001367187),
            b3=0,
            c0=shuttler_phase_offset(180.0),
            c1=shuttler_freq_mu(end_f_MHz),
            c2=shuttler_freq_sweep(end_f_MHz, start_f_MHz, duration_us),
        )
        self.shuttler0_trigger.trigger(0b1)
        delay(500*us)

        ## Step 8 ##
        self.shuttler0_relay.enable(0)
        self.shuttler_channel_reset(0)
        self.shuttler_channel_reset(1)

    @kernel
    def led(self):
        for i in range(2):
            for j in range(3):
                self.shuttler0_leds[i].pulse(.1*s)
                delay(.1*s)

    @kernel
    def relay_init(self):
        self.shuttler0_relay.init()
        self.shuttler0_relay.enable(0x0000)

    @kernel
    def adc_init(self):
        delay_mu(int64(self.core.ref_multiplier))
        self.shuttler0_adc.power_up()

        delay_mu(int64(self.core.ref_multiplier))
        assert self.shuttler0_adc.read_id() >> 4 == 0x038d

        delay_mu(int64(self.core.ref_multiplier))
        # The actual output voltage is limited by the hardware and by the calculated
        # calibration gain and offset. For example, with a calibration gain of 1.06
        # the maximum output voltage is 10 / 1.06 = 9.43V; setting a larger value
        # results in overflow.
        self.shuttler0_adc.calibrate(self.shuttler0_dcbias, self.shuttler0_trigger, self.shuttler0_config)
@@ -5,7 +5,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1e-9}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -13,6 +17,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
@@ -27,13 +45,13 @@ device_db = {
     "i2c_switch0": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
         "arguments": {"address": 0xe0}
     },
     "i2c_switch1": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548",
+        "class": "I2CSwitch",
         "arguments": {"address": 0xe2}
     },
@@ -191,10 +209,8 @@ device_db = {
         "arguments": {
             "channel": 24,
             "pgia_device": "spi_sampler0_pgia",
-            "cpld0_device": "urukul0_cpld",
-            "cpld1_device": "urukul1_cpld",
-            "dds0_device": "urukul0_dds",
-            "dds1_device": "urukul1_dds"
+            "cpld_devices": ["urukul0_cpld", "urukul1_cpld"],
+            "dds_devices": ["urukul0_dds", "urukul1_dds"],
         }
     },
 
@@ -9,7 +9,11 @@ device_db = {
         "type": "local",
         "module": "artiq.coredevice.core",
         "class": "Core",
-        "arguments": {"host": core_addr, "ref_period": 1e-9}
+        "arguments": {
+            "host": core_addr,
+            "ref_period": 1e-9,
+            "analyzer_proxy": "core_analyzer"
+        }
     },
     "core_log": {
         "type": "controller",
@@ -17,6 +21,20 @@ device_db = {
         "port": 1068,
         "command": "aqctl_corelog -p {port} --bind {bind} " + core_addr
     },
+    "core_moninj": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1383,
+        "port": 1384,
+        "command": "aqctl_moninj_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
+    "core_analyzer": {
+        "type": "controller",
+        "host": "::1",
+        "port_proxy": 1385,
+        "port": 1386,
+        "command": "aqctl_coreanalyzer_proxy --port-proxy {port_proxy} --port-control {port} --bind {bind} " + core_addr
+    },
     "core_cache": {
         "type": "local",
         "module": "artiq.coredevice.cache",
@@ -31,7 +49,7 @@ device_db = {
     "i2c_switch": {
         "type": "local",
         "module": "artiq.coredevice.i2c",
-        "class": "PCA9548"
+        "class": "I2CSwitch"
     },
 
     # Generic TTL
@@ -20,7 +20,7 @@ class DDSSetter(EnvExperiment):
                 "driver": self.get_device(k),
                 "frequency": self.get_argument(
                     "{}_frequency".format(k),
-                    NumberValue(100e6, scale=1e6, unit="MHz", ndecimals=6))
+                    NumberValue(100e6, scale=1e6, unit="MHz", precision=6))
             }
 
     @kernel
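The last hunk reflects the rename of the NumberValue keyword from ndecimals to precision. A minimal sketch of an experiment argument using the new spelling (the SetFreq class is made up for illustration and is not part of this diff):

from artiq.experiment import *


class SetFreq(EnvExperiment):
    def build(self):
        # "precision" sets the number of displayed decimals (formerly "ndecimals")
        self.setattr_argument(
            "frequency", NumberValue(100e6, scale=1e6, unit="MHz", precision=6))

    def run(self):
        print(self.frequency)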