Forked from M-Labs/artiq
Compare commits: fix-scanwi...master (2292 commits)
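The table below lists each commit's author and abbreviated SHA-1. As a rough way to reproduce a similar author/SHA listing from a local clone of the fork, here is a minimal sketch; which branch acts as base versus head, and the `commits_between` helper name, are assumptions for illustration rather than anything stated on the original page.

```python
# Minimal sketch, assuming a local clone with both branches fetched and
# "fix-scanwi" treated as base with "master" as head (an assumption).
import subprocess

def commits_between(base: str = "fix-scanwi", head: str = "master") -> list[str]:
    """Return "Author | abbreviated SHA-1" lines for commits in head but not in base."""
    result = subprocess.run(
        ["git", "log", f"{base}..{head}", "--pretty=format:%an | %h"],
        capture_output=True, text=True, check=True,
    )
    return result.stdout.splitlines()

if __name__ == "__main__":
    for line in commits_between():
        print(line)
```

Run from inside the clone; `%an` and `%h` are git's author-name and abbreviated-hash format placeholders, matching the Author and SHA1 columns shown here.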
Author | SHA1 | Date |
---|---|---|
Simon Renblad | 76fba538b1 | |
Sebastien Bourdeauducq | 8dd8cfa6b0 | |
Sebastien Bourdeauducq | 5df0721811 | |
Sebastien Bourdeauducq | 6326051052 | |
Sebastien Bourdeauducq | 44a95b5dda | |
Sebastien Bourdeauducq | 645b9b8c5f | |
Sebastien Bourdeauducq | 858f0479ba | |
Sebastien Bourdeauducq | 133b26b6ce | |
Sebastien Bourdeauducq | d96213dbbc | |
Sebastien Bourdeauducq | 413d33c3d1 | |
Sebastien Bourdeauducq | c2b53ecb43 | |
Sebastien Bourdeauducq | ede0b37c6e | |
Sebastien Bourdeauducq | 795c4372fa | |
Sebastien Bourdeauducq | 402a5d3376 | |
Sebastien Bourdeauducq | 85850ad9e8 | |
Sebastien Bourdeauducq | 7a863b4f5e | |
Sebastien Bourdeauducq | a26cee6ca7 | |
Sebastien Bourdeauducq | be08862606 | |
Sebastien Bourdeauducq | 05a9422e67 | |
Simon Renblad | b09a39c82e | |
mwojcik | 49267671f9 | |
Sebastien Bourdeauducq | 8ca75a3fb9 | |
Florian Agbuya | 8381b34a79 | |
Sebastien Bourdeauducq | d458fc27bf | |
mwojcik | 9f4b8db2de | |
Florian Agbuya | 1108cebd75 | |
Florian Agbuya | cf7cbd0c3b | |
mwojcik | 1a28069aa2 | |
Sebastien Bourdeauducq | 56418e342e | |
Sebastien Bourdeauducq | 77c6553725 | |
Sebastien Bourdeauducq | e81e8f28cf | |
mwojcik | de10e584f6 | |
Florian Agbuya | 875666f3ec | |
Sebastien Bourdeauducq | 3ad3fac828 | |
Simon Renblad | 49afa116b3 | |
Simon Renblad | 363afb5fc9 | |
Simon Renblad | e7af219505 | |
linuswck | ec2b86b08d | |
linuswck | 8f7d138dbd | |
Sebastien Bourdeauducq | bbe6ff8cac | |
Sebastien Bourdeauducq | c0a6252e77 | |
mwojcik | 6640bf0e82 | |
mwojcik | b3c0d084d4 | |
linuswck | bb0b8a6c00 | |
Sebastien Bourdeauducq | ce80bf5717 | |
Florian Agbuya | 378dd0e5ca | |
jfniedermeyer | 9c68451cae | |
linuswck | 93c9d8bcdf | |
mwojcik | e480bbe8d8 | |
mwojcik | b168f0bb4b | |
Sebastien Bourdeauducq | 6705c9fbfb | |
mwojcik | 5f445f6b92 | |
occheung | 363f7327f1 | |
Sebastien Bourdeauducq | f7abc156cb | |
linuswck | de41bd6655 | |
Simon Renblad | 96941d7c04 | |
mwojcik | f3c79e71e1 | |
Simon Renblad | 333b81f789 | |
Sebastien Bourdeauducq | d070826911 | |
Sebastien Bourdeauducq | 9c90f923d2 | |
Sebastien Bourdeauducq | e23e4d39d7 | |
David Nadlinger | 08eea09d44 | |
mwojcik | 7ab52af603 | |
mwojcik | 973fd88b27 | |
mwojcik | 8d7194941e | |
mwojcik | 0a750c77e8 | |
mwojcik | 1a0fc317df | |
mwojcik | e05be2f8e4 | |
mwojcik | 6f4b8c641e | |
mwojcik | b42816582e | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | 76f1318bc0 | |
Sebastien Bourdeauducq | 0131a8bef2 | |
mwojcik | e63e2a2897 | |
Simon Renblad | 47fc640f75 | |
Simon Renblad | bb7caacb5f | |
Simon Renblad | da9f7cb58a | |
occheung | 43926574da | |
Simon Renblad | 4f3e58db52 | |
Simon Renblad | 13271cea64 | |
occheung | 0e8fa8933f | |
David Nadlinger | 2eb89cb168 | |
occheung | a772dee1cc | |
Simon Renblad | bafb85a274 | |
mwojcik | 0e8aa33979 | |
mwojcik | fcf6c90ba2 | |
linuswck | 0c1b572872 | |
linuswck | ab0d4c41c3 | |
Jonathan Coates | 6eb81494c5 | |
Jonathan Coates | 586d97c6cb | |
David Nadlinger | 892b0eaca2 | |
linuswck | eedac7cf71 | |
linuswck | a61bbf5618 | |
occheung | b7b8f0efa2 | |
occheung | b52f253dbd | |
occheung | 73ab71f443 | |
linuswck | ab8247b3d7 | |
mwojcik | 36b3678853 | |
mwojcik | af77885dfc | |
mwojcik | eb57b3b393 | |
Simon Renblad | 40ac2e03ab | |
occheung | a2fbcb8bfd | |
occheung | 5c64eac8d2 | |
occheung | 477a7b693c | |
occheung | f2694f25eb | |
occheung | 9e1447d104 | |
occheung | 870020bc9f | |
occheung | c2d136f669 | |
occheung | 06426e0ed9 | |
occheung | e443e06e62 | |
occheung | 55150ebdbb | |
occheung | eb08c55abe | |
occheung | 67b6588d95 | |
occheung | 1bb7e9ceef | |
Florian Agbuya | c02a14ba37 | |
Simon Renblad | 1f3b2ef645 | |
linuswck | 372008cb66 | |
linuswck | 85abb1da2c | |
David Nadlinger | 9e5b62a6b1 | |
David Nadlinger | 22ab62324c | |
David Nadlinger | fc74b78a45 | |
Simon Renblad | f01e654b9c | |
David Nadlinger | e45dc948e9 | |
David Mak | 460cbf4499 | |
Florian Agbuya | 6df85478e4 | |
Jonathan Coates | 5c85cef0c2 | |
linuswck | ccb140a929 | |
linuswck | 7c8073c1ce | |
Florian Agbuya | 2f3329181c | |
Sebastien Bourdeauducq | 1ec1ab0502 | |
linuswck | b49fb841ce | |
Florian Agbuya | a619c9f3c2 | |
Florian Agbuya | 0188f31f3a | |
Florian Agbuya | 4e770509db | |
occheung | 7f63bb322d | |
occheung | 5e5d671f4c | |
occheung | 98904ef4c3 | |
Sebastien Bourdeauducq | 73ac414912 | |
occheung | 838cc80922 | |
Simon Renblad | 904afe1632 | |
Simon Renblad | 01d777c977 | |
Sebastien Bourdeauducq | 9556ca53de | |
occheung | df99450faa | |
Sebastien Bourdeauducq | 1f58cd505c | |
linuswck | ddb2b5e3a1 | |
linuswck | b56f7e429a | |
Sebastien Bourdeauducq | 3452d0c423 | |
Sebastien Bourdeauducq | 2139456f80 | |
Sebastien Bourdeauducq | a2a780a3f2 | |
Sebastien Bourdeauducq | 3620358f12 | |
Sebastien Bourdeauducq | 72b0a17542 | |
Sebastien Bourdeauducq | f5cbca9c29 | |
linuswck | 737ff79ae7 | |
linuswck | dc97d3aee6 | |
Sebastien Bourdeauducq | 5d38db19d0 | |
Sebastien Bourdeauducq | 9bee4b9697 | |
linuswck | cd22e42cb4 | |
linuswck | b7bac8c9d8 | |
mwojcik | e8818c812c | |
occheung | 68dd0e029f | |
occheung | 64d3f867a0 | |
Sebastien Bourdeauducq | df662c4262 | |
Sebastien Bourdeauducq | d2ac6aceb3 | |
Sebastien Bourdeauducq | 9b94a09477 | |
David Nadlinger | efbae51f9d | |
David Nadlinger | 8acfa82586 | |
David Nadlinger | 4d636ea593 | |
Sebastien Bourdeauducq | 3ed7e0ed06 | |
Simon Renblad | c4259dab18 | |
mwojcik | c46ac6f87d | |
linuswck | 758b97426a | |
linuswck | c206e92f29 | |
linuswck | cb547c8a46 | |
linuswck | 72a5231493 | |
Denis Ovchinnikov | 07714be8a7 | |
Simon Renblad | 361088ae72 | |
Simon Renblad | a384df17a4 | |
Simon Renblad | 6592b6ea1d | |
Simon Renblad | 2fb085f1a2 | |
Simon Renblad | a7569a0b2d | |
Simon Renblad | 4fbff1648c | |
Simon Renblad | 8f4c8387f9 | |
Simon Renblad | a2d62e6006 | |
Simon Renblad | 3d0feef614 | |
Simon Renblad | 59ad873831 | |
Simon Renblad | 8589da0723 | |
Simon Renblad | 94e076e976 | |
Simon Renblad | a0094aafbb | |
Simon Renblad | 0befadee96 | |
sven-oxionics | b3dc199e6a | |
Florian Agbuya | d73889fb27 | |
Simon Renblad | 9f8bb6445f | |
Simon Renblad | 068a2d1663 | |
Simon Renblad | 6c588b83d7 | |
Simon Renblad | c17f69a51b | |
Simon Renblad | ac504069d2 | |
Simon Renblad | b6a83904b5 | |
Simon Renblad | 25959d0cd6 | |
Simon Renblad | 5695e9f77e | |
Simon Renblad | fe0f6d8a2c | |
Simon Renblad | d1f2727126 | |
Simon Renblad | 16a3ce274f | |
Simon Renblad | af7622d7ab | |
Jonathan Coates | 9a84575649 | |
Simon Renblad | faf85e815a | |
Simon Renblad | 3663a6b8e8 | |
Simon Renblad | 91442e2914 | |
Simon Renblad | 50a6dac178 | |
Simon Renblad | 5292a8de82 | |
Sebastien Bourdeauducq | 7791f85a1a | |
Sebastien Bourdeauducq | 48bc8a2ecc | |
Denis Ovchinnikov | 93882eb3ce | |
Simon Renblad | 7ca02a119d | |
Simon Renblad | 373fe3dbe7 | |
Simon Renblad | 1af98727b7 | |
Simon Renblad | 376f36c965 | |
Simon Renblad | e710d4badd | |
Simon Renblad | bfbe13e51b | |
Simon Renblad | bf38fc8b0f | |
Simon Renblad | 337273acb6 | |
Simon Renblad | 748707e157 | |
Leon Riesebos | 833fd8760e | |
Florian Agbuya | 454597915a | |
Sebastien Bourdeauducq | 77293d53e3 | |
Sebastien Bourdeauducq | a792bc5456 | |
Sebastien Bourdeauducq | 20d4712815 | |
Spaqin | 82bd913f63 | |
Sebastien Bourdeauducq | 115415d120 | |
Florian Agbuya | d140c960bb | |
Egor Savkin | c25c0bd55a | |
Egor Savkin | 30ef8d8cb4 | |
Florian Agbuya | 7ad32d903a | |
Florian Agbuya | bf46ce4a92 | |
den512is | 1f306a2859 | |
Florian Agbuya | 150d325fc1 | |
Florian Agbuya | c298ec4c2e | |
Sebastien Bourdeauducq | 69bf2dfb81 | |
mwojcik | 29cb7e785d | |
Sebastien Bourdeauducq | b97f6a9e44 | |
Sebastien Bourdeauducq | e0ebc1b21d | |
Sebastien Bourdeauducq | c6ddd3af17 | |
Florian Agbuya | e12219e803 | |
Sebastien Bourdeauducq | ff11b5df71 | |
Sebastien Bourdeauducq | c8dc2cbf09 | |
Sebastien Bourdeauducq | c6b29b30fb | |
Sebastien Bourdeauducq | b20d09aad5 | |
Sebastien Bourdeauducq | 6276182c96 | |
Sebastien Bourdeauducq | d103cbea31 | |
Sebastien Bourdeauducq | 9a6bc6dc7b | |
Sebastien Bourdeauducq | fabe88065b | |
Egor Savkin | 748969c21e | |
Sebastien Bourdeauducq | 75f6bdb6a1 | |
Sebastien Bourdeauducq | 41caec797e | |
Sebastien Bourdeauducq | 953a8a9555 | |
Sebastien Bourdeauducq | 444bab2186 | |
Sebastien Bourdeauducq | 0941d3a29a | |
Denis Ovchinnikov | 22e2514ce6 | |
mwojcik | a4895b591a | |
Sebastien Bourdeauducq | ef2cc2cc12 | |
Sebastien Bourdeauducq | 779810163f | |
Sebastien Bourdeauducq | b9c7905b20 | |
Charles Baynham | c2b0c97640 | |
Sebastien Bourdeauducq | 58cc3b8d0a | |
Sebastien Bourdeauducq | 598c7b1d25 | |
Jonathan Coates | ea9fe9b4e1 | |
mwojcik | c1d6fd4bbe | |
mwojcik | ab52748cac | |
mwojcik | ddfe51e7ac | |
mwojcik | 6c96033d41 | |
mwojcik | 0b03126038 | |
mwojcik | fdca1ab7fc | |
mwojcik | c36b6b3b65 | |
mwojcik | c0ca27e6cf | |
Jonathan Coates | 3ca47537b8 | |
Hartmann Michael (IFAG PSS SIS SCE QSE) | df15f53ee9 | |
Sebastien Bourdeauducq | e015483e48 | |
Sebastien Bourdeauducq | c53d333d46 | |
Sebastien Bourdeauducq | 5b94ce82e4 | |
Sebastien Bourdeauducq | 45cd438fb8 | |
Sebastien Bourdeauducq | 0e7e30d46e | |
Sebastien Bourdeauducq | d5a7755584 | |
Sebastien Bourdeauducq | 3ff0be6540 | |
Sebastien Bourdeauducq | 8409a6bb94 | |
Sebastien Bourdeauducq | 2c1438c4b9 | |
Egor Savkin | 5199bea353 | |
mwojcik | a533f2a0cd | |
Jonathan Coates | 0bf57f4ebd | |
Sebastien Bourdeauducq | 4417acd13b | |
Sebastien Bourdeauducq | 4056168875 | |
Egor Savkin | 9331911139 | |
Spaqin | 2f35869eb1 | |
Egor Savkin | aed47d79ff | |
mwojcik | 918d30b900 | |
Egor Savkin | b5d9062ba9 | |
Egor Savkin | 8984f5104a | |
Egor Savkin | d0b8818688 | |
Sebastien Bourdeauducq | 757c00b0fe | |
Sebastien Bourdeauducq | c1474c134a | |
Sebastien Bourdeauducq | dc3db8bb66 | |
Sebastien Bourdeauducq | 97161a3df2 | |
Ikko Eltociear Ashimine | 7ba06bfe61 | |
Spaqin | b225717ddb | |
mwojcik | 696bda5c03 | |
mwojcik | 9150230ea7 | |
Spaqin | e9a153b985 | |
David Nadlinger | 8b1f38b015 | |
Egor Savkin | bbf80875fb | |
Egor Savkin | 1ca09b9484 | |
Spaqin | 84e7515721 | |
Ikko Eltociear Ashimine | 15c18bdc81 | |
Sebastien Bourdeauducq | a9360823b1 | |
Egor Savkin | 1ec0abbfcf | |
mwojcik | 90a6fe1c35 | |
mwojcik | d0437f5672 | |
Michael Hartmann | 07d684a35d | |
Michael Hartmann | 2371c825f5 | |
Egor Savkin | 394138f00f | |
Sebastien Bourdeauducq | 3f5cc4aa10 | |
Sebastien Bourdeauducq | e9c65abebe | |
Sebastien Bourdeauducq | 20e8f17b3d | |
Sebastien Bourdeauducq | 57e87c9717 | |
Sebastien Bourdeauducq | 248cd69673 | |
Sebastien Bourdeauducq | b8968262d7 | |
Sebastien Bourdeauducq | babbbfadb3 | |
Sebastien Bourdeauducq | 514ac953ce | |
Sebastien Bourdeauducq | 0a37a1a4c1 | |
Sebastien Bourdeauducq | 6d37d9d52c | |
Sebastien Bourdeauducq | 5f77d4f5fa | |
Sebastien Bourdeauducq | 2f289c552f | |
Sebastien Bourdeauducq | 9e8bb3c701 | |
Sebastien Bourdeauducq | d872c3ab4d | |
Sebastien Bourdeauducq | f8d93813e9 | |
Sebastien Bourdeauducq | 628b671433 | |
Sebastien Bourdeauducq | daad3d263a | |
Sebastien Bourdeauducq | 80f261437a | |
Sebastien Bourdeauducq | 7fd6dead8f | |
Sebastien Bourdeauducq | 73a4ef89ec | |
mwojcik | 70edc9c5c6 | |
mwojcik | 9042426872 | |
mwojcik | cd860beda2 | |
mwojcik | 627504b60e | |
Sebastien Bourdeauducq | c8ab6c1b2b | |
Sebastien Bourdeauducq | a96bbd8508 | |
Sebastien Bourdeauducq | 6cfd1480a7 | |
Sebastien Bourdeauducq | c401559ed5 | |
Sebastien Bourdeauducq | ea21f474a7 | |
Sebastien Bourdeauducq | cee9f3f44e | |
Sebastien Bourdeauducq | b9bfe090f4 | |
mwojcik | eb3742fb08 | |
Egor Savkin | 070fed755b | |
Sebastien Bourdeauducq | 63f1a6d197 | |
Sebastien Bourdeauducq | 7dafdfe2f7 | |
Sebastien Bourdeauducq | ec893222a4 | |
Sebastien Bourdeauducq | 573a895c1e | |
Sebastien Bourdeauducq | cf2a4972f7 | |
Sebastien Bourdeauducq | 668997a451 | |
Sebastien Bourdeauducq | 5da9794895 | |
Spaqin | 3838dfc1d1 | |
Sebastien Bourdeauducq | 1be7e2a2e1 | |
Sebastien Bourdeauducq | 1bf7188dec | |
mwojcik | bdae594c79 | |
mwojcik | 8dc6902c23 | |
Norman Krackow | dbb77b5356 | |
Sebastien Bourdeauducq | 1fc127c770 | |
David Nadlinger | 88684dbd2a | |
David Nadlinger | b9f13d48aa | |
David Nadlinger | 4bb2a3b9e0 | |
David Nadlinger | f5c408d8d9 | |
Sebastien Bourdeauducq | 4be7f302e4 | |
Spaqin | 17efc28dbe | |
David Nadlinger | 1e0102379b | |
David Nadlinger | ceabeb8d84 | |
SingularitySurfer | 8e476dd502 | |
David Nadlinger | 874d298ceb | |
Egor Savkin | d75ade7be6 | |
Egor Savkin | 2a58981822 | |
Egor Savkin | e80442811e | |
Egor Savkin | 12649720f1 | |
Egor Savkin | 454ae39c5d | |
David Nadlinger | 3c7a394eff | |
David Nadlinger | 740543d4e2 | |
Egor Savkin | b2b559e73b | |
Egor Savkin | 1852491102 | |
Egor Savkin | c591e7e305 | |
David Nadlinger | 261dc6b933 | |
David Nadlinger | 1abedba6dc | |
Egor Savkin | aa2febca53 | |
Egor Savkin | d60a96a715 | |
wlph17 | 3f93f16955 | |
Sebastien Bourdeauducq | 3735b7ea9d | |
Sebastien Bourdeauducq | 195d2aea6a | |
Sebastien Bourdeauducq | 6d179b2bf5 | |
Sebastien Bourdeauducq | 275b00bfc2 | |
Jonathan Coates | b8b6ce14cc | |
Nico Pulido | 88c5109627 | |
David Nadlinger | dee154b35b | |
David Nadlinger | 950b9ac4d6 | |
Egor Savkin | 6c47aac760 | |
mwojcik | f2c1e663a7 | |
Egor Savkin | f7f027001e | |
David Nadlinger | 0b3c232819 | |
Etienne Wodey | d45f9b6950 | |
Sebastien Bourdeauducq | 2fe02cee6f | |
Sebastien Bourdeauducq | 404f24af6b | |
David Nadlinger | 3d25092cbd | |
David Nadlinger | dbbe8e8ed4 | |
David Nadlinger | 8740ec3dd5 | |
David Nadlinger | 6caa779c74 | |
David Nadlinger | 4819016a3c | |
David Nadlinger | 00a27b105a | |
David Nadlinger | beff15de5e | |
火焚 富良 | defc69d9c3 | |
火焚 富良 | e2178f6c86 | |
Sebastien Bourdeauducq | f3f068036a | |
mwojcik | ad000609ce | |
mwojcik | af0b94bb34 | |
mwojcik | 5cd57e8688 | |
mwojcik | f8eb695c0f | |
mwojcik | 458bd8a927 | |
mwojcik | a6856a5e4a | |
mwojcik | 1eb87164be | |
Sebastien Bourdeauducq | f75ddf78b0 | |
Sebastien Bourdeauducq | e0b1098bc0 | |
Robert Jördens | e5c621751f | |
Robert Jördens | 07db770423 | |
Robert Jördens | eb7a0714b3 | |
Robert Jördens | e15b5b50d8 | |
Robert Jördens | 1820e1f715 | |
Robert Jördens | 118b7aca1d | |
Fabian Schwartau | d5e267fadf | |
Sebastien Bourdeauducq | 286f151d9a | |
Sebastien Bourdeauducq | 19b8d28a2e | |
Sebastien Bourdeauducq | 3ffbc5681e | |
Sebastien Bourdeauducq | 192cab887f | |
wlph17 | 9846ee653c | |
fanmingyu212 | 56e6b1428c | |
Michael Birtwell | b895846322 | |
Robert Jördens | a1a4545ed4 | |
Robert Jördens | a0053f7a2b | |
Robert Jördens | 740f3d220b | |
Robert Jördens | 513f9f00f3 | |
Robert Jördens | 5cfa8d9a42 | |
Robert Jördens | 0e4a87826c | |
Sebastien Bourdeauducq | 1709cf9717 | |
Sebastien Bourdeauducq | 4266beeb9c | |
mwojcik | c955ac15ed | |
mwojcik | 81ef484864 | |
mwojcik | f2c3f95040 | |
mwojcik | 616ed3dcc2 | |
Robert Jördens | aedcf205c7 | |
Robert Jördens | 14ab1d4bbc | |
Sebastien Bourdeauducq | a028b5c9f7 | |
Sebastien Bourdeauducq | 6085fe3319 | |
Robert Jördens | af28bf3550 | |
Robert Jördens | 4df880faf6 | |
Robert Jördens | 857fb4ecec | |
Robert Jördens | a91836e5fe | |
Robert Jördens | c5c5c30617 | |
Robert Jördens | 27e3c044ed | |
Robert Jördens | c26fa5eb90 | |
Sebastien Bourdeauducq | 411afbdc23 | |
Sebastien Bourdeauducq | b4287ac9f4 | |
Robert Jördens | 1cc57e2345 | |
Robert Jördens | 263c2751b3 | |
Robert Jördens | 876f26ee30 | |
Robert Jördens | fa3678f8a3 | |
Robert Jördens | f4d325112c | |
Robert Jördens | b6586cd7e4 | |
Robert Jördens | 3809ac5470 | |
Robert Jördens | b9727fdfce | |
Robert Jördens | d6d0c2c866 | |
Robert Jördens | 0df2cadcd3 | |
Robert Jördens | 25c0dc4688 | |
Robert Jördens | cf48232a90 | |
Robert Jördens | a20087848d | |
Robert Jördens | 31663556b8 | |
Robert Jördens | 47f90a58cc | |
Mikołaj Sowiński | 3c7ab498d1 | |
Deepskyhunter | 7c306d5609 | |
mwojcik | b705862ecd | |
fanmingyu212 | 20cb99061e | |
Sebastien Bourdeauducq | 5ef94d30dd | |
kk1050 | 3c72b8d646 | |
Sebastien Bourdeauducq | 27397625ba | |
cc78078 | 3535d0f1ae | |
cc78078 | 185c91f522 | |
Deepskyhunter | f31279411e | |
Alex Wong Tat Hang | a3ae82502c | |
Deepskyhunter | 0cdb06fdf5 | |
Deepskyhunter | 2a7a72b27a | |
kk1050 | 748e28be38 | |
Sebastien Bourdeauducq | 4b1715c80b | |
Robert Jördens | 5985595845 | |
Robert Jördens | a8f498b478 | |
Sebastien Bourdeauducq | db4bccda7e | |
Sebastien Bourdeauducq | 5c461443e4 | |
Sebastien Bourdeauducq | cb711e0ee3 | |
Sebastien Bourdeauducq | 9ba239b8b2 | |
Robert Jördens | 4ea11f4609 | |
SingularitySurfer | 57ac6ec003 | |
Robert Jördens | d2dacc6433 | |
Sebastien Bourdeauducq | 734b2a6747 | |
Deepskyhunter | c7394802bd | |
kk1050 | 7aa6104872 | |
mwojcik | 46f2842d38 | |
mwojcik | c9fb7b410f | |
Spaqin | 8be945d5c7 | |
SingularitySurfer | 9c8ffa54b2 | |
Sebastien Bourdeauducq | d17675e9b5 | |
Sebastien Bourdeauducq | 388b81af19 | |
Deepskyhunter | 02b086c9e5 | |
SingularitySurfer | 953dd899fd | |
SingularitySurfer | 689a2ef8ba | |
SingularitySurfer | d8cfe22501 | |
Deepskyhunter | b4f24dd326 | |
Deepskyhunter | da6d35e7c6 | |
Deepskyhunter | 745f440597 | |
SingularitySurfer | 2e834cf406 | |
SingularitySurfer | 3f8a221c76 | |
SingularitySurfer | ab097b8ef9 | |
SingularitySurfer | 24b4ec46bd | |
Norman Krackow | 56c59e38f0 | |
SingularitySurfer | c0581178d6 | |
SingularitySurfer | 43c94577ce | |
SingularitySurfer | ce4055db3b | |
SingularitySurfer | b67a70392d | |
SingularitySurfer | 57176fedb2 | |
SingularitySurfer | 8bea821f93 | |
SingularitySurfer | 0388161754 | |
SingularitySurfer | 751af3144e | |
SingularitySurfer | 5df766e6da | |
David Nadlinger | e1f9feae8b | |
David Nadlinger | dd928fc014 | |
Sebastien Bourdeauducq | 48cb111035 | |
hartytp | d8597e9dc8 | |
David Nadlinger | 32db6ff978 | |
David Nadlinger | dbc87f08ff | |
David Nadlinger | c4068e6896 | |
David Nadlinger | 85895ab89b | |
kk1050 | 46fb8916bb | |
David Nadlinger | 2d6fc154db | |
David Nadlinger | 4c42f65909 | |
David Nadlinger | f4d639242d | |
SingularitySurfer | d09153411f | |
Norman Krackow | dc49372d57 | |
Norman Krackow | 2044dc3ae5 | |
SingularitySurfer | ae3f1c1c71 | |
Sebastien Bourdeauducq | bf3b155a31 | |
SingularitySurfer | 1bddadc6e2 | |
SingularitySurfer | b0f9fd9c4c | |
Michael Birtwell | 69c4026d2b | |
Deepskyhunter | e47834d82e | |
Spaqin | 4ede14b14d | |
kk1050 | 4ddd2739ee | |
Sebastien Bourdeauducq | e702624720 | |
Sebastien Bourdeauducq | 68ef0073ea | |
Sebastien Bourdeauducq | 71a37bb408 | |
occheung | f79f7db3a2 | |
occheung | 872f8f039f | |
occheung | 50495097e5 | |
Sebastien Bourdeauducq | ca614a3eea | |
Sebastien Bourdeauducq | 8bf6bc4d1f | |
occheung | 6d46c886d7 | |
Sebastien Bourdeauducq | a5b7e958f8 | |
Sebastien Bourdeauducq | 667f36a2e7 | |
Sebastien Bourdeauducq | 7cff63e539 | |
Sebastien Bourdeauducq | df1b19082c | |
Sebastien Bourdeauducq | d478086119 | |
Sebastien Bourdeauducq | 18a08954c1 | |
Sebastien Bourdeauducq | 57086e2349 | |
mwojcik | cf8e583847 | |
mwojcik | d24a36a02a | |
mwojcik | 4bdb4c8e11 | |
Sebastien Bourdeauducq | 8599be5550 | |
Sebastien Bourdeauducq | 9896d78e07 | |
kk1050 | 70503bee6f | |
Laurent Stephenson | 16393efa7c | |
David Nadlinger | 8a7af3f75c | |
Spaqin | 35f30ddf05 | |
Sebastien Bourdeauducq | c440f9fe1b | |
Sebastien Bourdeauducq | 69b6426800 | |
Michael Birtwell | 50dbda4f43 | |
Michael Birtwell | 95378cf9c9 | |
Michael Birtwell | 671453938b | |
Michael Birtwell | 1fe59d27dc | |
Michael Birtwell | 73082d116f | |
Michael Birtwell | 596b9a265c | |
Michael Birtwell | 6ffb1f83ee | |
Michael Birtwell | c60de48a30 | |
Suthep Pomjaksilp | 06ad76b6ab | |
David Nadlinger | b2b84b1fd6 | |
David Nadlinger | 6b5c390d48 | |
David Nadlinger | 2cb08814e8 | |
Sebastien Bourdeauducq | 58b59b99ff | |
Sebastien Bourdeauducq | fa3ee8ad23 | |
Michael Birtwell | cab9d90d01 | |
Sebastien Bourdeauducq | 0a029748ee | |
Leon Riesebos | 386391e3f9 | |
Leon Riesebos | b5dc9fd640 | |
Sebastien Bourdeauducq | c82c358f3a | |
Sebastien Bourdeauducq | 723f41c78b | |
Sebastien Bourdeauducq | 866a83796a | |
Timothy Ballance | f91e106586 | |
Timothy Ballance | a289d69883 | |
Sebastien Bourdeauducq | f89275b02a | |
Sebastien Bourdeauducq | 65d2dd0173 | |
Sebastien Bourdeauducq | 6b33f3b719 | |
Sebastien Bourdeauducq | 80d412a8bf | |
Sebastien Bourdeauducq | 922d2b1619 | |
Sebastien Bourdeauducq | d644e982c8 | |
Sebastien Bourdeauducq | ec1efd7af9 | |
Sebastien Bourdeauducq | 735133a2b4 | |
Sebastien Bourdeauducq | 207717c740 | |
Sebastien Bourdeauducq | 6d92e539b1 | |
Sebastien Bourdeauducq | 6a49b8cb58 | |
Sebastien Bourdeauducq | df1513f0e9 | |
Sebastien Bourdeauducq | d3073022ac | |
Sebastien Bourdeauducq | bbb2c75194 | |
Sebastien Bourdeauducq | 710786388c | |
Sebastien Bourdeauducq | aff569b2c3 | |
Sebastien Bourdeauducq | a159ef642d | |
Sebastien Bourdeauducq | 1a26eb8cf2 | |
Sebastien Bourdeauducq | c1c2d21ba7 | |
Sebastien Bourdeauducq | e5e4d55f84 | |
Sebastien Bourdeauducq | 71e8b49246 | |
pca006132 | ebfeb1869f | |
pca006132 | eb6817c8f1 | |
Sebastien Bourdeauducq | 8415151866 | |
ciciwu | 67ca48fa84 | |
ciciwu | 9a96387dfe | |
Sebastien Bourdeauducq | b02abc2bf4 | |
Sebastien Bourdeauducq | ac55da81d8 | |
spaqin | 232f28c0e8 | |
spaqin | 51fa1b5e5e | |
spaqin | 17ecd35530 | |
Spaqin | a85b4d5f5e | |
David Nadlinger | 9bfbd39fa3 | |
Sebastien Bourdeauducq | 338bb189b4 | |
Leon Riesebos | c4292770f8 | |
Sebastien Bourdeauducq | 2b918ac6f7 | |
Michael Birtwell | 1b80746f48 | |
Michael Birtwell | 2d6215158f | |
mwojcik | c000af9985 | |
mwojcik | 35f91aef68 | |
Sebastien Bourdeauducq | 0da7b83176 | |
Steve Fan | ad656d1e53 | |
Sebastien Bourdeauducq | 69ce09c7c0 | |
Sebastien Bourdeauducq | 6a586c2e4d | |
Sebastien Bourdeauducq | e84056f7e0 | |
Mike Birtwell | a106ed0295 | |
Robert Jördens | c8b9eed9c9 | |
Robert Jördens | 08b65470cd | |
Sebastien Bourdeauducq | 65eab31f23 | |
Sebastien Bourdeauducq | 6dfc854673 | |
Sebastien Bourdeauducq | 5a8928fbf3 | |
Sebastien Bourdeauducq | b3b73948a2 | |
Sebastien Bourdeauducq | 8433cc6731 | |
Sebastien Bourdeauducq | 0649e69d94 | |
Sebastien Bourdeauducq | bbfa926fa6 | |
Sebastien Bourdeauducq | 9e37fb95d6 | |
Sebastien Bourdeauducq | 034a0fdb35 | |
Sebastien Bourdeauducq | 0e178e40ac | |
Sebastien Bourdeauducq | a0070d4396 | |
Sebastien Bourdeauducq | 03a367f565 | |
Sebastien Bourdeauducq | b893d97d7b | |
Sebastien Bourdeauducq | b6f5ba8b5b | |
Sebastien Bourdeauducq | cc69482dad | |
Sebastien Bourdeauducq | 833acb6925 | |
occheung | d5eec652ee | |
occheung | a74196aa27 | |
Steve Fan | 798a412c6f | |
David Nadlinger | e45cb217be | |
Sebastien Bourdeauducq | 8866ab301a | |
Sebastien Bourdeauducq | 3cddb14174 | |
Sebastien Bourdeauducq | 245fe6e9ea | |
Sebastien Bourdeauducq | ef25640937 | |
Sebastien Bourdeauducq | dd3279e506 | |
Sebastien Bourdeauducq | afb98a1903 | |
Steve Fan | 34008b7a21 | |
pca006132 | 93328ad8ee | |
Steve Fan | 234a82aaa9 | |
Sebastien Bourdeauducq | ee511758ce | |
Sebastien Bourdeauducq | e6c18364ae | |
pca006132 | 9d43762695 | |
pca006132 | 4132c450a5 | |
pca006132 | 536b3e0c26 | |
pca006132 | ba34700798 | |
pca006132 | 6ec003c1c9 | |
pca006132 | da4ff44377 | |
pca006132 | 4644e105b1 | |
hartytp | 715bff3ebf | |
Sebastien Bourdeauducq | f58aa3bdf6 | |
Sebastien Bourdeauducq | 4e420fc297 | |
Sebastien Bourdeauducq | 5597be3356 | |
Sebastien Bourdeauducq | f542f045da | |
Sebastien Bourdeauducq | 53878fe1d4 | |
Sebastien Bourdeauducq | 735cd1eb3e | |
Steve Fan | 3f812c4c2c | |
occheung | b6c59a0cb3 | |
Steve Fan | de5892a00a | |
Peter Drmota | 4eee49f889 | |
occheung | 9eee0e5a7b | |
Steve Fan | d7dd75e833 | |
Spaqin | 095fb9e333 | |
Sebastien Bourdeauducq | 4e3e0d129c | |
pca006132 | 12ee326fb4 | |
occheung | 61349f9685 | |
occheung | cea0a15e1e | |
occheung | 8b45f917d1 | |
pca006132 | 6542b65db3 | |
pca006132 | 9f90088fa6 | |
occheung | 5e1847e7c1 | |
occheung | 6f3c49528d | |
Sebastien Bourdeauducq | eaa1505c94 | |
Leon Riesebos | f42bea06a8 | |
occheung | 9d493028e5 | |
Sebastien Bourdeauducq | bbac477092 | |
Steve Fan | c0a7be0a90 | |
Sebastien Bourdeauducq | 9e5e234af3 | |
Sebastien Bourdeauducq | 352317df11 | |
Sebastien Bourdeauducq | a518963a47 | |
Sebastien Bourdeauducq | 37f14d94d0 | |
Sebastien Bourdeauducq | 4f723e19a6 | |
Peter Drmota | 7c664142a5 | |
Etienne Wodey | 33a9ca2684 | |
Sébastien Bourdeauducq | 311a818a49 | |
Sébastien Bourdeauducq | 1def0d98c5 | |
Leon Riesebos | 7ffe4dc2e3 | |
Leon Riesebos | 9e3ea4e8ef | |
Sebastien Bourdeauducq | 12512bfb2f | |
Steve Fan | 4a6bea479a | |
Sebastien Bourdeauducq | 9bbf7eb485 | |
mwojcik | f8a649deda | |
mwojcik | 7953f3d705 | |
mwojcik | f281112779 | |
mwojcik | eec3ea6589 | |
Sebastien Bourdeauducq | 163f5d9128 | |
Etienne Wodey | 9f830b86c0 | |
Sebastien Bourdeauducq | b8e7add785 | |
Sebastien Bourdeauducq | 5a923a0956 | |
David Nadlinger | c6039479e4 | |
David Nadlinger | 63b5727a0c | |
David Nadlinger | 9b01db3d11 | |
Sebastien Bourdeauducq | 6a433b2fce | |
occheung | 5ed9e49b94 | |
occheung | 9423428bb0 | |
Sebastien Bourdeauducq | 7307b30213 | |
Harry Ho | b49f813b17 | |
Peter Drmota | 20e079a381 | |
Sebastien Bourdeauducq | f0c50c80e6 | |
Sebastien Bourdeauducq | 46604300a2 | |
Sebastien Bourdeauducq | c029977a27 | |
Sebastien Bourdeauducq | 80115fcc02 | |
occheung | ac2f55b3ff | |
occheung | db3e5e83e6 | |
occheung | 09945ecc4d | |
occheung | 02119282b8 | |
occheung | 750b0ce46d | |
occheung | 531670d6c5 | |
occheung | 0f660735bf | |
occheung | 0755757601 | |
occheung | 0d708cd61a | |
occheung | 03b803e764 | |
occheung | b3e315e24a | |
occheung | 0898e101e2 | |
occheung | cb247f235f | |
occheung | 90f944481c | |
occheung | d84ad0095b | |
occheung | dd68b4ab82 | |
occheung | c6e0e26440 | |
occheung | 8da924ec0f | |
Robert Jördens | 591507a7c0 | |
Robert Jördens | 5a5b0cc7c0 | |
Spaqin | 69cddc6b86 | |
Spaqin | 9b1d7e297d | |
Harry Ho | 21b07dc667 | |
Robert Jördens | 1ff474893d | |
Robert Jördens | 10c37b87ec | |
Harry Ho | c940f104f1 | |
Harry Ho | 0aa8a739aa | |
Sebastien Bourdeauducq | 43eab14f56 | |
Sebastien Bourdeauducq | cc15a4f572 | |
Sebastien Bourdeauducq | df6aeb99f6 | |
Sebastien Bourdeauducq | bb61f2dae6 | |
Sebastien Bourdeauducq | b0cbad530b | |
Sebastien Bourdeauducq | 92cdfac35a | |
occheung | bf180c168c | |
occheung | d5fa3d131a | |
occheung | 6d3164a912 | |
occheung | 46326716fd | |
occheung | 0a59c889de | |
occheung | 27a7a96626 | |
occheung | a0bf11b465 | |
occheung | 790a20edf6 | |
fanmingyu212 | 178a86bcda | |
Sebastien Bourdeauducq | 35d21c98d3 | |
Sebastien Bourdeauducq | f5100702f6 | |
Sebastien Bourdeauducq | 3c1cbf47d2 | |
Robert Jördens | 3f6bf33298 | |
Harry Ho | 501eb1fa23 | |
Harry Ho | ea9bc04407 | |
occheung | 59065c4663 | |
Spaqin | 1894f0f626 | |
Sebastien Bourdeauducq | 4bfd010f03 | |
Etienne Wodey | a8333053c9 | |
occheung | 7a7e17f7e3 | |
Sebastien Bourdeauducq | 3ed10221d8 | |
Sebastien Bourdeauducq | e8a7a8f41e | |
Sebastien Bourdeauducq | 4834966798 | |
Sebastien Bourdeauducq | 7209e6f279 | |
Sebastien Bourdeauducq | ffb1e3ec2d | |
Sebastien Bourdeauducq | 2d79d824f9 | |
Sebastien Bourdeauducq | 1a0c4219ec | |
Sebastien Bourdeauducq | 2e5c32878f | |
occheung | a573dcf3f9 | |
occheung | 448974fe11 | |
occheung | b091d8cb66 | |
Sebastien Bourdeauducq | d50e24acb1 | |
occheung | 5394d04669 | |
occheung | b8ed5a0d91 | |
occheung | 2213e7ffac | |
occheung | 09ffd9de1e | |
occheung | 051a14abf2 | |
occheung | c6ba0f3cf4 | |
occheung | c812a837ab | |
occheung | a596db404d | |
Sebastien Bourdeauducq | eff7ae5aff | |
Sebastien Bourdeauducq | c78fbe9bd2 | |
Sebastien Bourdeauducq | 17b9d2fc5a | |
Sebastien Bourdeauducq | 5e2664ae7e | |
Sebastien Bourdeauducq | 64ce7e498b | |
Sebastien Bourdeauducq | 952acce65b | |
Sebastien Bourdeauducq | 7ae4b2d9bb | |
Sebastien Bourdeauducq | ce0964e25f | |
occheung | 4fab267593 | |
occheung | dcbd9f905c | |
occheung | 9f6b3f6014 | |
Sebastien Bourdeauducq | 9697ec33eb | |
Sebastien Bourdeauducq | eee80c7697 | |
Sebastien Bourdeauducq | b7efb2f633 | |
Sebastien Bourdeauducq | 9ee03bd438 | |
occheung | 4619a33db4 | |
occheung | 5985f7efb5 | |
Sebastien Bourdeauducq | 6db7280b09 | |
occheung | d8ac429059 | |
occheung | 798774192d | |
occheung | eecd825d23 | |
occheung | 1da0554a49 | |
Sebastien Bourdeauducq | 035d15af9d | |
Sebastien Bourdeauducq | 9addd08587 | |
Sebastien Bourdeauducq | 3e09e48152 | |
occheung | 5d0a8cf9ac | |
occheung | 70507e1b72 | |
occheung | c113cd6bf5 | |
Sebastien Bourdeauducq | 251cd4dcc6 | |
occheung | 61b0170a12 | |
occheung | af263ffe1f | |
occheung | a833974b50 | |
occheung | d623acc29d | |
occheung | 8fa47b8119 | |
occheung | de0f2d4a28 | |
occheung | 9afe63c08a | |
occheung | 29a2f106d1 | |
occheung | b30ed75e69 | |
occheung | 279593f984 | |
occheung | 1ba8c8dfee | |
Sebastien Bourdeauducq | 942bd1a95d | |
occheung | 3d629006df | |
occheung | 7542105f0f | |
occheung | 01ca114c66 | |
occheung | 36171f2c61 | |
occheung | 01e357e5d3 | |
occheung | f77b607b56 | |
occheung | 1293e0750e | |
occheung | fc42d053d9 | |
Sebastien Bourdeauducq | 9adab6c817 | |
Sebastien Bourdeauducq | 8c468d0346 | |
occheung | 1b516b16e2 | |
Sebastien Bourdeauducq | be5ae5c5b4 | |
Sebastien Bourdeauducq | d13efd6587 | |
Sebastien Bourdeauducq | e8fe8409b2 | |
Sebastien Bourdeauducq | cabe5ace8e | |
Sebastien Bourdeauducq | 6629a49e86 | |
Sebastien Bourdeauducq | 43d120359d | |
Sebastien Bourdeauducq | 5656e52581 | |
occheung | 1b8b4baf6a | |
occheung | 905330b0f1 | |
occheung | 50a62b3d42 | |
occheung | 7f0bc9f7f0 | |
occheung | c42adfe6fd | |
occheung | f56152e72f | |
occheung | c800b6c8d3 | |
occheung | e99061b013 | |
occheung | ecedec577c | |
occheung | 252594a606 | |
occheung | 31bf17563c | |
occheung | bfddd8a30f | |
occheung | ad3037d0f6 | |
occheung | daaf6c3401 | |
occheung | 6d9cebfd42 | |
occheung | 96438c9da7 | |
occheung | 6535b2f089 | |
occheung | 45adaa1d98 | |
occheung | 869a282410 | |
occheung | ebb9f298b5 | |
occheung | 97a0132f15 | |
occheung | 37ea863004 | |
occheung | 3ff74e0693 | |
occheung | 448fe0e8cf | |
occheung | 8294d7fea5 | |
occheung | 13032272fd | |
occheung | 46102ee737 | |
occheung | b87ea79d51 | |
occheung | 9aee42f0f2 | |
occheung | 82b4052cd6 | |
Leon Riesebos | 2cf144a60c | |
Robert Jördens | e7a46ec767 | |
Etienne Wodey | 4d7bd3ee32 | |
Etienne Wodey | 075cb26dd7 | |
Etienne Wodey | 7aebf02f84 | |
Etienne Wodey | 61b44d40dd | |
Etienne Wodey | 65f8a97b56 | |
Robert Jördens | 11790c6d7c | |
SingularitySurfer | 65f63e6927 | |
Robert Jördens | a53162d01d | |
SingularitySurfer | 4d21a72407 | |
Mikołaj Sowiński | 898122f3e5 | |
Sebastien Bourdeauducq | 420891ba54 | |
Sebastien Bourdeauducq | 9f94bc61ae | |
Sebastien Bourdeauducq | c69a1316ad | |
Sebastien Bourdeauducq | 477b1516d3 | |
Sebastien Bourdeauducq | e3edb505e3 | |
Sebastien Bourdeauducq | 67847f98f4 | |
mwojcik | 7879d3630b | |
Sebastien Bourdeauducq | 242dfae38e | |
Star Chen | 5111132ef0 | |
Sebastien Bourdeauducq | dc546630e4 | |
Robert Jördens | fd824f7ad0 | |
Harry Ho | c9608c0a89 | |
Star Chen | 6b88ea563d | |
Sebastien Bourdeauducq | 97e994700b | |
Sebastien Bourdeauducq | c3d765f745 | |
Robert Jördens | 1e869aedd3 | |
Sebastien Bourdeauducq | 53a98acfe4 | |
Star Chen | 30e5e06a33 | |
Star Chen | ebb67eaeee | |
Star Chen | 943a95e07a | |
Star Chen | e996b5f635 | |
StarChen | 796aeabb53 | |
Sebastien Bourdeauducq | 4fb8ea5b73 | |
Star Chen | 5cd721c514 | |
Sebastien Bourdeauducq | d327d2a505 | |
Sebastien Bourdeauducq | bc7ce7d6aa | |
Star Chen | 6ce9c26402 | |
occheung | 2204fd2b22 | |
pca006132 | b10d1bdd37 | |
pca006132 | 4ede58e44b | |
Sebastien Bourdeauducq | 51d2861e63 | |
Sebastien Bourdeauducq | 29fd58e34b | |
pca006132 | 0257ecc332 | |
pca006132 | 822e8565f7 | |
pca006132 | 6fb31a7abb | |
pca006132 | 0806b67dbf | |
pca006132 | f531af510c | |
pca006132 | c29a149d16 | |
Etienne Wodey | 094a346974 | |
Etienne Wodey | 68268e3db8 | |
Etienne Wodey | cca654bd47 | |
Etienne Wodey | 8bedf278f0 | |
Etienne Wodey | 12ef907f34 | |
Etienne Wodey | d8b1e59538 | |
Etienne Wodey | b8ab5f2607 | |
Etienne Wodey | 5c23e6edb6 | |
Sebastien Bourdeauducq | 7046aa9c23 | |
Sebastien Bourdeauducq | ea0c7b6173 | |
Star Chen | 9dee8bb9c9 | |
pca006132 | bcb030cc9c | |
Sebastien Bourdeauducq | 522c2f5995 | |
Sebastien Bourdeauducq | ea1dd2da43 | |
Leon Riesebos | 07bd1e27c1 | |
David Nadlinger | b89610bbcd | |
pca006132 | 4c743cf8af | |
pca006132 | 1e9a131386 | |
Harry Ho | 43b2a3791c | |
Sebastien Bourdeauducq | 935e18c1be | |
Robert Jördens | 67d474e6cf | |
fanmingyu212 | 91832aa886 | |
Marius Weber | 129cf8c1dd | |
Charles Baynham | 011f3bdb2e | |
Marius Weber | fb6fad7c64 | |
Marius Weber | 043c9c20d7 | |
Marius Weber | f97baa8aec | |
Marius Weber | 4fa2028671 | |
Marius Weber | 515cfa7dfb | |
Marius Weber | 4f812cc4ed | |
Marius Weber | 407fba232d | |
Marius Weber | 75445fe5f0 | |
Marius Weber | 1c96797de5 | |
Marius Weber | 7404152e4c | |
Marius Weber | eb477ee06b | |
Marius Weber | c7e992e26d | |
Sebastien Bourdeauducq | eb38b664e3 | |
Peter Drmota | 47bf5d36af | |
Leon Riesebos | af4fadcd54 | |
Leon Riesebos | a0cea3a011 | |
Leon Riesebos | 2671c271d4 | |
Leon Riesebos | d745d50245 | |
Leon Riesebos | 4a6201c083 | |
Robert Jördens | ffe1c9f9b1 | |
Marius Weber | bda5aa7c7e | |
Sebastien Bourdeauducq | 78490bef5d | |
David Nadlinger | b7f3eaebf9 | |
Harry Ho | fc59791583 | |
Harry Ho | 8002fcf8bb | |
Harry Ho | 5f32cb7196 | |
Harry Ho | 75efb8985c | |
Sebastien Bourdeauducq | 523fa01343 | |
David Nadlinger | bdaaf3c1d7 | |
David Nadlinger | 6fd088e339 | |
David Nadlinger | be4669d7a5 | |
David Nadlinger | 1f40f3ce15 | |
David Nadlinger | b8cd163978 | |
David Nadlinger | 888696f588 | |
Leon Riesebos | d04bcd8754 | |
Leon Riesebos | c22f731a61 | |
David Nadlinger | 5ba22c11c3 | |
David Nadlinger | c707ccf7d7 | |
David Nadlinger | 557671b7db | |
David Nadlinger | 75c255425d | |
Leon Riesebos | b8f4c6b9bb | |
Leon Riesebos | 1deaa758ce | |
Leon Riesebos | 3c68223337 | |
Leon Riesebos | cd7f9531d7 | |
jonathanpritchard | e577542f6b | |
Sebastien Bourdeauducq | 92fd705990 | |
Sebastien Bourdeauducq | 8deb269b9a | |
Sebastien Bourdeauducq | 489f950406 | |
Sebastien Bourdeauducq | 14d464b4cf | |
Etienne Wodey | 3cd96a951a | |
Etienne Wodey | 2ca9b64ba1 | |
Sebastien Bourdeauducq | d33a206f04 | |
Astro | 3844cde97b | |
Sebastien Bourdeauducq | 22ce5b0299 | |
Etienne Wodey | af411de639 | |
Sebastien Bourdeauducq | e54dd08821 | |
Sebastien Bourdeauducq | 547254e89e | |
Sebastien Bourdeauducq | 49299c00a9 | |
Sebastien Bourdeauducq | 9ef5717de8 | |
Drew | 48a1c305c1 | |
Astro | 461199b903 | |
Astro | 4b2ed67dd7 | |
Sebastien Bourdeauducq | cf9cf0ab6f | |
Sebastien Bourdeauducq | 997a48fb31 | |
Sebastien Bourdeauducq | bbe0c9162a | |
Sebastien Bourdeauducq | 3572e2a9c7 | |
Sebastien Bourdeauducq | 88c212b84f | |
Sebastien Bourdeauducq | db25f4e8f7 | |
Sebastien Bourdeauducq | 6bd9691ba8 | |
Sebastien Bourdeauducq | bfacd1e5b3 | |
Sebastien Bourdeauducq | f7a33a1f99 | |
Sebastien Bourdeauducq | 1213f78ee9 | |
Robert Jördens | 2f5ea67b69 | |
Robert Jördens | 0c634c7a46 | |
Etienne Wodey | d691b05d78 | |
Etienne Wodey | 78e1b9f8e5 | |
Etienne Wodey | 6f8e788620 | |
Etienne Wodey | a8bc98a77b | |
Ilia Sergachev | 78cbab4260 | |
Sebastien Bourdeauducq | 3657055bc0 | |
David Nadlinger | f9872bb7b8 | |
David Nadlinger | f1fd42ea98 | |
pca006132 | 8148fdb8a7 | |
Harry Ho | a0fd5261ea | |
Harry Ho | 7c4eed7a11 | |
David Nadlinger | 1e443a3aea | |
pca006132 | ec72eeda46 | |
pca006132 | 3832b261b1 | |
Harry Ho | 88b14082b6 | |
Harry Ho | 9daf77bd58 | |
Harry Ho | 52afd4ef6b | |
Harry Ho | f6d39fd6ba | |
Harry Ho | f25e86e934 | |
David Nadlinger | c229e76d07 | |
Robert Jördens | 261870bdee | |
Sebastien Bourdeauducq | 641f8bcdd6 | |
David Nadlinger | f11aef74b4 | |
Sebastien Bourdeauducq | c675488a99 | |
Astro | de5f9cd49f | |
Astro | c6807f4594 | |
Astro | 45b5cfce05 | |
Sebastien Bourdeauducq | cb44b0cd1a | |
David Nadlinger | 9b39b1e328 | |
David Nadlinger | f0284b2549 | |
David Nadlinger | 362f8ecb69 | |
David Nadlinger | 96692791cf | |
pca006132 | 5b5db1433b | |
Harry Ho | 3e93d71aeb | |
pca006132 | 636898c302 | |
occheung | 6a5f5088e2 | |
Harry Ho | cff7bcc122 | |
Harry Ho | dc7addf394 | |
Chris Ballance | 43be383c86 | |
Harry Ho | 43ecb3fea6 | |
Harry Ho | 8cd794e9f4 | |
Aadit Rahul Kamat | 19f75f1cfd | |
Aadit Rahul Kamat | 0a14cc5855 | |
occheung | a017dafee6 | |
Harry Ho | 73271600a1 | |
occheung | 3f631c417d | |
occheung | 33d39b261a | |
Sebastien Bourdeauducq | 4b10273a2d | |
Sebastien Bourdeauducq | 1ce505c547 | |
Sebastien Bourdeauducq | 072053c3b2 | |
Sebastien Bourdeauducq | ccdc741e73 | |
Robert Jördens | 33285253fb | |
Leon Riesebos | 3b2c225fc4 | |
Leon Riesebos | 94271504dd | |
SingularitySurfer | 9b4b550f76 | |
SingularitySurfer | cba631610c | |
Robert Jördens | 6ceb3f3095 | |
Harry Ho | d51d4e6ce0 | |
Sebastien Bourdeauducq | eda4850f64 | |
Sebastien Bourdeauducq | 8e46c3c1fd | |
SingularitySurfer | 0605267424 | |
Marius Weber | 3e38833020 | |
David Nadlinger | 9ff47bacab | |
David Nadlinger | a5dcd86fb8 | |
David Nadlinger | d95e619567 | |
David Nadlinger | fcf4763ae7 | |
David Nadlinger | bc6fbecbda | |
David Nadlinger | 292043a0a7 | |
Leon Riesebos | d8a5a8f568 | |
Etienne Wodey | dbcac62fd0 | |
Etienne Wodey | e8730a7e14 | |
Etienne Wodey | 3844123c13 | |
Etienne Wodey | 61dc2b8b64 | |
Etienne Wodey | b200465cce | |
Etienne Wodey | d433f6e86d | |
Etienne Wodey | b856df7c35 | |
Etienne Wodey | 211500089f | |
David Nadlinger | 4f311e7448 | |
David Nadlinger | f0ec987d23 | |
Sebastien Bourdeauducq | ea95d91428 | |
David Nadlinger | a97b4633cb | |
Robert Jördens | 19bd1e38d4 | |
Etienne Wodey | ecef5661ce | |
David Nadlinger | d672d2fc35 | |
David Nadlinger | d5f90f6c9f | |
David Nadlinger | d161fd5d84 | |
David Nadlinger | 94489f9183 | |
Robert Jördens | a9dd0a268c | |
Robert Jördens | 30d1acee9f | |
Robert Jördens | d98357051c | |
Robert Jördens | 139385a571 | |
Sebastien Bourdeauducq | d185f1ac67 | |
Sebastien Bourdeauducq | 3f076bf79b | |
Sebastien Bourdeauducq | 90017da484 | |
Sebastien Bourdeauducq | 6af8655cc7 | |
Sebastien Bourdeauducq | 840364cf0c | |
Sebastien Bourdeauducq | 24259523bb | |
Sebastien Bourdeauducq | ed90450d2c | |
Sebastien Bourdeauducq | 0a37a3dbf7 | |
Sebastien Bourdeauducq | 4027735a6d | |
Sebastien Bourdeauducq | 4000adfb21 | |
Sebastien Bourdeauducq | 59703ad31d | |
Sebastien Bourdeauducq | 7a5996ba79 | |
Sebastien Bourdeauducq | e66d2a6408 | |
Sebastien Bourdeauducq | 57ee57e7ea | |
Sebastien Bourdeauducq | ac35548d0f | |
Sebastien Bourdeauducq | 35c61ce24d | |
hartytp | a058be2ede | |
pca006132 | d0d0a02fd0 | |
pca006132 | e9988f9d3b | |
Sebastien Bourdeauducq | db62cf2abe | |
Sebastien Bourdeauducq | 07d43b6e5f | |
Sebastien Bourdeauducq | 7dfb4af682 | |
Sebastien Bourdeauducq | 96a5df0dc6 | |
Sebastien Bourdeauducq | 6248970ef8 | |
hartytp | cd8c2ce713 | |
hartytp | d780faf4ac | |
hartytp | e6ff2ddc32 | |
hartytp | 7d7be6e711 | |
Sebastien Bourdeauducq | 3fa5d0b963 | |
hartytp | 87911810d6 | |
hartytp | f2f942a8b4 | |
hartytp | 85bb641917 | |
hartytp | f3cd0fc675 | |
hartytp | e5e648bde1 | |
hartytp | c9ae406ac6 | |
hartytp | f6f6045f1a | |
hartytp | b44b870452 | |
hartytp | e9ab434fa7 | |
Sebastien Bourdeauducq | 17c952b8fb | |
hartytp | ebb7ccbfd1 | |
Sebastien Bourdeauducq | 7c2519c912 | |
Sebastien Bourdeauducq | 1bfe977203 | |
Sebastien Bourdeauducq | 66401aee9c | |
Sebastien Bourdeauducq | 6baf3b2198 | |
pca006132 | fe6115bcbb | |
pca006132 | 02f46e8b79 | |
pca006132 | 88d346fa26 | |
Sebastien Bourdeauducq | 9214e0f3e2 | |
Robert Jördens | eecd97ce4c | |
Robert Jördens | c453c24fb0 | |
Robert Jördens | 6c8bddcf8d | |
Robert Jördens | 569e5e56cd | |
Robert Jördens | 2fba3cfc78 | |
Robert Jördens | fec2f8b763 | |
Robert Jördens | a65239957f | |
Robert Jördens | 6e6480ec21 | |
Robert Jördens | 03d5f985f8 | |
Robert Jördens | ef65ee18bd | |
Robert Jördens | 50b4eb4840 | |
Robert Jördens | c55f2222dc | |
Robert Jördens | ad096f294c | |
Robert Jördens | 85d16e3e5f | |
Robert Jördens | 5c76f5c319 | |
Robert Jördens | fd5e221898 | |
Robert Jördens | 3e036e365a | |
Robert Jördens | fdb2867757 | |
Robert Jördens | d730851397 | |
Robert Jördens | f0959fb871 | |
Robert Jördens | b15e388b5f | |
Sebastien Bourdeauducq | 29c940f4e3 | |
Robert Jördens | 868a9a1f0c | |
Robert Jördens | c18f515bf9 | |
Robert Jördens | f3b0398720 | |
Robert Jördens | 9b58b712a6 | |
Robert Jördens | ff57813a9c | |
Robert Jördens | 07418258ae | |
Robert Jördens | 3a79ef740b | |
Robert Jördens | b449e7202b | |
Robert Jördens | b619f657b9 | |
Robert Jördens | c3728678d6 | |
Robert Jördens | e505dfed5b | |
Robert Jördens | fdd2d6f2fb | |
Sebastien Bourdeauducq | bff611a888 | |
Robert Jördens | 4e24700205 | |
Robert Jördens | 8aaeaa604e | |
Robert Jördens | e69bb0aeb3 | |
pca006132 | 6195b1d3a0 | |
Robert Jördens | 56aa22caeb | |
Astro | 1b475bdac4 | |
Harry Ho | 458a411320 | |
Sebastien Bourdeauducq | 47e88dfcbe | |
Astro | 002a71dd8d | |
Sebastien Bourdeauducq | 4398a2d5fa | |
Sebastien Bourdeauducq | f0289d49ab | |
Sebastien Bourdeauducq | 8d5dc0ad2a | |
Sebastien Bourdeauducq | f294d039b3 | |
Astro | 91df3d7290 | |
Harry Ho | 3d84135810 | |
Harry Ho | dfbf3311cb | |
Harry Ho | 1ad9deaf91 | |
Astro | 45ae6202c0 | |
Robert Jördens | 272dc5d36a | |
pca006132 | b2572003ac | |
pca006132 | 69f0699ebd | |
Sebastien Bourdeauducq | 7cf974a6a7 | |
Robert Jördens | 68bfa04abb | |
Robert Jördens | 96fc248d7c | |
Robert Jördens | c10ac2c92a | |
Robert Jördens | e5e2392240 | |
Robert Jördens | d1be1212ab | |
pca006132 | 26bc5d2405 | |
pca006132 | aac2194759 | |
pca006132 | 7181ff66a6 | |
pca006132 | cfddc13294 | |
Robert Jördens | 20fcfd95e9 | |
Robert Jördens | bcefb06e19 | |
Robert Jördens | 11c9def589 | |
Paweł Kulik | eb350c3459 | |
Robert Jördens | 63e4b95325 | |
Robert Jördens | a27a03ab3c | |
Robert Jördens | 7e584d0da1 | |
Robert Jördens | 3e99f1ce5a | |
Robert Jördens | a34a647ec4 | |
Robert Jördens | aa0154d8e2 | |
Sebastien Bourdeauducq | 5f6aa02b61 | |
David Nadlinger | 69718fca90 | |
pca006132 | a46573e97a | |
pca006132 | b05cbcbc24 | |
Sebastien Bourdeauducq | 48008eaf5f | |
Sebastien Bourdeauducq | d8cd5023f6 | |
David Nadlinger | c6f0c4dca4 | |
David Nadlinger | daf57969b2 | |
David Nadlinger | 778f2cf905 | |
David Nadlinger | 53d64d08a8 | |
David Nadlinger | d35f659d25 | |
David Nadlinger | a39bd69ca4 | |
David Nadlinger | ae47d4c0ec | |
David Nadlinger | 8e262acd1e | |
David Nadlinger | 33d931a5b7 | |
David Nadlinger | b00ba5ece1 | |
David Nadlinger | ad34df3de1 | |
David Nadlinger | 8783ba2072 | |
David Nadlinger | 5472e830f6 | |
David Nadlinger | 8eddb9194a | |
David Nadlinger | 1c645d8857 | |
David Nadlinger | df8f1c5c5a | |
David Nadlinger | cc00ae9580 | |
David Nadlinger | be7d78253f | |
David Nadlinger | faea886c44 | |
David Nadlinger | 56a872ccc0 | |
David Nadlinger | ef260adca8 | |
David Nadlinger | 0da4a61d99 | |
David Nadlinger | 78afa2ea8e | |
David Nadlinger | 4d48470320 | |
David Nadlinger | d37503f21d | |
David Nadlinger | da255bee1b | |
David Nadlinger | 4426e4144f | |
David Nadlinger | 0d8fbd4f19 | |
David Nadlinger | 7bdd6785b7 | |
David Nadlinger | 4d002c7934 | |
David Nadlinger | a7e855b319 | |
David Nadlinger | 48fb80017f | |
David Nadlinger | 9af6e5747d | |
David Nadlinger | e77c7d1c39 | |
David Nadlinger | ef57cad1a3 | |
David Nadlinger | a9a975e5d4 | |
David Nadlinger | 504b8f0148 | |
David Nadlinger | dea3c0c572 | |
David Nadlinger | e82357d180 | |
David Nadlinger | cb1cadb46a | |
David Nadlinger | 38c17622cc | |
David Nadlinger | c95a978ab6 | |
David Nadlinger | bc17bb4d1a | |
David Nadlinger | 632c5bc937 | |
David Nadlinger | 40f59561f2 | |
David Nadlinger | d882f8a3f0 | |
David Nadlinger | 575be2aeca | |
David Nadlinger | 56010c49fb | |
David Nadlinger | 6ea836183d | |
pmldrmota | 1df62862cd | |
Sebastien Bourdeauducq | 504f72a02c | |
Sebastien Bourdeauducq | 5f36e49f91 | |
pca006132 | 3bfd372c20 | |
Sebastien Bourdeauducq | e3c5775584 | |
Sebastien Bourdeauducq | 9c9dc3d0ef | |
David Nadlinger | ae999db8f6 | |
Sebastien Bourdeauducq | 709026d945 | |
Sebastien Bourdeauducq | 455e4859b7 | |
Sebastien Bourdeauducq | 5fd0d0bbb6 | |
David Nadlinger | cf19c9512d | |
David Nadlinger | f8d1506922 | |
cw-mlabs | e4b16428f5 | |
cw-mlabs | 8dd9a6d024 | |
Charles Baynham | 9b44ec7bc6 | |
David Nadlinger | 1c72585c1b | |
David Nadlinger | 57e759a1ed | |
Sebastien Bourdeauducq | 2a2f5c4d58 | |
Sebastien Bourdeauducq | 553a49e194 | |
Sebastien Bourdeauducq | 8510bf4e55 | |
pca006132 | eb28d7be3a | |
pca006132 | f78d673079 | |
Robert Jördens | e31ee1f0b3 | |
Sebastien Bourdeauducq | 4340a5cfc1 | |
Sebastien Bourdeauducq | f2e0d27334 | |
Sebastien Bourdeauducq | 901be75ba4 | |
Sebastien Bourdeauducq | 8719bab726 | |
Sebastien Bourdeauducq | f273a9aacc | |
Sebastien Bourdeauducq | 2d1f1fff7f | |
Sebastien Bourdeauducq | 85b5a04acf | |
Sebastien Bourdeauducq | 13501115f6 | |
Donald Sebastian Leung | f265976df6 | |
David Nadlinger | 3f0cf6e683 | |
Sebastien Bourdeauducq | 95807234d9 | |
Sebastien Bourdeauducq | 906256cc02 | |
Sebastien Bourdeauducq | 5d58a195c0 | |
Sebastien Bourdeauducq | fb6a8899f4 | |
Sebastien Bourdeauducq | 89c53c35e8 | |
David Nadlinger | f36692638c | |
Sebastien Bourdeauducq | 91c93e1ad8 | |
Sebastien Bourdeauducq | 4ad46e0e40 | |
David Nadlinger | 966ed5d013 | |
David Nadlinger | 7955b63b00 | |
David Nadlinger | d87042597a | |
charlesbaynham | 2429a266f6 | |
charlesbaynham | ce7e92a75e | |
Harry Ho | 1a17d0c869 | |
Harry Ho | 6156bd4088 | |
Sebastien Bourdeauducq | a18d2468e9 | |
Robert Jördens | 9822b88d9b | |
Sebastien Bourdeauducq | cb76f9da89 | |
Sebastien Bourdeauducq | bd9eec15c0 | |
Sebastien Bourdeauducq | d5c1eaa16e | |
Sebastien Bourdeauducq | 02900d79d0 | |
Sebastien Bourdeauducq | d8b5bcf019 | |
Sebastien Bourdeauducq | 8b939b7cb3 | |
Charles Baynham | 5db2afc7a7 | |
Charles Baynham | 692c466838 | |
Charles Baynham | 8858ba8095 | |
Marius Weber | 2538840756 | |
Marius Weber | b3b6cb8efe | |
Sebastien Bourdeauducq | 4e9a529e5a | |
Sebastien Bourdeauducq | 60e5f1c18e | |
Sebastien Bourdeauducq | 1f2182d4c7 | |
Sebastien Bourdeauducq | 35f1814235 | |
Sebastien Bourdeauducq | b83afedf43 | |
Sebastien Bourdeauducq | 4982fde898 | |
Sebastien Bourdeauducq | ef4e5bc69b | |
Sebastien Bourdeauducq | 85e92ae28c | |
Sebastien Bourdeauducq | 7e400a78f4 | |
Sebastien Bourdeauducq | 140a26ad7e | |
Sebastien Bourdeauducq | 4228e0205c | |
Sebastien Bourdeauducq | 3a7819704a | |
Sebastien Bourdeauducq | 251a0101a6 | |
Sebastien Bourdeauducq | d19f28fa84 | |
Sebastien Bourdeauducq | 9bc43b2dbf | |
Sebastien Bourdeauducq | 77e6fdb7a7 | |
Robert Jördens | ea79ba4622 | |
Sebastien Bourdeauducq | e8b73876ab | |
Sebastien Bourdeauducq | de57039e6e | |
Sebastien Bourdeauducq | 9dc24f255e | |
Sebastien Bourdeauducq | fb0ade77a9 | |
Sebastien Bourdeauducq | ec7b2bea12 | |
Sebastien Bourdeauducq | 0f4be22274 | |
Sebastien Bourdeauducq | 3c823a483a | |
Sebastien Bourdeauducq | 4d601c2102 | |
Sebastien Bourdeauducq | 61d4614b61 | |
Sebastien Bourdeauducq | facc0357d8 | |
Sebastien Bourdeauducq | ffd3172e02 | |
Sebastien Bourdeauducq | 8f608fa2fa | |
Etienne Wodey | 90d08988b2 | |
Etienne Wodey | 9b03a365ed | |
Sebastien Bourdeauducq | 371d923385 | |
Sebastien Bourdeauducq | 9294efabc0 | |
Sebastien Bourdeauducq | 4a8d361ace | |
Sebastien Bourdeauducq | 9e66dd7075 | |
Robert Jördens | 380de177e7 | |
Robert Jördens | e803830b3b | |
Sebastien Bourdeauducq | 8dbf30b23e | |
Sebastien Bourdeauducq | 8451e58fbe | |
Paweł K | 2a909839ff | |
Sebastien Bourdeauducq | 6d26def3ce | |
Sebastien Bourdeauducq | 52ec849008 | |
Sebastien Bourdeauducq | c7de1f2e6b | |
Sebastien Bourdeauducq | bf9f4e380a | |
Sebastien Bourdeauducq | ffb24e9fff | |
Sebastien Bourdeauducq | 5f8e20b1a1 | |
Sebastien Bourdeauducq | dfa033eb87 | |
Sebastien Bourdeauducq | dee16edb78 | |
Sebastien Bourdeauducq | f4d8f77268 | |
Sebastien Bourdeauducq | bfcbffcd8d | |
Sebastien Bourdeauducq | 82cdb7f933 | |
Robert Jördens | 248230a89e | |
Robert Jördens | c45a872cba | |
Robert Jördens | 2c4e5bfee4 | |
Sebastien Bourdeauducq | 8f9948a1ff | |
Sebastien Bourdeauducq | e427aaaa66 | |
Sebastien Bourdeauducq | 62a52cb086 | |
Sebastien Bourdeauducq | 6b428ef3be | |
Robert Jördens | 7ab0282234 | |
Robert Jördens | 9368c26d1c | |
Etienne Wodey | da531404e8 | |
Robert Jördens | 01a6e77d89 | |
Sebastien Bourdeauducq | ec03767dcf | |
Sebastien Bourdeauducq | 5c299de3b4 | |
Sebastien Bourdeauducq | 45efee724e | |
Sebastien Bourdeauducq | 6c3e71a83a | |
Sebastien Bourdeauducq | 344f8bd12a | |
Sebastien Bourdeauducq | 833f428391 | |
Sebastien Bourdeauducq | 6c948c7726 | |
Sebastien Bourdeauducq | 50302d57c0 | |
Sebastien Bourdeauducq | 105dd60c78 | |
Sebastien Bourdeauducq | 3242e9ec6c | |
Sebastien Bourdeauducq | 8ec0f2e717 | |
Sebastien Bourdeauducq | d5895b8999 | |
Sebastien Bourdeauducq | e7ef23d30c | |
Sebastien Bourdeauducq | ea3bce6fe3 | |
Sebastien Bourdeauducq | d685619bcd | |
Sebastien Bourdeauducq | 9d7196bdb7 | |
Sebastien Bourdeauducq | e87d864063 | |
Sebastien Bourdeauducq | 8edbc33d0e | |
Sebastien Bourdeauducq | 9dd011f4ad | |
Sebastien Bourdeauducq | 583a18dd5f | |
David Nadlinger | d8c81d6d05 | |
David Nadlinger | 2c34f0214b | |
Robert Jördens | eebae01503 | |
Sebastien Bourdeauducq | 3f32d78c0e | |
Sebastien Bourdeauducq | bb04b082a7 | |
David Nadlinger | 1e864b7e2d | |
Sebastien Bourdeauducq | a666766f38 | |
Sebastien Bourdeauducq | 5c6e394928 | |
Sebastien Bourdeauducq | 642a305c6a | |
Sebastien Bourdeauducq | f57f235dca | |
Sebastien Bourdeauducq | 9e15ff7e6a | |
Sebastien Bourdeauducq | dfad27125e | |
Sebastien Bourdeauducq | b5e1bd3fa2 | |
David Nadlinger | e8b9fcf0bb | |
David Nadlinger | af31c6ea21 | |
Sebastien Bourdeauducq | fb2076a026 | |
Sebastien Bourdeauducq | b2480f0edc | |
Sebastien Bourdeauducq | d4e039cede | |
Sebastien Bourdeauducq | 106d25b32a | |
Sebastien Bourdeauducq | 8759c8d360 | |
Sebastien Bourdeauducq | c3030f4ffb | |
Sebastien Bourdeauducq | cab8c8249e | |
Sebastien Bourdeauducq | b7f1623197 | |
Sebastien Bourdeauducq | c5137eeb62 | |
Sebastien Bourdeauducq | 1c9cbe6285 | |
David Nadlinger | 8f518c6b05 | |
David Nadlinger | 594ff45750 | |
David Nadlinger | fb2b634c4a | |
Sebastien Bourdeauducq | 6ee15fbcae | |
David Nadlinger | d3508b014f | |
David Nadlinger | 0279a60a55 | |
Sebastien Bourdeauducq | 5fefdcc324 | |
Sebastien Bourdeauducq | 8d13aeb96c | |
Sebastien Bourdeauducq | ac09f3a5da | |
Sebastien Bourdeauducq | 52112d54f9 | |
Sebastien Bourdeauducq | 6f52540569 | |
Sebastien Bourdeauducq | 13486f3acf | |
Sebastien Bourdeauducq | 150a02117c | |
Sebastien Bourdeauducq | 307a6ca140 | |
Sebastien Bourdeauducq | f7a5df8d81 | |
Sebastien Bourdeauducq | 4919fb8765 | |
Sebastien Bourdeauducq | 0d4eccc1a5 | |
Sebastien Bourdeauducq | f633c62e8d | |
Sebastien Bourdeauducq | 14e09582b6 | |
Sebastien Bourdeauducq | 439576f59d | |
Sebastien Bourdeauducq | 2b5213b013 | |
Sebastien Bourdeauducq | 05e2e1899a | |
Sebastien Bourdeauducq | 4148efd2ee | |
Sebastien Bourdeauducq | d43fe644f0 | |
Sebastien Bourdeauducq | 0499f83580 | |
Sebastien Bourdeauducq | 46a776d06e | |
Sebastien Bourdeauducq | f35f658bc5 | |
Sebastien Bourdeauducq | bcd061f141 | |
Sebastien Bourdeauducq | 883310d83e | |
Sebastien Bourdeauducq | 57a5bea43a | |
Sebastien Bourdeauducq | da9237de53 | |
Paweł Kulik | 3851a02a3a | |
Paweł Kulik | 14e250c78f | |
Sebastien Bourdeauducq | 7098854b0f | |
Robert Jördens | 05c5fed07d | |
Robert Jördens | 56074cfffa | |
Robert Jördens | 86e1924493 | |
Sebastien Bourdeauducq | eb271f383b | |
Sebastien Bourdeauducq | 39d5ca11f4 | |
Sebastien Bourdeauducq | 87894102e5 | |
Sebastien Bourdeauducq | 2e55e39ac7 | |
Sebastien Bourdeauducq | 354d82cfe3 | |
Sebastien Bourdeauducq | 4a03ca928d | |
Sebastien Bourdeauducq | 68cab5be8c | |
Sebastien Bourdeauducq | bcd2383c9d | |
Sebastien Bourdeauducq | 4832bfb08c | |
Sebastien Bourdeauducq | 449d2c4f08 | |
Robert Jördens | e0687b77f5 | |
Sebastien Bourdeauducq | c536f6c4df | |
Sebastien Bourdeauducq | ae50da09c4 | |
Sebastien Bourdeauducq | fe0c324b38 | |
Sebastien Bourdeauducq | fa41c946ea | |
Sebastien Bourdeauducq | c5dbab1929 | |
gthickman | 56d4b70e01 | |
Fabian Schmid | f73e2a3d30 | |
Sebastien Bourdeauducq | f330e285fb | |
Sebastien Bourdeauducq | 9f459f37dc | |
Sebastien Bourdeauducq | ac8a5b60c0 | |
Sébastien Bourdeauducq | b7292f0195 | |
Sébastien Bourdeauducq | cf6a7f243f | |
Sebastien Bourdeauducq | 57979da59a | |
Sebastien Bourdeauducq | 3adc799785 | |
Sebastien Bourdeauducq | eefc6ae80d | |
Sebastien Bourdeauducq | 310a627e16 | |
Sebastien Bourdeauducq | 9b6b683d87 | |
Sebastien Bourdeauducq | 6bb931a17b | |
Sebastien Bourdeauducq | 29d84f5655 | |
Sebastien Bourdeauducq | db13747279 | |
Sebastien Bourdeauducq | 4707aef45c | |
Sebastien Bourdeauducq | 7a92f443c2 | |
Sebastien Bourdeauducq | 3f10363b01 | |
Sebastien Bourdeauducq | 4416378d21 | |
Garrett | f8a7e278b8 | |
Garrett | 3a19ba7e62 | |
Garrett | 4ad3651022 | |
Garrett | 6d34eb3bb0 | |
Garrett | 61ca46ec3f | |
Sebastien Bourdeauducq | fd7081830c | |
Sebastien Bourdeauducq | e65303cfe9 | |
Sebastien Bourdeauducq | 3fd6962bd2 | |
Sebastien Bourdeauducq | 6644903843 | |
Sebastien Bourdeauducq | 5279bc275a | |
David Nadlinger | bc3b55b1a8 | |
Sebastien Bourdeauducq | b25a17fa37 | |
Sebastien Bourdeauducq | 307f39e900 | |
Sebastien Bourdeauducq | 9dc82bd766 | |
Sebastien Bourdeauducq | e2f9f59472 | |
Sebastien Bourdeauducq | 98854473dd | |
Sebastien Bourdeauducq | 29b4d87943 | |
Sebastien Bourdeauducq | 5362f92b39 | |
Sebastien Bourdeauducq | deadfead2a | |
Sebastien Bourdeauducq | 42af76326f | |
Sebastien Bourdeauducq | a78e493b72 | |
Sebastien Bourdeauducq | 389a8f587a | |
Sebastien Bourdeauducq | 9a35a2ed81 | |
Sebastien Bourdeauducq | bc050fdeec | |
Sebastien Bourdeauducq | 228e44a059 | |
Sebastien Bourdeauducq | dc71039934 | |
Sebastien Bourdeauducq | 3042476230 | |
Sebastien Bourdeauducq | c96de7454d | |
Sebastien Bourdeauducq | 88dbff46f4 | |
Sebastien Bourdeauducq | 462cf5967e | |
Robert Jördens | 1f15e55021 | |
David Nadlinger | 611bcc4db4 | |
David Nadlinger | 5d7f22ffa4 | |
Sebastien Bourdeauducq | f2f7170d20 | |
Sebastien Bourdeauducq | 47a83c71f1 | |
Sebastien Bourdeauducq | 818d6b2f5a | |
Sebastien Bourdeauducq | 8f76a3218e | |
Sebastien Bourdeauducq | 1c5e749036 | |
Sebastien Bourdeauducq | d26d80410e | |
Sebastien Bourdeauducq | 6d5dcb4211 | |
Sebastien Bourdeauducq | 05e8f24c24 | |
Sebastien Bourdeauducq | 62b49882b9 | |
Sebastien Bourdeauducq | a8f85860c4 | |
Sebastien Bourdeauducq | d42ff81144 | |
Sebastien Bourdeauducq | 8fa3c6460e | |
Sebastien Bourdeauducq | 37d0a5dc19 | |
Sebastien Bourdeauducq | bc060b7f01 | |
Sebastien Bourdeauducq | 40d64fc782 | |
Sebastien Bourdeauducq | 21a1c6de3f | |
Sebastien Bourdeauducq | 6cf06fba7b | |
Tim Ballance | c64c8b4ddc | |
David Nadlinger | 371388ecbe | |
Sebastien Bourdeauducq | 9c5ff4fc04 | |
Sebastien Bourdeauducq | 3aade3b59a | |
Sebastien Bourdeauducq | 314d9b5d06 | |
Sebastien Bourdeauducq | 4df2c5d1fb | |
Sebastien Bourdeauducq | 5ee81dc643 | |
Sebastien Bourdeauducq | 4b3baf4825 | |
Sebastien Bourdeauducq | 03007b896e | |
Sebastien Bourdeauducq | ebd5d890f1 | |
Sebastien Bourdeauducq | 90e3b83e80 | |
Sebastien Bourdeauducq | 97a0dee3e8 | |
Sebastien Bourdeauducq | 1bc7743e03 | |
Sebastien Bourdeauducq | a421820a32 | |
Sebastien Bourdeauducq | f8e4cc37d0 | |
Sebastien Bourdeauducq | f62dc7e1d4 | |
Sebastien Bourdeauducq | c4c884b8ce | |
Sebastien Bourdeauducq | fdba0bfbbc | |
Sebastien Bourdeauducq | 1c6c22fde9 | |
Sebastien Bourdeauducq | ad63908aff | |
Sebastien Bourdeauducq | 5ad65b9d30 | |
Sebastien Bourdeauducq | e6ff44301b | |
Sebastien Bourdeauducq | e9b81f6e33 | |
Sebastien Bourdeauducq | 7cd02d30b7 | |
Sebastien Bourdeauducq | b3b85135a3 | |
Sebastien Bourdeauducq | 346c985347 | |
Sebastien Bourdeauducq | e2a924449d | |
Sebastien Bourdeauducq | 4198033657 | |
Sebastien Bourdeauducq | 5612b31860 | |
Sebastien Bourdeauducq | a8cf4c2b18 | |
Sebastien Bourdeauducq | 1bc5d44a7c | |
Sebastien Bourdeauducq | bb5ff46f7d | |
Sebastien Bourdeauducq | 7b95814cf5 | |
Sebastien Bourdeauducq | 58b7bdcecc | |
Sebastien Bourdeauducq | 96fc4a21e8 | |
Tim Ballance | ada3b39f4e | |
Tim Ballance | 448080e71d | |
Sebastien Bourdeauducq | 6aa68e1715 | |
Sebastien Bourdeauducq | 6cb0f5de59 | |
Sebastien Bourdeauducq | 0cf8a46bbd | |
Sebastien Bourdeauducq | 6f533727cb | |
Sebastien Bourdeauducq | 4c1fe1de0d | |
Charles Baynham | 0b1fb255a9 | |
Charles Baynham | e50a6d5aaf | |
Robert Jördens | f0e87d2e59 | |
Sebastien Bourdeauducq | 4e77be0511 | |
Sebastien Bourdeauducq | 694b85f37a | |
Charles Baynham | b7abf2fb53 | |
Sebastien Bourdeauducq | 38fca01189 | |
Sebastien Bourdeauducq | 991c686d72 | |
Robert Jördens | f4dd7e5e29 | |
Sebastien Bourdeauducq | 7492a59f6d | |
David Nadlinger | 6d6f66338b | |
Charles Baynham | ddd34e5a9c | |
Sebastien Bourdeauducq | 98caaebade | |
Sebastien Bourdeauducq | 21021beb08 | |
Sebastien Bourdeauducq | 436662be52 | |
Sebastien Bourdeauducq | 69c2acd9d7 | |
Sebastien Bourdeauducq | cfb5ef5548 | |
Sebastien Bourdeauducq | 0b9168994f | |
Charles Baynham | d31f30a436 | |
Charles Baynham | 7ac8feea19 | |
Sebastien Bourdeauducq | 1fb317778a | |
Astro | 90e8e074cd | |
Astro | 71b3c66af9 | |
Sebastien Bourdeauducq | 959679d8b7 | |
Sebastien Bourdeauducq | c03c35f375 | |
Sebastien Bourdeauducq | cf47fa44d8 | |
Sebastien Bourdeauducq | 98cd9a539c | |
Astro | afe162ceca | |
Astro | a8aabd3815 | |
Astro | 8fc5ce902f | |
Astro | d666f3d573 | |
Sebastien Bourdeauducq | 1fd2322662 | |
Sebastien Bourdeauducq | 24082b687e | |
Sebastien Bourdeauducq | 9331fafab0 | |
Sebastien Bourdeauducq | 5c3974c265 | |
Sebastien Bourdeauducq | 19620948bf | |
Sebastien Bourdeauducq | efc43142a6 | |
Sebastien Bourdeauducq | 44969b03ad | |
Sebastien Bourdeauducq | 2776c5b16b | |
Robert Jördens | e9b78b62db | |
Sebastien Bourdeauducq | 5f8acb3f96 | |
Sebastien Bourdeauducq | f861459ace | |
David Nadlinger | 99e490f9ff | |
David Nadlinger | 4446ebf9ca | |
Robert Jördens | 3f0657f2a8 | |
Sebastien Bourdeauducq | 7a5dcbe60e | |
Sebastien Bourdeauducq | b8870997d0 | |
Sebastien Bourdeauducq | 623446f82c | |
Sebastien Bourdeauducq | 831b3514d3 | |
Sebastien Bourdeauducq | 930291f606 | |
Sebastien Bourdeauducq | e95b7b9d4b | |
Sebastien Bourdeauducq | 75e8d54c27 | |
Sebastien Bourdeauducq | 2ffc843790 | |
David Nadlinger | 280915d54f | |
Sebastien Bourdeauducq | 34222b3f38 | |
Sebastien Bourdeauducq | 5f461d08cd | |
Robert Jördens | f7e10759dc | |
Sebastien Bourdeauducq | e4fff390a8 | |
Sebastien Bourdeauducq | dceb5ae501 | |
Sebastien Bourdeauducq | f8dba7ae35 | |
Robert Jördens | 4d01410ce5 | |
Robert Jördens | 8407c526e8 | |
Sebastien Bourdeauducq | 5a9bb0ecba | |
David Nadlinger | 8bf9640185 | |
David Nadlinger | 34f48f57cc | |
Sebastien Bourdeauducq | f6edceb23d | |
whitequark | b8b9fa51bd | |
David Nadlinger | 0353966ef7 | |
David Nadlinger | 720838a23e | |
Robert Jördens | 53789ba9aa | |
Robert Jördens | 6655e567df | |
Sebastien Bourdeauducq | 53c778ae2d | |
Sebastien Bourdeauducq | a947867887 | |
Sebastien Bourdeauducq | 66a66b03b4 | |
Sebastien Bourdeauducq | 87ce24e867 | |
Sebastien Bourdeauducq | 43e58c939c | |
Sebastien Bourdeauducq | b04e15741b | |
Sebastien Bourdeauducq | 995a4428e7 | |
Sebastien Bourdeauducq | 6b20d50639 | |
Sebastien Bourdeauducq | c68581537b | |
Sebastien Bourdeauducq | 2183dcf23e | |
Robert Jördens | 636b4cae5a | |
Robert Jördens | 591de0e579 | |
Robert Jördens | 967d192cbe | |
Sebastien Bourdeauducq | 8853cf8df9 | |
Sebastien Bourdeauducq | 1a898c423a | |
Sebastien Bourdeauducq | 836dc9b927 | |
Sebastien Bourdeauducq | 834d03527b | |
Sebastien Bourdeauducq | e3c58d5872 | |
Sebastien Bourdeauducq | 5008302f88 | |
Sebastien Bourdeauducq | 74e4b01201 | |
Brad Bondurant | adf3df2bb5 | |
Sebastien Bourdeauducq | 704b5c6305 | |
Sebastien Bourdeauducq | bc2cfd77f5 | |
Sebastien Bourdeauducq | cdef50c0dd | |
Sebastien Bourdeauducq | 34c61db790 | |
Sebastien Bourdeauducq | 88b6496c8c | |
Sebastien Bourdeauducq | 9dcaae6395 | |
Sebastien Bourdeauducq | b4779969d0 | |
Sebastien Bourdeauducq | 874542f33f | |
hartytp | cfe1f56f73 | |
Sebastien Bourdeauducq | d9fc2847c1 | |
David Nadlinger | 84b91ee8bd | |
David Nadlinger | 76d4598971 | |
David Nadlinger | d4275ea1e0 | |
Marius Weber | 1e01e6b834 | |
hartytp | c2b4f0cfe3 | |
hartytp | bbcd1db025 | |
hartytp | 30fe624fe5 | |
Sebastien Bourdeauducq | 1a3d71760d | |
Sebastien Bourdeauducq | f551491a84 | |
Sebastien Bourdeauducq | 72f7f8386f | |
Sebastien Bourdeauducq | f11bf43283 | |
Sebastien Bourdeauducq | 86f462f40e | |
Sebastien Bourdeauducq | fda3cb2482 | |
Robert Jördens | ead9a42842 | |
Robert Jördens | 0c9b810501 | |
Robert Jördens | 1d2cc60e0d | |
Sebastien Bourdeauducq | 086180812c | |
Sebastien Bourdeauducq | 7e7f463d11 | |
Sebastien Bourdeauducq | 4e230bb768 | |
Sebastien Bourdeauducq | 4fab405b36 | |
Sebastien Bourdeauducq | 948ea396c0 | |
Sebastien Bourdeauducq | fa19b30313 | |
Sebastien Bourdeauducq | 3209197b0b | |
Sebastien Bourdeauducq | a1c97ec4dd | |
Sebastien Bourdeauducq | 51c123bad8 | |
Sebastien Bourdeauducq | b13961f246 | |
Sebastien Bourdeauducq | 4cc9bd33ce | |
Sebastien Bourdeauducq | 9ffb6867e2 | |
Sebastien Bourdeauducq | 5bef72dbd6 | |
Sebastien Bourdeauducq | be99496124 | |
Sebastien Bourdeauducq | d958b20bb8 | |
Sebastien Bourdeauducq | 119f64243f | |
Sebastien Bourdeauducq | bb39eedf5b | |
Sebastien Bourdeauducq | 93f4f31f45 | |
Sebastien Bourdeauducq | 2ada5227f0 | |
Sebastien Bourdeauducq | deffb77a7e | |
Sebastien Bourdeauducq | 56033b60a7 | |
Sebastien Bourdeauducq | 5613565293 | |
Sebastien Bourdeauducq | eaec519ac8 | |
Sebastien Bourdeauducq | d4781e9a8a | |
Sebastien Bourdeauducq | 62e9b2d85e | |
Sebastien Bourdeauducq | 4c1fb0c2a1 | |
Sebastien Bourdeauducq | 73d6078883 | |
David Nadlinger | 4d215cf541 | |
David Nadlinger | dc7a642b26 | |
Sebastien Bourdeauducq | b4b9ec50b5 | |
David Nadlinger | cdaf554736 | |
David Nadlinger | 25e7e0c177 | |
Sebastien Bourdeauducq | 97b7ed557b | |
Sebastien Bourdeauducq | b8e2b82adb | |
Sebastien Bourdeauducq | 4f6367d1c1 | |
David Nadlinger | cd7a5a3683 | |
David Nadlinger | b4ddf4c86b | |
David Nadlinger | 236b30ac5f | |
David Nadlinger | 88fd5c8440 | |
David Nadlinger | 990e0b7dd9 | |
David Nadlinger | baf102dbb2 | |
David Nadlinger | 8e225433a5 | |
David Nadlinger | b8ff627be9 | |
David Nadlinger | fc95183e04 | |
David Nadlinger | f9af058b96 | |
Sebastien Bourdeauducq | 3634cfac86 | |
Sebastien Bourdeauducq | 4580f3dac8 | |
Chris Ballance | 4499ef1748 | |
Sebastien Bourdeauducq | 5d31cf2268 | |
Sebastien Bourdeauducq | 560849e693 | |
Sebastien Bourdeauducq | bbb8c00518 | |
Sebastien Bourdeauducq | 18fbe0b081 | |
Sebastien Bourdeauducq | c7205ad82f | |
Sebastien Bourdeauducq | d07c6fcfea | |
Chris Ballance | 7cdcaf0d00 | |
Sebastien Bourdeauducq | 33b28f6e56 | |
Sebastien Bourdeauducq | 2ec5a58c59 | |
Chris Ballance | af785b9a9c | |
Sebastien Bourdeauducq | 8d2c1be44f | |
Sebastien Bourdeauducq | c2622297bd | |
Sebastien Bourdeauducq | a01425bc9c | |
Sebastien Bourdeauducq | adcd79ecb3 | |
Sebastien Bourdeauducq | e61320d409 | |
Chris Ballance | fcf8828cb6 | |
Sebastien Bourdeauducq | 5e7c83c9cf | |
Sebastien Bourdeauducq | e504262b67 | |
Sebastien Bourdeauducq | 346299e7f8 | |
Sebastien Bourdeauducq | 04e0c23e78 | |
Sebastien Bourdeauducq | 852048dce4 | |
Sebastien Bourdeauducq | 964a349a19 | |
Chris Ballance | 8a5789d616 | |
Chris Ballance | c56c3e5588 | |
Chris Ballance | 8659c769cb | |
Sebastien Bourdeauducq | b2177eff81 | |
Sebastien Bourdeauducq | 227c729f56 | |
David Nadlinger | b3db3ea6fc | |
David Nadlinger | 5fd92a6175 | |
Sebastien Bourdeauducq | e47ba4b35e | |
Sebastien Bourdeauducq | b219f8b5c7 | |
Sebastien Bourdeauducq | fc9d4c7bdc | |
Sebastien Bourdeauducq | 25bcebd1f6 | |
Sebastien Bourdeauducq | 3be5112363 | |
Sebastien Bourdeauducq | ed2d8dfa7a | |
Sebastien Bourdeauducq | b2bee3da96 | |
Sebastien Bourdeauducq | 6ff7fce4bd | |
Sebastien Bourdeauducq | bffb1cf141 | |
Sebastien Bourdeauducq | 24a3b31f22 | |
Sebastien Bourdeauducq | 26dd4e5160 | |
Sebastien Bourdeauducq | 0d86702503 | |
Sebastien Bourdeauducq | f65bcbcbb2 | |
Sebastien Bourdeauducq | 0d05d4b813 | |
Sebastien Bourdeauducq | ec966de007 | |
Sebastien Bourdeauducq | ceead218bc | |
Sebastien Bourdeauducq | 4db115aef3 | |
Sebastien Bourdeauducq | 62c7f75a9e | |
Sebastien Bourdeauducq | d45249197c | |
Sebastien Bourdeauducq | de3992bbdd | |
Sebastien Bourdeauducq | 791f830ef6 | |
Sebastien Bourdeauducq | d39338d59f | |
Sebastien Bourdeauducq | d79a6ee41c | |
Sebastien Bourdeauducq | 62985fbd29 | |
Sebastien Bourdeauducq | 1c35c051a5 | |
Sebastien Bourdeauducq | 05b128469f | |
Sebastien Bourdeauducq | cd60803f21 | |
Sebastien Bourdeauducq | 269f0a4d6f | |
Sebastien Bourdeauducq | 8049c52d06 | |
Sebastien Bourdeauducq | 8edc2318ab | |
Robert Jördens | aee8965897 | |
Robert Jördens | ec6588174b | |
Robert Jördens | b57cad77ad | |
Sebastien Bourdeauducq | 2a60914cb9 | |
Sebastien Bourdeauducq | 62bfccc264 | |
Sebastien Bourdeauducq | 87a1ea6587 | |
Sebastien Bourdeauducq | 84619adbab | |
Sebastien Bourdeauducq | 7c0353b4be | |
Astro | 5557491cc7 | |
Sebastien Bourdeauducq | 596d3e20d7 | |
Sebastien Bourdeauducq | 40a0cf806d | |
Sebastien Bourdeauducq | 861ad0a62c | |
David Nadlinger | 1cd0f5a552 | |
Sebastien Bourdeauducq | bcda53ee2f | |
Sebastien Bourdeauducq | a486756890 | |
David Nadlinger | 944a3b34c3 | |
Sebastien Bourdeauducq | 322861225e | |
Sebastien Bourdeauducq | f657c44e3d | |
Sebastien Bourdeauducq | af9988c79c | |
Sebastien Bourdeauducq | 0b08baef10 | |
Sebastien Bourdeauducq | aa17037193 | |
Sebastien Bourdeauducq | 30fb7c1049 | |
Sebastien Bourdeauducq | bd22c8e200 | |
Sebastien Bourdeauducq | ea2956bcb8 | |
Sebastien Bourdeauducq | 6ad2e13515 | |
Sebastien Bourdeauducq | 2104a93f78 | |
Sebastien Bourdeauducq | ff4e4f15ed | |
Sebastien Bourdeauducq | 74c16e038e | |
Sebastien Bourdeauducq | 3a84790c44 | |
Sebastien Bourdeauducq | edb12a6a5d | |
Sebastien Bourdeauducq | ed030704d2 | |
Sebastien Bourdeauducq | c32bf770ab | |
whitequark | 0a84dd38c1 | |
David Nadlinger | 01c3000ef3 | |
David Nadlinger | 56b2e0c262 | |
David Nadlinger | bf84226c7d | |
David Nadlinger | 820326960e | |
Sébastien Bourdeauducq | dc14f8258f | |
Sébastien Bourdeauducq | e3cf4fd342 | |
Sébastien Bourdeauducq | 566f5094d9 | |
Sébastien Bourdeauducq | d83251098a | |
Sebastien Bourdeauducq | 3a21794b79 | |
Sebastien Bourdeauducq | f673ce276f | |
Sebastien Bourdeauducq | 8194f74252 | |
Sebastien Bourdeauducq | 7c6abfb2ce | |
Sebastien Bourdeauducq | ee611c5c30 | |
Sebastien Bourdeauducq | 2aab84453d | |
Sebastien Bourdeauducq | a52234b5ff | |
Sebastien Bourdeauducq | 13c4d935a2 | |
Sebastien Bourdeauducq | e424927049 | |
Sebastien Bourdeauducq | 7584639acd | |
Sebastien Bourdeauducq | 07ac42505b | |
Sebastien Bourdeauducq | f0f50bf1dc | |
Robert Jördens | 2de1eaa521 | |
Sebastien Bourdeauducq | 7994c294af | |
Sebastien Bourdeauducq | 1cfd26dc2e | |
Sebastien Bourdeauducq | 306d9cf5d0 | |
Sebastien Bourdeauducq | 744ef03fa1 | |
Sebastien Bourdeauducq | 6f1bb5c351 | |
Sebastien Bourdeauducq | 3e8fe3f29d | |
David Nadlinger | ef934ad958 | |
Chris Ballance | eaa1b44b00 | |
hartytp | 0ebff04ad7 | |
hartytp | f6142816b8 | |
hartytp | fe63c9b366 | |
hartytp | df6c1fca2c | |
hartytp | 87e85bcc14 | |
whitequark | d6eb2b023a | |
David Nadlinger | 0da799fa46 | |
Sebastien Bourdeauducq | b56c7cec1e | |
Sebastien Bourdeauducq | d0b6f92b11 | |
Sebastien Bourdeauducq | 2f7364563c | |
Sebastien Bourdeauducq | 5a7460a38e | |
Sebastien Bourdeauducq | ea431b6982 | |
Sebastien Bourdeauducq | ec230d6560 | |
Sebastien Bourdeauducq | 8119000982 | |
Sebastien Bourdeauducq | 82106dcd95 | |
Sebastien Bourdeauducq | 8bbd4207d8 | |
Sebastien Bourdeauducq | bdb6678cec | |
Sebastien Bourdeauducq | d3c608aaec | |
Sebastien Bourdeauducq | fa3b40141d | |
Sebastien Bourdeauducq | ec8560911f | |
Sebastien Bourdeauducq | c591009220 | |
Sebastien Bourdeauducq | 9d0d02a561 | |
Sebastien Bourdeauducq | ed6aa29897 | |
Sebastien Bourdeauducq | 2e8decbce3 | |
Sebastien Bourdeauducq | 9ae57fd51e | |
Robert Jördens | 90c9fa446f | |
Sebastien Bourdeauducq | 7a5d28b73d | |
Sebastien Bourdeauducq | 1a42e23fb4 | |
Sebastien Bourdeauducq | eebff6d77f | |
Sebastien Bourdeauducq | b9e3fab49c | |
Sebastien Bourdeauducq | 145f08f3fe | |
Sebastien Bourdeauducq | ba21dc8498 | |
Sebastien Bourdeauducq | 3acee87df2 | |
Sebastien Bourdeauducq | cfe66549ff | |
Sebastien Bourdeauducq | 2b0d63db23 | |
Sebastien Bourdeauducq | bdd4e52a53 | |
Sebastien Bourdeauducq | 47312e55d3 | |
Sebastien Bourdeauducq | 443d6d8688 | |
Sebastien Bourdeauducq | 3b6f47886e | |
Sebastien Bourdeauducq | 74fdd04622 | |
Sebastien Bourdeauducq | 81b0046f98 | |
Sebastien Bourdeauducq | 8254560577 | |
Sebastien Bourdeauducq | 214394e3b0 | |
Sebastien Bourdeauducq | fdbf1cc2b2 | |
Sebastien Bourdeauducq | 7e5c062c2c | |
Sebastien Bourdeauducq | f73ffe44f9 | |
Sebastien Bourdeauducq | 8632b553d2 | |
Sebastien Bourdeauducq | d1ef036948 | |
Sebastien Bourdeauducq | 9966e789fc | |
Sebastien Bourdeauducq | 359fb1f207 | |
Sebastien Bourdeauducq | cb04230f86 | |
Sebastien Bourdeauducq | 3356717316 | |
Sebastien Bourdeauducq | 4941fb3300 | |
Sebastien Bourdeauducq | cc9420d2c8 | |
Robert Jördens | 8c5a502591 | |
Sebastien Bourdeauducq | bbac92442f | |
Sebastien Bourdeauducq | a92cc91dcb | |
Sebastien Bourdeauducq | f8b39b0b9a | |
Sebastien Bourdeauducq | 07b5b0d36d | |
Sebastien Bourdeauducq | 31592fc8e4 | |
Sebastien Bourdeauducq | 0a0e8c3c93 | |
Sebastien Bourdeauducq | 3917a0ef46 | |
Sebastien Bourdeauducq | 154269b77a | |
Sebastien Bourdeauducq | 3b5fd3ac11 | |
Sebastien Bourdeauducq | 330c5610e9 | |
Sebastien Bourdeauducq | 390f05f762 | |
Sebastien Bourdeauducq | d7e6f104d2 | |
Sebastien Bourdeauducq | 81f2b2c864 | |
Sebastien Bourdeauducq | 01f1df7e50 | |
Robert Jördens | b692981c8e | |
Sebastien Bourdeauducq | a0eba5b09b | |
Sebastien Bourdeauducq | 2e3555de85 | |
Sebastien Bourdeauducq | 9ee5fea88d | |
Robert Jördens | 91e375ce6a | |
Sebastien Bourdeauducq | 2174935231 | |
Robert Jördens | 81ff3d4b29 | |
Sebastien Bourdeauducq | 4869636a55 | |
Sebastien Bourdeauducq | 79ffd1e0bf | |
Sebastien Bourdeauducq | a9678dd9f2 | |
Sebastien Bourdeauducq | e024fa89e5 | |
Sebastien Bourdeauducq | 84f7d006e8 | |
Sebastien Bourdeauducq | 30051133b7 | |
Sebastien Bourdeauducq | 30b2f54baa | |
Sebastien Bourdeauducq | bff8c8cb05 | |
Sebastien Bourdeauducq | a987d2b2e5 | |
Sebastien Bourdeauducq | bc532e0088 | |
David Nadlinger | 4ba4e9c540 | |
David Nadlinger | 0dab7ecd73 | |
David Nadlinger | e24e893303 | |
David Nadlinger | 8aac5f7695 | |
David Nadlinger | 5c62648ed6 | |
David Nadlinger | e165a9a352 | |
David Nadlinger | c213ab13ba | |
David Nadlinger | bd71852427 | |
David Nadlinger | 90c144a685 | |
Sebastien Bourdeauducq | a2ff2cc173 | |
Robert Jördens | 40187d1957 | |
Robert Jördens | 385916a9a4 | |
Robert Jördens | 2bea5e3d58 | |
Sebastien Bourdeauducq | a467b8f851 | |
Sebastien Bourdeauducq | 1e3ef15446 | |
Robert Jördens | 4e142dfbeb | |
Sebastien Bourdeauducq | 689714965b | |
David Nadlinger | 05f6dafb2c | |
David Nadlinger | 1c71ae636a | |
David Nadlinger | 67a6882e91 | |
David Nadlinger | a565f77538 | |
TPH | 3c0e3e5910 | |
Sebastien Bourdeauducq | b8a230a67e | |
Sebastien Bourdeauducq | 4cb9f77fd8 | |
David Nadlinger | 6c52359e59 | |
David Nadlinger | 24b1b9a480 | |
whitequark | 425cd7851e | |
whitequark | 49682d0159 | |
David Nadlinger | cd725a8352 | |
David Nadlinger | f3323a35d5 | |
David Nadlinger | 48fc175a6b | |
Drew | f2c1d32e54 | |
David Nadlinger | 3e84ec2bf1 | |
David Nadlinger | b3ef6e2253 | |
Drew | 66861e6708 | |
Robert Jördens | 101671fbbf | |
Drew | 58e872e7b5 | |
Drew | 99a0f61b35 | |
Drew | 721c6f3bcc | |
Robert Jördens | 088530604e | |
Robert Jördens | 19748fe495 | |
Robert Jördens | b25ab1fc88 | |
Sebastien Bourdeauducq | f8a94725e9 | |
Sebastien Bourdeauducq | 9b213b17af | |
Sebastien Bourdeauducq | c7b18952b8 | |
Sebastien Bourdeauducq | 62599c5f91 | |
Drew | b3b0b6f0a5 | |
David Nadlinger | 101ed5d534 | |
Drew | 40370c4d45 | |
David Nadlinger | 4fb434674d | |
Sebastien Bourdeauducq | 0d3e7ba805 | |
Sebastien Bourdeauducq | 887cb110a7 | |
David Nadlinger | cadde970e1 | |
David Nadlinger | 7bcdeb825b | |
David Nadlinger | 4d793d7149 | |
Sebastien Bourdeauducq | 332bd6090f | |
Sebastien Bourdeauducq | 3217488824 | |
Sebastien Bourdeauducq | b5501aaf00 | |
Sebastien Bourdeauducq | 66b3132c28 | |
Sebastien Bourdeauducq | 6e43c41103 | |
Sebastien Bourdeauducq | cf9447ab77 | |
Sebastien Bourdeauducq | 2c3510497b | |
Sebastien Bourdeauducq | d6fea22174 | |
Sebastien Bourdeauducq | 2100a8b1f1 | |
Drew | 94cdad6c1d | |
Drew Risinger | b58d59a9e7 | |
Drew | 3e5cea5d89 | |
Sebastien Bourdeauducq | a93fdb8c9d | |
Sebastien Bourdeauducq | 62d7c89c48 | |
Sebastien Bourdeauducq | 0972d61e81 | |
Sebastien Bourdeauducq | f007895fad | |
Sebastien Bourdeauducq | 10ebf63c47 | |
Sebastien Bourdeauducq | d6a3172a3e | |
Sebastien Bourdeauducq | 4af8fd6a0d | |
Sebastien Bourdeauducq | 175f8b8ccc | |
Sebastien Bourdeauducq | 77126ce5b3 | |
Sebastien Bourdeauducq | ab9ca0ee0a | |
Sebastien Bourdeauducq | cc58318500 | |
Sebastien Bourdeauducq | f5cda3689e | |
Sebastien Bourdeauducq | e85df13127 | |
Sebastien Bourdeauducq | ec52a1003d | |
Sebastien Bourdeauducq | d42d607547 | |
Sebastien Bourdeauducq | 7a6bdcb041 | |
Sebastien Bourdeauducq | 48793b7ecf | |
Sebastien Bourdeauducq | e2799803cb | |
Sebastien Bourdeauducq | 1e7ba3227f | |
Sebastien Bourdeauducq | 421ad9c916 | |
Sebastien Bourdeauducq | e80d80f133 | |
Drew | d60b95f481 | |
Robert Jördens | a7d4d3bda9 | |
Sebastien Bourdeauducq | 35bdf26f01 | |
David Nadlinger | e608d6ffd3 | |
David Nadlinger | 8e30c4574b | |
Sebastien Bourdeauducq | 38ce7ab8ff | |
Sebastien Bourdeauducq | c09ab8502c | |
Joachim Schiele | 73941d4661 | |
Robert Jördens | 79eadb9465 | |
Robert Jördens | 6df4ae934f | |
Robert Jördens | efd400b02c | |
David Nadlinger | d4c393b2a8 | |
Robert Jördens | d90eb3ae88 | |
Robert Jördens | baf88050fd | |
Kaifeng | cc143d5fec | |
Sebastien Bourdeauducq | 6aa341bc44 | |
Sebastien Bourdeauducq | 421834fa3e | |
Sebastien Bourdeauducq | 981a77834a | |
Sebastien Bourdeauducq | d931967e5c | |
Sebastien Bourdeauducq | dd03fdfd1a | |
Sebastien Bourdeauducq | 8940009e1a | |
Sebastien Bourdeauducq | 2e66788c6c | |
Sebastien Bourdeauducq | ad39c76a56 | |
Sebastien Bourdeauducq | 7e14f3ca4e | |
Sebastien Bourdeauducq | fd00021a52 | |
Sebastien Bourdeauducq | 7f55376c75 | |
Sebastien Bourdeauducq | dce4f036db | |
Sebastien Bourdeauducq | 156afb48ee | |
Paweł K | 57caa7b149 | |
Sebastien Bourdeauducq | 3fd95b86c2 | |
Sebastien Bourdeauducq | 5c162ed5e6 | |
Sebastien Bourdeauducq | 0507101e31 | |
Sebastien Bourdeauducq | c56c0ba41f | |
Sebastien Bourdeauducq | 09141e5bee | |
Sebastien Bourdeauducq | 450a035f9e | |
Sebastien Bourdeauducq | ae8ef18f47 | |
Sebastien Bourdeauducq | b32e89444c | |
Sebastien Bourdeauducq | af9ea1f324 | |
Robert Jördens | a81c12de94 | |
Sebastien Bourdeauducq | 58ea111b8b | |
Sebastien Bourdeauducq | bf50dcf76d | |
Sebastien Bourdeauducq | 8f9858be4c | |
Sebastien Bourdeauducq | 22a223bf82 | |
Sebastien Bourdeauducq | f5befba5c9 | |
Sebastien Bourdeauducq | 53e79f553f | |
Sebastien Bourdeauducq | b5cdb1c1e0 | |
Sebastien Bourdeauducq | a3e0b1c5b4 | |
Sebastien Bourdeauducq | 78d4b3a7da | |
Sebastien Bourdeauducq | 69e699c7bd | |
Robert Jördens | 3ad68f65c5 | |
Robert Jördens | d1eee7c0ea | |
Sebastien Bourdeauducq | 1b841805f6 | |
Sebastien Bourdeauducq | de9d21ffc8 | |
whitequark | dd829afebd | |
whitequark | 583bba8777 | |
whitequark | 0edae64afb | |
Sebastien Bourdeauducq | 59033d2588 | |
Sebastien Bourdeauducq | 84a6b3d09b | |
Sebastien Bourdeauducq | 1f7858b80b | |
Sebastien Bourdeauducq | e509ab8553 | |
Sebastien Bourdeauducq | c990b5e4f1 | |
Sebastien Bourdeauducq | a0cc7311ad | |
Sebastien Bourdeauducq | 0bee43aa58 | |
Sebastien Bourdeauducq | bec25cbaa0 | |
Sebastien Bourdeauducq | e8d58b35b4 | |
Sebastien Bourdeauducq | d18546550e | |
Sebastien Bourdeauducq | 2549e623c1 | |
Sebastien Bourdeauducq | f74dda639f | |
Sebastien Bourdeauducq | 8caea0e6d3 | |
Sebastien Bourdeauducq | aadf5112b7 | |
Sebastien Bourdeauducq | fae95e73ad | |
Sebastien Bourdeauducq | 3d0c3cc1cf | |
Sebastien Bourdeauducq | ad0254c17b | |
Sebastien Bourdeauducq | efd735a6ab | |
Sebastien Bourdeauducq | 9f96b6bcda | |
Sebastien Bourdeauducq | 969a305c5a | |
Sebastien Bourdeauducq | b92350b0f6 | |
Sebastien Bourdeauducq | 212892d92f | |
Sebastien Bourdeauducq | 73f0de7c79 | |
Sebastien Bourdeauducq | 1b7f403a4b | |
Sebastien Bourdeauducq | 53a979e74d | |
Sebastien Bourdeauducq | 251d90c3d5 | |
Sebastien Bourdeauducq | b86b6dcc09 | |
Sebastien Bourdeauducq | 08be176369 | |
Sebastien Bourdeauducq | 3d965910f7 | |
Sebastien Bourdeauducq | 142c952e3d | |
Sebastien Bourdeauducq | 62642957cd | |
Sebastien Bourdeauducq | 970d1bf147 | |
Sebastien Bourdeauducq | eda15a596c | |
Sebastien Bourdeauducq | 2b44786f73 | |
whitequark | c33f74dabe | |
Sebastien Bourdeauducq | c8cd830118 | |
Sebastien Bourdeauducq | f7ad7a99e3 | |
Sebastien Bourdeauducq | 3cbdf2fbac | |
Sebastien Bourdeauducq | d38755feff | |
whitequark | 1990ab35d3 | |
Sebastien Bourdeauducq | cd61ee858c | |
Sebastien Bourdeauducq | c0c413196a | |
Sebastien Bourdeauducq | f097b4104c | |
Sebastien Bourdeauducq | 0017cb756e | |
Sebastien Bourdeauducq | 2f010e0109 | |
Sebastien Bourdeauducq | 20ed393c1e | |
Sebastien Bourdeauducq | f8c6fa5ad6 | |
Sebastien Bourdeauducq | 65da1fee4a | |
Sebastien Bourdeauducq | d19550daf8 | |
Sebastien Bourdeauducq | ae72e3a51e | |
Sebastien Bourdeauducq | 1ef39a98a7 | |
Sebastien Bourdeauducq | e95638e0a7 | |
Sebastien Bourdeauducq | 042b0065de | |
Sebastien Bourdeauducq | fa872c3341 | |
Sebastien Bourdeauducq | 6cf3db3485 | |
Sebastien Bourdeauducq | 5a9cc004f2 | |
Sebastien Bourdeauducq | 0befec7d26 | |
Sebastien Bourdeauducq | 420e1cb1d0 | |
Sebastien Bourdeauducq | e36a8536d7 | |
Sebastien Bourdeauducq | 5bcd40ff59 | |
Sebastien Bourdeauducq | edf403b837 | |
Sebastien Bourdeauducq | 95432a4ac1 | |
Sebastien Bourdeauducq | 8227037a84 | |
Sebastien Bourdeauducq | 41972d6773 | |
Sebastien Bourdeauducq | 051bafbfd9 | |
Sebastien Bourdeauducq | 251b9a2b0d | |
Sebastien Bourdeauducq | 5439abaa9d | |
Sebastien Bourdeauducq | 36e3fedfc6 | |
Sebastien Bourdeauducq | e6bd835b5d | |
Sebastien Bourdeauducq | 2679a35082 | |
Sebastien Bourdeauducq | c0c5867f9e | |
Sebastien Bourdeauducq | a23af67f2b | |
Sebastien Bourdeauducq | f5b386c0d8 | |
Sebastien Bourdeauducq | b38c57d73b | |
Sebastien Bourdeauducq | 3d29a7ed14 | |
Sebastien Bourdeauducq | 2fff96802b | |
Sebastien Bourdeauducq | 19a14b68b1 | |
Sebastien Bourdeauducq | 264078baba | |
Sebastien Bourdeauducq | e01efbcb8a | |
Sebastien Bourdeauducq | 4d889c0c4e | |
Sebastien Bourdeauducq | 663432adbd | |
Sebastien Bourdeauducq | bc1d3fda6a | |
Sebastien Bourdeauducq | 31bef9918e | |
Sebastien Bourdeauducq | 7ec45efdcf | |
Sebastien Bourdeauducq | 014cfd8dbd | |
Sebastien Bourdeauducq | 7ae44f3417 | |
Sebastien Bourdeauducq | 496d1b08fd | |
Sebastien Bourdeauducq | ec302747e0 | |
Sebastien Bourdeauducq | d5577ec0d0 | |
Sebastien Bourdeauducq | df61b85988 | |
Sebastien Bourdeauducq | 87e0384e97 | |
Sebastien Bourdeauducq | 92be9324df | |
Sebastien Bourdeauducq | 2884d595b3 | |
Sebastien Bourdeauducq | 839f748a1d | |
Sebastien Bourdeauducq | 5f20d79408 | |
Sebastien Bourdeauducq | 1450e17a73 | |
Sebastien Bourdeauducq | 19ae9ac1b1 | |
Sebastien Bourdeauducq | 3d531cc923 | |
Sebastien Bourdeauducq | 4e4398afa6 | |
Sebastien Bourdeauducq | 15b16695c6 | |
Sebastien Bourdeauducq | 778f1de121 | |
Sebastien Bourdeauducq | 00fabee1ca | |
Sebastien Bourdeauducq | f3fe818049 | |
Sebastien Bourdeauducq | 0fe2a6801e | |
Sebastien Bourdeauducq | 6768dbab6c | |
Sebastien Bourdeauducq | 88b7529d09 | |
Sebastien Bourdeauducq | 078c862618 | |
Sebastien Bourdeauducq | 6057cb797c | |
Sebastien Bourdeauducq | 4f963e1e11 | |
Sebastien Bourdeauducq | ce6e390d5f | |
Sebastien Bourdeauducq | aa64e6c1c6 |

@@ -1 +0,0 @@
-artiq/_version.py export-subst

@@ -8,9 +8,9 @@ about: Report a bug in ARTIQ

 # Bug Report

-<!-- Thanks for reporting a bug report to ARTIQ! You can also discuss issues and ask questions on IRC (the [#m-labs channel on freenode](https://webchat.freenode.net/?channels=m-labs) or on the [ARTIQ mailing list](https://ssl.serverraum.org/lists/listinfo/artiq). Please check Github/those forums to avoid posting a repeat issue.
+<!-- Thanks for reporting a bug report to ARTIQ! You can also discuss issues and ask questions on IRC (the [#m-labs channel on freenode](https://webchat.freenode.net/?channels=m-labs) or on the [forum](https://forum.m-labs.hk). Please check Github/those forums to avoid posting a repeat issue.

-Context helps us fix issues faster, so please include the following:
+Context helps us fix issues faster, so please include the following when relevant:
 -->

 ## One-Line Summary

@@ -34,11 +34,12 @@ Behavior
 * Text description
 * Log message, tracebacks, screen shots where relevant

-### Your System
+### Your System (omit irrelevant parts)

 * Operating System:
-* Conda version:
-* ARTIQ version: (package or git commit id, versions for bitstream, bootloader, runtime and host software). Run `conda list`
+* ARTIQ version: (with recent versions of ARTIQ, run ``artiq_client --version``)
+* Version of the gateware and runtime loaded in the core device: (in the output of ``artiq_coremgmt -D .... log``)
+* If using Conda, output of `conda list` (please submit as a file attachment, as this tends to be long)
 * Hardware involved:

 <!--

@@ -1,19 +1,23 @@
 ---
 name: Support question
-about: Questions about ARTIQ that are not covered in the documentation. (Latest = https://m-labs.hk/artiq/manual-master/ or Stable = https://m-labs.hk/artiq/manual/)
+about: Questions about ARTIQ that are not covered in the documentation

 ---

 # Question

 <!--
 Make sure you check the ARTIQ documentation before posting a question.
 Don't forget you can search it!
-Latest docs: https://m-labs.hk/artiq/manual-master/
-Stable branch docs: https://m-labs.hk/artiq/manual/

-Can also ask on IRC: https://webchat.freenode.net/?channels=m-labs or
-check mailing list archives: https://ssl.serverraum.org/lists-archive/artiq/
+Beta version: https://m-labs.hk/artiq/manual-beta/
+Stable version: https://m-labs.hk/artiq/manual/

+The forum is also a very good place for questions: https://forum.m-labs.hk/
+Can also ask on IRC: https://webchat.freenode.net/?channels=m-labs or
+check mailing list archives: https://ssl.serverraum.org/lists-archive/artiq/

+Remember: if you have this question then others probably do too! The best way of thanking the people who help you with this issue is to contribute to ARTIQ by submitting a pull request to update the documentation.
 -->

 ## Category: FILL_IN

@@ -38,7 +38,7 @@ Closes #XXX
 ### All Pull Requests

 - [x] Use correct spelling and grammar.
-- [ ] Update [RELEASE_NOTES.md](../RELEASE_NOTES.md) if there are noteworthy changes, especially if there are changes to existing APIs.
+- [ ] Update [RELEASE_NOTES.rst](../RELEASE_NOTES.rst) if there are noteworthy changes, especially if there are changes to existing APIs.
 - [ ] Close/update issues.
 - [ ] Check the copyright situation of your changes and sign off your patches (`git commit --signoff`, see [copyright](../CONTRIBUTING.rst#copyright-and-sign-off)).

@@ -47,7 +47,6 @@ Closes #XXX
 - [ ] Run `flake8` to check code style (follow PEP-8 style). `flake8` has issues with parsing Migen/gateware code, ignore as necessary.
 - [ ] Test your changes or have someone test them. Mention what was tested and how.
 - [ ] Add and check docstrings and comments
-- [ ] Check, test, and update the conda recipes in [conda/](../doc/)
 - [ ] Check, test, and update the [unittests in /artiq/test/](../artiq/test/) or [gateware simulations in /artiq/gateware/test](../artiq/gateware/test)

 ### Documentation Changes

@@ -29,6 +29,7 @@ __pycache__/
 /repository/
 /results
 /last_rid.pyon
-/dataset_db.pyon
+/dataset_db.mdb
+/dataset_db.mdb-lock
 /device_db*.py
 /test*

@@ -7,9 +7,8 @@ Reporting Issues/Bugs

 Thanks for `reporting issues to ARTIQ
 <https://github.com/m-labs/artiq/issues/new>`_! You can also discuss issues and
-ask questions on IRC (the `#m-labs channel on freenode
-<https://webchat.freenode.net/?channels=m-labs>`_) or on the `mailing list
-<https://ssl.serverraum.org/lists/listinfo/artiq>`_.
+ask questions on IRC (the #m-labs channel on OFTC), the `Mattermost chat
+<https://chat.m-labs.hk>`_, or on the `forum <https://forum.m-labs.hk>`_.

 The best bug reports are those which contain sufficient information. With
 accurate and comprehensive context, an issue can be resolved quickly and

@@ -17,19 +16,19 @@ efficiently. Please consider adding the following data to your issue
 report if possible:

 * A clear and unique summary that fits into one line. Also check that
-this issue has not jet been reported. If it has, add additional information there.
+this issue has not yet been reported. If it has, add additional information there.
 * Precise steps to reproduce (list of actions that leads to the issue)
 * Expected behavior (what should happen)
 * Actual behavior (what happens instead)
 * Logging message, trace backs, screen shots where relevant
-* Components involved:
+* Components involved (omit irrelevant parts):

-* Operating system
-* Conda version
-* ARTIQ version (package or git commit id, versions for bitstream, BIOS,
-runtime and host software)
+* Operating System
+* ARTIQ version (with recent versions of ARTIQ, run ``artiq_client --version``)
+* Version of the gateware and runtime loaded in the core device (in the output of ``artiq_coremgmt -D .... log``)
 * Hardware involved

 For in-depth information on bug reporting, see:

 http://www.chiark.greenend.org.uk/~sgtatham/bugs.html

@@ -42,10 +41,8 @@ Contributing Code
 ARTIQ welcomes contributions. Write bite-sized patches that can stand alone,
 clean them up, write proper commit messages, add docstrings and unittests. Then
 ``git rebase`` them onto the current master or merge the current master. Verify
-that the testsuite passes. Then prepare a pull request or send patches to the
-`mailing list <https://ssl.serverraum.org/lists/listinfo/artiq>`_ to be
-discussed. Expect your contribution to be held up to coding standards (e.g. use
-``flake8`` to check yourself).
+that the testsuite passes. Then submit a pull request. Expect your contribution
+to be held up to coding standards (e.g. use ``flake8`` to check yourself).

 Checklist for Code Contributions
 --------------------------------

@@ -67,7 +64,6 @@ Checklist for Code Contributions
 - Update ``RELEASE_NOTES.md`` if there are noteworthy changes, especially if
 there are changes to existing APIs
 - Check, test, and update the documentation in `doc/`
-- Check, test, and update the conda recipes in `conda/`
 - Check, test, and update the unittests
 - Close and/or update issues

@@ -1,94 +1,17 @@
-Release process
-===============

-Maintain ``RELEASE_NOTES.rst`` with a list of new features and API changes in each major release.

-Major releases
---------------

-1. Create branch release-X from master.
-2. Tag the next commit in master X+1.0.dev.
-3. Ensure that release versions of all packages required are available under the ``main`` label in conda. Ensure that new packages in ``main`` do not break older ARTIQ releases.
-4. In the release-X branch, remove any unfinished features.
-5. Test and fix any problems found. Apply fixes to both master and release-X.
-6. If you have willing testers for release candidates, tag X.0rc1 in the release-X branch (generally use signed annotated tags, i.e. ``git tag -sa X.0rc1``), have it build, and point testers there. Iterate over the previous points with new release candidates if necessary.
-7. Tag X.0 in the release-X branch, build it, and copy its packages to ``main`` channel.
-8. Mint a new DOI from Zenodo and update the README/introduction.
-9. Update the m-labs.hk/artiq/manual redirect to point to m-labs.hk/artiq/manual-release-X (edit /artiq/.htaccess).
-10. "Draft a new release" and close the milestone on GitHub.
-11. Deprecate the old release documentation with a banner in
-doc/manual/_templates/layout.html in the old ``release-(X-1)`` branch.

-Minor (bugfix) releases
------------------------

-1. Backport bugfixes from the master branch or fix bugs specific to old releases into the currently maintained release-X branch(es).
-2. When significant bugs have been fixed, tag X.Y+1.
-3. To help dealing with regressions, no new features or refactorings should be implemented in release-X branches. Those happen in the master branch, and then a new release-X+1 branch is created.
-4. "Draft a new release" and close the milestone on GitHub.

 Sharing development boards
 ==========================

-To avoid conflicts for development boards on the server, while using a board you must hold the corresponding lock file present in ``/var/lib/artiq/boards``. Holding the lock file grants you exclusive access to the board.
+To avoid conflicts for development boards on the server, while using a board you must hold the corresponding lock file present in the ``/tmp`` folder of the machine to which the board is connected. Holding the lock file grants you exclusive access to the board.

-To lock the KC705 for 30 minutes or until Ctrl-C is pressed:
+For example, to lock the KC705 until ENTER is pressed:
 ::
-flock --verbose /var/lib/artiq/boards/kc705-1 sleep 1800
+ssh rpi-1.m-labs.hk "flock /tmp/board_lock-kc705-1 -c 'echo locked; read; echo unlocked'"

-Check that the command acquires the lock, i.e. prints something such as:
-::
-flock: getting lock took 0.000003 seconds
-flock: executing sleep

-To lock the KC705 for the duration of the execution of a shell:
-::
-flock /var/lib/artiq/boards/kc705-1 bash

-You may also use this script:
-::
-#!/bin/bash
-exec flock /var/lib/artiq/boards/$1 bash --rcfile <(cat ~/.bashrc; echo PS1=\"[$1\ lock]\ \$PS1\")

 If the board is already locked by another user, the ``flock`` commands above will wait for the lock to be released.

-To determine which user is locking a board, use:
+To determine which user is locking a board, use a command such as:
 ::
-fuser -v /var/lib/artiq/boards/kc705-1
+ssh rpi-1.m-labs.hk "fuser -v /tmp/board_lock-kc705-1"

-Selecting a development board with artiq_flash
-==============================================

-The board lock file also contains the openocd commands for selecting the corresponding developer board:
-::
-artiq_flash -I "$(cat /var/lib/artiq/boards/sayma-1)"

-Using developer tools
-=====================

-ARTIQ ships with an ``artiq_devtool`` binary, which automates common actions arising when developing the board gateware and firmware on a machine other than the one to which the board is connected.

-.. argparse::
-:ref: artiq.frontend.artiq_devtool.get_argparser
-:prog: artiq_devtool

-To build and flash the firmware for ``sayma_amc_standalone`` target:
-::
-artiq_devtool -t sayma_amc_standalone build flash+log

-To build the same target, flash it to the 3rd connected board, and forward the core device ports (1380, 1381, ...) as well as logs on the serial port:
-::
-artiq_devtool -t sayma_amc_standalone -b sayma-3 build flash connect

-While the previous command is running, to build a new firmware and hotswap it, i.e. run without reflashing the board:
-::
-artiq_devtool -t sayma_amc_standalone build hotswap

-While the previous command is running, to reset a board, e.g. if it became unresponsive:
-::
-artiq_devtool -t sayma_amc_standalone reset

 Deleting git branches

@@ -3,3 +3,6 @@ graft artiq/examples
 include artiq/gui/logo*.svg
 include versioneer.py
 include artiq/_version.py
+include artiq/coredevice/coredevice_generic.schema.json
+include artiq/compiler/kernel.ld
+include artiq/afws.pem

README.rst

@@ -4,23 +4,23 @@
 .. image:: https://raw.githubusercontent.com/m-labs/artiq/master/doc/logo/artiq.png
 :target: https://m-labs.hk/artiq

-ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is the next-generation control system for quantum information experiments.
-It is maintained and developed by `M-Labs <https://m-labs.hk>`_ and the initial development was for and in partnership with the `Ion Storage Group at NIST <https://www.nist.gov/pml/time-and-frequency-division/ion-storage>`_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Several other laboratories (e.g. at the University of Oxford, the Army Research Lab, and the University of Maryland) have later adopted ARTIQ as their control system and have contributed to it.
+ARTIQ (Advanced Real-Time Infrastructure for Quantum physics) is a leading-edge control and data acquisition system for quantum information experiments.
+It is maintained and developed by `M-Labs <https://m-labs.hk>`_ and the initial development was for and in partnership with the `Ion Storage Group at NIST <https://www.nist.gov/pml/time-and-frequency-division/ion-storage>`_. ARTIQ is free software and offered to the entire research community as a solution equally applicable to other challenging control tasks, including outside the field of ion trapping. Many laboratories around the world have adopted ARTIQ as their control system, with over a hundred Sinara hardware crates deployed, and some have `contributed <https://m-labs.hk/experiment-control/funding/>`_ to it.

 The system features a high-level programming language that helps describing complex experiments, which is compiled and executed on dedicated hardware with nanosecond timing resolution and sub-microsecond latency. It includes graphical user interfaces to parametrize and schedule experiments and to visualize and explore the results.

 ARTIQ uses FPGA hardware to perform its time-critical tasks. The `Sinara hardware <https://github.com/sinara-hw>`_, and in particular the Kasli FPGA carrier, is designed to work with ARTIQ.
 ARTIQ is designed to be portable to hardware platforms from different vendors and FPGA manufacturers.
-Several different configurations of a `high-end FPGA evaluation kit <http://www.xilinx.com/products/boards-and-kits/ek-k7-kc705-g.html>`_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort.
+Several different configurations of a `FPGA evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-k7-kc705-g.html>`_ and of a `Zynq evaluation kit <https://www.xilinx.com/products/boards-and-kits/ek-z7-zc706-g.html>`_ are also used and supported. FPGA platforms can be combined with any number of additional peripherals, either already accessible from ARTIQ or made accessible with little effort.

-ARTIQ and its dependencies are available in the form of `conda packages <https://conda.anaconda.org/m-labs/label/main>`_ for both Linux and Windows.
+ARTIQ and its dependencies are available in the form of Nix packages (for Linux) and MSYS2 packages (for Windows). See `the manual <https://m-labs.hk/experiment-control/resources/>`_ for installation instructions.
 Packages containing pre-compiled binary images to be loaded onto the hardware platforms are supplied for each configuration.
 Like any open source software ARTIQ can equally be built and installed directly from `source <https://github.com/m-labs/artiq>`_.

 ARTIQ is supported by M-Labs and developed openly.
-Components, features, fixes, improvements, and extensions are funded by and developed for the partnering research groups.
+Components, features, fixes, improvements, and extensions are often `funded <https://m-labs.hk/experiment-control/funding/>`_ by and developed for the partnering research groups.

-Technologies employed include `Python <https://www.python.org/>`_, `Migen <https://github.com/m-labs/migen>`_, `MiSoC <https://github.com/m-labs/misoc>`_/`mor1kx <https://github.com/openrisc/mor1kx>`_, `LLVM <http://llvm.org/>`_/`llvmlite <https://github.com/numba/llvmlite>`_, and `Qt5 <http://www.qt.io/>`_.
+Core technologies employed include `Python <https://www.python.org/>`_, `Migen <https://github.com/m-labs/migen>`_, `Migen-AXI <https://github.com/peteut/migen-axi>`_, `Rust <https://www.rust-lang.org/>`_, `MiSoC <https://github.com/m-labs/misoc>`_/`VexRiscv <https://github.com/SpinalHDL/VexRiscv>`_, `LLVM <https://llvm.org/>`_/`llvmlite <https://github.com/numba/llvmlite>`_, and `Qt5 <https://www.qt.io/>`_.

 Website: https://m-labs.hk/artiq

@@ -29,7 +29,7 @@ Website: https://m-labs.hk/artiq
 License
 =======

-Copyright (C) 2014-2018 M-Labs Limited.
+Copyright (C) 2014-2023 M-Labs Limited.

 ARTIQ is free software: you can redistribute it and/or modify
 it under the terms of the GNU Lesser General Public License as published by

@@ -48,9 +48,10 @@ The ARTIQ manifesto
 ===================

 The free and open dissemination of methods and results is central to scientific progress.
-The ARTIQ authors, contributors, and supporters consider the free and open exchange of scientific tools to be equally important and have chosen the licensing terms of ARTIQ accordingly.
-ARTIQ, including its gateware, the firmware, and the ARTIQ tools and libraries are licensed as LGPLv3+.
-This ensures that a user of ARTIQ obtains broad rights to use, redistribute, and modify it.
+The ARTIQ and Sinara authors, contributors, and supporters consider the free and open exchange of scientific tools to be equally important and have chosen the licensing terms of ARTIQ and Sinara accordingly. ARTIQ, including its gateware, the firmware, and the ARTIQ tools and libraries are licensed as LGPLv3+. The Sinara hardware designs are licensed under CERN OHL.
+This ensures that a user of ARTIQ or Sinara hardware designs obtains broad rights to use, redistribute, study, and modify them.

 The following statements are intended to clarify the interpretation and application of the licensing terms:

 * There is no requirement to distribute any unmodified, modified, or extended versions of ARTIQ. Only when distributing ARTIQ the source needs to be made available.

@@ -3,13 +3,320 @@
 Release notes
 =============

+ARTIQ-8 (Unreleased)
+--------------------
+
+Highlights:
+
+* New hardware support:
+- Support for Shuttler, a 16-channel 125MSPS DAC card intended for ion transport.
+Waveform generator and user API are similar to the NIST PDQ.
+- Implemented Phaser-servo. This requires recent gateware on Phaser.
+- Almazny v1.2 with finer RF switch control.
+- Metlino and Sayma support has been dropped due to complications with synchronous RTIO clocking.
+- More user LEDs are exposed to RTIO on Kasli.
+- Implemented Phaser-MIQRO support. This requires the proprietary Phaser MIQRO gateware
+variant from QUARTIQ.
+- Sampler: fixed ADC MU to Volt conversion factor for Sampler v2.2+.
+For earlier hardware versions, specify the hardware version in the device
+database file (e.g. ``"hw_rev": "v2.1"``) to use the correct conversion factor.
+* Support for distributed DMA, where DMA is run directly on satellites for corresponding
+RTIO events, increasing bandwidth in scenarios with heavy satellite usage.
+* Support for subkernels, where kernels are run on satellite device CPUs to offload some
+of the processing and RTIO operations.
+* CPU (on softcore platforms) and AXI bus (on Zynq) are now clocked synchronously with the RTIO
+clock, to facilitate implementation of local processing on DRTIO satellites, and to slightly
+reduce RTIO latency.
+* Support for DRTIO-over-EEM, used with Shuttler.
+* Added channel names to RTIO error messages.
+* GUI:
+- Implemented Applet Request Interfaces which allow applets to modify datasets and set the
+current values of widgets in the dashboard's experiment windows.
+- Implemented a new EntryArea widget which allows argument entry widgets to be used in applets.
+- The "Close all applets" command (shortcut: Ctrl-Alt-W) now ignores docked applets,
+making it a convenient way to clean up after exploratory work without destroying a
+carefully arranged default workspace.
+- Hotkeys now organize experiment windows in the order they were last interacted with:
++ CTRL+SHIFT+T tiles experiment windows
++ CTRL+SHIFT+C cascades experiment windows
+* Persistent datasets are now stored in a LMDB database for improved performance.
+* Python's built-in types (such as ``float``, or ``List[...]``) can now be used in type annotations on
+kernel functions.
+* Full Python 3.10 support.
+* MSYS2 packaging for Windows, which replaces Conda. Conda packages are still available to
+support legacy installations, but may be removed in a future release.
+
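The kernel type-annotation item listed above ("Python's built-in types ... can now be used in type annotations on kernel functions") is easiest to see in a short sketch. The experiment class and method below are purely illustrative and are not part of the diff:
::

    from artiq.experiment import EnvExperiment, kernel

    class AnnotatedScaler(EnvExperiment):   # hypothetical example class
        def build(self):
            self.setattr_device("core")

        @kernel
        def scale(self, x: float, gain: float) -> float:
            # Plain built-in ``float`` annotations on kernel arguments and
            # return values are accepted in ARTIQ-8.
            return x * gain
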
+Breaking changes:
+
+* ``SimpleApplet`` now calls widget constructors with an additional ``ctl`` parameter for control
+operations, which includes dataset operations. It can be ignored if not needed. For an example usage,
+refer to the ``big_number.py`` applet.
+* ``SimpleApplet`` and ``TitleApplet`` now call ``data_changed`` with additional parameters. Derived applets
+should change the function signature as below:
+
+::
+
+# SimpleApplet
+def data_changed(self, value, metadata, persist, mods)
+# SimpleApplet (old version)
+def data_changed(self, data, mods)
+# TitleApplet
+def data_changed(self, value, metadata, persist, mods, title)
+# TitleApplet (old version)
+def data_changed(self, data, mods, title)
+
+Accesses to the data argument should be replaced as below:
+
+::
+
+data[key][0] ==> persist[key]
+data[key][1] ==> value[key]
+
+* The ``ndecimals`` parameter in ``NumberValue`` and ``Scannable`` has been renamed to ``precision``.
+Parameters after and including ``scale`` in both constructors are now keyword-only.
+Refer to the updated ``no_hardware/arguments_demo.py`` example for current usage.
+* Almazny v1.2 is incompatible with the legacy versions and is the default.
+To use legacy versions, specify ``almazny_hw_rev`` in the JSON description.
+* kasli_generic.py has been merged into kasli.py, and the demonstration designs without JSON descriptions
+have been removed. The base classes remain present in kasli.py to support third-party flows without
+JSON descriptions.
+* Legacy PYON databases should be converted to LMDB with the script below:
+
+::
+
+from sipyco import pyon
+import lmdb
+
+old = pyon.load_file("dataset_db.pyon")
+new = lmdb.open("dataset_db.mdb", subdir=False, map_size=2**30)
+with new.begin(write=True) as txn:
+    for key, value in old.items():
+        txn.put(key.encode(), pyon.encode((value, {})).encode())
+new.close()
+
+* ``artiq.wavesynth`` has been removed.
+
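To make the ``ndecimals`` to ``precision`` rename above concrete, here is a before/after sketch; the argument name and values are invented for illustration and assume the usual ``build()`` context of an ``EnvExperiment``:
::

    # ARTIQ-7 and earlier (illustrative values)
    self.setattr_argument("amplitude",
                          NumberValue(0.5, min=0.0, max=1.0, ndecimals=3))

    # ARTIQ-8: ``precision`` replaces ``ndecimals``; parameters from ``scale``
    # onwards must now be passed as keywords.
    self.setattr_argument("amplitude",
                          NumberValue(0.5, min=0.0, max=1.0, precision=3))
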
+ARTIQ-7
+-------
+
+Highlights:
+
+* New hardware support:
+- Kasli-SoC, a new EEM carrier based on a Zynq SoC, enabling much faster kernel execution
+(see: https://arxiv.org/abs/2111.15290).
+- DRTIO support on Zynq-based devices (Kasli-SoC and ZC706).
+- DRTIO support on KC705.
+- HVAMP_8CH 8 channel HV amplifier for Fastino / Zotinos
+- Almazny mezzanine board for Mirny
+- Phaser: improved documentation, exposed the DAC coarse mixer and ``sif_sync``, exposed upconverter calibration
+and enabling/disabling of upconverter LO & RF outputs, added helpers to align Phaser updates to the
+RTIO timeline (``get_next_frame_mu()``).
+- Urukul: ``get()``, ``get_mu()``, ``get_att()``, and ``get_att_mu()`` functions added for AD9910 and AD9912.
+* Softcore targets now use the RISC-V architecture (VexRiscv) instead of OR1K (mor1kx).
+* Gateware FPU is supported on KC705 and Kasli 2.0.
+* Faster compilation for large arrays/lists.
+* Faster exception handling.
+* Several exception handling bugs fixed.
+* Support for a simpler shared library system with faster calls into the runtime. This is only used by the NAC3
+compiler (nac3ld) and improves RTIO output performance (test_pulse_rate) by 9-10%.
+* Moninj improvements:
+- Urukul monitoring and frequency setting (through dashboard) is now supported.
+- Core device moninj is now proxied via the ``aqctl_moninj_proxy`` controller.
+* The configuration entry ``rtio_clock`` supports multiple clocking settings, deprecating the usage
+of compile-time options.
+* Added support for 100MHz RTIO clock in DRTIO.
+* Previously detected RTIO async errors are reported to the host after each kernel terminates and a
+warning is logged. The warning is additional to the one already printed in the core device log
+immediately upon detection of the error.
+* Extended Kasli gateware JSON description with configuration for SPI over DIO.
+* TTL outputs can be now configured to work as a clock generator from the JSON.
+* On Kasli, the number of FIFO lanes in the scalable events dispatcher (SED) can now be configured in
+the JSON.
+* ``artiq_ddb_template`` generates edge-counter keys that start with the key of the corresponding
+TTL device (e.g. ``ttl_0_counter`` for the edge counter on TTL device ``ttl_0``).
+* ``artiq_master`` now has an ``--experiment-subdir`` option to scan only a subdirectory of the
+repository when building the list of experiments.
+* Experiments can now be submitted by-content.
+* The master can now optionally log all experiments submitted into a CSV file.
+* Removed worker DB warning for writing a dataset that is also in the archive.
+* Experiments can now call ``scheduler.check_termination()`` to test if the user
+has requested graceful termination.
+* ARTIQ command-line programs and controllers now exit cleanly on Ctrl-C.
+* ``artiq_coremgmt reboot`` now reloads gateware as well, providing a more thorough and reliable
+device reset (7-series FPGAs only).
+* Firmware and gateware can now be built on-demand on the M-Labs server using ``afws_client``
+(subscribers only). Self-compilation remains possible.
+* Easier-to-use packaging via Nix Flakes.
+* Python 3.10 support (experimental).
+
Breaking changes:

* Due to the new RISC-V CPU, the device database entry for the core device needs to be updated.
  The ``target`` parameter needs to be set to ``rv32ima`` for Kasli 1.x and to ``rv32g`` for all
  other boards (see the sketch below). Freshly generated device database templates already contain this update.
* Updated the Phaser-Upconverter default frequency to 2.875 GHz. The new default uses the target PFD
  frequency of the hardware design.
* ``Phaser.init()`` now disables all Kasli oscillators. This avoids full-power RF output being
  generated for some configurations.
* Phaser: fixed the coarse mixer frequency configuration.
* Mirny: added extra delays in ``ADF5356.sync()``. This avoids the need for an extra delay before
  calling ``ADF5356.init()``.
* The deprecated ``set_dataset(..., save=...)`` is no longer supported.
* The ``PCA9548`` I2C switch class was renamed to ``I2CSwitch``, to accommodate support for the PCA9547
  and possibly other switches in the future. Readback has been removed, and now only one channel per
  switch is supported.

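For illustration, a core device entry with the new ``target`` parameter might look like the fragment below. This is a sketch only; the host address and the other argument values are placeholders, not prescribed by the release notes. ::

    # device_db.py fragment (sketch)
    device_db = {
        "core": {
            "type": "local",
            "module": "artiq.coredevice.core",
            "class": "Core",
            "arguments": {
                "host": "192.168.1.70",   # placeholder core device address
                "ref_period": 1e-9,
                "target": "rv32ima",      # use "rv32g" on boards other than Kasli 1.x
            },
        },
    }
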

ARTIQ-6
-------

Highlights:

* New hardware support:
  - Phaser, a quad-channel 1 GS/s RF generator card with dual IQ upconverter, dual 5 MS/s
    ADC and FPGA.
  - Zynq SoC core device (ZC706), enabling kernels to run on a 1 GHz CPU core with a floating-point
    unit for faster computations. This currently requires an external
    repository (https://git.m-labs.hk/m-labs/artiq-zynq).
  - Mirny, a 4-channel wide-band PLL/VCO-based microwave frequency synthesiser.
  - Fastino, a 32-channel, 3 MS/s per channel, 16-bit DAC EEM.
  - Kasli 2.0, an improved core device with 12 built-in EEM slots, faster FPGA, 4 SFPs, and
    high-precision clock recovery circuitry for DRTIO (to be supported in ARTIQ-7).
* ARTIQ Python (core device kernels):
  - Multidimensional arrays are now available on the core device, using NumPy syntax
    (see the kernel sketch after this group).
    Elementwise operations (e.g. ``+``, ``/``), matrix multiplication (``@``) and
    multidimensional indexing are supported; slices and views are not yet.
  - Trigonometric and other common math functions from NumPy are now available on the
    core device (e.g. ``numpy.sin``), both for scalar arguments and implicitly
    broadcast across multidimensional arrays.
  - Failed assertions now raise ``AssertionError``\ s instead of aborting kernel
    execution.
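
The following is a minimal kernel sketch of these points, assuming a standard core device and that array literals are constructed via ``np.array``; it is illustrative only, not taken from the codebase. ::

    from artiq.experiment import *
    import numpy as np


    class KernelArrayDemo(EnvExperiment):
        """Minimal sketch of NumPy-style arrays in a kernel."""
        def build(self):
            self.setattr_device("core")

        @kernel
        def run(self):
            a = np.array([[1.0, 2.0], [3.0, 4.0]])
            b = np.sin(a)              # broadcast elementwise over the array
            c = a @ b                  # matrix multiplication
            assert c[0, 0] > 0.0       # failed assertions raise AssertionError
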
* Performance improvements:
  - SERDES TTL inputs can now detect edges on pulses that are shorter
    than the RTIO period (https://github.com/m-labs/artiq/issues/1432).
  - Improved performance for kernel RPCs involving lists and arrays.
* Core device SI-to-mu conversions now always return valid codes, or raise a ``ValueError``.
* Zotino now exposes ``voltage_to_mu()``.
* ``ad9910``:
  - The maximum amplitude scale factor is now ``0x3fff`` (was ``0x3ffe`` before).
  - The default single-tone profile is now 7 (was 0).
  - Added an option to ``set_mu()`` that affects the ASF, FTW and POW registers
    instead of the single-tone profile register.
* Mirny now supports hardware-revision-independent, human-readable ``clk_sel`` parameters:
  ``"XO"``, ``"SMA"``, and ``"MMCX"``. Passing an integer remains backwards compatible.
  (A device database fragment is sketched below.)
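
A sketch of how the human-readable ``clk_sel`` value might appear in a device database; the device key and the SPI bus name are placeholders, not prescribed here. ::

    # device_db.py fragment (sketch)
    device_db.update({
        "mirny0_cpld": {
            "type": "local",
            "module": "artiq.coredevice.mirny",
            "class": "Mirny",
            "arguments": {
                "spi_device": "spi_mirny0",  # placeholder SPI bus device
                "clk_sel": "SMA",            # previously an integer index
            },
        },
    })
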
* Dashboard:
  - Applets now restart if they are running and a ccb call changes their spec.
  - A "Quick Open" dialog to open experiments by typing part of their name can
    be brought up with Ctrl-P (Ctrl+Return immediately submits the selected entry
    with the default arguments).
  - The Applets dock now has a context menu command to quickly close all open
    applets (shortcut: Ctrl-Alt-W).
* Experiment results are now always saved to HDF5, even if ``run()`` fails.
* Core device: ``panic_reset 1`` now correctly resets the kernel CPU as well if a
  communication CPU panic occurs.
* ``NumberValue`` accepts a ``type`` parameter specifying the output as ``int`` or ``float``
  (see the example below).
* A ``--identifier-str`` parameter has been added to many targets to aid
  with reproducible builds.
* Python 3.7 support in Conda packages.
* ``kasli_generic`` JSON descriptions are now validated against a
  schema. Description defaults have moved from Python to the
  schema. A warning is issued if the ARTIQ version is too old.

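For example, an integer-valued argument could be declared as below; the argument name and default are illustrative, not taken from the codebase. ::

    from artiq.experiment import *


    class TypedArgumentDemo(EnvExperiment):
        def build(self):
            # "int" makes the argument come back as a Python int instead of a
            # float; "float" (or the default "auto") keeps the previous behaviour.
            self.setattr_argument("n_shots",
                                  NumberValue(100, step=1, ndecimals=0, type="int"))

        def run(self):
            print(self.n_shots)
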
Breaking changes:

* ``artiq_netboot`` has been moved to its own repository at
  https://git.m-labs.hk/m-labs/artiq-netboot.
* Core device watchdogs have been removed.
* The ARTIQ compiler now implements arrays following NumPy semantics, rather than as a
  thin veneer around lists. Most prior use cases of NumPy arrays in kernels should work
  unchanged with the new implementation, but the behavior might differ slightly in some
  cases (for instance, non-rectangular arrays are not currently supported).
* ``quamash`` has been replaced with ``qasync``.
* Protocols are updated to use device endianness.
* The analyzer dump format includes a byte for device endianness.
* To support variable numbers of Urukul cards in the future, the
  ``artiq.coredevice.suservo.SUServo`` constructor now accepts two device name lists,
  ``cpld_devices`` and ``dds_devices``, rather than four individual arguments (see the
  device database sketch below).
* Experiment classes with underscore-prefixed names are now ignored when ``artiq_client``
  determines which experiment to submit (consistent with ``artiq_run``).
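
A sketch of what a migrated ``SUServo`` device database entry might look like. Only ``cpld_devices`` and ``dds_devices`` are named by the release notes; the channel number and the other argument names and values are illustrative guesses. ::

    # device_db.py fragment (sketch)
    device_db.update({
        "suservo0": {
            "type": "local",
            "module": "artiq.coredevice.suservo",
            "class": "SUServo",
            "arguments": {
                "channel": 0x000010,                      # placeholder RTIO channel
                "pgia_device": "spi_sampler0_pgia",       # placeholder device name
                "cpld_devices": ["urukul0_cpld", "urukul1_cpld"],
                "dds_devices": ["urukul0_dds", "urukul1_dds"],
            },
        },
    })
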

ARTIQ-5
-------

Highlights:

* Performance improvements:
  - Faster RTIO event submission (1.5x improvement in pulse rate test).
    See: https://github.com/m-labs/artiq/issues/636
  - Faster compilation times (3 seconds saved on kernel compilation time on a typical
    medium-size experiment).
    See: https://github.com/m-labs/artiq/commit/611bcc4db4ed604a32d9678623617cd50e968cbf
* Improved packaging and build system:
  - new continuous integration/delivery infrastructure based on Nix and Hydra,
    providing reproducibility, speed and independence.
  - rolling release process (https://github.com/m-labs/artiq/issues/1326).
  - firmware, gateware and device database templates are automatically built for all
    supported Kasli variants.
  - new JSON description format for generic Kasli systems.
  - Nix packages are now supported.
  - many Conda problems worked around.
  - controllers are now out-of-tree.
  - split packages that enable lightweight applications that communicate with ARTIQ,
    e.g. controllers running on non-x86 single-board computers.
* Improved Urukul support:
  - AD9910 RAM mode.
  - Configurable refclk divider and PLL bypass.
  - More reliable phase synchronization at high sample rates.
  - Synchronization calibration data can be read from EEPROM.
* A gateware-level input edge counter has been added, which offers higher
  throughput and increased flexibility over the usual TTL input PHYs where
  edge timestamps are not required. See ``artiq.coredevice.edge_counter`` for
  the core device driver and ``artiq.gateware.rtio.phy.edge_counter``/
  ``artiq.gateware.eem.DIO.add_std`` for the gateware components, and the usage
  sketch below.
* With DRTIO, Siphaser uses a better calibration mechanism.
  See: https://github.com/m-labs/artiq/commit/cc58318500ecfa537abf24127f2c22e8fe66e0f8
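
A minimal usage sketch of the edge counter driver follows, assuming an edge-counter device named ``ttl0_counter``; the device name and gate duration are placeholders. ::

    from artiq.experiment import *


    class EdgeCountDemo(EnvExperiment):
        """Minimal sketch: count rising edges with the gateware edge counter."""
        def build(self):
            self.setattr_device("core")
            self.setattr_device("ttl0_counter")  # placeholder edge-counter device

        @kernel
        def run(self):
            self.core.reset()
            # Open a 10 ms counting gate, then read the number of rising edges.
            self.ttl0_counter.gate_rising(10*ms)
            n = self.ttl0_counter.fetch_count()
            print(n)
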
* Schedule updates can be sent to InfluxDB (``artiq_influxdb_schedule``).
* Experiments can now programmatically set their default pipeline, priority, and flush flag.
* List datasets can now be efficiently appended to from experiments using
  ``artiq.language.environment.HasEnvironment.append_to_dataset`` (see the sketch below).
* The core device now supports IPv6.
* To make development easier, the bootloader can receive firmware and secondary FPGA
  gateware from the network.
* Python 3.7 compatibility (Nix and source builds only, no Conda).
* Various other bugs from 4.0 fixed.
* Preliminary Sayma v2 and Metlino hardware support.
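
A minimal sketch of incremental dataset appends; the dataset name and values are illustrative. ::

    from artiq.experiment import *


    class AppendDemo(EnvExperiment):
        """Minimal sketch: build up a list dataset incrementally."""
        def build(self):
            self.setattr_device("core")

        def run(self):
            self.set_dataset("trace", [], broadcast=True)
            for i in range(100):
                self.append_to_dataset("trace", i * 0.5)
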

Breaking changes:

* The ``artiq.coredevice.ad9910.AD9910`` and
  ``artiq.coredevice.ad9914.AD9914`` phase reference timestamp parameters
  have been renamed to ``ref_time_mu`` for consistency, as they are in machine
  units.
* The controller manager now ignores device database entries without the
  ``command`` key set, to facilitate sharing of devices between multiple
  masters (see the controller entry sketch below).
* The meaning of the ``-d/--dir`` and ``--srcbuild`` options of ``artiq_flash``
  has changed.
* Controllers for third-party devices are now out-of-tree.
* ``aqctl_corelog`` now filters log messages below the ``WARNING`` level by default.
  This behavior can be changed using the ``-v`` and ``-q`` options, like the other
  programs.
* On Kasli, the firmware now starts with a unique default MAC address
  from EEPROM if ``mac`` is absent from the flash config.
* The ``-e/--experiment`` switch of ``artiq_run`` and ``artiq_compile``
  has been renamed ``-c/--class-name``.
* ``artiq_devtool`` has been removed.
* Much of ``artiq.protocols`` has been moved to a separate package, ``sipyco``.
  ``artiq_rpctool`` has been renamed to ``sipyco_rpctool``.
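
For illustration, a controller entry that the controller manager will start (because it has a ``command`` key) might look like the fragment below; the specific controller, host and port are placeholders. ::

    # device_db.py fragment (sketch). Without the "command" key, the
    # controller manager ignores the entry, e.g. when another master owns it.
    device_db.update({
        "lda": {
            "type": "controller",
            "host": "::1",
            "port": 3253,
            "command": "aqctl_lda -p {port} --bind {bind} --simulation",
        },
    })
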

ARTIQ-4
-------

4.0
***

* The ``artiq.coredevice.ttl`` drivers no longer track the timestamps of
  submitted events in software, requiring the user to explicitly specify the
  timeout for ``count()``/``timestamp_mu()``. Support for ``sync()`` has been dropped.

@ -67,6 +374,11 @@ ARTIQ-4
  clocks dynamically (i.e. without device restart) is no longer supported.
* ``set_dataset(..., save=True)`` has been renamed
  ``set_dataset(..., archive=True)``.
* On the AD9914 DDS, when switching to ``PHASE_MODE_CONTINUOUS`` from another mode,
  use the returned value of the last ``set_mu`` call as the phase offset for
  ``PHASE_MODE_CONTINUOUS`` to avoid a phase discontinuity (see the sketch below). This is no longer done
  automatically. If one phase glitch when entering ``PHASE_MODE_CONTINUOUS`` is not
  an issue, this recommendation can be ignored.
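
A rough sketch of the recommended hand-over; the device name, frequency, and the exact ``set_mu`` arguments are assumptions, and only the idea of passing the previously returned phase offset word back in when switching to ``PHASE_MODE_CONTINUOUS`` is taken from the note above. ::

    from artiq.experiment import *
    from artiq.coredevice.ad9914 import PHASE_MODE_TRACKING, PHASE_MODE_CONTINUOUS


    class PhaseHandover(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("dds0")  # placeholder AD9914 channel

        @kernel
        def run(self):
            self.core.reset()
            ftw = self.dds0.frequency_to_ftw(100*MHz)
            # set_mu() returns the phase offset word it programmed; keep it.
            pow_ = self.dds0.set_mu(ftw, 0, PHASE_MODE_TRACKING)
            delay(1*ms)
            # Reuse that offset when switching to continuous mode to avoid a
            # phase discontinuity (this is no longer done automatically).
            self.dds0.set_mu(ftw, pow_, PHASE_MODE_CONTINUOUS)
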


ARTIQ-3

@ -1,11 +1,7 @@
-from ._version import get_versions
+from ._version import get_version
-__version__ = get_versions()['version']
+__version__ = get_version()
-del get_versions
+del get_version
 
 import os
 __artiq_dir__ = os.path.dirname(os.path.abspath(__file__))
 del os
-
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
@ -1,520 +1,7 @@
|
||||||
|
|
||||||
# This file helps to compute a version number in source trees obtained from
|
|
||||||
# git-archive tarball (such as those provided by githubs download-from-tag
|
|
||||||
# feature). Distribution tarballs (built by setup.py sdist) and build
|
|
||||||
# directories (produced by setup.py build) will contain a much shorter file
|
|
||||||
# that just contains the computed version number.
|
|
||||||
|
|
||||||
# This file is released into the public domain. Generated by
|
|
||||||
# versioneer-0.18 (https://github.com/warner/python-versioneer)
|
|
||||||
|
|
||||||
"""Git implementation of _version.py."""
|
|
||||||
|
|
||||||
import errno
|
|
||||||
import os
|
import os
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
def get_rev():
|
||||||
|
return os.getenv("VERSIONEER_REV", default="unknown")
|
||||||
|
|
||||||
def get_keywords():
|
def get_version():
|
||||||
"""Get the keywords needed to look up the version information."""
|
return os.getenv("VERSIONEER_OVERRIDE", default="8.0+unknown.beta")
|
||||||
# these strings will be replaced by git during git-archive.
|
|
||||||
# setup.py/versioneer.py will grep for the variable names, so they must
|
|
||||||
# each be defined on a line of their own. _version.py will just call
|
|
||||||
# get_keywords().
|
|
||||||
git_refnames = "$Format:%d$"
|
|
||||||
git_full = "$Format:%H$"
|
|
||||||
git_date = "$Format:%ci$"
|
|
||||||
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
|
|
||||||
return keywords
|
|
||||||
|
|
||||||
|
|
||||||
class VersioneerConfig:
|
|
||||||
"""Container for Versioneer configuration parameters."""
|
|
||||||
|
|
||||||
|
|
||||||
def get_config():
|
|
||||||
"""Create, populate and return the VersioneerConfig() object."""
|
|
||||||
# these strings are filled in when 'setup.py versioneer' creates
|
|
||||||
# _version.py
|
|
||||||
cfg = VersioneerConfig()
|
|
||||||
cfg.VCS = "git"
|
|
||||||
cfg.style = "pep440"
|
|
||||||
cfg.tag_prefix = ""
|
|
||||||
cfg.parentdir_prefix = "artiq-"
|
|
||||||
cfg.versionfile_source = "artiq/_version.py"
|
|
||||||
cfg.verbose = False
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
|
|
||||||
class NotThisMethod(Exception):
|
|
||||||
"""Exception raised if a method is not valid for the current scenario."""
|
|
||||||
|
|
||||||
|
|
||||||
LONG_VERSION_PY = {}
|
|
||||||
HANDLERS = {}
|
|
||||||
|
|
||||||
|
|
||||||
def register_vcs_handler(vcs, method): # decorator
|
|
||||||
"""Decorator to mark a method as the handler for a particular VCS."""
|
|
||||||
def decorate(f):
|
|
||||||
"""Store f in HANDLERS[vcs][method]."""
|
|
||||||
if vcs not in HANDLERS:
|
|
||||||
HANDLERS[vcs] = {}
|
|
||||||
HANDLERS[vcs][method] = f
|
|
||||||
return f
|
|
||||||
return decorate
|
|
||||||
|
|
||||||
|
|
||||||
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
|
|
||||||
env=None):
|
|
||||||
"""Call the given command(s)."""
|
|
||||||
assert isinstance(commands, list)
|
|
||||||
p = None
|
|
||||||
for c in commands:
|
|
||||||
try:
|
|
||||||
dispcmd = str([c] + args)
|
|
||||||
# remember shell=False, so use git.cmd on windows, not just git
|
|
||||||
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
|
|
||||||
stdout=subprocess.PIPE,
|
|
||||||
stderr=(subprocess.PIPE if hide_stderr
|
|
||||||
else None))
|
|
||||||
break
|
|
||||||
except EnvironmentError:
|
|
||||||
e = sys.exc_info()[1]
|
|
||||||
if e.errno == errno.ENOENT:
|
|
||||||
continue
|
|
||||||
if verbose:
|
|
||||||
print("unable to run %s" % dispcmd)
|
|
||||||
print(e)
|
|
||||||
return None, None
|
|
||||||
else:
|
|
||||||
if verbose:
|
|
||||||
print("unable to find command, tried %s" % (commands,))
|
|
||||||
return None, None
|
|
||||||
stdout = p.communicate()[0].strip()
|
|
||||||
if sys.version_info[0] >= 3:
|
|
||||||
stdout = stdout.decode()
|
|
||||||
if p.returncode != 0:
|
|
||||||
if verbose:
|
|
||||||
print("unable to run %s (error)" % dispcmd)
|
|
||||||
print("stdout was %s" % stdout)
|
|
||||||
return None, p.returncode
|
|
||||||
return stdout, p.returncode
|
|
||||||
|
|
||||||
|
|
||||||
def versions_from_parentdir(parentdir_prefix, root, verbose):
|
|
||||||
"""Try to determine the version from the parent directory name.
|
|
||||||
|
|
||||||
Source tarballs conventionally unpack into a directory that includes both
|
|
||||||
the project name and a version string. We will also support searching up
|
|
||||||
two directory levels for an appropriately named parent directory
|
|
||||||
"""
|
|
||||||
rootdirs = []
|
|
||||||
|
|
||||||
for i in range(3):
|
|
||||||
dirname = os.path.basename(root)
|
|
||||||
if dirname.startswith(parentdir_prefix):
|
|
||||||
return {"version": dirname[len(parentdir_prefix):],
|
|
||||||
"full-revisionid": None,
|
|
||||||
"dirty": False, "error": None, "date": None}
|
|
||||||
else:
|
|
||||||
rootdirs.append(root)
|
|
||||||
root = os.path.dirname(root) # up a level
|
|
||||||
|
|
||||||
if verbose:
|
|
||||||
print("Tried directories %s but none started with prefix %s" %
|
|
||||||
(str(rootdirs), parentdir_prefix))
|
|
||||||
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
|
|
||||||
|
|
||||||
|
|
||||||
@register_vcs_handler("git", "get_keywords")
|
|
||||||
def git_get_keywords(versionfile_abs):
|
|
||||||
"""Extract version information from the given file."""
|
|
||||||
# the code embedded in _version.py can just fetch the value of these
|
|
||||||
# keywords. When used from setup.py, we don't want to import _version.py,
|
|
||||||
# so we do it with a regexp instead. This function is not used from
|
|
||||||
# _version.py.
|
|
||||||
keywords = {}
|
|
||||||
try:
|
|
||||||
f = open(versionfile_abs, "r")
|
|
||||||
for line in f.readlines():
|
|
||||||
if line.strip().startswith("git_refnames ="):
|
|
||||||
mo = re.search(r'=\s*"(.*)"', line)
|
|
||||||
if mo:
|
|
||||||
keywords["refnames"] = mo.group(1)
|
|
||||||
if line.strip().startswith("git_full ="):
|
|
||||||
mo = re.search(r'=\s*"(.*)"', line)
|
|
||||||
if mo:
|
|
||||||
keywords["full"] = mo.group(1)
|
|
||||||
if line.strip().startswith("git_date ="):
|
|
||||||
mo = re.search(r'=\s*"(.*)"', line)
|
|
||||||
if mo:
|
|
||||||
keywords["date"] = mo.group(1)
|
|
||||||
f.close()
|
|
||||||
except EnvironmentError:
|
|
||||||
pass
|
|
||||||
return keywords
|
|
||||||
|
|
||||||
|
|
||||||
@register_vcs_handler("git", "keywords")
|
|
||||||
def git_versions_from_keywords(keywords, tag_prefix, verbose):
|
|
||||||
"""Get version information from git keywords."""
|
|
||||||
if not keywords:
|
|
||||||
raise NotThisMethod("no keywords at all, weird")
|
|
||||||
date = keywords.get("date")
|
|
||||||
if date is not None:
|
|
||||||
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
|
|
||||||
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
|
|
||||||
# -like" string, which we must then edit to make compliant), because
|
|
||||||
# it's been around since git-1.5.3, and it's too difficult to
|
|
||||||
# discover which version we're using, or to work around using an
|
|
||||||
# older one.
|
|
||||||
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
|
|
||||||
refnames = keywords["refnames"].strip()
|
|
||||||
if refnames.startswith("$Format"):
|
|
||||||
if verbose:
|
|
||||||
print("keywords are unexpanded, not using")
|
|
||||||
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
|
|
||||||
refs = set([r.strip() for r in refnames.strip("()").split(",")])
|
|
||||||
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
|
|
||||||
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
|
|
||||||
TAG = "tag: "
|
|
||||||
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
|
|
||||||
if not tags:
|
|
||||||
# Either we're using git < 1.8.3, or there really are no tags. We use
|
|
||||||
# a heuristic: assume all version tags have a digit. The old git %d
|
|
||||||
# expansion behaves like git log --decorate=short and strips out the
|
|
||||||
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
|
|
||||||
# between branches and tags. By ignoring refnames without digits, we
|
|
||||||
# filter out many common branch names like "release" and
|
|
||||||
# "stabilization", as well as "HEAD" and "master".
|
|
||||||
tags = set([r for r in refs if re.search(r'\d', r)])
|
|
||||||
if verbose:
|
|
||||||
print("discarding '%s', no digits" % ",".join(refs - tags))
|
|
||||||
if verbose:
|
|
||||||
print("likely tags: %s" % ",".join(sorted(tags)))
|
|
||||||
for ref in sorted(tags):
|
|
||||||
# sorting will prefer e.g. "2.0" over "2.0rc1"
|
|
||||||
if ref.startswith(tag_prefix):
|
|
||||||
r = ref[len(tag_prefix):]
|
|
||||||
if verbose:
|
|
||||||
print("picking %s" % r)
|
|
||||||
return {"version": r,
|
|
||||||
"full-revisionid": keywords["full"].strip(),
|
|
||||||
"dirty": False, "error": None,
|
|
||||||
"date": date}
|
|
||||||
# no suitable tags, so version is "0+unknown", but full hex is still there
|
|
||||||
if verbose:
|
|
||||||
print("no suitable tags, using unknown + full revision id")
|
|
||||||
return {"version": "0+unknown",
|
|
||||||
"full-revisionid": keywords["full"].strip(),
|
|
||||||
"dirty": False, "error": "no suitable tags", "date": None}
|
|
||||||
|
|
||||||
|
|
||||||
@register_vcs_handler("git", "pieces_from_vcs")
|
|
||||||
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
|
|
||||||
"""Get version from 'git describe' in the root of the source tree.
|
|
||||||
|
|
||||||
This only gets called if the git-archive 'subst' keywords were *not*
|
|
||||||
expanded, and _version.py hasn't already been rewritten with a short
|
|
||||||
version string, meaning we're inside a checked out source tree.
|
|
||||||
"""
|
|
||||||
GITS = ["git"]
|
|
||||||
if sys.platform == "win32":
|
|
||||||
GITS = ["git.cmd", "git.exe"]
|
|
||||||
|
|
||||||
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
|
|
||||||
hide_stderr=True)
|
|
||||||
if rc != 0:
|
|
||||||
if verbose:
|
|
||||||
print("Directory %s not under git control" % root)
|
|
||||||
raise NotThisMethod("'git rev-parse --git-dir' returned error")
|
|
||||||
|
|
||||||
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
|
|
||||||
# if there isn't one, this yields HEX[-dirty] (no NUM)
|
|
||||||
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
|
|
||||||
"--always", "--long", "--abbrev=8",
|
|
||||||
"--match", "%s*" % tag_prefix],
|
|
||||||
cwd=root)
|
|
||||||
# --long was added in git-1.5.5
|
|
||||||
if describe_out is None:
|
|
||||||
raise NotThisMethod("'git describe' failed")
|
|
||||||
describe_out = describe_out.strip()
|
|
||||||
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
|
|
||||||
if full_out is None:
|
|
||||||
raise NotThisMethod("'git rev-parse' failed")
|
|
||||||
full_out = full_out.strip()
|
|
||||||
|
|
||||||
pieces = {}
|
|
||||||
pieces["long"] = full_out
|
|
||||||
pieces["short"] = full_out[:8] # maybe improved later
|
|
||||||
pieces["error"] = None
|
|
||||||
|
|
||||||
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
|
|
||||||
# TAG might have hyphens.
|
|
||||||
git_describe = describe_out
|
|
||||||
|
|
||||||
# look for -dirty suffix
|
|
||||||
dirty = git_describe.endswith("-dirty")
|
|
||||||
pieces["dirty"] = dirty
|
|
||||||
if dirty:
|
|
||||||
git_describe = git_describe[:git_describe.rindex("-dirty")]
|
|
||||||
|
|
||||||
# now we have TAG-NUM-gHEX or HEX
|
|
||||||
|
|
||||||
if "-" in git_describe:
|
|
||||||
# TAG-NUM-gHEX
|
|
||||||
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
|
|
||||||
if not mo:
|
|
||||||
# unparseable. Maybe git-describe is misbehaving?
|
|
||||||
pieces["error"] = ("unable to parse git-describe output: '%s'"
|
|
||||||
% describe_out)
|
|
||||||
return pieces
|
|
||||||
|
|
||||||
# tag
|
|
||||||
full_tag = mo.group(1)
|
|
||||||
if not full_tag.startswith(tag_prefix):
|
|
||||||
if verbose:
|
|
||||||
fmt = "tag '%s' doesn't start with prefix '%s'"
|
|
||||||
print(fmt % (full_tag, tag_prefix))
|
|
||||||
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
|
|
||||||
% (full_tag, tag_prefix))
|
|
||||||
return pieces
|
|
||||||
pieces["closest-tag"] = full_tag[len(tag_prefix):]
|
|
||||||
|
|
||||||
# distance: number of commits since tag
|
|
||||||
pieces["distance"] = int(mo.group(2))
|
|
||||||
|
|
||||||
# commit: short hex revision ID
|
|
||||||
pieces["short"] = mo.group(3)
|
|
||||||
|
|
||||||
else:
|
|
||||||
# HEX: no tags
|
|
||||||
pieces["closest-tag"] = None
|
|
||||||
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
|
|
||||||
cwd=root)
|
|
||||||
pieces["distance"] = int(count_out) # total number of commits
|
|
||||||
|
|
||||||
# commit date: see ISO-8601 comment in git_versions_from_keywords()
|
|
||||||
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
|
|
||||||
cwd=root)[0].strip()
|
|
||||||
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
|
|
||||||
|
|
||||||
return pieces
|
|
||||||
|
|
||||||
|
|
||||||
def plus_or_dot(pieces):
|
|
||||||
"""Return a + if we don't already have one, else return a ."""
|
|
||||||
if "+" in pieces.get("closest-tag", ""):
|
|
||||||
return "."
|
|
||||||
return "+"
|
|
||||||
|
|
||||||
|
|
||||||
def render_pep440(pieces):
|
|
||||||
"""Build up version string, with post-release "local version identifier".
|
|
||||||
|
|
||||||
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
|
|
||||||
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
if pieces["distance"] or pieces["dirty"]:
|
|
||||||
rendered += plus_or_dot(pieces)
|
|
||||||
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dirty"
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
|
|
||||||
pieces["short"])
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dirty"
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render_pep440_pre(pieces):
|
|
||||||
"""TAG[.post.devDISTANCE] -- No -dirty.
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. 0.post.devDISTANCE
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
if pieces["distance"]:
|
|
||||||
rendered += ".post.dev%d" % pieces["distance"]
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = "0.post.dev%d" % pieces["distance"]
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render_pep440_post(pieces):
|
|
||||||
"""TAG[.postDISTANCE[.dev0]+gHEX] .
|
|
||||||
|
|
||||||
The ".dev0" means dirty. Note that .dev0 sorts backwards
|
|
||||||
(a dirty tree will appear "older" than the corresponding clean one),
|
|
||||||
but you shouldn't be releasing software with -dirty anyways.
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. 0.postDISTANCE[.dev0]
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
if pieces["distance"] or pieces["dirty"]:
|
|
||||||
rendered += ".post%d" % pieces["distance"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dev0"
|
|
||||||
rendered += plus_or_dot(pieces)
|
|
||||||
rendered += "g%s" % pieces["short"]
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = "0.post%d" % pieces["distance"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dev0"
|
|
||||||
rendered += "+g%s" % pieces["short"]
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render_pep440_old(pieces):
|
|
||||||
"""TAG[.postDISTANCE[.dev0]] .
|
|
||||||
|
|
||||||
The ".dev0" means dirty.
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. 0.postDISTANCE[.dev0]
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
if pieces["distance"] or pieces["dirty"]:
|
|
||||||
rendered += ".post%d" % pieces["distance"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dev0"
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = "0.post%d" % pieces["distance"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += ".dev0"
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render_git_describe(pieces):
|
|
||||||
"""TAG[-DISTANCE-gHEX][-dirty].
|
|
||||||
|
|
||||||
Like 'git describe --tags --dirty --always'.
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. HEX[-dirty] (note: no 'g' prefix)
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
if pieces["distance"]:
|
|
||||||
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = pieces["short"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += "-dirty"
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render_git_describe_long(pieces):
|
|
||||||
"""TAG-DISTANCE-gHEX[-dirty].
|
|
||||||
|
|
||||||
Like 'git describe --tags --dirty --always --long'.
|
|
||||||
The distance/hash is unconditional.
|
|
||||||
|
|
||||||
Exceptions:
|
|
||||||
1: no tags. HEX[-dirty] (note: no 'g' prefix)
|
|
||||||
"""
|
|
||||||
if pieces["closest-tag"]:
|
|
||||||
rendered = pieces["closest-tag"]
|
|
||||||
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
|
|
||||||
else:
|
|
||||||
# exception #1
|
|
||||||
rendered = pieces["short"]
|
|
||||||
if pieces["dirty"]:
|
|
||||||
rendered += "-dirty"
|
|
||||||
return rendered
|
|
||||||
|
|
||||||
|
|
||||||
def render(pieces, style):
|
|
||||||
"""Render the given version pieces into the requested style."""
|
|
||||||
if pieces["error"]:
|
|
||||||
return {"version": "unknown",
|
|
||||||
"full-revisionid": pieces.get("long"),
|
|
||||||
"dirty": None,
|
|
||||||
"error": pieces["error"],
|
|
||||||
"date": None}
|
|
||||||
|
|
||||||
if not style or style == "default":
|
|
||||||
style = "pep440" # the default
|
|
||||||
|
|
||||||
if style == "pep440":
|
|
||||||
rendered = render_pep440(pieces)
|
|
||||||
elif style == "pep440-pre":
|
|
||||||
rendered = render_pep440_pre(pieces)
|
|
||||||
elif style == "pep440-post":
|
|
||||||
rendered = render_pep440_post(pieces)
|
|
||||||
elif style == "pep440-old":
|
|
||||||
rendered = render_pep440_old(pieces)
|
|
||||||
elif style == "git-describe":
|
|
||||||
rendered = render_git_describe(pieces)
|
|
||||||
elif style == "git-describe-long":
|
|
||||||
rendered = render_git_describe_long(pieces)
|
|
||||||
else:
|
|
||||||
raise ValueError("unknown style '%s'" % style)
|
|
||||||
|
|
||||||
return {"version": rendered, "full-revisionid": pieces["long"],
|
|
||||||
"dirty": pieces["dirty"], "error": None,
|
|
||||||
"date": pieces.get("date")}
|
|
||||||
|
|
||||||
|
|
||||||
def get_versions():
|
|
||||||
"""Get version information or return default if unable to do so."""
|
|
||||||
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
|
|
||||||
# __file__, we can work backwards from there to the root. Some
|
|
||||||
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
|
|
||||||
# case we can only use expanded keywords.
|
|
||||||
|
|
||||||
cfg = get_config()
|
|
||||||
verbose = cfg.verbose
|
|
||||||
|
|
||||||
try:
|
|
||||||
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
|
|
||||||
verbose)
|
|
||||||
except NotThisMethod:
|
|
||||||
pass
|
|
||||||
|
|
||||||
try:
|
|
||||||
root = os.path.realpath(__file__)
|
|
||||||
# versionfile_source is the relative path from the top of the source
|
|
||||||
# tree (where the .git directory might live) to this file. Invert
|
|
||||||
# this to find the root from __file__.
|
|
||||||
for i in cfg.versionfile_source.split('/'):
|
|
||||||
root = os.path.dirname(root)
|
|
||||||
except NameError:
|
|
||||||
return {"version": "0+unknown", "full-revisionid": None,
|
|
||||||
"dirty": None,
|
|
||||||
"error": "unable to find root of source tree",
|
|
||||||
"date": None}
|
|
||||||
|
|
||||||
try:
|
|
||||||
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
|
|
||||||
return render(pieces, cfg.style)
|
|
||||||
except NotThisMethod:
|
|
||||||
pass
|
|
||||||
|
|
||||||
try:
|
|
||||||
if cfg.parentdir_prefix:
|
|
||||||
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
|
|
||||||
except NotThisMethod:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return {"version": "0+unknown", "full-revisionid": None,
|
|
||||||
"dirty": None,
|
|
||||||
"error": "unable to compute version", "date": None}
|
|
||||||
|
|
|
@ -1,22 +1,96 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
from PyQt5 import QtWidgets
|
from PyQt5 import QtWidgets, QtCore, QtGui
|
||||||
|
|
||||||
from artiq.applets.simple import SimpleApplet
|
from artiq.applets.simple import SimpleApplet
|
||||||
|
from artiq.tools import scale_from_metadata
|
||||||
|
from artiq.gui.tools import LayoutWidget
|
||||||
|
|
||||||
|
|
||||||
class NumberWidget(QtWidgets.QLCDNumber):
|
class QResponsiveLCDNumber(QtWidgets.QLCDNumber):
|
||||||
def __init__(self, args):
|
doubleClicked = QtCore.pyqtSignal()
|
||||||
QtWidgets.QLCDNumber.__init__(self)
|
|
||||||
self.setDigitCount(args.digit_count)
|
def mouseDoubleClickEvent(self, event):
|
||||||
|
self.doubleClicked.emit()
|
||||||
|
|
||||||
|
|
||||||
|
class QCancellableLineEdit(QtWidgets.QLineEdit):
|
||||||
|
editCancelled = QtCore.pyqtSignal()
|
||||||
|
|
||||||
|
def keyPressEvent(self, event):
|
||||||
|
if event.key() == QtCore.Qt.Key_Escape:
|
||||||
|
self.editCancelled.emit()
|
||||||
|
else:
|
||||||
|
super().keyPressEvent(event)
|
||||||
|
|
||||||
|
|
||||||
|
class NumberWidget(LayoutWidget):
|
||||||
|
def __init__(self, args, req):
|
||||||
|
LayoutWidget.__init__(self)
|
||||||
self.dataset_name = args.dataset
|
self.dataset_name = args.dataset
|
||||||
|
self.req = req
|
||||||
|
self.metadata = dict()
|
||||||
|
|
||||||
def data_changed(self, data, mods):
|
self.number_area = QtWidgets.QStackedWidget()
|
||||||
|
self.addWidget(self.number_area, 0, 0)
|
||||||
|
|
||||||
|
self.unit_area = QtWidgets.QLabel()
|
||||||
|
self.unit_area.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
|
||||||
|
self.addWidget(self.unit_area, 0, 1)
|
||||||
|
|
||||||
|
self.lcd_widget = QResponsiveLCDNumber()
|
||||||
|
self.lcd_widget.setDigitCount(args.digit_count)
|
||||||
|
self.lcd_widget.doubleClicked.connect(self.start_edit)
|
||||||
|
self.number_area.addWidget(self.lcd_widget)
|
||||||
|
|
||||||
|
self.edit_widget = QCancellableLineEdit()
|
||||||
|
self.edit_widget.setValidator(QtGui.QDoubleValidator())
|
||||||
|
self.edit_widget.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
|
||||||
|
self.edit_widget.editCancelled.connect(self.cancel_edit)
|
||||||
|
self.edit_widget.returnPressed.connect(self.confirm_edit)
|
||||||
|
self.number_area.addWidget(self.edit_widget)
|
||||||
|
|
||||||
|
font = QtGui.QFont()
|
||||||
|
font.setPointSize(60)
|
||||||
|
self.edit_widget.setFont(font)
|
||||||
|
|
||||||
|
unit_font = QtGui.QFont()
|
||||||
|
unit_font.setPointSize(20)
|
||||||
|
self.unit_area.setFont(unit_font)
|
||||||
|
|
||||||
|
self.number_area.setCurrentWidget(self.lcd_widget)
|
||||||
|
|
||||||
|
def start_edit(self):
|
||||||
|
# QLCDNumber value property contains the value of zero
|
||||||
|
# if the displayed value is not a number.
|
||||||
|
self.edit_widget.setText(str(self.lcd_widget.value()))
|
||||||
|
self.edit_widget.selectAll()
|
||||||
|
self.edit_widget.setFocus()
|
||||||
|
self.number_area.setCurrentWidget(self.edit_widget)
|
||||||
|
|
||||||
|
def confirm_edit(self):
|
||||||
|
scale = scale_from_metadata(self.metadata)
|
||||||
|
val = float(self.edit_widget.text())
|
||||||
|
val *= scale
|
||||||
|
self.req.set_dataset(self.dataset_name, val, **self.metadata)
|
||||||
|
self.number_area.setCurrentWidget(self.lcd_widget)
|
||||||
|
|
||||||
|
def cancel_edit(self):
|
||||||
|
self.number_area.setCurrentWidget(self.lcd_widget)
|
||||||
|
|
||||||
|
def data_changed(self, value, metadata, persist, mods):
|
||||||
try:
|
try:
|
||||||
n = float(data[self.dataset_name][1])
|
self.metadata = metadata[self.dataset_name]
|
||||||
|
# This applet will degenerate other scalar types to native float on edit
|
||||||
|
# Use the dashboard ChangeEditDialog for consistent type casting
|
||||||
|
val = float(value[self.dataset_name])
|
||||||
|
scale = scale_from_metadata(self.metadata)
|
||||||
|
val /= scale
|
||||||
except (KeyError, ValueError, TypeError):
|
except (KeyError, ValueError, TypeError):
|
||||||
n = "---"
|
val = "---"
|
||||||
self.display(n)
|
|
||||||
|
unit = self.metadata.get("unit", "")
|
||||||
|
self.unit_area.setText(unit)
|
||||||
|
self.lcd_widget.display(val)
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
|
@ -7,13 +7,13 @@ from artiq.applets.simple import SimpleApplet
|
||||||
|
|
||||||
|
|
||||||
class Image(pyqtgraph.ImageView):
|
class Image(pyqtgraph.ImageView):
|
||||||
def __init__(self, args):
|
def __init__(self, args, req):
|
||||||
pyqtgraph.ImageView.__init__(self)
|
pyqtgraph.ImageView.__init__(self)
|
||||||
self.args = args
|
self.args = args
|
||||||
|
|
||||||
def data_changed(self, data, mods):
|
def data_changed(self, value, metadata, persist, mods):
|
||||||
try:
|
try:
|
||||||
img = data[self.args.img][1]
|
img = value[self.args.img]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
return
|
return
|
||||||
self.setImage(img)
|
self.setImage(img)
|
||||||
|
|
|
@ -1,33 +1,47 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
import PyQt5 # make sure pyqtgraph imports Qt5
|
import PyQt5 # make sure pyqtgraph imports Qt5
|
||||||
|
from PyQt5.QtCore import QTimer
|
||||||
import pyqtgraph
|
import pyqtgraph
|
||||||
|
|
||||||
from artiq.applets.simple import TitleApplet
|
from artiq.applets.simple import TitleApplet
|
||||||
|
|
||||||
|
|
||||||
class HistogramPlot(pyqtgraph.PlotWidget):
|
class HistogramPlot(pyqtgraph.PlotWidget):
|
||||||
def __init__(self, args):
|
def __init__(self, args, req):
|
||||||
pyqtgraph.PlotWidget.__init__(self)
|
pyqtgraph.PlotWidget.__init__(self)
|
||||||
self.args = args
|
self.args = args
|
||||||
|
self.timer = QTimer()
|
||||||
|
self.timer.setSingleShot(True)
|
||||||
|
self.timer.timeout.connect(self.length_warning)
|
||||||
|
|
||||||
def data_changed(self, data, mods, title):
|
def data_changed(self, value, metadata, persist, mods, title):
|
||||||
try:
|
try:
|
||||||
y = data[self.args.y][1]
|
y = value[self.args.y]
|
||||||
if self.args.x is None:
|
if self.args.x is None:
|
||||||
x = None
|
x = None
|
||||||
else:
|
else:
|
||||||
x = data[self.args.x][1]
|
x = value[self.args.x]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
return
|
return
|
||||||
if x is None:
|
if x is None:
|
||||||
x = list(range(len(y)+1))
|
x = list(range(len(y)+1))
|
||||||
|
|
||||||
if len(y) and len(x) == len(y) + 1:
|
if len(y) and len(x) == len(y) + 1:
|
||||||
|
self.timer.stop()
|
||||||
self.clear()
|
self.clear()
|
||||||
self.plot(x, y, stepMode=True, fillLevel=0,
|
self.plot(x, y, stepMode=True, fillLevel=0,
|
||||||
brush=(0, 0, 255, 150))
|
brush=(0, 0, 255, 150))
|
||||||
self.setTitle(title)
|
self.setTitle(title)
|
||||||
|
else:
|
||||||
|
if not self.timer.isActive():
|
||||||
|
self.timer.start(1000)
|
||||||
|
|
||||||
|
def length_warning(self):
|
||||||
|
self.clear()
|
||||||
|
text = "⚠️ dataset lengths mismatch:\n"\
|
||||||
|
"There should be one more bin boundaries than there are Y values"
|
||||||
|
self.addItem(pyqtgraph.TextItem(text))
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
|
@ -2,39 +2,58 @@
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import PyQt5 # make sure pyqtgraph imports Qt5
|
import PyQt5 # make sure pyqtgraph imports Qt5
|
||||||
|
from PyQt5.QtCore import QTimer
|
||||||
import pyqtgraph
|
import pyqtgraph
|
||||||
|
|
||||||
from artiq.applets.simple import TitleApplet
|
from artiq.applets.simple import TitleApplet
|
||||||
|
|
||||||
|
|
||||||
class XYPlot(pyqtgraph.PlotWidget):
|
class XYPlot(pyqtgraph.PlotWidget):
|
||||||
def __init__(self, args):
|
def __init__(self, args, req):
|
||||||
pyqtgraph.PlotWidget.__init__(self)
|
pyqtgraph.PlotWidget.__init__(self)
|
||||||
self.args = args
|
self.args = args
|
||||||
|
self.timer = QTimer()
|
||||||
|
self.timer.setSingleShot(True)
|
||||||
|
self.timer.timeout.connect(self.length_warning)
|
||||||
|
self.mismatch = {'X values': False,
|
||||||
|
'Error bars': False,
|
||||||
|
'Fit values': False}
|
||||||
|
|
||||||
def data_changed(self, data, mods, title):
|
def data_changed(self, value, metadata, persist, mods, title):
|
||||||
try:
|
try:
|
||||||
y = data[self.args.y][1]
|
y = value[self.args.y]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
return
|
return
|
||||||
x = data.get(self.args.x, (False, None))[1]
|
x = value.get(self.args.x, (False, None))
|
||||||
if x is None:
|
if x is None:
|
||||||
x = np.arange(len(y))
|
x = np.arange(len(y))
|
||||||
error = data.get(self.args.error, (False, None))[1]
|
error = value.get(self.args.error, (False, None))
|
||||||
fit = data.get(self.args.fit, (False, None))[1]
|
fit = value.get(self.args.fit, (False, None))
|
||||||
|
|
||||||
if not len(y) or len(y) != len(x):
|
if not len(y) or len(y) != len(x):
|
||||||
return
|
self.mismatch['X values'] = True
|
||||||
|
else:
|
||||||
|
self.mismatch['X values'] = False
|
||||||
if error is not None and hasattr(error, "__len__"):
|
if error is not None and hasattr(error, "__len__"):
|
||||||
if not len(error):
|
if not len(error):
|
||||||
error = None
|
error = None
|
||||||
elif len(error) != len(y):
|
elif len(error) != len(y):
|
||||||
return
|
self.mismatch['Error bars'] = True
|
||||||
|
else:
|
||||||
|
self.mismatch['Error bars'] = False
|
||||||
if fit is not None:
|
if fit is not None:
|
||||||
if not len(fit):
|
if not len(fit):
|
||||||
fit = None
|
fit = None
|
||||||
elif len(fit) != len(y):
|
elif len(fit) != len(y):
|
||||||
return
|
self.mismatch['Fit values'] = True
|
||||||
|
else:
|
||||||
|
self.mismatch['Fit values'] = False
|
||||||
|
if not any(self.mismatch.values()):
|
||||||
|
self.timer.stop()
|
||||||
|
else:
|
||||||
|
if not self.timer.isActive():
|
||||||
|
self.timer.start(1000)
|
||||||
|
return
|
||||||
|
|
||||||
self.clear()
|
self.clear()
|
||||||
self.plot(x, y, pen=None, symbol="x")
|
self.plot(x, y, pen=None, symbol="x")
|
||||||
|
@ -50,6 +69,13 @@ class XYPlot(pyqtgraph.PlotWidget):
|
||||||
xi = np.argsort(x)
|
xi = np.argsort(x)
|
||||||
self.plot(x[xi], fit[xi])
|
self.plot(x[xi], fit[xi])
|
||||||
|
|
||||||
|
def length_warning(self):
|
||||||
|
self.clear()
|
||||||
|
text = "⚠️ dataset lengths mismatch:\n"
|
||||||
|
errors = ', '.join([k for k, v in self.mismatch.items() if v])
|
||||||
|
text = ' '.join([errors, "should have the same length as Y values"])
|
||||||
|
self.addItem(pyqtgraph.TextItem(text))
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
applet = TitleApplet(XYPlot)
|
applet = TitleApplet(XYPlot)
|
||||||
|
|
|
@ -2,6 +2,7 @@
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from PyQt5 import QtWidgets
|
from PyQt5 import QtWidgets
|
||||||
|
from PyQt5.QtCore import QTimer
|
||||||
import pyqtgraph
|
import pyqtgraph
|
||||||
|
|
||||||
from artiq.applets.simple import SimpleApplet
|
from artiq.applets.simple import SimpleApplet
|
||||||
|
@ -21,7 +22,7 @@ def _compute_ys(histogram_bins, histograms_counts):
|
||||||
# pyqtgraph.GraphicsWindow fails to behave like a regular Qt widget
|
# pyqtgraph.GraphicsWindow fails to behave like a regular Qt widget
|
||||||
# and breaks embedding. Do not use as top widget.
|
# and breaks embedding. Do not use as top widget.
|
||||||
class XYHistPlot(QtWidgets.QSplitter):
|
class XYHistPlot(QtWidgets.QSplitter):
|
||||||
def __init__(self, args):
|
def __init__(self, args, req):
|
||||||
QtWidgets.QSplitter.__init__(self)
|
QtWidgets.QSplitter.__init__(self)
|
||||||
self.resize(1000, 600)
|
self.resize(1000, 600)
|
||||||
self.setWindowTitle("XY/Histogram")
|
self.setWindowTitle("XY/Histogram")
|
||||||
|
@ -37,6 +38,10 @@ class XYHistPlot(QtWidgets.QSplitter):
|
||||||
self.hist_plot_data = None
|
self.hist_plot_data = None
|
||||||
|
|
||||||
self.args = args
|
self.args = args
|
||||||
|
self.timer = QTimer()
|
||||||
|
self.timer.setSingleShot(True)
|
||||||
|
self.timer.timeout.connect(self.length_warning)
|
||||||
|
self.mismatch = {'bins': False, 'xs': False}
|
||||||
|
|
||||||
def _set_full_data(self, xs, histogram_bins, histograms_counts):
|
def _set_full_data(self, xs, histogram_bins, histograms_counts):
|
||||||
self.xy_plot.clear()
|
self.xy_plot.clear()
|
||||||
|
@ -59,9 +64,9 @@ class XYHistPlot(QtWidgets.QSplitter):
|
||||||
point.histogram_index = index
|
point.histogram_index = index
|
||||||
point.histogram_counts = counts
|
point.histogram_counts = counts
|
||||||
|
|
||||||
self.hist_plot_data = self.hist_plot.plot(
|
text = "click on a data point at the left\n"\
|
||||||
stepMode=True, fillLevel=0,
|
"to see the corresponding histogram"
|
||||||
brush=(0, 0, 255, 150))
|
self.hist_plot.addItem(pyqtgraph.TextItem(text))
|
||||||
|
|
||||||
def _set_partial_data(self, xs, histograms_counts):
|
def _set_partial_data(self, xs, histograms_counts):
|
||||||
ys = _compute_ys(self.histogram_bins, histograms_counts)
|
ys = _compute_ys(self.histogram_bins, histograms_counts)
|
||||||
|
@ -87,8 +92,17 @@ class XYHistPlot(QtWidgets.QSplitter):
|
||||||
else:
|
else:
|
||||||
self.arrow.setPos(position)
|
self.arrow.setPos(position)
|
||||||
self.selected_index = spot_item.histogram_index
|
self.selected_index = spot_item.histogram_index
|
||||||
self.hist_plot_data.setData(x=self.histogram_bins,
|
|
||||||
y=spot_item.histogram_counts)
|
if self.hist_plot_data is None:
|
||||||
|
self.hist_plot.clear()
|
||||||
|
self.hist_plot_data = self.hist_plot.plot(
|
||||||
|
x=self.histogram_bins,
|
||||||
|
y=spot_item.histogram_counts,
|
||||||
|
stepMode=True, fillLevel=0,
|
||||||
|
brush=(0, 0, 255, 150))
|
||||||
|
else:
|
||||||
|
self.hist_plot_data.setData(x=self.histogram_bins,
|
||||||
|
y=spot_item.histogram_counts)
|
||||||
|
|
||||||
def _can_use_partial(self, mods):
|
def _can_use_partial(self, mods):
|
||||||
if self.hist_plot_data is None:
|
if self.hist_plot_data is None:
|
||||||
|
@ -110,18 +124,48 @@ class XYHistPlot(QtWidgets.QSplitter):
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def data_changed(self, data, mods):
|
def data_changed(self, value, metadata, persist, mods):
|
||||||
try:
|
try:
|
||||||
xs = data[self.args.xs][1]
|
xs = value[self.args.xs]
|
||||||
histogram_bins = data[self.args.histogram_bins][1]
|
histogram_bins = value[self.args.histogram_bins]
|
||||||
histograms_counts = data[self.args.histograms_counts][1]
|
histograms_counts = value[self.args.histograms_counts]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
return
|
return
|
||||||
|
if len(xs) != histograms_counts.shape[0]:
|
||||||
|
self.mismatch['xs'] = True
|
||||||
|
else:
|
||||||
|
self.mismatch['xs'] = False
|
||||||
|
if histograms_counts.shape[1] != len(histogram_bins) - 1:
|
||||||
|
self.mismatch['bins'] = True
|
||||||
|
else:
|
||||||
|
self.mismatch['bins'] = False
|
||||||
|
if any(self.mismatch.values()):
|
||||||
|
if not self.timer.isActive():
|
||||||
|
self.timer.start(1000)
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
self.timer.stop()
|
||||||
if self._can_use_partial(mods):
|
if self._can_use_partial(mods):
|
||||||
self._set_partial_data(xs, histograms_counts)
|
self._set_partial_data(xs, histograms_counts)
|
||||||
else:
|
else:
|
||||||
self._set_full_data(xs, histogram_bins, histograms_counts)
|
self._set_full_data(xs, histogram_bins, histograms_counts)
|
||||||
|
|
||||||
|
def length_warning(self):
|
||||||
|
self.xy_plot.clear()
|
||||||
|
self.hist_plot.clear()
|
||||||
|
text = "⚠️ dataset lengths mismatch:\n\n"
|
||||||
|
if self.mismatch['bins']:
|
||||||
|
text = ''.join([text,
|
||||||
|
"bin boundaries should have the same length\n"
|
||||||
|
"as the first dimension of histogram counts."])
|
||||||
|
if self.mismatch['bins'] and self.mismatch['xs']:
|
||||||
|
text = ''.join([text, '\n\n'])
|
||||||
|
if self.mismatch['xs']:
|
||||||
|
text = ''.join([text,
|
||||||
|
"point abscissas should have the same length\n"
|
||||||
|
"as the second dimension of histogram counts."])
|
||||||
|
self.xy_plot.addItem(pyqtgraph.TextItem(text))
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
applet = SimpleApplet(XYHistPlot)
|
applet = SimpleApplet(XYHistPlot)
|
||||||
|
|
|
@ -0,0 +1,34 @@
#!/usr/bin/env python3

from PyQt5 import QtWidgets

from artiq.applets.simple import SimpleApplet


class ProgressWidget(QtWidgets.QProgressBar):
    def __init__(self, args, req):
        QtWidgets.QProgressBar.__init__(self)
        self.setMinimum(args.min)
        self.setMaximum(args.max)
        self.dataset_value = args.value

    def data_changed(self, value, metadata, persist, mods):
        try:
            val = round(value[self.dataset_value])
        except (KeyError, ValueError, TypeError):
            val = 0
        self.setValue(val)



def main():
    applet = SimpleApplet(ProgressWidget)
    applet.add_dataset("value", "counter")
    applet.argparser.add_argument("--min", type=int, default=0,
                                  help="minimum (left) value of the bar")
    applet.argparser.add_argument("--max", type=int, default=100,
                                  help="maximum (right) value of the bar")
    applet.run()

if __name__ == "__main__":
    main()

@ -4,16 +4,115 @@ import asyncio
|
||||||
import os
|
import os
|
||||||
import string
|
import string
|
||||||
|
|
||||||
from quamash import QEventLoop, QtWidgets, QtCore
|
from qasync import QEventLoop, QtWidgets, QtCore
|
||||||
|
|
||||||
from artiq.protocols.sync_struct import Subscriber, process_mod
|
from sipyco.sync_struct import Subscriber, process_mod
|
||||||
from artiq.protocols import pyon
|
from sipyco.pc_rpc import AsyncioClient as RPCClient
|
||||||
from artiq.protocols.pipe_ipc import AsyncioChildComm
|
from sipyco import pyon
|
||||||
|
from sipyco.pipe_ipc import AsyncioChildComm
|
||||||
|
|
||||||
|
from artiq.language.scan import ScanObject
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class _AppletRequestInterface:
|
||||||
|
def __init__(self):
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
|
||||||
|
"""
|
||||||
|
Set a dataset.
|
||||||
|
See documentation of ``artiq.language.environment.set_dataset``.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def mutate_dataset(self, key, index, value):
|
||||||
|
"""
|
||||||
|
Mutate a dataset.
|
||||||
|
See documentation of ``artiq.language.environment.mutate_dataset``.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def append_to_dataset(self, key, value):
|
||||||
|
"""
|
||||||
|
Append to a dataset.
|
||||||
|
See documentation of ``artiq.language.environment.append_to_dataset``.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def set_argument_value(self, expurl, name, value):
|
||||||
|
"""
|
||||||
|
Temporarily set the value of an argument in an experiment in the dashboard.
|
||||||
|
The value resets to default value when recomputing the argument.
|
||||||
|
|
||||||
|
:param expurl: Experiment URL identifying the experiment in the dashboard. Example: 'repo:ArgumentsDemo'.
|
||||||
|
:param name: Name of the argument in the experiment.
|
||||||
|
:param value: Object representing the new temporary value of the argument. For ``Scannable`` arguments, this parameter
|
||||||
|
+should be a ``ScanObject``. The type of the ``ScanObject`` will be set as the selected type when this function is called.
+"""
+raise NotImplementedError


+class AppletRequestIPC(_AppletRequestInterface):
+def __init__(self, ipc):
+self.ipc = ipc
+
+def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
+metadata = {}
+if unit is not None:
+metadata["unit"] = unit
+if scale is not None:
+metadata["scale"] = scale
+if precision is not None:
+metadata["precision"] = precision
+self.ipc.set_dataset(key, value, metadata, persist)
+
+def mutate_dataset(self, key, index, value):
+mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
+self.ipc.update_dataset(mod)
+
+def append_to_dataset(self, key, value):
+mod = {"action": "append", "path": [key, 1], "x": value}
+self.ipc.update_dataset(mod)
+
+def set_argument_value(self, expurl, name, value):
+if isinstance(value, ScanObject):
+value = value.describe()
+self.ipc.set_argument_value(expurl, name, value)
+
+
+class AppletRequestRPC(_AppletRequestInterface):
+def __init__(self, loop, dataset_ctl):
+self.loop = loop
+self.dataset_ctl = dataset_ctl
+self.background_tasks = set()
+
+def _background(self, coro, *args, **kwargs):
+task = self.loop.create_task(coro(*args, **kwargs))
+self.background_tasks.add(task)
+task.add_done_callback(self.background_tasks.discard)
+
+def set_dataset(self, key, value, unit=None, scale=None, precision=None, persist=None):
+metadata = {}
+if unit is not None:
+metadata["unit"] = unit
+if scale is not None:
+metadata["scale"] = scale
+if precision is not None:
+metadata["precision"] = precision
+self._background(self.dataset_ctl.set, key, value, metadata=metadata, persist=persist)
+
+def mutate_dataset(self, key, index, value):
+mod = {"action": "setitem", "path": [key, 1], "key": index, "value": value}
+self._background(self.dataset_ctl.update, mod)
+
+def append_to_dataset(self, key, value):
+mod = {"action": "append", "path": [key, 1], "x": value}
+self._background(self.dataset_ctl.update, mod)


class AppletIPCClient(AsyncioChildComm):
def set_close_cb(self, close_cb):
self.close_cb = close_cb

@@ -64,12 +163,30 @@ class AppletIPCClient(AsyncioChildComm):
exc_info=True)
self.close_cb()

-def subscribe(self, datasets, init_cb, mod_cb):
+def subscribe(self, datasets, init_cb, mod_cb, dataset_prefixes=[], *, loop):
self.write_pyon({"action": "subscribe",
-"datasets": datasets})
+"datasets": datasets,
+"dataset_prefixes": dataset_prefixes})
self.init_cb = init_cb
self.mod_cb = mod_cb
-asyncio.ensure_future(self.listen())
+self.listen_task = loop.create_task(self.listen())
+
+def set_dataset(self, key, value, metadata, persist=None):
+self.write_pyon({"action": "set_dataset",
+"key": key,
+"value": value,
+"metadata": metadata,
+"persist": persist})
+
+def update_dataset(self, mod):
+self.write_pyon({"action": "update_dataset",
+"mod": mod})
+
+def set_argument_value(self, expurl, name, value):
+self.write_pyon({"action": "set_argument_value",
+"expurl": expurl,
+"name": name,
+"value": value})


class SimpleApplet:

@@ -91,8 +208,11 @@ class SimpleApplet:
"for dataset notifications "
"(ignored in embedded mode)")
group.add_argument(
-"--port", default=3250, type=int,
+"--port-notify", default=3250, type=int,
-help="TCP port to connect to")
+help="TCP port to connect to for notifications (ignored in embedded mode)")
+group.add_argument(
+"--port-control", default=3251, type=int,
+help="TCP port to connect to for control (ignored in embedded mode)")

self._arggroup_datasets = self.argparser.add_argument_group("datasets")

@@ -113,8 +233,11 @@ class SimpleApplet:
self.embed = os.getenv("ARTIQ_APPLET_EMBED")
self.datasets = {getattr(self.args, arg.replace("-", "_"))
for arg in self.dataset_args}
+# Optional prefixes (dataset sub-trees) to match subscriptions against;
+# currently only used by out-of-tree subclasses (ndscan).
+self.dataset_prefixes = []

-def quamash_init(self):
+def qasync_init(self):
app = QtWidgets.QApplication([])
self.loop = QEventLoop(app)
asyncio.set_event_loop(self.loop)

@@ -128,8 +251,21 @@ class SimpleApplet:
if self.embed is not None:
self.ipc.close()

+def req_init(self):
+if self.embed is None:
+dataset_ctl = RPCClient()
+self.loop.run_until_complete(dataset_ctl.connect_rpc(
+self.args.server, self.args.port_control, "master_dataset_db"))
+self.req = AppletRequestRPC(self.loop, dataset_ctl)
+else:
+self.req = AppletRequestIPC(self.ipc)
+
+def req_close(self):
+if self.embed is None:
+self.req.dataset_ctl.close_rpc()
+
def create_main_widget(self):
-self.main_widget = self.main_widget_class(self.args)
+self.main_widget = self.main_widget_class(self.args, self.req)
if self.embed is not None:
self.ipc.set_close_cb(self.main_widget.close)
if os.name == "nt":

@@ -162,6 +298,14 @@ class SimpleApplet:
self.data = data
return data

+def is_dataset_subscribed(self, key):
+if key in self.datasets:
+return True
+for prefix in self.dataset_prefixes:
+if key.startswith(prefix):
+return True
+return False
+
def filter_mod(self, mod):
if self.embed is not None:
# the parent already filters for us

@@ -170,14 +314,19 @@ class SimpleApplet:
if mod["action"] == "init":
return True
if mod["path"]:
-return mod["path"][0] in self.datasets
+return self.is_dataset_subscribed(mod["path"][0])
elif mod["action"] in {"setitem", "delitem"}:
-return mod["key"] in self.datasets
+return self.is_dataset_subscribed(mod["key"])
else:
return False

def emit_data_changed(self, data, mod_buffer):
-self.main_widget.data_changed(data, mod_buffer)
+persist = dict()
+value = dict()
+metadata = dict()
+for k, d in data.items():
+persist[k], value[k], metadata[k] = d
+self.main_widget.data_changed(value, metadata, persist, mod_buffer)

def flush_mod_buffer(self):
self.emit_data_changed(self.data, self.mod_buffer)

@@ -192,8 +341,8 @@ class SimpleApplet:
self.mod_buffer.append(mod)
else:
self.mod_buffer = [mod]
-asyncio.get_event_loop().call_later(self.args.update_delay,
+self.loop.call_later(self.args.update_delay,
self.flush_mod_buffer)
else:
self.emit_data_changed(self.data, [mod])

@@ -202,9 +351,11 @@ class SimpleApplet:
self.subscriber = Subscriber("datasets",
self.sub_init, self.sub_mod)
self.loop.run_until_complete(self.subscriber.connect(
-self.args.server, self.args.port))
+self.args.server, self.args.port_notify))
else:
-self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod)
+self.ipc.subscribe(self.datasets, self.sub_init, self.sub_mod,
+dataset_prefixes=self.dataset_prefixes,
+loop=self.loop)

def unsubscribe(self):
if self.embed is None:

@@ -212,16 +363,20 @@ class SimpleApplet:

def run(self):
self.args_init()
-self.quamash_init()
+self.qasync_init()
try:
self.ipc_init()
try:
-self.create_main_widget()
+self.req_init()
-self.subscribe()
try:
-self.loop.run_forever()
+self.create_main_widget()
+self.subscribe()
+try:
+self.loop.run_forever()
+finally:
+self.unsubscribe()
finally:
-self.unsubscribe()
+self.req_close()
finally:
self.ipc_close()
finally:

@@ -260,4 +415,9 @@ class TitleApplet(SimpleApplet):
title = self.args.title
else:
title = None
-self.main_widget.data_changed(data, mod_buffer, title)
+persist = dict()
+value = dict()
+metadata = dict()
+for k, d in data.items():
+persist[k], value[k], metadata[k] = d
+self.main_widget.data_changed(value, metadata, persist, mod_buffer, title)
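The hunks above rework the applet support module (the file name row was lost in this view, but the content matches artiq/applets/simple.py): widgets now receive a req object for writing back to datasets, --port is split into --port-notify/--port-control, and data_changed() is called with separate value, metadata and persist dictionaries. A minimal sketch of an applet written against that interface; the DemoWidget class, the "dataset" argument name and the label widget are illustrative assumptions, not part of the patch:

    # Sketch only: assumes an ARTIQ installation that already contains this patch.
    from PyQt5 import QtWidgets
    from artiq.applets.simple import SimpleApplet


    class DemoWidget(QtWidgets.QLabel):
        def __init__(self, args, req):
            QtWidgets.QLabel.__init__(self)
            self.dataset_name = args.dataset  # filled in by add_dataset() below
            self.req = req                    # AppletRequestIPC or AppletRequestRPC

        def data_changed(self, value, metadata, persist, mod_buffer):
            # new signature: three dicts plus the mod buffer
            self.setText(str(value.get(self.dataset_name)))


    def main():
        applet = SimpleApplet(DemoWidget)
        applet.add_dataset("dataset", "dataset to display")
        applet.run()


    if __name__ == "__main__":
        main()

In embedded mode req is an AppletRequestIPC forwarding through the dashboard; a standalone applet gets an AppletRequestRPC that talks to the master over --port-control.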
@@ -3,10 +3,11 @@ import asyncio

from PyQt5 import QtCore, QtWidgets

+from sipyco.pc_rpc import AsyncioClient as RPCClient
+
from artiq.tools import short_format
from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel
from artiq.gui.models import DictSyncTreeSepModel
-from artiq.protocols.pc_rpc import AsyncioClient as RPCClient

# reduced read-only version of artiq.dashboard.datasets

@@ -19,11 +20,46 @@ class Model(DictSyncTreeSepModel):
DictSyncTreeSepModel.__init__(self, ".", ["Dataset", "Value"], init)

def convert(self, k, v, column):
-return short_format(v[1])
+return short_format(v[1], v[2])


+class DatasetCtl:
+def __init__(self, master_host, master_port):
+self.master_host = master_host
+self.master_port = master_port
+
+async def _execute_rpc(self, op_name, key_or_mod, value=None, persist=None, metadata=None):
+logger.info("Starting %s operation on %s", op_name, key_or_mod)
+try:
+remote = RPCClient()
+await remote.connect_rpc(self.master_host, self.master_port,
+"master_dataset_db")
+try:
+if op_name == "set":
+await remote.set(key_or_mod, value, persist, metadata)
+elif op_name == "update":
+await remote.update(key_or_mod)
+else:
+logger.error("Invalid operation: %s", op_name)
+return
+finally:
+remote.close_rpc()
+except:
+logger.error("Failed %s operation on %s", op_name,
+key_or_mod, exc_info=True)
+else:
+logger.info("Finished %s operation on %s", op_name,
+key_or_mod)
+
+async def set(self, key, value, persist=None, metadata=None):
+await self._execute_rpc("set", key, value, persist, metadata)
+
+async def update(self, mod):
+await self._execute_rpc("update", mod)
+
+
class DatasetsDock(QtWidgets.QDockWidget):
-def __init__(self, datasets_sub, master_host, master_port):
+def __init__(self, dataset_sub, dataset_ctl):
QtWidgets.QDockWidget.__init__(self, "Datasets")
self.setObjectName("Datasets")
self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -61,10 +97,9 @@ class DatasetsDock(QtWidgets.QDockWidget):
self.table.addAction(upload_action)

self.set_model(Model(dict()))
-datasets_sub.add_setmodel_callback(self.set_model)
+dataset_sub.add_setmodel_callback(self.set_model)

-self.master_host = master_host
+self.dataset_ctl = dataset_ctl
-self.master_port = master_port

def _search_datasets(self):
if hasattr(self, "table_model_filter"):

@@ -81,30 +116,14 @@ class DatasetsDock(QtWidgets.QDockWidget):
self.table_model_filter.setSourceModel(self.table_model)
self.table.setModel(self.table_model_filter)

-async def _upload_dataset(self, name, value,):
-logger.info("Uploading dataset '%s' to master...", name)
-try:
-remote = RPCClient()
-await remote.connect_rpc(self.master_host, self.master_port,
-"master_dataset_db")
-try:
-await remote.set(name, value)
-finally:
-remote.close_rpc()
-except:
-logger.error("Failed uploading dataset '%s'",
-name, exc_info=True)
-else:
-logger.info("Finished uploading dataset '%s'", name)

def upload_clicked(self):
idx = self.table.selectedIndexes()
if idx:
idx = self.table_model_filter.mapToSource(idx[0])
key = self.table_model.index_to_key(idx)
if key is not None:
-persist, value = self.table_model.backing_store[key]
+persist, value, metadata = self.table_model.backing_store[key]
-asyncio.ensure_future(self._upload_dataset(key, value))
+asyncio.ensure_future(self.dataset_ctl.set(key, value, metadata=metadata))

def save_state(self):
return bytes(self.table.header().saveState())
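The new DatasetCtl above centralizes the connect/set/close RPC round-trip that _upload_dataset() used to inline, and upload_clicked() now forwards the per-key metadata as well. A hedged usage sketch; the host, port and import path are assumptions inferred from the surrounding code rather than stated by the patch:

    # Sketch only: assumes a running ARTIQ master exposing master_dataset_db on port 3251.
    import asyncio
    from artiq.browser.datasets import DatasetCtl  # module path inferred from context


    async def upload_one():
        ctl = DatasetCtl("::1", 3251)
        # metadata travels with the value, matching the new 3-tuple backing store
        await ctl.set("scan.center_freq", 100e6, metadata={"unit": "Hz", "scale": 1e6})


    asyncio.run(upload_one())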
@@ -7,24 +7,17 @@ from collections import OrderedDict
from PyQt5 import QtCore, QtGui, QtWidgets
import h5py

+from sipyco import pyon
+
from artiq import __artiq_dir__ as artiq_dir
-from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
+from artiq.gui.tools import (LayoutWidget, WheelFilter,
+log_level_to_name, get_open_file_name)
from artiq.gui.entries import procdesc_to_entry
-from artiq.protocols import pyon
from artiq.master.worker import Worker, log_worker_exception

logger = logging.getLogger(__name__)


-class _WheelFilter(QtCore.QObject):
-def eventFilter(self, obj, event):
-if (event.type() == QtCore.QEvent.Wheel and
-event.modifiers() != QtCore.Qt.NoModifier):
-event.ignore()
-return True
-return False


class _ArgumentEditor(QtWidgets.QTreeWidget):
def __init__(self, dock):
QtWidgets.QTreeWidget.__init__(self)

@@ -45,7 +38,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
self.setStyleSheet("QTreeWidget {background: " +
self.palette().midlight().color().name() + " ;}")

-self.viewport().installEventFilter(_WheelFilter(self.viewport()))
+self.viewport().installEventFilter(WheelFilter(self.viewport(), True))

self._groups = dict()
self._arg_to_widgets = dict()

@@ -377,9 +370,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):


class LocalDatasetDB:
-def __init__(self, datasets_sub):
+def __init__(self, dataset_sub):
-self.datasets_sub = datasets_sub
+self.dataset_sub = dataset_sub
-datasets_sub.add_setmodel_callback(self.init)
+dataset_sub.add_setmodel_callback(self.init)

def init(self, data):
self._data = data

@@ -388,11 +381,11 @@ class LocalDatasetDB:
return self._data.backing_store[key][1]

def update(self, mod):
-self.datasets_sub.update(mod)
+self.dataset_sub.update(mod)


class ExperimentsArea(QtWidgets.QMdiArea):
-def __init__(self, root, datasets_sub):
+def __init__(self, root, dataset_sub):
QtWidgets.QMdiArea.__init__(self)
self.pixmap = QtGui.QPixmap(os.path.join(
artiq_dir, "gui", "logo_ver.svg"))

@@ -401,11 +394,11 @@ class ExperimentsArea(QtWidgets.QMdiArea):

self.open_experiments = []

-self._ddb = LocalDatasetDB(datasets_sub)
+self._ddb = LocalDatasetDB(dataset_sub)

self.worker_handlers = {
"get_device_db": lambda: {},
-"get_device": lambda k: {"type": "dummy"},
+"get_device": lambda key, resolve_alias=False: {"type": "dummy"},
"get_dataset": self._ddb.get,
"update_dataset": self._ddb.update,
}

@@ -515,5 +508,9 @@ class ExperimentsArea(QtWidgets.QMdiArea):
self.open_experiments.append(dock)
return dock

+def set_argument_value(self, expurl, name, value):
+logger.warning("Unable to set argument '%s', dropping change. "
+"'set_argument_value' not supported in browser.", name)
+
def on_dock_closed(self, dock):
self.open_experiments.remove(dock)
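The private _WheelFilter above is dropped in favour of the shared WheelFilter from artiq.gui.tools, now constructed with the parent object plus a flag (assumed here to mean "ignore wheel events that arrive without a keyboard modifier", mirroring the removed local filter). A minimal sketch of the replacement wiring:

    # Sketch only: PyQt5 and an ARTIQ checkout containing the shared WheelFilter assumed.
    from PyQt5 import QtWidgets
    from artiq.gui.tools import WheelFilter

    app = QtWidgets.QApplication([])
    tree = QtWidgets.QTreeWidget()
    # same call shape as in the patched _ArgumentEditor.__init__
    tree.viewport().installEventFilter(WheelFilter(tree.viewport(), True))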
@@ -5,7 +5,8 @@ from datetime import datetime
import h5py
from PyQt5 import QtCore, QtWidgets, QtGui

-from artiq.protocols import pyon
+from sipyco import pyon


logger = logging.getLogger(__name__)

@@ -41,7 +42,7 @@ class ThumbnailIconProvider(QtWidgets.QFileIconProvider):
except KeyError:
return
try:
-img = QtGui.QImage.fromData(t.value)
+img = QtGui.QImage.fromData(t[()])
except:
logger.warning("unable to read thumbnail from %s",
info.filePath(), exc_info=True)

@@ -70,7 +71,7 @@ class ZoomIconView(QtWidgets.QListView):
self._char_width = QtGui.QFontMetrics(self.font()).averageCharWidth()
self.setViewMode(self.IconMode)
w = self._char_width*self.default_size
-self.setIconSize(QtCore.QSize(w, w*self.aspect))
+self.setIconSize(QtCore.QSize(w, int(w*self.aspect)))
self.setFlow(self.LeftToRight)
self.setResizeMode(self.Adjust)
self.setWrapping(True)

@@ -101,13 +102,14 @@ class Hdf5FileSystemModel(QtWidgets.QFileSystemModel):
h5 = open_h5(info)
if h5 is not None:
try:
-expid = pyon.decode(h5["expid"].value)
+expid = pyon.decode(h5["expid"][()]) if "expid" in h5 else dict()
-start_time = datetime.fromtimestamp(h5["start_time"].value)
+start_time = datetime.fromtimestamp(h5["start_time"][()]) if "start_time" in h5 else "<none>"
v = ("artiq_version: {}\nrepo_rev: {}\nfile: {}\n"
"class_name: {}\nrid: {}\nstart_time: {}").format(
-h5["artiq_version"].value, expid["repo_rev"],
+h5["artiq_version"].asstr()[()] if "artiq_version" in h5 else "<none>",
-expid["file"], expid["class_name"],
+expid.get("repo_rev", "<none>"),
-h5["rid"].value, start_time)
+expid.get("file", "<none>"), expid.get("class_name", "<none>"),
+h5["rid"][()] if "rid" in h5 else "<none>", start_time)
return v
except:
logger.warning("unable to read metadata from %s",

@@ -173,31 +175,45 @@ class FilesDock(QtWidgets.QDockWidget):
logger.debug("loading datasets from %s", info.filePath())
with f:
try:
-expid = pyon.decode(f["expid"].value)
+expid = pyon.decode(f["expid"][()]) if "expid" in f else dict()
-start_time = datetime.fromtimestamp(f["start_time"].value)
+start_time = datetime.fromtimestamp(f["start_time"][()]) if "start_time" in f else "<none>"
v = {
-"artiq_version": f["artiq_version"].value,
+"artiq_version": f["artiq_version"].asstr()[()] if "artiq_version" in f else "<none>",
-"repo_rev": expid["repo_rev"],
+"repo_rev": expid.get("repo_rev", "<none>"),
-"file": expid["file"],
+"file": expid.get("file", "<none>"),
-"class_name": expid["class_name"],
+"class_name": expid.get("class_name", "<none>"),
-"rid": f["rid"].value,
+"rid": f["rid"][()] if "rid" in f else "<none>",
"start_time": start_time,
}
self.metadata_changed.emit(v)
except:
logger.warning("unable to read metadata from %s",
info.filePath(), exc_info=True)
-rd = dict()
+rd = {}
if "archive" in f:
-rd = {k: (True, v.value) for k, v in f["archive"].items()}
+def visitor(k, v):
+if isinstance(v, h5py.Dataset):
+# v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
+# See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
+rd[k] = (True, v[()], dict(v.attrs))
+
+f["archive"].visititems(visitor)

if "datasets" in f:
-for k, v in f["datasets"].items():
+def visitor(k, v):
-if k in rd:
+if isinstance(v, h5py.Dataset):
-logger.warning("dataset '%s' is both in archive and "
+if k in rd:
-"outputs", k)
+logger.warning("dataset '%s' is both in archive "
-rd[k] = (True, v.value)
+"and outputs", k)
-if rd:
+# v.attrs is a non-serializable h5py.AttributeManager, need to convert to dict
-self.datasets.init(rd)
+# See https://docs.h5py.org/en/stable/high/attr.html#h5py.AttributeManager
+rd[k] = (True, v[()], dict(v.attrs))
+
+f["datasets"].visititems(visitor)
+
+self.datasets.init(rd)

self.dataset_changed.emit(info.filePath())

def list_activated(self, idx):
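Most of the edits above are the h5py 3 migration: the removed Dataset.value property becomes [()] indexing, strings go through .asstr(), missing keys get "<none>" fallbacks, and dataset attributes are captured as plain dicts. A standalone illustration; the file name and contents are made up:

    # Sketch only: requires h5py >= 3.
    import h5py

    with h5py.File("demo.h5", "w") as f:
        f["rid"] = 42
        f["artiq_version"] = "8.0+demo"
        g = f.create_group("datasets")
        g["x"] = [1.0, 2.0, 3.0]
        g["x"].attrs["unit"] = "ms"

    with h5py.File("demo.h5", "r") as f:
        rid = f["rid"][()]                        # was f["rid"].value
        version = f["artiq_version"].asstr()[()]  # string datasets need .asstr() now
        meta = dict(f["datasets/x"].attrs)        # AttributeManager -> plain dict
        print(rid, version, f["datasets/x"][()], meta)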
@@ -1,7 +1,9 @@
import os
import subprocess

-from misoc.cores import identifier
+from migen import *
+from migen.build.platforms.sinara import kasli
+from misoc.interconnect.csr import *
from misoc.integration.builder import *

from artiq.gateware.amp import AMPSoC

@@ -17,33 +19,58 @@ def get_identifier_string(soc, suffix="", add_class_name=True):
if suffix or add_class_name:
r += ";"
if add_class_name:
-r += soc.__class__.__name__.lower()
+r += getattr(soc, "class_name_override", soc.__class__.__name__.lower())
r += suffix
return r


-def add_identifier(soc, *args, **kwargs):
+class ReprogrammableIdentifier(Module, AutoCSR):
+def __init__(self, ident):
+self.address = CSRStorage(8)
+self.data = CSRStatus(8)
+
+contents = list(ident.encode())
+l = len(contents)
+if l > 255:
+raise ValueError("Identifier string must be 255 characters or less")
+contents.insert(0, l)
+
+for i in range(8):
+self.specials += Instance("ROM256X1", name="identifier_str"+str(i),
+i_A0=self.address.storage[0], i_A1=self.address.storage[1],
+i_A2=self.address.storage[2], i_A3=self.address.storage[3],
+i_A4=self.address.storage[4], i_A5=self.address.storage[5],
+i_A6=self.address.storage[6], i_A7=self.address.storage[7],
+o_O=self.data.status[i],
+p_INIT=sum(1 << j if c & (1 << i) else 0 for j, c in enumerate(contents)))
+
+
+def add_identifier(soc, *args, gateware_identifier_str=None, **kwargs):
if hasattr(soc, "identifier"):
raise ValueError
identifier_str = get_identifier_string(soc, *args, **kwargs)
-soc.submodules.identifier = identifier.Identifier(identifier_str)
+soc.submodules.identifier = ReprogrammableIdentifier(gateware_identifier_str or identifier_str)
soc.config["IDENTIFIER_STR"] = identifier_str


def build_artiq_soc(soc, argdict):
firmware_dir = os.path.join(artiq_dir, "firmware")
builder = Builder(soc, **argdict)
builder.software_packages = []
builder.add_software_package("bootloader", os.path.join(firmware_dir, "bootloader"))
-if isinstance(soc, AMPSoC):
+is_kasli_v1 = isinstance(soc.platform, kasli.Platform) and soc.platform.hw_rev in ("v1.0", "v1.1")
-builder.add_software_package("libm")
+kernel_cpu_type = "vexriscv" if is_kasli_v1 else "vexriscv-g"
-builder.add_software_package("libprintf")
+builder.add_software_package("libm", cpu_type=kernel_cpu_type)
+builder.add_software_package("libprintf", cpu_type=kernel_cpu_type)
+builder.add_software_package("libunwind", cpu_type=kernel_cpu_type)
+builder.add_software_package("ksupport", os.path.join(firmware_dir, "ksupport"), cpu_type=kernel_cpu_type)
+# Generate unwinder for soft float target (ARTIQ runtime)
+# If the kernel lacks FPU, then the runtime unwinder is already generated
+if not is_kasli_v1:
builder.add_software_package("libunwind")
-builder.add_software_package("ksupport", os.path.join(firmware_dir, "ksupport"))
+if not soc.config["DRTIO_ROLE"] == "satellite":
builder.add_software_package("runtime", os.path.join(firmware_dir, "runtime"))
else:
-# Assume DRTIO satellite.
builder.add_software_package("satman", os.path.join(firmware_dir, "satman"))
try:
builder.build()
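ReprogrammableIdentifier above packs the length-prefixed identifier string into eight ROM256X1 primitives, one bit plane per instance, so the string can be patched in the bitstream without a gateware rebuild. The p_INIT expression can be reproduced on the host; this sketch simply re-runs it on an example string (the string itself is invented):

    # Host-side reproduction of the ROM INIT values used by ReprogrammableIdentifier.
    ident = "7.0+unknown.beta;kasli"
    contents = list(ident.encode())
    assert len(contents) <= 255, "identifier must fit the length-prefixed 256-byte ROM"
    contents.insert(0, len(contents))   # byte 0 holds the string length

    for i in range(8):                  # one ROM256X1 per bit of each character
        init = sum(1 << j if c & (1 << i) else 0 for j, c in enumerate(contents))
        print("bit plane {}: INIT = {:#x}".format(i, init))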
@@ -21,13 +21,19 @@ class scoped(object):
set of variables resolved as globals
"""

+class remote(object):
+"""
+:ivar remote_fn: (bool) whether function is ran on a remote device,
+meaning arguments are received remotely and return is sent remotely
+"""
+
# Typed versions of untyped nodes
class argT(ast.arg, commontyped):
pass

class ClassDefT(ast.ClassDef):
_types = ("constructor_type",)
-class FunctionDefT(ast.FunctionDef, scoped):
+class FunctionDefT(ast.FunctionDef, scoped, remote):
_types = ("signature_type",)
class QuotedFunctionDefT(FunctionDefT):
"""

@@ -58,7 +64,7 @@ class BinOpT(ast.BinOp, commontyped):
pass
class BoolOpT(ast.BoolOp, commontyped):
pass
-class CallT(ast.Call, commontyped):
+class CallT(ast.Call, commontyped, remote):
"""
:ivar iodelay: (:class:`iodelay.Expr`)
:ivar arg_exprs: (dict of str to :class:`iodelay.Expr`)
@@ -38,6 +38,9 @@ class TInt(types.TMono):
def one():
return 1

+def TInt8():
+return TInt(types.TValue(8))
+
def TInt32():
return TInt(types.TValue(32))

@@ -82,13 +85,27 @@ class TList(types.TMono):
super().__init__("list", {"elt": elt})

class TArray(types.TMono):
-def __init__(self, elt=None):
+def __init__(self, elt=None, num_dims=1):
if elt is None:
elt = types.TVar()
-super().__init__("array", {"elt": elt})
+if isinstance(num_dims, int):
+# Make TArray more convenient to instantiate from (ARTIQ) user code.
+num_dims = types.TValue(num_dims)
+# For now, enforce number of dimensions to be known, as we'd otherwise
+# need to implement custom unification logic for the type of `shape`.
+# Default to 1 to keep compatibility with old user code from before
+# multidimensional array support.
+assert isinstance(num_dims.value, int), "Number of dimensions must be resolved"
+
+super().__init__("array", {"elt": elt, "num_dims": num_dims})
+self.attributes = OrderedDict([
+("buffer", types._TPointer(elt)),
+("shape", types.TTuple([TInt32()] * num_dims.value)),
+])

def _array_printer(typ, printer, depth, max_depth):
-return "numpy.array(elt={})".format(printer.name(typ["elt"], depth, max_depth))
+return "numpy.array(elt={}, num_dims={})".format(
+printer.name(typ["elt"], depth, max_depth), typ["num_dims"].value)
types.TypePrinter.custom_printers["array"] = _array_printer

class TRange(types.TMono):

@@ -109,18 +126,23 @@ class TException(types.TMono):
# * File, line and column where it was raised (str, int, int).
# * Message, which can contain substitutions {0}, {1} and {2} (str).
# * Three 64-bit integers, parameterizing the message (numpy.int64).
+# These attributes are prefixed with `#` so that users cannot access them,
+# and we don't have to do string allocation in the runtime.
+# #__name__ is now a string key in the host. TStr may not be an actual
+# CSlice in the runtime, they might be a CSlice with length = i32::MAX and
+# ptr = string key in the host.
+
# Keep this in sync with the function ARTIQIRGenerator.alloc_exn.
attributes = OrderedDict([
-("__name__", TStr()),
+("#__name__", TInt32()),
-("__file__", TStr()),
+("#__file__", TStr()),
-("__line__", TInt32()),
+("#__line__", TInt32()),
-("__col__", TInt32()),
+("#__col__", TInt32()),
-("__func__", TStr()),
+("#__func__", TStr()),
-("__message__", TStr()),
+("#__message__", TStr()),
-("__param0__", TInt64()),
+("#__param0__", TInt64()),
-("__param1__", TInt64()),
+("#__param1__", TInt64()),
-("__param2__", TInt64()),
+("#__param2__", TInt64()),
])

def __init__(self, name="Exception", id=0):

@@ -155,7 +177,9 @@ def fn_list():
return types.TConstructor(TList())

def fn_array():
-return types.TConstructor(TArray())
+# numpy.array() is actually a "magic" macro that is expanded in-place, but
+# just as for builtin functions, we do not want to quote it, etc.
+return types.TBuiltinFunction("array")

def fn_Exception():
return types.TExceptionConstructor(TException("Exception"))

@@ -181,6 +205,9 @@ def fn_len():
def fn_round():
return types.TBuiltinFunction("round")

+def fn_abs():
+return types.TBuiltinFunction("abs")
+
def fn_min():
return types.TBuiltinFunction("min")

@@ -205,9 +232,6 @@ def obj_interleave():
def obj_sequential():
return types.TBuiltin("sequential")

-def fn_watchdog():
-return types.TBuiltinFunction("watchdog")

def fn_delay():
return types.TBuiltinFunction("delay")

@@ -223,6 +247,12 @@ def fn_at_mu():
def fn_rtio_log():
return types.TBuiltinFunction("rtio_log")

+def fn_subkernel_await():
+return types.TBuiltinFunction("subkernel_await")
+
+def fn_subkernel_preload():
+return types.TBuiltinFunction("subkernel_preload")
+
# Accessors

def is_none(typ):

@@ -301,9 +331,12 @@ def is_iterable(typ):
return is_listish(typ) or is_range(typ)

def get_iterable_elt(typ):
+# TODO: Arrays count as listish, but this returns the innermost element type for
+# n-dimensional arrays, rather than the n-1 dimensional result of iterating over
+# the first axis, which makes the name a bit misleading.
if is_str(typ) or is_bytes(typ) or is_bytearray(typ):
-return TInt(types.TValue(8))
+return TInt8()
-elif is_iterable(typ):
+elif types._is_pointer(typ) or is_iterable(typ):
return typ.find()["elt"].find()
else:
assert False

@@ -317,6 +350,6 @@ def is_allocated(typ):
return not (is_none(typ) or is_bool(typ) or is_int(typ) or
is_float(typ) or is_range(typ) or
types._is_pointer(typ) or types.is_function(typ) or
-types.is_c_function(typ) or types.is_rpc(typ) or
+types.is_external_function(typ) or types.is_rpc(typ) or
-types.is_method(typ) or types.is_tuple(typ) or
+types.is_subkernel(typ) or types.is_method(typ) or
-types.is_value(typ))
+types.is_tuple(typ) or types.is_value(typ))
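After the change above, TArray carries a compile-time num_dims next to the element type and derives its buffer/shape attributes from it, and fn_array() becomes a builtin-function type so numpy.array() can be expanded as a macro. A small sketch of what the type now looks like, assuming it is run inside an ARTIQ checkout that contains this patch:

    # Sketch only: ARTIQ compiler package importable from the patched source tree.
    from artiq.compiler import builtins

    t = builtins.TArray(builtins.TFloat(), num_dims=2)
    print(t["num_dims"].value)        # 2
    print(t.attributes["shape"])      # a TTuple of two TInt32 entries
    print(builtins.TInt8())           # the new 8-bit helper used by get_iterable_elt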
@@ -5,7 +5,8 @@ the references to the host objects and translates the functions
annotated as ``@kernel`` when they are referenced.
"""

-import sys, os, re, linecache, inspect, textwrap, types as pytypes, numpy
+import typing
+import os, re, linecache, inspect, textwrap, types as pytypes, numpy
from collections import OrderedDict, defaultdict

from pythonparser import ast, algorithm, source, diagnostic, parse_buffer

@@ -14,10 +15,17 @@ from pythonparser import lexer as source_lexer, parser as source_parser
from Levenshtein import ratio as similarity, jaro_winkler

from ..language import core as language_core
-from . import types, builtins, asttyped, prelude
+from . import types, builtins, asttyped, math_fns, prelude
from .transforms import ASTTypedRewriter, Inferencer, IntMonomorphizer, TypedtreePrinter
from .transforms.asttyped_rewriter import LocalExtractor

+try:
+# From numpy=1.25.0 dispatching for `__array_function__` is done via
+# a C wrapper: https://github.com/numpy/numpy/pull/23020
+from numpy.core._multiarray_umath import _ArrayFunctionDispatcher
+except ImportError:
+_ArrayFunctionDispatcher = None
+

class SpecializedFunction:
def __init__(self, instance_type, host_function):

@@ -45,8 +53,48 @@ class EmbeddingMap:
self.object_forward_map = {}
self.object_reverse_map = {}
self.module_map = {}

+# type_map connects the host Python `type` to the pair of associated
+# `(TInstance, TConstructor)`s. The `used_…_names` sets cache the
+# respective `.name`s for O(1) collision avoidance.
self.type_map = {}
+self.used_instance_type_names = set()
+self.used_constructor_type_names = set()

self.function_map = {}
+self.str_forward_map = {}
+self.str_reverse_map = {}
+
+self.preallocate_runtime_exception_names(["RuntimeError",
+"RTIOUnderflow",
+"RTIOOverflow",
+"RTIODestinationUnreachable",
+"DMAError",
+"I2CError",
+"CacheError",
+"SPIError",
+"0:ZeroDivisionError",
+"0:IndexError",
+"UnwrapNoneError",
+"SubkernelError"])
+
+def preallocate_runtime_exception_names(self, names):
+for i, name in enumerate(names):
+if ":" not in name:
+name = "0:artiq.coredevice.exceptions." + name
+exn_id = self.store_str(name)
+assert exn_id == i
+
+def store_str(self, s):
+if s in self.str_forward_map:
+return self.str_forward_map[s]
+str_id = len(self.str_forward_map)
+self.str_forward_map[s] = str_id
+self.str_reverse_map[str_id] = s
+return str_id
+
+def retrieve_str(self, str_id):
+return self.str_reverse_map[str_id]
+
# Modules
def store_module(self, module, module_type):

@@ -60,16 +108,6 @@ class EmbeddingMap:

# Types
def store_type(self, host_type, instance_type, constructor_type):
-self._rename_type(instance_type)
-self.type_map[host_type] = (instance_type, constructor_type)

-def retrieve_type(self, host_type):
-return self.type_map[host_type]

-def has_type(self, host_type):
-return host_type in self.type_map

-def _rename_type(self, new_instance_type):
# Generally, user-defined types that have exact same name (which is to say, classes
# defined inside functions) do not pose a problem to the compiler. The two places which
# cannot handle this are:

@@ -78,12 +116,29 @@ class EmbeddingMap:
# Since handling #2 requires renaming on ARTIQ side anyway, it's more straightforward
# to do it once when embedding (since non-embedded code cannot define classes in
# functions). Also, easier to debug.
-n = 0
+suffix = 0
-for host_type in self.type_map:
+new_instance_name = instance_type.name
-instance_type, constructor_type = self.type_map[host_type]
+new_constructor_name = constructor_type.name
-if instance_type.name == new_instance_type.name:
+while True:
-n += 1
+if (new_instance_name not in self.used_instance_type_names
-new_instance_type.name = "{}.{}".format(new_instance_type.name, n)
+and new_constructor_name not in self.used_constructor_type_names):
+break
+suffix += 1
+new_instance_name = f"{instance_type.name}.{suffix}"
+new_constructor_name = f"{constructor_type.name}.{suffix}"
+
+self.used_instance_type_names.add(new_instance_name)
+instance_type.name = new_instance_name
+self.used_constructor_type_names.add(new_constructor_name)
+constructor_type.name = new_constructor_name
+
+self.type_map[host_type] = (instance_type, constructor_type)
+
+def retrieve_type(self, host_type):
+return self.type_map[host_type]
+
+def has_type(self, host_type):
+return host_type in self.type_map
+
def attribute_count(self):
count = 0
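The string interning added to EmbeddingMap hands out dense sequential IDs, which is why the runtime exception names are preallocated in a fixed order (their IDs have to line up with what the firmware expects, hence the assert). The mechanism reduced to a standalone sketch, outside of ARTIQ:

    # Standalone reduction of EmbeddingMap.store_str()/retrieve_str(); not ARTIQ code.
    class StrInterner:
        def __init__(self):
            self.forward, self.reverse = {}, {}

        def store_str(self, s):
            if s in self.forward:
                return self.forward[s]
            str_id = len(self.forward)
            self.forward[s] = str_id
            self.reverse[str_id] = s
            return str_id

        def retrieve_str(self, str_id):
            return self.reverse[str_id]


    interner = StrInterner()
    assert interner.store_str("0:artiq.coredevice.exceptions.RuntimeError") == 0
    assert interner.store_str("0:artiq.coredevice.exceptions.RTIOUnderflow") == 1
    assert interner.store_str("0:artiq.coredevice.exceptions.RuntimeError") == 0  # stable
    assert interner.retrieve_str(1).endswith("RTIOUnderflow")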
@@ -130,7 +185,22 @@ class EmbeddingMap:
obj_typ, _ = self.type_map[type(obj_ref)]
yield obj_id, obj_ref, obj_typ

+def subkernels(self):
+subkernels = {}
+for k, v in self.object_forward_map.items():
+if hasattr(v, "artiq_embedded"):
+if v.artiq_embedded.destination is not None:
+subkernels[k] = v
+return subkernels
+
def has_rpc(self):
+return any(filter(
+lambda x: (inspect.isfunction(x) or inspect.ismethod(x)) and \
+(not hasattr(x, "artiq_embedded") or x.artiq_embedded.destination is None),
+self.object_forward_map.values()
+))
+
+def has_rpc_or_subkernel(self):
return any(filter(lambda x: inspect.isfunction(x) or inspect.ismethod(x),
self.object_forward_map.values()))

@@ -138,6 +208,7 @@ class EmbeddingMap:
class ASTSynthesizer:
def __init__(self, embedding_map, value_map, quote_function=None, expanded_from=None):
self.source = ""
+self.source_last_new_line = 0
self.source_buffer = source.Buffer(self.source, "<synthesized>")
self.embedding_map = embedding_map
self.value_map = value_map

@@ -156,16 +227,90 @@ class ASTSynthesizer:
return source.Range(self.source_buffer, range_from, range_to,
expanded_from=self.expanded_from)

+def _add_iterable(self, fragment):
+# Since DILocation points on the beginning of the piece of source
+# we don't care if the fragment's end will overflow LLVM's limit.
+if len(self.source) - self.source_last_new_line >= 2**16:
+fragment = "\\\n" + fragment
+self.source_last_new_line = len(self.source) + 2
+return self._add(fragment)
+
+def fast_quote_list(self, value):
+elts = [None] * len(value)
+is_T = False
+if len(value) > 0:
+v = value[0]
+is_T = True
+if isinstance(v, int):
+T = int
+elif isinstance(v, float):
+T = float
+elif isinstance(v, numpy.int32):
+T = numpy.int32
+elif isinstance(v, numpy.int64):
+T = numpy.int64
+else:
+is_T = False
+if is_T:
+for v in value:
+if not isinstance(v, T):
+is_T = False
+break
+if is_T:
+is_int = T != float
+if T == int:
+typ = builtins.TInt()
+elif T == float:
+typ = builtins.TFloat()
+elif T == numpy.int32:
+typ = builtins.TInt32()
+elif T == numpy.int64:
+typ = builtins.TInt64()
+else:
+assert False
+text = [repr(elt) for elt in value]
+start = len(self.source)
+self.source += ", ".join(text)
+if is_int:
+for i, (v, t) in enumerate(zip(value, text)):
+l = len(t)
+elts[i] = asttyped.NumT(
+n=int(v), ctx=None, type=typ,
+loc=source.Range(
+self.source_buffer, start, start + l,
+expanded_from=self.expanded_from))
+start += l + 2
+else:
+for i, (v, t) in enumerate(zip(value, text)):
+l = len(t)
+elts[i] = asttyped.NumT(
+n=v, ctx=None, type=typ,
+loc=source.Range(
+self.source_buffer, start, start + l,
+expanded_from=self.expanded_from))
+start += l + 2
+else:
+for index, elt in enumerate(value):
+elts[index] = self.quote(elt)
+if index < len(value) - 1:
+self._add_iterable(", ")
+return elts
+
def quote(self, value):
"""Construct an AST fragment equal to `value`."""
if value is None:
typ = builtins.TNone()
return asttyped.NameConstantT(value=value, type=typ,
loc=self._add(repr(value)))
-elif value is True or value is False:
+elif isinstance(value, (bool, numpy.bool_)):
typ = builtins.TBool()
-return asttyped.NameConstantT(value=value, type=typ,
+coerced = bool(value)
-loc=self._add(repr(value)))
+return asttyped.NameConstantT(value=coerced, type=typ,
+loc=self._add(repr(coerced)))
+elif value is float:
+typ = builtins.fn_float()
+return asttyped.NameConstantT(value=None, type=typ,
+loc=self._add("float"))
elif value is numpy.int32:
typ = builtins.fn_int32()
return asttyped.NameConstantT(value=None, type=typ,
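fast_quote_list() above only takes its fast path when every element of the quoted list or tuple shares one scalar type (int, float, numpy.int32 or numpy.int64); anything else falls back to quoting element by element through quote(). The homogeneity test, reduced to a standalone approximation:

    # Approximation of the fast-path type check in fast_quote_list(); not ARTIQ code.
    import numpy


    def fast_path_type(value):
        if not value:
            return None
        first = value[0]
        for T in (int, float, numpy.int32, numpy.int64):  # same cascade order as the patch
            if isinstance(first, T):
                break
        else:
            return None
        return T if all(isinstance(v, T) for v in value) else None


    print(fast_path_type([1, 2, 3]))            # <class 'int'>: fast path
    print(fast_path_type([1, 2.5, 3]))          # None: falls back to per-element quote()
    print(fast_path_type(list(numpy.arange(3, dtype=numpy.int32))))  # numpy.int32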
@@ -199,43 +344,40 @@ class ASTSynthesizer:
loc=self._add(repr(value)))
elif isinstance(value, str):
return asttyped.StrT(s=value, ctx=None, type=builtins.TStr(),
-loc=self._add(repr(value)))
+loc=self._add_iterable(repr(value)))
elif isinstance(value, bytes):
return asttyped.StrT(s=value, ctx=None, type=builtins.TBytes(),
-loc=self._add(repr(value)))
+loc=self._add_iterable(repr(value)))
elif isinstance(value, bytearray):
-quote_loc = self._add('`')
+quote_loc = self._add_iterable('`')
-repr_loc = self._add(repr(value))
+repr_loc = self._add_iterable(repr(value))
-unquote_loc = self._add('`')
+unquote_loc = self._add_iterable('`')
loc = quote_loc.join(unquote_loc)

return asttyped.QuoteT(value=value, type=builtins.TByteArray(), loc=loc)
elif isinstance(value, list):
-begin_loc = self._add("[")
+begin_loc = self._add_iterable("[")
-elts = []
+elts = self.fast_quote_list(value)
-for index, elt in enumerate(value):
+end_loc = self._add_iterable("]")
-elts.append(self.quote(elt))
-if index < len(value) - 1:
-self._add(", ")
-end_loc = self._add("]")
return asttyped.ListT(elts=elts, ctx=None, type=builtins.TList(),
begin_loc=begin_loc, end_loc=end_loc,
loc=begin_loc.join(end_loc))
+elif isinstance(value, tuple):
+begin_loc = self._add_iterable("(")
+elts = self.fast_quote_list(value)
+end_loc = self._add_iterable(")")
+return asttyped.TupleT(elts=elts, ctx=None,
+type=types.TTuple([e.type for e in elts]),
+begin_loc=begin_loc, end_loc=end_loc,
+loc=begin_loc.join(end_loc))
elif isinstance(value, numpy.ndarray):
-begin_loc = self._add("numpy.array([")
+return self.call(numpy.array, [list(value)], {})
-elts = []
-for index, elt in enumerate(value):
-elts.append(self.quote(elt))
-if index < len(value) - 1:
-self._add(", ")
-end_loc = self._add("])")

-return asttyped.ListT(elts=elts, ctx=None, type=builtins.TArray(),
-begin_loc=begin_loc, end_loc=end_loc,
-loc=begin_loc.join(end_loc))
elif inspect.isfunction(value) or inspect.ismethod(value) or \
isinstance(value, pytypes.BuiltinFunctionType) or \
-isinstance(value, SpecializedFunction):
+isinstance(value, SpecializedFunction) or \
+isinstance(value, numpy.ufunc) or \
+(isinstance(value, _ArrayFunctionDispatcher) if
+_ArrayFunctionDispatcher is not None else False):
if inspect.ismethod(value):
quoted_self = self.quote(value.__self__)
function_type = self.quote_function(value.__func__, self.expanded_from)

@@ -344,7 +486,7 @@ class ASTSynthesizer:
return asttyped.QuoteT(value=value, type=instance_type,
loc=loc)

-def call(self, callee, args, kwargs, callback=None):
+def call(self, callee, args, kwargs, callback=None, remote_fn=False):
"""
Construct an AST fragment calling a function specified by
an AST node `function_node`, with given arguments.

@@ -388,7 +530,7 @@ class ASTSynthesizer:
starargs=None, kwargs=None,
type=types.TVar(), iodelay=None, arg_exprs={},
begin_loc=begin_loc, end_loc=end_loc, star_loc=None, dstar_loc=None,
-loc=callee_node.loc.join(end_loc))
+loc=callee_node.loc.join(end_loc), remote_fn=remote_fn)

if callback is not None:
node = asttyped.CallT(

@@ -423,7 +565,7 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
arg=node.arg, annotation=None,
arg_loc=node.arg_loc, colon_loc=node.colon_loc, loc=node.loc)

-def visit_quoted_function(self, node, function):
+def visit_quoted_function(self, node, function, remote_fn):
extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
extractor.visit(node)

@@ -440,11 +582,11 @@ class StitchingASTTypedRewriter(ASTTypedRewriter):
node = asttyped.QuotedFunctionDefT(
typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
signature_type=types.TVar(), return_type=types.TVar(),
-name=node.name, args=node.args, returns=node.returns,
+name=node.name, args=node.args, returns=None,
body=node.body, decorator_list=node.decorator_list,
keyword_loc=node.keyword_loc, name_loc=node.name_loc,
arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
-loc=node.loc)
+loc=node.loc, remote_fn=remote_fn)

try:
self.env_stack.append(node.typing_env)
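Among the quote() changes above, the bool branch is the subtle one: numpy.bool_ values are now accepted and coerced to a plain bool before the NameConstant is synthesized, because numpy.bool_(True) is not the True singleton that the old "value is True or value is False" test matched. In isolation:

    # Why the explicit bool() coercion matters; plain Python + numpy, not ARTIQ code.
    import numpy

    flag = numpy.bool_(True)
    print(flag is True)         # False: numpy.bool_ is a distinct scalar type
    print(bool(flag) is True)   # True once coerced, so repr() and the AST node agree
    print(isinstance(flag, (bool, numpy.bool_)))  # the new acceptance test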
@@ -516,7 +658,7 @@ class StitchingInferencer(Inferencer):
self.engine.process(diag)
return

-# Figure out what ARTIQ type does the value of the attribute have.
+# Figure out the ARTIQ type of the value of the attribute.
# We do this by quoting it, as if to serialize. This has some
# overhead (i.e. synthesizing a source buffer), but has the advantage
# of having the host-to-ARTIQ mapping code in only one place and

@@ -652,7 +794,7 @@ class TypedtreeHasher(algorithm.Visitor):
return hash(tuple(freeze(getattr(node, field_name)) for field_name in fields))

class Stitcher:
-def __init__(self, core, dmgr, engine=None, print_as_rpc=True):
+def __init__(self, core, dmgr, engine=None, print_as_rpc=True, destination=0, subkernel_arg_types=[]):
self.core = core
self.dmgr = dmgr
if engine is None:

@@ -676,12 +818,21 @@ class Stitcher:

self.embedding_map = EmbeddingMap()
self.value_map = defaultdict(lambda: [])
+self.definitely_changed = False
+
+self.destination = destination
+self.first_call = True
+# for non-annotated subkernels:
+# main kernel inferencer output with types of arguments
+self.subkernel_arg_types = subkernel_arg_types

def stitch_call(self, function, args, kwargs, callback=None):
# We synthesize source code for the initial call so that
# diagnostics would have something meaningful to display to the user.
synthesizer = self._synthesizer(self._function_loc(function.artiq_embedded.function))
-call_node = synthesizer.call(function, args, kwargs, callback)
+# first call of a subkernel will get its arguments from remote (DRTIO)
+remote_fn = self.destination != 0
+call_node = synthesizer.call(function, args, kwargs, callback, remote_fn=remote_fn)
synthesizer.finalize()
self.typedtree.append(call_node)

@@ -696,13 +847,19 @@ class Stitcher:
old_attr_count = None
while True:
inferencer.visit(self.typedtree)
-typedtree_hash = typedtree_hasher.visit(self.typedtree)
+if self.definitely_changed:
-attr_count = self.embedding_map.attribute_count()
+changed = True
+self.definitely_changed = False
+else:
+typedtree_hash = typedtree_hasher.visit(self.typedtree)
+attr_count = self.embedding_map.attribute_count()
+changed = old_attr_count != attr_count or \
+old_typedtree_hash != typedtree_hash
+old_typedtree_hash = typedtree_hash
+old_attr_count = attr_count

-if old_typedtree_hash == typedtree_hash and old_attr_count == attr_count:
+if not changed:
break
-old_typedtree_hash = typedtree_hash
-old_attr_count = attr_count

# After we've discovered every referenced attribute, check if any kernel_invariant
# specifications refers to ones we didn't encounter.

@@ -754,6 +911,9 @@ class Stitcher:
if hasattr(function, 'artiq_embedded') and function.artiq_embedded.function:
|
if hasattr(function, 'artiq_embedded') and function.artiq_embedded.function:
|
||||||
function = function.artiq_embedded.function
|
function = function.artiq_embedded.function
|
||||||
|
|
||||||
|
if isinstance(function, str):
|
||||||
|
return source.Range(source.Buffer(function, "<string>"), 0, 0)
|
||||||
|
|
||||||
filename = function.__code__.co_filename
|
filename = function.__code__.co_filename
|
||||||
line = function.__code__.co_firstlineno
|
line = function.__code__.co_firstlineno
|
||||||
name = function.__code__.co_name
|
name = function.__code__.co_name
|
||||||
|
@ -784,6 +944,10 @@ class Stitcher:
|
||||||
return [diagnostic.Diagnostic("note",
|
return [diagnostic.Diagnostic("note",
|
||||||
"in kernel function here", {},
|
"in kernel function here", {},
|
||||||
call_loc)]
|
call_loc)]
|
||||||
|
elif fn_kind == 'subkernel':
|
||||||
|
return [diagnostic.Diagnostic("note",
|
||||||
|
"in subkernel call here", {},
|
||||||
|
call_loc)]
|
||||||
else:
|
else:
|
||||||
assert False
|
assert False
|
||||||
else:
|
else:
|
||||||
|
@ -803,7 +967,7 @@ class Stitcher:
|
||||||
self._function_loc(function),
|
self._function_loc(function),
|
||||||
notes=self._call_site_note(loc, fn_kind))
|
notes=self._call_site_note(loc, fn_kind))
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
elif fn_kind == 'rpc' and param.default is not inspect.Parameter.empty:
|
elif fn_kind == 'rpc' or fn_kind == 'subkernel' and param.default is not inspect.Parameter.empty:
|
||||||
notes = []
|
notes = []
|
||||||
notes.append(diagnostic.Diagnostic("note",
|
notes.append(diagnostic.Diagnostic("note",
|
||||||
"expanded from here while trying to infer a type for an"
|
"expanded from here while trying to infer a type for an"
|
||||||
|
@ -822,11 +986,21 @@ class Stitcher:
|
||||||
Inferencer(engine=self.engine).visit(ast)
|
Inferencer(engine=self.engine).visit(ast)
|
||||||
IntMonomorphizer(engine=self.engine).visit(ast)
|
IntMonomorphizer(engine=self.engine).visit(ast)
|
||||||
return ast.type
|
return ast.type
|
||||||
else:
|
elif fn_kind == 'kernel' and self.first_call and self.destination != 0:
|
||||||
# Let the rest of the program decide.
|
# subkernels do not have access to the main kernel code to infer
|
||||||
return types.TVar()
|
# arg types - so these are cached and passed onto subkernel
|
||||||
|
# compilation, to avoid having to annotate them fully
|
||||||
|
for name, typ in self.subkernel_arg_types:
|
||||||
|
if param.name == name:
|
||||||
|
return typ
|
||||||
|
|
||||||
|
# Let the rest of the program decide.
|
||||||
|
return types.TVar()
|
||||||
|
|
||||||
|
def _quote_embedded_function(self, function, flags, remote_fn=False):
|
||||||
|
# we are now parsing new functions... definitely changed the type
|
||||||
|
self.definitely_changed = True
|
||||||
|
|
||||||
def _quote_embedded_function(self, function, flags):
|
|
||||||
if isinstance(function, SpecializedFunction):
|
if isinstance(function, SpecializedFunction):
|
||||||
host_function = function.host_function
|
host_function = function.host_function
|
||||||
else:
|
else:
|
||||||
|
@ -837,10 +1011,20 @@ class Stitcher:
|
||||||
|
|
||||||
# Extract function source.
|
# Extract function source.
|
||||||
embedded_function = host_function.artiq_embedded.function
|
embedded_function = host_function.artiq_embedded.function
|
||||||
source_code = inspect.getsource(embedded_function)
|
if isinstance(embedded_function, str):
|
||||||
filename = embedded_function.__code__.co_filename
|
# This is a function to be eval'd from the given source code in string form.
|
||||||
module_name = embedded_function.__globals__['__name__']
|
# Mangle the host function's id() into the fully qualified name to make sure
|
||||||
first_line = embedded_function.__code__.co_firstlineno
|
# there are no collisions.
|
||||||
|
source_code = embedded_function
|
||||||
|
embedded_function = host_function
|
||||||
|
filename = "<string>"
|
||||||
|
module_name = "__eval_{}".format(id(host_function))
|
||||||
|
first_line = 1
|
||||||
|
else:
|
||||||
|
source_code = inspect.getsource(embedded_function)
|
||||||
|
filename = embedded_function.__code__.co_filename
|
||||||
|
module_name = embedded_function.__globals__['__name__']
|
||||||
|
first_line = embedded_function.__code__.co_firstlineno
|
||||||
|
|
||||||
# Extract function annotation.
|
# Extract function annotation.
|
||||||
signature = inspect.signature(embedded_function)
|
signature = inspect.signature(embedded_function)
|
||||||
|
@ -883,13 +1067,11 @@ class Stitcher:
|
||||||
|
|
||||||
# Parse.
|
# Parse.
|
||||||
source_buffer = source.Buffer(source_code, filename, first_line)
|
source_buffer = source.Buffer(source_code, filename, first_line)
|
||||||
lexer = source_lexer.Lexer(source_buffer, version=sys.version_info[0:2],
|
lexer = source_lexer.Lexer(source_buffer, version=(3, 6), diagnostic_engine=self.engine)
|
||||||
diagnostic_engine=self.engine)
|
|
||||||
lexer.indent = [(initial_indent,
|
lexer.indent = [(initial_indent,
|
||||||
source.Range(source_buffer, 0, len(initial_whitespace)),
|
source.Range(source_buffer, 0, len(initial_whitespace)),
|
||||||
initial_whitespace)]
|
initial_whitespace)]
|
||||||
parser = source_parser.Parser(lexer, version=sys.version_info[0:2],
|
parser = source_parser.Parser(lexer, version=(3, 6), diagnostic_engine=self.engine)
|
||||||
diagnostic_engine=self.engine)
|
|
||||||
function_node = parser.file_input().body[0]
|
function_node = parser.file_input().body[0]
|
||||||
|
|
||||||
# Mangle the name, since we put everything into a single module.
|
# Mangle the name, since we put everything into a single module.
|
||||||
|
@ -914,7 +1096,7 @@ class Stitcher:
|
||||||
engine=self.engine, prelude=self.prelude,
|
engine=self.engine, prelude=self.prelude,
|
||||||
globals=self.globals, host_environment=host_environment,
|
globals=self.globals, host_environment=host_environment,
|
||||||
quote=self._quote)
|
quote=self._quote)
|
||||||
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function)
|
function_node = asttyped_rewriter.visit_quoted_function(function_node, embedded_function, remote_fn)
|
||||||
function_node.flags = flags
|
function_node.flags = flags
|
||||||
|
|
||||||
# Add it into our typedtree so that it gets inferenced and codegen'd.
|
# Add it into our typedtree so that it gets inferenced and codegen'd.
|
||||||
|
@ -926,23 +1108,108 @@ class Stitcher:
|
||||||
return function_node
|
return function_node
|
||||||
|
|
||||||
def _extract_annot(self, function, annot, kind, call_loc, fn_kind):
|
def _extract_annot(self, function, annot, kind, call_loc, fn_kind):
|
||||||
if not isinstance(annot, types.Type):
|
if isinstance(function, SpecializedFunction):
|
||||||
diag = diagnostic.Diagnostic("error",
|
host_function = function.host_function
|
||||||
"type annotation for {kind}, '{annot}', is not an ARTIQ type",
|
|
||||||
{"kind": kind, "annot": repr(annot)},
|
|
||||||
self._function_loc(function),
|
|
||||||
notes=self._call_site_note(call_loc, fn_kind))
|
|
||||||
self.engine.process(diag)
|
|
||||||
|
|
||||||
return types.TVar()
|
|
||||||
else:
|
else:
|
||||||
|
host_function = function
|
||||||
|
|
||||||
|
if hasattr(host_function, 'artiq_embedded'):
|
||||||
|
embedded_function = host_function.artiq_embedded.function
|
||||||
|
else:
|
||||||
|
embedded_function = host_function
|
||||||
|
|
||||||
|
if isinstance(embedded_function, str):
|
||||||
|
embedded_function = host_function
|
||||||
|
|
||||||
|
return self._to_artiq_type(
|
||||||
|
annot,
|
||||||
|
function=function,
|
||||||
|
kind=kind,
|
||||||
|
eval_in_scope=lambda x: eval(x, embedded_function.__globals__),
|
||||||
|
call_loc=call_loc,
|
||||||
|
fn_kind=fn_kind)
|
||||||
|
|
||||||
|
def _to_artiq_type(
|
||||||
|
self, annot, *, function, kind: str, eval_in_scope, call_loc: str, fn_kind: str
|
||||||
|
) -> types.Type:
|
||||||
|
if isinstance(annot, str):
|
||||||
|
try:
|
||||||
|
annot = eval_in_scope(annot)
|
||||||
|
except Exception:
|
||||||
|
diag = diagnostic.Diagnostic(
|
||||||
|
"error",
|
||||||
|
"type annotation for {kind}, {annot}, cannot be evaluated",
|
||||||
|
{"kind": kind, "annot": repr(annot)},
|
||||||
|
self._function_loc(function),
|
||||||
|
notes=self._call_site_note(call_loc, fn_kind))
|
||||||
|
self.engine.process(diag)
|
||||||
|
|
||||||
|
if isinstance(annot, types.Type):
|
||||||
return annot
|
return annot
|
||||||
|
|
||||||
|
# Convert built-in Python types to ARTIQ ones.
|
||||||
|
if annot is None:
|
||||||
|
return builtins.TNone()
|
||||||
|
elif annot is numpy.int64:
|
||||||
|
return builtins.TInt64()
|
||||||
|
elif annot is numpy.int32:
|
||||||
|
return builtins.TInt32()
|
||||||
|
elif annot is float:
|
||||||
|
return builtins.TFloat()
|
||||||
|
elif annot is bool:
|
||||||
|
return builtins.TBool()
|
||||||
|
elif annot is str:
|
||||||
|
return builtins.TStr()
|
||||||
|
elif annot is bytes:
|
||||||
|
return builtins.TBytes()
|
||||||
|
elif annot is bytearray:
|
||||||
|
return builtins.TByteArray()
|
||||||
|
|
||||||
|
# Convert generic Python types to ARTIQ ones.
|
||||||
|
generic_ty = typing.get_origin(annot)
|
||||||
|
if generic_ty is not None:
|
||||||
|
type_args = typing.get_args(annot)
|
||||||
|
artiq_args = [
|
||||||
|
self._to_artiq_type(
|
||||||
|
x,
|
||||||
|
function=function,
|
||||||
|
kind=kind,
|
||||||
|
eval_in_scope=eval_in_scope,
|
||||||
|
call_loc=call_loc,
|
||||||
|
fn_kind=fn_kind)
|
||||||
|
for x in type_args
|
||||||
|
]
|
||||||
|
|
||||||
|
if generic_ty is list and len(artiq_args) == 1:
|
||||||
|
return builtins.TList(artiq_args[0])
|
||||||
|
elif generic_ty is tuple:
|
||||||
|
return types.TTuple(artiq_args)
|
||||||
|
|
||||||
|
# Otherwise report an unknown type and just use a fresh tyvar.
|
||||||
|
|
||||||
|
if annot is int:
|
||||||
|
message = (
|
||||||
|
"type annotation for {kind}, 'int' cannot be used as an ARTIQ type. "
|
||||||
|
"Use numpy's int32 or int64 instead."
|
||||||
|
)
|
||||||
|
ty = builtins.TInt()
|
||||||
|
else:
|
||||||
|
message = "type annotation for {kind}, '{annot}', is not an ARTIQ type"
|
||||||
|
ty = types.TVar()
|
||||||
|
|
||||||
|
diag = diagnostic.Diagnostic("error",
|
||||||
|
message,
|
||||||
|
{"kind": kind, "annot": repr(annot)},
|
||||||
|
self._function_loc(function),
|
||||||
|
notes=self._call_site_note(call_loc, fn_kind))
|
||||||
|
self.engine.process(diag)
|
||||||
|
|
||||||
|
return ty
|
||||||
|
|
||||||
def _quote_syscall(self, function, loc):
|
def _quote_syscall(self, function, loc):
|
||||||
signature = inspect.signature(function)
|
signature = inspect.signature(function)
|
||||||
|
|
||||||
arg_types = OrderedDict()
|
arg_types = OrderedDict()
|
||||||
optarg_types = OrderedDict()
|
|
||||||
for param in signature.parameters.values():
|
for param in signature.parameters.values():
|
||||||
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
||||||
diag = diagnostic.Diagnostic("error",
|
diag = diagnostic.Diagnostic("error",
|
||||||
|
@ -974,9 +1241,43 @@ class Stitcher:
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
ret_type = types.TVar()
|
ret_type = types.TVar()
|
||||||
|
|
||||||
function_type = types.TCFunction(arg_types, ret_type,
|
function_type = types.TExternalFunction(arg_types, ret_type,
|
||||||
name=function.artiq_embedded.syscall,
|
name=function.artiq_embedded.syscall,
|
||||||
flags=function.artiq_embedded.flags)
|
flags=function.artiq_embedded.flags)
|
||||||
|
self.functions[function] = function_type
|
||||||
|
return function_type
|
||||||
|
|
||||||
|
def _quote_subkernel(self, function, loc):
|
||||||
|
if isinstance(function, SpecializedFunction):
|
||||||
|
host_function = function.host_function
|
||||||
|
else:
|
||||||
|
host_function = function
|
||||||
|
ret_type = builtins.TNone()
|
||||||
|
signature = inspect.signature(host_function)
|
||||||
|
|
||||||
|
if signature.return_annotation is not inspect.Signature.empty:
|
||||||
|
ret_type = self._extract_annot(host_function, signature.return_annotation,
|
||||||
|
"return type", loc, fn_kind='subkernel')
|
||||||
|
arg_types = OrderedDict()
|
||||||
|
optarg_types = OrderedDict()
|
||||||
|
for param in signature.parameters.values():
|
||||||
|
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
|
||||||
|
diag = diagnostic.Diagnostic("error",
|
||||||
|
"subkernels must only use positional arguments; '{argument}' isn't",
|
||||||
|
{"argument": param.name},
|
||||||
|
self._function_loc(function),
|
||||||
|
notes=self._call_site_note(loc, fn_kind='subkernel'))
|
||||||
|
self.engine.process(diag)
|
||||||
|
|
||||||
|
arg_type = self._type_of_param(function, loc, param, fn_kind='subkernel')
|
||||||
|
if param.default is inspect.Parameter.empty:
|
||||||
|
arg_types[param.name] = arg_type
|
||||||
|
else:
|
||||||
|
optarg_types[param.name] = arg_type
|
||||||
|
|
||||||
|
function_type = types.TSubkernel(arg_types, optarg_types, ret_type,
|
||||||
|
sid=self.embedding_map.store_object(host_function),
|
||||||
|
destination=host_function.artiq_embedded.destination)
|
||||||
self.functions[function] = function_type
|
self.functions[function] = function_type
|
||||||
return function_type
|
return function_type
|
||||||
|
|
||||||
|
@ -1019,7 +1320,7 @@ class Stitcher:
|
||||||
|
|
||||||
function_type = types.TRPC(ret_type,
|
function_type = types.TRPC(ret_type,
|
||||||
service=self.embedding_map.store_object(host_function),
|
service=self.embedding_map.store_object(host_function),
|
||||||
async=is_async)
|
is_async=is_async)
|
||||||
self.functions[function] = function_type
|
self.functions[function] = function_type
|
||||||
return function_type
|
return function_type
|
||||||
|
|
||||||
|
@ -1030,13 +1331,27 @@ class Stitcher:
|
||||||
host_function = function
|
host_function = function
|
||||||
|
|
||||||
if function in self.functions:
|
if function in self.functions:
|
||||||
pass
|
return self.functions[function]
|
||||||
|
|
||||||
|
math_type = math_fns.match(function)
|
||||||
|
if math_type is not None:
|
||||||
|
self.functions[function] = math_type
|
||||||
elif not hasattr(host_function, "artiq_embedded") or \
|
elif not hasattr(host_function, "artiq_embedded") or \
|
||||||
(host_function.artiq_embedded.core_name is None and
|
(host_function.artiq_embedded.core_name is None and
|
||||||
host_function.artiq_embedded.portable is False and
|
host_function.artiq_embedded.portable is False and
|
||||||
host_function.artiq_embedded.syscall is None and
|
host_function.artiq_embedded.syscall is None and
|
||||||
|
host_function.artiq_embedded.destination is None and
|
||||||
host_function.artiq_embedded.forbidden is False):
|
host_function.artiq_embedded.forbidden is False):
|
||||||
self._quote_rpc(function, loc)
|
self._quote_rpc(function, loc)
|
||||||
|
elif host_function.artiq_embedded.destination is not None and \
|
||||||
|
host_function.artiq_embedded.destination != self.destination:
|
||||||
|
# treat subkernels as kernels if running on the same device
|
||||||
|
if not 0 < host_function.artiq_embedded.destination <= 255:
|
||||||
|
diag = diagnostic.Diagnostic("error",
|
||||||
|
"subkernel destination must be between 1 and 255 (inclusive)", {},
|
||||||
|
self._function_loc(host_function))
|
||||||
|
self.engine.process(diag)
|
||||||
|
self._quote_subkernel(function, loc)
|
||||||
elif host_function.artiq_embedded.function is not None:
|
elif host_function.artiq_embedded.function is not None:
|
||||||
if host_function.__name__ == "<lambda>":
|
if host_function.__name__ == "<lambda>":
|
||||||
note = diagnostic.Diagnostic("note",
|
note = diagnostic.Diagnostic("note",
|
||||||
|
@ -1060,8 +1375,13 @@ class Stitcher:
|
||||||
notes=[note])
|
notes=[note])
|
||||||
self.engine.process(diag)
|
self.engine.process(diag)
|
||||||
|
|
||||||
|
destination = host_function.artiq_embedded.destination
|
||||||
|
# remote_fn only for first call in subkernels
|
||||||
|
remote_fn = destination is not None and self.first_call
|
||||||
self._quote_embedded_function(function,
|
self._quote_embedded_function(function,
|
||||||
flags=host_function.artiq_embedded.flags)
|
flags=host_function.artiq_embedded.flags,
|
||||||
|
remote_fn=remote_fn)
|
||||||
|
self.first_call = False
|
||||||
elif host_function.artiq_embedded.syscall is not None:
|
elif host_function.artiq_embedded.syscall is not None:
|
||||||
# Insert a storage-less global whose type instructs the compiler
|
# Insert a storage-less global whose type instructs the compiler
|
||||||
# to perform a system call instead of a regular call.
|
# to perform a system call instead of a regular call.
|
||||||
|
|
|
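The hunks above appear to modify the compiler's embedding stage (the `Stitcher` and its helpers) so that functions bound to a non-zero DRTIO destination are quoted through `_quote_subkernel`, typed as `types.TSubkernel`, and have their first call receive arguments from remote. For orientation, here is a rough illustration of how this surfaces at the ARTIQ-Python level; the `subkernel` decorator and `subkernel_await` names follow the prelude entries added further down, but the exact decorator signature is an assumption on my part, not something this diff states.

```python
# Illustrative sketch only; decorator/helper names assumed from the prelude
# additions below, not a verbatim ARTIQ API transcript.
from artiq.experiment import *


class TwoDeviceExperiment(EnvExperiment):
    def build(self):
        self.setattr_device("core")

    @subkernel(destination=1)       # compiled separately, runs on DRTIO destination 1
    def remote_part(self, n: TInt32) -> TNone:
        pass                        # body is stitched with Stitcher(destination=1)

    @kernel
    def run(self):
        self.remote_part(10)        # first call ships the argument over DRTIO
        subkernel_await(self.remote_part)
```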
@@ -36,6 +36,48 @@ class TKeyword(types.TMono):
 def is_keyword(typ):
     return isinstance(typ, TKeyword)


+# See rpc_proto.rs and comm_kernel.py:_{send,receive}_rpc_value.
+def rpc_tag(typ, error_handler):
+    typ = typ.find()
+    if types.is_tuple(typ):
+        assert len(typ.elts) < 256
+        return b"t" + bytes([len(typ.elts)]) + \
+            b"".join([rpc_tag(elt_type, error_handler)
+                      for elt_type in typ.elts])
+    elif builtins.is_none(typ):
+        return b"n"
+    elif builtins.is_bool(typ):
+        return b"b"
+    elif builtins.is_int(typ, types.TValue(32)):
+        return b"i"
+    elif builtins.is_int(typ, types.TValue(64)):
+        return b"I"
+    elif builtins.is_float(typ):
+        return b"f"
+    elif builtins.is_str(typ):
+        return b"s"
+    elif builtins.is_bytes(typ):
+        return b"B"
+    elif builtins.is_bytearray(typ):
+        return b"A"
+    elif builtins.is_list(typ):
+        return b"l" + rpc_tag(builtins.get_iterable_elt(typ), error_handler)
+    elif builtins.is_array(typ):
+        num_dims = typ["num_dims"].value
+        return b"a" + bytes([num_dims]) + rpc_tag(typ["elt"], error_handler)
+    elif builtins.is_range(typ):
+        return b"r" + rpc_tag(builtins.get_iterable_elt(typ), error_handler)
+    elif is_keyword(typ):
+        return b"k" + rpc_tag(typ.params["value"], error_handler)
+    elif types.is_function(typ) or types.is_method(typ) or types.is_rpc(typ):
+        raise ValueError("RPC tag for functional value")
+    elif '__objectid__' in typ.attributes:
+        return b"O"
+    else:
+        error_handler(typ)
+
+
 class Value:
     """
     An SSA value that keeps track of its uses.

@@ -93,6 +135,7 @@ class NamedValue(Value):
     def __init__(self, typ, name):
         super().__init__(typ)
         self.name, self.function = name, None
+        self.is_removed = False

     def set_name(self, new_name):
         if self.function is not None:

@@ -193,7 +236,7 @@ class Instruction(User):
         self.drop_references()
         # Check this after drop_references in case this
         # is a self-referencing phi.
-        assert not any(self.uses)
+        assert all(use.is_removed for use in self.uses)

     def replace_with(self, value):
         self.replace_all_uses_with(value)

@@ -328,7 +371,7 @@ class BasicBlock(NamedValue):
         self.remove_from_parent()
         # Check this after erasing instructions in case the block
         # loops into itself.
-        assert not any(self.uses)
+        assert all(use.is_removed for use in self.uses)

     def prepend(self, insn):
         assert isinstance(insn, Instruction)

@@ -663,6 +706,81 @@ class SetLocal(Instruction):
     def value(self):
         return self.operands[1]

+class GetArgFromRemote(Instruction):
+    """
+    An instruction that receives function arguments from remote
+    (ie. subkernel in DRTIO context)
+
+    :ivar arg_name: (string) argument name
+    :ivar arg_type: argument type
+    """
+
+    """
+    :param arg_name: (string) argument name
+    :param arg_type: argument type
+    """
+    def __init__(self, arg_name, arg_type, name=""):
+        assert isinstance(arg_name, str)
+        super().__init__([], arg_type, name)
+        self.arg_name = arg_name
+        self.arg_type = arg_type
+
+    def copy(self, mapper):
+        self_copy = super().copy(mapper)
+        self_copy.arg_name = self.arg_name
+        self_copy.arg_type = self.arg_type
+        return self_copy
+
+    def opcode(self):
+        return "getargfromremote({})".format(repr(self.arg_name))
+
+class GetOptArgFromRemote(GetArgFromRemote):
+    """
+    An instruction that may or may not retrieve an optional function argument
+    from remote, depending on number of values received by firmware.
+
+    :ivar rcv_count: number of received values,
+        determined by firmware
+    :ivar index: (integer) index of the current argument,
+        in reference to remote arguments
+    """
+
+    """
+    :param rcv_count: number of received valuese
+    :param index: (integer) index of the current argument,
+        in reference to remote arguments
+    """
+    def __init__(self, arg_name, arg_type, rcv_count, index, name=""):
+        super().__init__(arg_name, arg_type, name)
+        self.rcv_count = rcv_count
+        self.index = index
+
+    def copy(self, mapper):
+        self_copy = super().copy(mapper)
+        self_copy.rcv_count = self.rcv_count
+        self_copy.index = self.index
+        return self_copy
+
+    def opcode(self):
+        return "getoptargfromremote({})".format(repr(self.arg_name))
+
+class SubkernelAwaitArgs(Instruction):
+    """
+    A builtin instruction that takes min and max received messages as operands,
+    and a list of received types.
+
+    :ivar arg_types: (list of types) types of passed arguments (including optional)
+    """
+
+    """
+    :param arg_types: (list of types) types of passed arguments (including optional)
+    """
+
+    def __init__(self, operands, arg_types, name=None):
+        assert isinstance(arg_types, list)
+        self.arg_types = arg_types
+        super().__init__(operands, builtins.TNone(), name)
+
 class GetAttr(Instruction):
     """
     An intruction that loads an attribute from an object,

@@ -685,7 +803,7 @@ class GetAttr(Instruction):
             typ = obj.type.attributes[attr]
         else:
             typ = obj.type.constructor.attributes[attr]
-        if types.is_function(typ) or types.is_rpc(typ):
+        if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
             typ = types.TMethod(obj.type, typ)
         super().__init__([obj], typ, name)
         self.attr = attr

@@ -738,6 +856,33 @@ class SetAttr(Instruction):
     def value(self):
         return self.operands[1]

+class Offset(Instruction):
+    """
+    An intruction that adds an offset to a pointer (indexes into a list).
+
+    This is used to represent internally generated pointer arithmetic, and must
+    remain inside the same object (see :class:`GetElem` and LLVM's GetElementPtr).
+    """
+
+    """
+    :param lst: (:class:`Value`) list
+    :param index: (:class:`Value`) index
+    """
+    def __init__(self, base, offset, name=""):
+        assert isinstance(base, Value)
+        assert isinstance(offset, Value)
+        typ = types._TPointer(builtins.get_iterable_elt(base.type))
+        super().__init__([base, offset], typ, name)
+
+    def opcode(self):
+        return "offset"
+
+    def base(self):
+        return self.operands[0]
+
+    def index(self):
+        return self.operands[1]
+
 class GetElem(Instruction):
     """
     An intruction that loads an element from a list.

@@ -755,7 +900,7 @@ class GetElem(Instruction):
     def opcode(self):
         return "getelem"

-    def list(self):
+    def base(self):
         return self.operands[0]

     def index(self):

@@ -781,7 +926,7 @@ class SetElem(Instruction):
     def opcode(self):
         return "setelem"

-    def list(self):
+    def base(self):
         return self.operands[0]

     def index(self):

@@ -840,6 +985,7 @@ class Arith(Instruction):
     def rhs(self):
         return self.operands[1]

+
 class Compare(Instruction):
     """
     A comparison operation on numbers.

@@ -1119,14 +1265,18 @@ class IndirectBranch(Terminator):
 class Return(Terminator):
     """
     A return instruction.
+    :param remote_return: (bool)
+        marks a return in subkernel context,
+        where the return value is sent back through DRTIO
     """

     """
     :param value: (:class:`Value`) return value
     """
-    def __init__(self, value, name=""):
+    def __init__(self, value, remote_return=False, name=""):
         assert isinstance(value, Value)
         super().__init__([value], builtins.TNone(), name)
+        self.remote_return = remote_return

     def opcode(self):
         return "return"

@@ -1175,9 +1325,9 @@ class Raise(Terminator):
         if len(self.operands) > 1:
             return self.operands[1]

-class Reraise(Terminator):
+class Resume(Terminator):
     """
-    A reraise instruction.
+    A resume instruction.
     """

     """

@@ -1191,7 +1341,7 @@ class Reraise(Terminator):
         super().__init__(operands, builtins.TNone(), name)

     def opcode(self):
-        return "reraise"
+        return "resume"

     def exception_target(self):
         if len(self.operands) > 0:

@@ -1277,6 +1427,7 @@ class LandingPad(Terminator):
     def __init__(self, cleanup, name=""):
         super().__init__([cleanup], builtins.TException(), name)
         self.types = []
+        self.has_cleanup = True

     def copy(self, mapper):
         self_copy = super().copy(mapper)
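The IR hunks above add, among other things, a module-level `rpc_tag` helper that maps an ARTIQ type to the one-byte tags used by the RPC wire protocol. A quick host-side sketch of what it computes, assuming the helper is importable from the compiler's IR module as the hunk suggests:

```python
# Sketch: exercising the rpc_tag helper added above (host side).
from artiq.compiler import builtins, types
from artiq.compiler.ir import rpc_tag

def reject(typ):
    raise ValueError("type {} has no RPC tag".format(typ))

assert rpc_tag(builtins.TInt32(), reject) == b"i"
assert rpc_tag(builtins.TList(builtins.TFloat()), reject) == b"lf"
# Tuples encode their arity in the byte after the tag.
assert rpc_tag(types.TTuple([builtins.TBool(), builtins.TStr()]), reject) == b"t\x02bs"
```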
@@ -0,0 +1,70 @@
+/* Force ld to make the ELF header as loadable. */
+PHDRS
+{
+    headers  PT_LOAD FILEHDR PHDRS ;
+    text     PT_LOAD ;
+    data     PT_LOAD ;
+    dynamic  PT_DYNAMIC ;
+    eh_frame PT_GNU_EH_FRAME ;
+}
+
+SECTIONS
+{
+    /* Push back .text section enough so that ld.lld not complain */
+    . = SIZEOF_HEADERS;
+
+    .text :
+    {
+        *(.text .text.*)
+    } : text
+
+    .rodata :
+    {
+        *(.rodata .rodata.*)
+    }
+
+    .eh_frame :
+    {
+        KEEP(*(.eh_frame))
+    } : text
+
+    .eh_frame_hdr :
+    {
+        KEEP(*(.eh_frame_hdr))
+    } : text : eh_frame
+
+    .got :
+    {
+        *(.got)
+    } : text
+
+    .got.plt :
+    {
+        *(.got.plt)
+    } : text
+
+    .data :
+    {
+        *(.data .data.*)
+    } : data
+
+    .dynamic :
+    {
+        *(.dynamic)
+    } : data : dynamic
+
+    .bss (NOLOAD) : ALIGN(4)
+    {
+        __bss_start = .;
+        *(.sbss .sbss.* .bss .bss.*);
+        . = ALIGN(4);
+        _end = .;
+    }
+
+    /* Kernel stack grows downward from end of memory, so put guard page after
+     * all the program contents. Note: This requires all loaded sections (at
+     * least those accessed) to be explicitly listed in the above!
+     */
+    . = ALIGN(0x1000);
+    _sstack_guard = .;
+}
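This new linker script is what `Target.link()` in the hunks further down passes to `ld.lld` via `-T`, so kernels get a stack guard page placed after all loaded sections. A sketch of the command line that results, with illustrative paths only:

```python
# Sketch: roughly the ld.lld invocation assembled by Target.link() below.
import os

def linker_cmdline(objects, compiler_dir="/path/to/artiq/compiler"):
    cmd = ["ld.lld", "-shared", "--eh-frame-hdr"]
    cmd += ["-m", "elf32lriscv"]                       # additional_linker_options of the RISC-V targets
    cmd += ["-T" + os.path.join(compiler_dir, "kernel.ld")]
    cmd += list(objects)                               # relocatable objects, e.g. ["module0.o"]
    cmd += ["-x", "-o", "kernel.elf"]
    return cmd

print(" ".join(linker_cmdline(["module0.o"])))
```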
@@ -0,0 +1,132 @@
+r"""
+The :mod:`math_fns` module lists math-related functions from NumPy recognized
+by the ARTIQ compiler so host function objects can be :func:`match`\ ed to
+the compiler type metadata describing their core device analogue.
+"""
+
+from collections import OrderedDict
+import numpy
+from . import builtins, types
+
+# Some special mathematical functions are exposed via their scipy.special
+# equivalents. Since the rest of the ARTIQ core does not depend on SciPy,
+# gracefully handle it not being present, making the functions simply not
+# available.
+try:
+    import scipy.special as scipy_special
+except ImportError:
+    scipy_special = None
+
+#: float -> float numpy.* math functions for which llvm.* intrinsics exist.
+unary_fp_intrinsics = [(name, "llvm." + name + ".f64") for name in [
+    "sin",
+    "cos",
+    "exp",
+    "exp2",
+    "log",
+    "log10",
+    "log2",
+    "fabs",
+    "floor",
+    "ceil",
+    "trunc",
+    "sqrt",
+]] + [
+    # numpy.rint() seems to (NumPy 1.19.0, Python 3.8.5, Linux x86_64)
+    # implement round-to-even, but unfortunately, rust-lang/libm only
+    # provides round(), which always rounds away from zero.
+    #
+    # As there is no equivalent of the latter in NumPy (nor any other
+    # basic rounding function), expose round() as numpy.rint anyway,
+    # even if the rounding modes don't match up, so there is some way
+    # to do rounding on the core device. (numpy.round() has entirely
+    # different semantics; it rounds to a configurable number of
+    # decimals.)
+    ("rint", "llvm.round.f64"),
+]
+
+#: float -> float numpy.* math functions lowered to runtime calls.
+unary_fp_runtime_calls = [
+    ("tan", "tan"),
+    ("arcsin", "asin"),
+    ("arccos", "acos"),
+    ("arctan", "atan"),
+    ("sinh", "sinh"),
+    ("cosh", "cosh"),
+    ("tanh", "tanh"),
+    ("arcsinh", "asinh"),
+    ("arccosh", "acosh"),
+    ("arctanh", "atanh"),
+    ("expm1", "expm1"),
+    ("cbrt", "cbrt"),
+]
+
+scipy_special_unary_runtime_calls = [
+    ("erf", "erf"),
+    ("erfc", "erfc"),
+    ("gamma", "tgamma"),
+    ("gammaln", "lgamma"),
+    ("j0", "j0"),
+    ("j1", "j1"),
+    ("y0", "y0"),
+    ("y1", "y1"),
+]
+# Not mapped: jv/yv, libm only supports integer orders.
+
+#: (float, float) -> float numpy.* math functions lowered to runtime calls.
+binary_fp_runtime_calls = [
+    ("arctan2", "atan2"),
+    ("copysign", "copysign"),
+    ("fmax", "fmax"),
+    ("fmin", "fmin"),
+    # ("ldexp", "ldexp"), # One argument is an int; would need a bit more plumbing.
+    ("hypot", "hypot"),
+    ("nextafter", "nextafter"),
+]
+
+#: Array handling builtins (special treatment due to allocations).
+numpy_builtins = ["transpose"]
+
+
+def fp_runtime_type(name, arity):
+    args = [("arg{}".format(i), builtins.TFloat()) for i in range(arity)]
+    return types.TExternalFunction(
+        OrderedDict(args),
+        builtins.TFloat(),
+        name,
+        # errno isn't observable from ARTIQ Python.
+        flags={"nounwind", "nowrite"},
+        broadcast_across_arrays=True)
+
+
+math_fn_map = {
+    getattr(numpy, symbol): fp_runtime_type(mangle, arity=1)
+    for symbol, mangle in (unary_fp_intrinsics + unary_fp_runtime_calls)
+}
+for symbol, mangle in binary_fp_runtime_calls:
+    math_fn_map[getattr(numpy, symbol)] = fp_runtime_type(mangle, arity=2)
+for name in numpy_builtins:
+    math_fn_map[getattr(numpy, name)] = types.TBuiltinFunction("numpy." + name)
+if scipy_special is not None:
+    for symbol, mangle in scipy_special_unary_runtime_calls:
+        math_fn_map[getattr(scipy_special, symbol)] = fp_runtime_type(mangle, arity=1)
+
+
+def match(obj):
+    return math_fn_map.get(obj, None)
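The new math function table above is consulted by the stitcher through `math_fns.match()` when quoting host functions. A small host-side sketch of the lookup, assuming the module lives alongside the rest of the compiler as `artiq.compiler.math_fns`:

```python
# Sketch: querying the NumPy/SciPy-to-ARTIQ math function map added above.
import numpy
from artiq.compiler import math_fns

sin_type = math_fns.match(numpy.sin)
assert sin_type is not None                   # recognized: lowered to the llvm.sin.f64 intrinsic
assert math_fns.match(numpy.median) is None   # not recognized by the core device compiler
```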
@@ -10,7 +10,7 @@ string and infers types for it using a trivial :module:`prelude`.
 import os
 from pythonparser import source, diagnostic, parse_buffer
-from . import prelude, types, transforms, analyses, validators
+from . import prelude, types, transforms, analyses, validators, embedding

 class Source:
     def __init__(self, source_buffer, engine=None):

@@ -18,7 +18,7 @@ class Source:
             self.engine = diagnostic.Engine(all_errors_are_fatal=True)
         else:
             self.engine = engine
-        self.embedding_map = None
+        self.embedding_map = embedding.EmbeddingMap()
         self.name, _ = os.path.splitext(os.path.basename(source_buffer.name))

         asttyped_rewriter = transforms.ASTTypedRewriter(engine=engine,

@@ -57,7 +57,8 @@ class Module:
         constness_validator = validators.ConstnessValidator(engine=self.engine)
         artiq_ir_generator = transforms.ARTIQIRGenerator(engine=self.engine,
                                                          module_name=src.name,
-                                                         ref_period=ref_period)
+                                                         ref_period=ref_period,
+                                                         embedding_map=self.embedding_map)
         dead_code_eliminator = transforms.DeadCodeEliminator(engine=self.engine)
         local_access_validator = validators.LocalAccessValidator(engine=self.engine)
         local_demoter = transforms.LocalDemoter()

@@ -66,8 +67,8 @@ class Module:
         interleaver = transforms.Interleaver(engine=self.engine)
         invariant_detection = analyses.InvariantDetection(engine=self.engine)

-        cast_monomorphizer.visit(src.typedtree)
         int_monomorphizer.visit(src.typedtree)
+        cast_monomorphizer.visit(src.typedtree)
         inferencer.visit(src.typedtree)
         monomorphism_validator.visit(src.typedtree)
         escape_validator.visit(src.typedtree)

@@ -83,6 +84,8 @@ class Module:
         constant_hoister.process(self.artiq_ir)
         if remarks:
             invariant_detection.process(self.artiq_ir)
+        # for subkernels: main kernel inferencer output, to be passed to further compilations
+        self.subkernel_arg_types = inferencer.subkernel_arg_types

     def build_llvm_ir(self, target):
         """Compile the module to LLVM IR for the specified target."""
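The module-level change above exposes `inferencer.subkernel_arg_types` on `Module`, the counterpart of the `subkernel_arg_types` parameter added to `Stitcher` earlier. A schematic sketch of how the two could be wired together when compiling a subkernel after the main kernel; this mirrors the parameters introduced in this diff but is not an exact transcript of the ARTIQ compilation pipeline:

```python
# Schematic only: passing main-kernel inference results to a subkernel build.
def compile_subkernel(core, dmgr, sub_fn, main_module, destination=1):
    # main_module is a Module built from the main kernel's Stitcher; its
    # subkernel_arg_types carry argument types the subkernel cannot infer itself.
    stitcher = Stitcher(core, dmgr, destination=destination,
                        subkernel_arg_types=main_module.subkernel_arg_types)
    stitcher.stitch_call(sub_fn, args=(), kwargs={})
    stitcher.finalize()
    return Module(stitcher)
```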
@@ -25,16 +25,19 @@ def globals():
         "IndexError":           builtins.fn_IndexError(),
         "ValueError":           builtins.fn_ValueError(),
         "ZeroDivisionError":    builtins.fn_ZeroDivisionError(),
+        "RuntimeError":         builtins.fn_RuntimeError(),

         # Built-in Python functions
         "len":                  builtins.fn_len(),
         "round":                builtins.fn_round(),
+        "abs":                  builtins.fn_abs(),
         "min":                  builtins.fn_min(),
         "max":                  builtins.fn_max(),
         "print":                builtins.fn_print(),

         # ARTIQ decorators
         "kernel":               builtins.fn_kernel(),
+        "subkernel":            builtins.fn_kernel(),
         "portable":             builtins.fn_kernel(),
         "rpc":                  builtins.fn_kernel(),

@@ -42,7 +45,6 @@ def globals():
         "parallel":             builtins.obj_parallel(),
         "interleave":           builtins.obj_interleave(),
         "sequential":           builtins.obj_sequential(),
-        "watchdog":             builtins.fn_watchdog(),

         # ARTIQ time management functions
         "delay":                builtins.fn_delay(),

@@ -53,4 +55,8 @@ def globals():
         # ARTIQ utility functions
         "rtio_log":             builtins.fn_rtio_log(),
         "core_log":             builtins.fn_print(),
+
+        # ARTIQ subkernel utility functions
+        "subkernel_await":      builtins.fn_subkernel_await(),
+        "subkernel_preload":    builtins.fn_subkernel_preload(),
     }
@ -1,6 +1,6 @@
|
||||||
import os, sys, tempfile, subprocess, io
|
import os, sys, tempfile, subprocess, io
|
||||||
from artiq.compiler import types, ir
|
from artiq.compiler import types, ir
|
||||||
from llvmlite_artiq import ir as ll, binding as llvm
|
from llvmlite import ir as ll, binding as llvm
|
||||||
|
|
||||||
llvm.initialize()
|
llvm.initialize()
|
||||||
llvm.initialize_all_targets()
|
llvm.initialize_all_targets()
|
||||||
|
@ -28,8 +28,10 @@ class RunTool:
|
||||||
for argument in self._pattern:
|
for argument in self._pattern:
|
||||||
cmdline.append(argument.format(**self._tempnames))
|
cmdline.append(argument.format(**self._tempnames))
|
||||||
|
|
||||||
|
# https://bugs.python.org/issue17023
|
||||||
|
windows = os.name == "nt"
|
||||||
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||||
universal_newlines=True)
|
universal_newlines=True, shell=windows)
|
||||||
stdout, stderr = process.communicate()
|
stdout, stderr = process.communicate()
|
||||||
if process.returncode != 0:
|
if process.returncode != 0:
|
||||||
raise Exception("{} invocation failed: {}".
|
raise Exception("{} invocation failed: {}".
|
||||||
|
@ -67,29 +69,41 @@ class Target:
|
||||||
generated by the ARTIQ compiler will be deployed.
|
generated by the ARTIQ compiler will be deployed.
|
||||||
|
|
||||||
:var triple: (string)
|
:var triple: (string)
|
||||||
LLVM target triple, e.g. ``"or1k"``
|
LLVM target triple, e.g. ``"riscv32"``
|
||||||
:var data_layout: (string)
|
:var data_layout: (string)
|
||||||
LLVM target data layout, e.g. ``"E-m:e-p:32:32-i64:32-f64:32-v64:32-v128:32-a:0:32-n32"``
|
LLVM target data layout, e.g. ``"E-m:e-p:32:32-i64:32-f64:32-v64:32-v128:32-a:0:32-n32"``
|
||||||
:var features: (list of string)
|
:var features: (list of string)
|
||||||
LLVM target CPU features, e.g. ``["mul", "div", "ffl1"]``
|
LLVM target CPU features, e.g. ``["mul", "div", "ffl1"]``
|
||||||
|
:var additional_linker_options: (list of string)
|
||||||
|
Linker options for the target in addition to the target-independent ones, e.g. ``["--target2=rel"]``
|
||||||
:var print_function: (string)
|
:var print_function: (string)
|
||||||
Name of a formatted print functions (with the signature of ``printf``)
|
Name of a formatted print functions (with the signature of ``printf``)
|
||||||
provided by the target, e.g. ``"printf"``.
|
provided by the target, e.g. ``"printf"``.
|
||||||
|
:var now_pinning: (boolean)
|
||||||
|
Whether the target implements the now-pinning RTIO optimization.
|
||||||
"""
|
"""
|
||||||
triple = "unknown"
|
triple = "unknown"
|
||||||
data_layout = ""
|
data_layout = ""
|
||||||
features = []
|
features = []
|
||||||
|
additional_linker_options = []
|
||||||
print_function = "printf"
|
print_function = "printf"
|
||||||
|
now_pinning = True
|
||||||
|
|
||||||
|
tool_ld = "ld.lld"
|
||||||
|
tool_strip = "llvm-strip"
|
||||||
|
tool_symbolizer = "llvm-symbolizer"
|
||||||
|
tool_cxxfilt = "llvm-cxxfilt"
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self, subkernel_id=None):
|
||||||
self.llcontext = ll.Context()
|
self.llcontext = ll.Context()
|
||||||
|
self.subkernel_id = subkernel_id
|
||||||
|
|
||||||
def target_machine(self):
|
def target_machine(self):
|
||||||
lltarget = llvm.Target.from_triple(self.triple)
|
lltarget = llvm.Target.from_triple(self.triple)
|
||||||
llmachine = lltarget.create_target_machine(
|
llmachine = lltarget.create_target_machine(
|
||||||
features=",".join(["+{}".format(f) for f in self.features]),
|
features=",".join(["+{}".format(f) for f in self.features]),
|
||||||
reloc="pic", codemodel="default")
|
reloc="pic", codemodel="default",
|
||||||
|
abiname="ilp32d" if isinstance(self, RV32GTarget) else "")
|
||||||
llmachine.set_asm_verbosity(True)
|
llmachine.set_asm_verbosity(True)
|
||||||
return llmachine
|
return llmachine
|
||||||
|
|
||||||
|
@ -135,7 +149,8 @@ class Target:
|
||||||
ir.BasicBlock._dump_loc = False
|
ir.BasicBlock._dump_loc = False
|
||||||
|
|
||||||
type_printer = types.TypePrinter()
|
type_printer = types.TypePrinter()
|
||||||
_dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt",
|
suffix = "_subkernel_{}".format(self.subkernel_id) if self.subkernel_id is not None else ""
|
||||||
|
_dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", suffix + ".txt",
|
||||||
lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir))
|
lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir))
|
||||||
|
|
||||||
llmod = module.build_llvm_ir(self)
|
llmod = module.build_llvm_ir(self)
|
||||||
|
@ -147,12 +162,12 @@ class Target:
|
||||||
_dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod))
|
_dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod))
|
||||||
raise
|
raise
|
||||||
|
|
||||||
_dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", "_unopt.ll",
|
_dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", suffix + "_unopt.ll",
|
||||||
lambda: str(llparsedmod))
|
lambda: str(llparsedmod))
|
||||||
|
|
||||||
self.optimize(llparsedmod)
|
self.optimize(llparsedmod)
|
||||||
|
|
||||||
_dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", ".ll",
|
_dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", suffix + ".ll",
|
||||||
lambda: str(llparsedmod))
|
lambda: str(llparsedmod))
|
||||||
|
|
||||||
return llparsedmod
|
return llparsedmod
|
||||||
|
@ -170,8 +185,11 @@ class Target:
|
||||||
|
|
||||||
def link(self, objects):
|
def link(self, objects):
|
||||||
"""Link the relocatable objects into a shared library for this target."""
|
"""Link the relocatable objects into a shared library for this target."""
|
||||||
with RunTool([self.triple + "-ld", "-shared", "--eh-frame-hdr"] +
|
with RunTool([self.tool_ld, "-shared", "--eh-frame-hdr"] +
|
||||||
|
self.additional_linker_options +
|
||||||
|
["-T" + os.path.join(os.path.dirname(__file__), "kernel.ld")] +
|
||||||
["{{obj{}}}".format(index) for index in range(len(objects))] +
|
["{{obj{}}}".format(index) for index in range(len(objects))] +
|
||||||
|
["-x"] +
|
||||||
["-o", "{output}"],
|
["-o", "{output}"],
|
||||||
output=None,
|
output=None,
|
||||||
**{"obj{}".format(index): obj for index, obj in enumerate(objects)}) \
|
**{"obj{}".format(index): obj for index, obj in enumerate(objects)}) \
|
||||||
|
@ -187,7 +205,7 @@ class Target:
|
||||||
return self.link([self.assemble(self.compile(module)) for module in modules])
|
return self.link([self.assemble(self.compile(module)) for module in modules])
|
||||||
|
|
||||||
def strip(self, library):
|
def strip(self, library):
|
||||||
with RunTool([self.triple + "-strip", "--strip-debug", "{library}", "-o", "{output}"],
|
with RunTool([self.tool_strip, "--strip-debug", "{library}", "-o", "{output}"],
|
||||||
library=library, output=None) \
|
library=library, output=None) \
|
||||||
as results:
|
as results:
|
||||||
return results["output"].read()
|
return results["output"].read()
|
||||||
|
@ -200,9 +218,10 @@ class Target:
|
||||||
# just after the call. Offset them back to get an address somewhere
|
# just after the call. Offset them back to get an address somewhere
|
||||||
# inside the call instruction (or its delay slot), since that's what
|
# inside the call instruction (or its delay slot), since that's what
|
||||||
# the backtrace entry should point at.
|
# the backtrace entry should point at.
|
||||||
|
last_inlined = None
|
||||||
offset_addresses = [hex(addr - 1) for addr in addresses]
|
offset_addresses = [hex(addr - 1) for addr in addresses]
|
||||||
with RunTool([self.triple + "-addr2line", "--addresses", "--functions", "--inlines",
|
with RunTool([self.tool_symbolizer, "--addresses", "--functions", "--inlines",
|
||||||
"--demangle", "--exe={library}"] + offset_addresses,
|
"--demangle", "--output-style=GNU", "--exe={library}"] + offset_addresses,
|
||||||
library=library) \
|
library=library) \
|
||||||
as results:
|
as results:
|
||||||
lines = iter(results["__stdout__"].read().rstrip().split("\n"))
|
lines = iter(results["__stdout__"].read().rstrip().split("\n"))
|
||||||
|
@ -215,9 +234,11 @@ class Target:
|
||||||
if address_or_function[:2] == "0x":
|
if address_or_function[:2] == "0x":
|
||||||
address = int(address_or_function[2:], 16) + 1 # remove offset
|
+address = int(address_or_function[2:], 16) + 1 # remove offset
function = next(lines)
+inlined = False
else:
address = backtrace[-1][4] # inlined
function = address_or_function
+inlined = True
location = next(lines)

filename, line = location.rsplit(":", 1)
@@ -228,21 +249,61 @@ class Target:
else:
line = int(line)
# can't get column out of addr2line D:
-backtrace.append((filename, line, -1, function, address))
+if inlined:
+last_inlined.append((filename, line, -1, function, address))
+else:
+last_inlined = []
+backtrace.append((filename, line, -1, function, address,
+last_inlined))
return backtrace

def demangle(self, names):
-with RunTool([self.triple + "-c++filt"] + names) as results:
+if not any(names):
+return names
+with RunTool([self.tool_cxxfilt] + names) as results:
return results["__stdout__"].read().rstrip().split("\n")

class NativeTarget(Target):
def __init__(self):
super().__init__()
self.triple = llvm.get_default_triple()
+self.data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)

-class OR1KTarget(Target):
+class RV32IMATarget(Target):
-triple = "or1k-linux"
+triple = "riscv32-unknown-linux"
-data_layout = "E-m:e-p:32:32-i8:8:8-i16:16:16-i64:32:32-" \
-"f64:32:32-v64:32:32-v128:32:32-a0:0:32-n32"
+data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
-features = ["mul", "div", "ffl1", "cmov", "addc"]
+features = ["m", "a"]
+additional_linker_options = ["-m", "elf32lriscv"]
print_function = "core_log"
+now_pinning = True

+tool_ld = "ld.lld"
+tool_strip = "llvm-strip"
+tool_symbolizer = "llvm-symbolizer"
+tool_cxxfilt = "llvm-cxxfilt"

+class RV32GTarget(Target):
+triple = "riscv32-unknown-linux"
+data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
+features = ["m", "a", "f", "d"]
+additional_linker_options = ["-m", "elf32lriscv"]
+print_function = "core_log"
+now_pinning = True

+tool_ld = "ld.lld"
+tool_strip = "llvm-strip"
+tool_symbolizer = "llvm-symbolizer"
+tool_cxxfilt = "llvm-cxxfilt"

+class CortexA9Target(Target):
+triple = "armv7-unknown-linux-gnueabihf"
+data_layout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+features = ["dsp", "fp16", "neon", "vfp3"]
+additional_linker_options = ["-m", "armelf_linux_eabi", "--target2=rel"]
+print_function = "core_log"
+now_pinning = False

+tool_ld = "ld.lld"
+tool_strip = "llvm-strip"
+tool_symbolizer = "llvm-symbolizer"
+tool_cxxfilt = "llvm-cxxfilt"
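Aside (not part of the diff): the hunks above route demangling through the new per-target `tool_cxxfilt` attribute. A minimal sketch of that idea, using a plain `subprocess` call instead of the repository's `RunTool` helper; the function name `run_cxxfilt` is illustrative only.

```python
# Illustrative sketch only: approximates what Target.demangle() does with
# tool_cxxfilt, using subprocess instead of the repository's RunTool wrapper.
import subprocess

def run_cxxfilt(names, tool_cxxfilt="llvm-cxxfilt"):
    # Empty or all-empty name lists are returned unchanged, mirroring the
    # `if not any(names): return names` early exit added in the diff.
    if not any(names):
        return names
    # llvm-cxxfilt demangles each argument and prints one result per line.
    out = subprocess.run([tool_cxxfilt] + list(names),
                         capture_output=True, text=True, check=True).stdout
    return out.rstrip().split("\n")

# Example: run_cxxfilt(["_ZN4core3fmt5writeE"]) -> ["core::fmt::write"]
```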
@@ -1,6 +1,6 @@
import os, sys, fileinput, ctypes
from pythonparser import diagnostic
-from llvmlite_artiq import binding as llvm
+from llvmlite import binding as llvm
from ..module import Module, Source
from ..targets import NativeTarget

@@ -1,6 +1,6 @@
import sys, fileinput
from pythonparser import diagnostic
-from llvmlite_artiq import ir as ll
+from llvmlite import ir as ll
from ..module import Module, Source
from ..targets import NativeTarget
@@ -1,7 +1,7 @@
import sys, os
from pythonparser import diagnostic
from ..module import Module, Source
-from ..targets import OR1KTarget
+from ..targets import RV32GTarget
from . import benchmark

def main():
@@ -30,7 +30,7 @@ def main():
benchmark(lambda: Module(source),
"ARTIQ transforms and validators")

-benchmark(lambda: OR1KTarget().compile_and_link([module]),
+benchmark(lambda: RV32GTarget().compile_and_link([module]),
"LLVM optimization and linking")

if __name__ == "__main__":
@@ -5,7 +5,7 @@ from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
-from ..targets import OR1KTarget
+from ..targets import RV32GTarget
from . import benchmark

@@ -30,8 +30,9 @@ def main():
device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
device_mgr = DeviceManager(DeviceDB(device_db_path))

-dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
+dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.mdb")
-dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))
+dataset_db = DatasetDB(dataset_db_path)
+dataset_mgr = DatasetManager()

argument_mgr = ProcessArgumentManager({})

@@ -45,7 +46,7 @@ def main():

stitcher = embed()
module = Module(stitcher)
-target = OR1KTarget()
+target = RV32GTarget()
llvm_ir = target.compile(module)
elf_obj = target.assemble(llvm_ir)
elf_shlib = target.link([elf_obj])
@@ -68,5 +69,7 @@ def main():
benchmark(lambda: target.strip(elf_shlib),
"Stripping debug information")

+dataset_db.close_db()

if __name__ == "__main__":
main()
@@ -1,7 +1,7 @@
import sys, os
from pythonparser import diagnostic
from ..module import Module, Source
-from ..targets import OR1KTarget
+from ..targets import RV32GTarget

def main():
if not len(sys.argv) > 1:
@@ -20,7 +20,7 @@ def main():
for filename in sys.argv[1:]:
modules.append(Module(Source.from_filename(filename, engine=engine)))

-llobj = OR1KTarget().compile_and_link(modules)
+llobj = RV32GTarget().compile_and_link(modules)

basename, ext = os.path.splitext(sys.argv[-1])
with open(basename + ".so", "wb") as f:

(File diff suppressed because it is too large.)
@@ -238,7 +238,7 @@ class ASTTypedRewriter(algorithm.Transformer):
body=node.body, decorator_list=node.decorator_list,
keyword_loc=node.keyword_loc, name_loc=node.name_loc,
arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
-loc=node.loc)
+loc=node.loc, remote_fn=False)

try:
self.env_stack.append(node.typing_env)
@@ -439,8 +439,9 @@ class ASTTypedRewriter(algorithm.Transformer):

def visit_Call(self, node):
node = self.generic_visit(node)
node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={},
-func=node.func, args=node.args, keywords=node.keywords,
+remote_fn=False, func=node.func,
+args=node.args, keywords=node.keywords,
starargs=node.starargs, kwargs=node.kwargs,
star_loc=node.star_loc, dstar_loc=node.dstar_loc,
begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
@@ -11,13 +11,12 @@ class CastMonomorphizer(algorithm.Visitor):
self.engine = engine

def visit_CallT(self, node):
-self.generic_visit(node)
-
if (types.is_builtin(node.func.type, "int") or
types.is_builtin(node.func.type, "int32") or
types.is_builtin(node.func.type, "int64")):
typ = node.type.find()
if (not types.is_var(typ["width"]) and
+len(node.args) == 1 and
builtins.is_int(node.args[0].type) and
types.is_var(node.args[0].type.find()["width"])):
if isinstance(node.args[0], asttyped.BinOpT):
@@ -29,3 +28,20 @@ class CastMonomorphizer(algorithm.Visitor):

node.args[0].type.unify(typ)

+if types.is_builtin(node.func.type, "int") or \
+types.is_builtin(node.func.type, "round"):
+typ = node.type.find()
+if types.is_var(typ["width"]):
+typ["width"].unify(types.TValue(32))
+
+self.generic_visit(node)
+
+def visit_CoerceT(self, node):
+if isinstance(node.value, asttyped.NumT) and \
+builtins.is_int(node.type) and \
+builtins.is_int(node.value.type) and \
+not types.is_var(node.type["width"]) and \
+types.is_var(node.value.type["width"]):
+node.value.type.unify(node.type)
+
+self.generic_visit(node)
@@ -15,13 +15,26 @@ class DeadCodeEliminator:
self.process_function(func)

def process_function(self, func):
-modified = True
-while modified:
-modified = False
-for block in list(func.basic_blocks):
-if not any(block.predecessors()) and block != func.entry():
-self.remove_block(block)
-modified = True
+# defer removing those blocks, so our use checks will ignore deleted blocks
+preserve = [func.entry()]
+work_list = [func.entry()]
+while any(work_list):
+block = work_list.pop()
+for succ in block.successors():
+if succ not in preserve:
+preserve.append(succ)
+work_list.append(succ)
+
+to_be_removed = []
+for block in func.basic_blocks:
+if block not in preserve:
+block.is_removed = True
+to_be_removed.append(block)
+for insn in block.instructions:
+insn.is_removed = True
+
+for block in to_be_removed:
+self.remove_block(block)

modified = True
while modified:
@@ -33,7 +46,8 @@ class DeadCodeEliminator:
# it also has to run after the interleaver, but interleaver
# doesn't like to work with IR before DCE.
if isinstance(insn, (ir.Phi, ir.Alloc, ir.GetAttr, ir.GetElem, ir.Coerce,
-ir.Arith, ir.Compare, ir.Select, ir.Quote, ir.Closure)) \
+ir.Arith, ir.Compare, ir.Select, ir.Quote, ir.Closure,
+ir.Offset)) \
and not any(insn.uses):
insn.erase()
modified = True
@@ -41,6 +55,8 @@
def remove_block(self, block):
# block.uses are updated while iterating
for use in set(block.uses):
+if use.is_removed:
+continue
if isinstance(use, ir.Phi):
use.remove_incoming_block(block)
if not any(use.operands):
@@ -55,6 +71,8 @@ class DeadCodeEliminator:

def remove_instruction(self, insn):
for use in set(insn.uses):
+if use.is_removed:
+continue
if isinstance(use, ir.Phi):
use.remove_incoming_value(insn)
if not any(use.operands):
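Aside (not part of the diff): the dead-code pass above now first computes the set of blocks reachable from the entry block with a worklist, and only then deletes the rest. A self-contained sketch of that same idea on a toy control-flow graph; the `Block` class below is hypothetical and not ARTIQ's `ir.BasicBlock`.

```python
# Toy sketch of reachability-based block removal with a worklist.
class Block:
    def __init__(self, name):
        self.name = name
        self.succs = []        # successor blocks
        self.is_removed = False

def prune_unreachable(entry, blocks):
    preserve = [entry]
    work_list = [entry]
    while work_list:
        block = work_list.pop()
        for succ in block.succs:
            if succ not in preserve:
                preserve.append(succ)
                work_list.append(succ)
    # Mark everything first, remove afterwards, so later use-checks can
    # simply skip blocks flagged as removed.
    for b in blocks:
        if b not in preserve:
            b.is_removed = True
    return [b for b in blocks if b in preserve]

# a -> b; c is unreachable and gets pruned:
a, b, c = Block("a"), Block("b"), Block("c")
a.succs = [b]
assert [x.name for x in prune_unreachable(a, [a, b, c])] == ["a", "b"]
```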
@@ -6,6 +6,30 @@ from collections import OrderedDict
from pythonparser import algorithm, diagnostic, ast
from .. import asttyped, types, builtins
from .typedtree_printer import TypedtreePrinter
+from artiq.experiment import kernel
+
+
+def is_nested_empty_list(node):
+"""If the passed AST node is an empty list, or a regularly nested list thereof,
+returns the number of nesting layers, or ``None`` otherwise.
+
+For instance, ``is_nested_empty_list([]) == 1`` and
+``is_nested_empty_list([[], []]) == 2``, but
+``is_nested_empty_list([[[]], []]) == None`` as the number of nesting layers doesn't
+match.
+"""
+if not isinstance(node, ast.List):
+return None
+if not node.elts:
+return 1
+result = is_nested_empty_list(node.elts[0])
+if result is None:
+return None
+for elt in node.elts[:1]:
+if result != is_nested_empty_list(elt):
+return None
+return result + 1
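Aside (not part of the diff): the docstring above describes the helper's contract on `pythonparser` AST nodes. The sketch below mimics the same contract on plain Python lists instead, so it is an analogy rather than the AST-based function added by the patch; the name `nested_empty_depth` is invented for the example.

```python
# Analogy of is_nested_empty_list() operating on plain lists, not AST nodes.
def nested_empty_depth(obj):
    if not isinstance(obj, list):
        return None
    if not obj:
        return 1
    depth = nested_empty_depth(obj[0])
    if depth is None:
        return None
    for elt in obj[1:]:
        if nested_empty_depth(elt) != depth:
            return None          # ragged nesting -> not a regular empty list
    return depth + 1

assert nested_empty_depth([]) == 1
assert nested_empty_depth([[], []]) == 2
assert nested_empty_depth([[[]], []]) is None   # nesting depths differ
assert nested_empty_depth([1, 2]) is None       # not an empty list
```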
class Inferencer(algorithm.Visitor):
"""
@@ -22,6 +46,7 @@ class Inferencer(algorithm.Visitor):
self.function = None # currently visited function, for Return inference
self.in_loop = False
self.has_return = False
+self.subkernel_arg_types = dict()

def _unify(self, typea, typeb, loca, locb, makenotes=None, when=""):
try:
@@ -154,7 +179,7 @@ class Inferencer(algorithm.Visitor):
# Convert to a method.
attr_type = types.TMethod(object_type, attr_type)
self._unify_method_self(attr_type, attr_name, attr_loc, loc, value_node.loc)
-elif types.is_rpc(attr_type):
+elif types.is_rpc(attr_type) or types.is_subkernel(attr_type):
# Convert to a method. We don't have to bother typechecking
# the self argument, since for RPCs anything goes.
attr_type = types.TMethod(object_type, attr_type)
@@ -183,6 +208,14 @@ class Inferencer(algorithm.Visitor):
if builtins.is_bytes(collection.type) or builtins.is_bytearray(collection.type):
self._unify(element.type, builtins.get_iterable_elt(collection.type),
element.loc, None)
+elif builtins.is_array(collection.type):
+array_type = collection.type.find()
+elem_dims = array_type["num_dims"].value - 1
+if elem_dims > 0:
+elem_type = builtins.TArray(array_type["elt"], types.TValue(elem_dims))
+else:
+elem_type = array_type["elt"]
+self._unify(element.type, elem_type, element.loc, collection.loc)
elif builtins.is_iterable(collection.type) and not builtins.is_str(collection.type):
rhs_type = collection.type.find()
rhs_wrapped_lhs_type = types.TMono(rhs_type.name, {"elt": element.type})
@@ -199,15 +232,15 @@ class Inferencer(algorithm.Visitor):
self.generic_visit(node)
value = node.value
if types.is_tuple(value.type):
-diag = diagnostic.Diagnostic("error",
-"multi-dimensional slices are not supported", {},
-node.loc, [])
-self.engine.process(diag)
+for elt in value.type.find().elts:
+self._unify(elt, builtins.TInt(),
+value.loc, None)
else:
self._unify(value.type, builtins.TInt(),
value.loc, None)

def visit_SliceT(self, node):
+self.generic_visit(node)
if (node.lower, node.upper, node.step) == (None, None, None):
self._unify(node.type, builtins.TInt32(),
node.loc, None)
@@ -227,16 +260,78 @@ class Inferencer(algorithm.Visitor):

def visit_SubscriptT(self, node):
self.generic_visit(node)
-if isinstance(node.slice, ast.Index):
-self._unify_iterable(element=node, collection=node.value)
+if types.is_tuple(node.value.type):
+if (not isinstance(node.slice, ast.Index) or
+not isinstance(node.slice.value, ast.Num)):
+diag = diagnostic.Diagnostic(
+"error", "tuples can only be indexed by a constant", {},
+node.slice.loc, []
+)
+self.engine.process(diag)
+return
+
+tuple_type = node.value.type.find()
+index = node.slice.value.n
+if index < 0 or index >= len(tuple_type.elts):
+diag = diagnostic.Diagnostic(
+"error",
+"index {index} is out of range for tuple of size {size}",
+{"index": index, "size": len(tuple_type.elts)},
+node.slice.loc, []
+)
+self.engine.process(diag)
+return
+
+self._unify(node.type, tuple_type.elts[index], node.loc, node.value.loc)
+elif isinstance(node.slice, ast.Index):
+if types.is_tuple(node.slice.value.type):
+if types.is_var(node.value.type):
+return
+if not builtins.is_array(node.value.type):
+diag = diagnostic.Diagnostic(
+"error",
+"multi-dimensional indexing only supported for arrays, not {type}",
+{"type": types.TypePrinter().name(node.value.type)},
+node.loc, [])
+self.engine.process(diag)
+return
+num_idxs = len(node.slice.value.type.find().elts)
+array_type = node.value.type.find()
+num_dims = array_type["num_dims"].value
+remaining_dims = num_dims - num_idxs
+if remaining_dims < 0:
+diag = diagnostic.Diagnostic(
+"error",
+"too many indices for array of dimension {num_dims}",
+{"num_dims": num_dims}, node.slice.loc, [])
+self.engine.process(diag)
+return
+if remaining_dims == 0:
+self._unify(node.type, array_type["elt"], node.loc,
+node.value.loc)
+else:
+self._unify(
+node.type,
+builtins.TArray(array_type["elt"], remaining_dims))
+else:
+self._unify_iterable(element=node, collection=node.value)
elif isinstance(node.slice, ast.Slice):
-self._unify(node.type, node.value.type,
-node.loc, node.value.loc)
-else: # ExtSlice
-pass # error emitted above
+if builtins.is_array(node.value.type):
+if node.slice.step is not None:
+diag = diagnostic.Diagnostic(
+"error",
+"strided slicing not yet supported for NumPy arrays", {},
+node.slice.step.loc, [])
+self.engine.process(diag)
+return
+self._unify(node.type, node.value.type, node.loc, node.value.loc)
+else: # ExtSlice
+pass # error emitted above

def visit_IfExpT(self, node):
self.generic_visit(node)
+self._unify(node.test.type, builtins.TBool(), node.test.loc, None)
self._unify(node.body.type, node.orelse.type,
node.body.loc, node.orelse.loc)
self._unify(node.type, node.body.type,
@@ -265,21 +360,36 @@ class Inferencer(algorithm.Visitor):
node.operand.loc)
self.engine.process(diag)
else: # UAdd, USub
+if types.is_var(operand_type):
+return
+
if builtins.is_numeric(operand_type):
-self._unify(node.type, operand_type,
-node.loc, None)
-elif not types.is_var(operand_type):
-diag = diagnostic.Diagnostic("error",
-"expected unary '{op}' operand to be of numeric type, not {type}",
-{"op": node.op.loc.source(),
-"type": types.TypePrinter().name(operand_type)},
-node.operand.loc)
-self.engine.process(diag)
+self._unify(node.type, operand_type, node.loc, None)
+return
+
+if builtins.is_array(operand_type):
+elt = operand_type.find()["elt"]
+if builtins.is_numeric(elt):
+self._unify(node.type, operand_type, node.loc, None)
+return
+if types.is_var(elt):
+return
+
+diag = diagnostic.Diagnostic("error",
+"expected unary '{op}' operand to be of numeric type, not {type}",
+{"op": node.op.loc.source(),
+"type": types.TypePrinter().name(operand_type)},
+node.operand.loc)
+self.engine.process(diag)

def visit_CoerceT(self, node):
self.generic_visit(node)
if builtins.is_numeric(node.type) and builtins.is_numeric(node.value.type):
pass
+elif (builtins.is_array(node.type) and builtins.is_array(node.value.type)
+and builtins.is_numeric(node.type.find()["elt"])
+and builtins.is_numeric(node.value.type.find()["elt"])):
+pass
else:
printer = types.TypePrinter()
note = diagnostic.Diagnostic("note",
@@ -305,7 +415,7 @@ class Inferencer(algorithm.Visitor):
self.visit(node)
return node

-def _coerce_numeric(self, nodes, map_return=lambda typ: typ):
+def _coerce_numeric(self, nodes, map_return=lambda typ: typ, map_node_type=lambda typ: typ):
# See https://docs.python.org/3/library/stdtypes.html#numeric-types-int-float-complex.
node_types = []
for node in nodes:
@@ -321,6 +431,7 @@ class Inferencer(algorithm.Visitor):
node_types.append(node.type)
else:
node_types.append(node.type)
+node_types = [map_node_type(typ) for typ in node_types]
if any(map(types.is_var, node_types)): # not enough info yet
return
elif not all(map(builtins.is_numeric, node_types)):
@@ -352,8 +463,125 @@ class Inferencer(algorithm.Visitor):
else:
assert False

+def _coerce_binary_broadcast_op(self, left, right, map_return_elt, op_loc):
+def num_dims(typ):
+if builtins.is_array(typ):
+# TODO: If number of dimensions is ever made a non-fixed parameter,
+# need to acutally unify num_dims in _coerce_binop/….
+return typ.find()["num_dims"].value
+return 0
+
+left_dims = num_dims(left.type)
+right_dims = num_dims(right.type)
+if left_dims != right_dims and left_dims != 0 and right_dims != 0:
+# Mismatch (only scalar broadcast supported for now).
+note1 = diagnostic.Diagnostic("note", "operand of dimension {num_dims}",
+{"num_dims": left_dims}, left.loc)
+note2 = diagnostic.Diagnostic("note", "operand of dimension {num_dims}",
+{"num_dims": right_dims}, right.loc)
+diag = diagnostic.Diagnostic(
+"error", "dimensions of '{op}' array operands must match",
+{"op": op_loc.source()}, op_loc, [left.loc, right.loc], [note1, note2])
+self.engine.process(diag)
+return
+
+def map_node_type(typ):
+if not builtins.is_array(typ):
+# This is a single value broadcast across the array.
+return typ
+return typ.find()["elt"]
+
+# Figure out result type, handling broadcasts.
+result_dims = left_dims if left_dims else right_dims
+def map_return(typ):
+elt = map_return_elt(typ)
+result = builtins.TArray(elt=elt, num_dims=result_dims)
+left = builtins.TArray(elt=elt, num_dims=left_dims) if left_dims else elt
+right = builtins.TArray(elt=elt, num_dims=right_dims) if right_dims else elt
+return (result, left, right)
+
+return self._coerce_numeric((left, right),
+map_return=map_return,
+map_node_type=map_node_type)
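Aside (not part of the diff): the broadcast helper above only lets operand ranks differ when one side is a scalar. A small standalone check of that rule; `broadcast_rank` is a hypothetical helper mirroring the dimension test in `_coerce_binary_broadcast_op`, not code from the patch.

```python
# Hypothetical standalone version of the rank check used for array broadcasting:
# ranks must match, unless one operand is a scalar (rank 0).
def broadcast_rank(left_dims, right_dims):
    if left_dims != right_dims and left_dims != 0 and right_dims != 0:
        raise ValueError("dimensions of array operands must match")
    return left_dims if left_dims else right_dims

assert broadcast_rank(2, 2) == 2   # elementwise on equal ranks
assert broadcast_rank(0, 1) == 1   # scalar broadcast across a 1D array
assert broadcast_rank(2, 0) == 2   # scalar broadcast across a 2D array
# broadcast_rank(1, 2) raises: only scalar broadcasting is supported here
```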
def _coerce_binop(self, op, left, right):
-if isinstance(op, (ast.BitAnd, ast.BitOr, ast.BitXor,
+if isinstance(op, ast.MatMult):
+if types.is_var(left.type) or types.is_var(right.type):
+return
+
+def num_dims(operand):
+if not builtins.is_array(operand.type):
+diag = diagnostic.Diagnostic(
+"error",
+"expected matrix multiplication operand to be of array type, not {type}",
+{
+"op": op.loc.source(),
+"type": types.TypePrinter().name(operand.type)
+}, op.loc, [operand.loc])
+self.engine.process(diag)
+return
+num_dims = operand.type.find()["num_dims"].value
+if num_dims not in (1, 2):
+diag = diagnostic.Diagnostic(
+"error",
+"expected matrix multiplication operand to be 1- or 2-dimensional, not {type}",
+{
+"op": op.loc.source(),
+"type": types.TypePrinter().name(operand.type)
+}, op.loc, [operand.loc])
+self.engine.process(diag)
+return
+return num_dims
+
+left_dims = num_dims(left)
+if not left_dims:
+return
+right_dims = num_dims(right)
+if not right_dims:
+return
+
+def map_node_type(typ):
+return typ.find()["elt"]
+
+def map_return(typ):
+if left_dims == 1:
+if right_dims == 1:
+result_dims = 0
+else:
+result_dims = 1
+elif right_dims == 1:
+result_dims = 1
+else:
+result_dims = 2
+result = typ if result_dims == 0 else builtins.TArray(
+typ, result_dims)
+return (result, builtins.TArray(typ, left_dims),
+builtins.TArray(typ, right_dims))
+
+return self._coerce_numeric((left, right),
+map_return=map_return,
+map_node_type=map_node_type)
+elif builtins.is_array(left.type) or builtins.is_array(right.type):
+# Operations on arrays are element-wise (possibly using broadcasting).
+
+# TODO: Allow only for integer arrays.
+# allowed_int_array_ops = (ast.BitAnd, ast.BitOr, ast.BitXor, ast.LShift,
+# ast.RShift)
+allowed_array_ops = (ast.Add, ast.Mult, ast.FloorDiv, ast.Mod,
+ast.Pow, ast.Sub, ast.Div)
+if not isinstance(op, allowed_array_ops):
+diag = diagnostic.Diagnostic(
+"error", "operator '{op}' not valid for array types",
+{"op": op.loc.source()}, op.loc)
+self.engine.process(diag)
+return
+
+def map_result(typ):
+if isinstance(op, ast.Div):
+return builtins.TFloat()
+return typ
+return self._coerce_binary_broadcast_op(left, right, map_result, op.loc)
+elif isinstance(op, (ast.BitAnd, ast.BitOr, ast.BitXor,
ast.LShift, ast.RShift)):
# bitwise operators require integers
for operand in (left, right):
@@ -452,7 +680,7 @@ class Inferencer(algorithm.Visitor):
# division always returns a float
return self._coerce_numeric((left, right),
lambda typ: (builtins.TFloat(), builtins.TFloat(), builtins.TFloat()))
-else: # MatMult
+else:
diag = diagnostic.Diagnostic("error",
"operator '{op}' is not supported", {"op": op.loc.source()},
op.loc)
@@ -695,28 +923,111 @@ class Inferencer(algorithm.Visitor):
"strings currently cannot be constructed", {},
node.loc)
self.engine.process(diag)
-elif types.is_builtin(typ, "list") or types.is_builtin(typ, "array"):
-if types.is_builtin(typ, "list"):
-valid_forms = lambda: [
-valid_form("list() -> list(elt='a)"),
-valid_form("list(x:'a) -> list(elt='b) where 'a is iterable")
-]
-
-self._unify(node.type, builtins.TList(),
-node.loc, None)
-elif types.is_builtin(typ, "array"):
-valid_forms = lambda: [
-valid_form("array() -> array(elt='a)"),
-valid_form("array(x:'a) -> array(elt='b) where 'a is iterable")
-]
-self._unify(node.type, builtins.TArray(),
-node.loc, None)
+elif types.is_builtin(typ, "array"):
+valid_forms = lambda: [
+valid_form("array(x:'a) -> array(elt='b) where 'a is iterable"),
+valid_form("array(x:'a, dtype:'b) -> array(elt='b) where 'a is iterable")
+]
+
+explicit_dtype = None
+keywords_acceptable = False
+if len(node.keywords) == 0:
+keywords_acceptable = True
+elif len(node.keywords) == 1:
+if node.keywords[0].arg == "dtype":
+keywords_acceptable = True
+explicit_dtype = node.keywords[0].value
+if len(node.args) == 1 and keywords_acceptable:
+arg, = node.args
+
+num_empty_dims = is_nested_empty_list(arg)
+if num_empty_dims is not None:
+# As a special case, following the behaviour of numpy.array (and
+# repr() on ndarrays), consider empty lists to be exactly of the
+# number of dimensions given, instead of potentially containing an
+# unknown number of extra dimensions.
+num_dims = num_empty_dims
+
+# The ultimate element type will be TVar initially, but we might be
+# able to resolve it from context.
+elt = arg.type
+for _ in range(num_dims):
+assert builtins.is_list(elt)
+elt = elt.find()["elt"]
+else:
+# In the absence of any other information (there currently isn't a way
+# to specify any), assume that all iterables are expandable into a
+# (runtime-checked) rectangular array of the innermost element type.
+elt = arg.type
+num_dims = 0
+expected_dims = (node.type.find()["num_dims"].value
+if builtins.is_array(node.type) else -1)
+while True:
+if num_dims == expected_dims:
+# If we already know the number of dimensions of the result,
+# stop so we can disambiguate the (innermost) element type of
+# the argument if it is still unknown.
+break
+if types.is_var(elt):
+# Can't make progress here because we don't know how many more
+# dimensions might be "hidden" inside.
+return
+if not builtins.is_iterable(elt) or builtins.is_str(elt):
+break
+if builtins.is_array(elt):
+num_dims += elt.find()["num_dims"].value
+else:
+num_dims += 1
+elt = builtins.get_iterable_elt(elt)
+
+if explicit_dtype is not None:
+# TODO: Factor out type detection; support quoted type constructors
+# (TList(TInt32), …)?
+typ = explicit_dtype.type
+if types.is_builtin(typ, "int32"):
+elt = builtins.TInt32()
+elif types.is_builtin(typ, "int64"):
+elt = builtins.TInt64()
+elif types.is_constructor(typ):
+elt = typ.find().instance
+else:
+diag = diagnostic.Diagnostic(
+"error",
+"dtype argument of {builtin}() must be a valid constructor",
+{"builtin": typ.find().name},
+node.func.loc,
+notes=[note])
+self.engine.process(diag)
+return
+
+if num_dims == 0:
+note = diagnostic.Diagnostic(
+"note", "this expression has type {type}",
+{"type": types.TypePrinter().name(arg.type)}, arg.loc)
+diag = diagnostic.Diagnostic(
+"error",
+"the argument of {builtin}() must be of an iterable type",
+{"builtin": typ.find().name},
+node.func.loc,
+notes=[note])
+self.engine.process(diag)
+return
+
+self._unify(node.type,
+builtins.TArray(elt, types.TValue(num_dims)),
+node.loc, arg.loc)
else:
-assert False
+diagnose(valid_forms())
+elif types.is_builtin(typ, "list"):
+valid_forms = lambda: [
+valid_form("list() -> list(elt='a)"),
+valid_form("list(x:'a) -> list(elt='b) where 'a is iterable")
+]
+
+self._unify(node.type, builtins.TList(), node.loc, None)
+
if len(node.args) == 0 and len(node.keywords) == 0:
pass # []
elif len(node.args) == 1 and len(node.keywords) == 0:
arg, = node.args

@@ -811,6 +1122,28 @@ class Inferencer(algorithm.Visitor):
arg.loc, None)
else:
diagnose(valid_forms())
+elif types.is_builtin(typ, "abs"):
+fn = typ.name
+
+valid_forms = lambda: [
+valid_form("abs(x:numpy.int?) -> numpy.int?"),
+valid_form("abs(x:float) -> float")
+]
+
+if len(node.args) == 1 and len(node.keywords) == 0:
+(arg,) = node.args
+if builtins.is_int(arg.type) or builtins.is_float(arg.type):
+self._unify(arg.type, node.type,
+arg.loc, node.loc)
+elif types.is_var(arg.type):
+pass # undetermined yet
+else:
+diag = diagnostic.Diagnostic("error",
+"the arguments of abs() must be of a numeric type", {},
+node.func.loc)
+self.engine.process(diag)
+else:
+diagnose(valid_forms())
elif types.is_builtin(typ, "min") or types.is_builtin(typ, "max"):
fn = typ.name

@@ -857,21 +1190,69 @@ class Inferencer(algorithm.Visitor):
diagnose(valid_forms())
elif types.is_builtin(typ, "make_array"):
valid_forms = lambda: [
-valid_form("numpy.full(count:int32, value:'a) -> numpy.array(elt='a)")
+valid_form("numpy.full(count:int32, value:'a) -> array(elt='a, num_dims=1)"),
+valid_form("numpy.full(shape:(int32,)*'b, value:'a) -> array(elt='a, num_dims='b)"),
]

-self._unify(node.type, builtins.TArray(),
-node.loc, None)

if len(node.args) == 2 and len(node.keywords) == 0:
arg0, arg1 = node.args

-self._unify(arg0.type, builtins.TInt32(),
-arg0.loc, None)
+if types.is_var(arg0.type):
+return # undetermined yet
+elif types.is_tuple(arg0.type):
+num_dims = len(arg0.type.find().elts)
+self._unify(arg0.type, types.TTuple([builtins.TInt32()] * num_dims),
+arg0.loc, None)
+else:
+num_dims = 1
+self._unify(arg0.type, builtins.TInt32(),
+arg0.loc, None)
+
+self._unify(node.type, builtins.TArray(num_dims=num_dims),
+node.loc, None)
self._unify(arg1.type, node.type.find()["elt"],
arg1.loc, None)
else:
diagnose(valid_forms())
+elif types.is_builtin(typ, "numpy.transpose"):
+valid_forms = lambda: [
+valid_form("transpose(x: array(elt='a, num_dims=1)) -> array(elt='a, num_dims=1)"),
+valid_form("transpose(x: array(elt='a, num_dims=2)) -> array(elt='a, num_dims=2)")
+]
+
+if len(node.args) == 1 and len(node.keywords) == 0:
+arg, = node.args
+
+if types.is_var(arg.type):
+pass # undetermined yet
+elif not builtins.is_array(arg.type):
+note = diagnostic.Diagnostic(
+"note", "this expression has type {type}",
+{"type": types.TypePrinter().name(arg.type)}, arg.loc)
+diag = diagnostic.Diagnostic(
+"error",
+"the argument of {builtin}() must be an array",
+{"builtin": typ.find().name},
+node.func.loc,
+notes=[note])
+self.engine.process(diag)
+else:
+num_dims = arg.type.find()["num_dims"].value
+if num_dims not in (1, 2):
+note = diagnostic.Diagnostic(
+"note", "argument is {num_dims}-dimensional",
+{"num_dims": num_dims}, arg.loc)
+diag = diagnostic.Diagnostic(
+"error",
+"{builtin}() is currently only supported for up to "
+"two-dimensional arrays", {"builtin": typ.find().name},
+node.func.loc,
+notes=[note])
+self.engine.process(diag)
+else:
+self._unify(node.type, arg.type, node.loc, None)
+else:
+diagnose(valid_forms())
elif types.is_builtin(typ, "rtio_log"):
valid_forms = lambda: [
valid_form("rtio_log(channel:str, args...) -> None"),
@@ -905,9 +1286,6 @@ class Inferencer(algorithm.Visitor):
elif types.is_builtin(typ, "at_mu"):
simple_form("at_mu(time_mu:numpy.int64) -> None",
[builtins.TInt64()])
-elif types.is_builtin(typ, "watchdog"):
-simple_form("watchdog(time:float) -> [builtin context manager]",
-[builtins.TFloat()], builtins.TNone())
elif types.is_constructor(typ):
# An user-defined class.
self._unify(node.type, typ.find().instance,
@@ -916,6 +1294,55 @@ class Inferencer(algorithm.Visitor):
# Ignored.
self._unify(node.type, builtins.TNone(),
node.loc, None)
+elif types.is_builtin(typ, "subkernel_await"):
+valid_forms = lambda: [
+valid_form("subkernel_await(f: subkernel) -> f return type"),
+valid_form("subkernel_await(f: subkernel, timeout: numpy.int64) -> f return type")
+]
+if 1 <= len(node.args) <= 2:
+arg0 = node.args[0].type
+if types.is_var(arg0):
+pass # undetermined yet
+else:
+if types.is_method(arg0):
+fn = types.get_method_function(arg0)
+elif types.is_function(arg0) or types.is_subkernel(arg0):
+fn = arg0
+else:
+diagnose(valid_forms())
+self._unify(node.type, fn.ret,
+node.loc, None)
+if len(node.args) == 2:
+arg1 = node.args[1]
+if types.is_var(arg1.type):
+pass
+elif builtins.is_int(arg1.type):
+# promote to TInt64
+self._unify(arg1.type, builtins.TInt64(),
+arg1.loc, None)
+else:
+diagnose(valid_forms())
+else:
+diagnose(valid_forms())
+elif types.is_builtin(typ, "subkernel_preload"):
+valid_forms = lambda: [
+valid_form("subkernel_preload(f: subkernel) -> None")
+]
+if len(node.args) == 1:
+arg0 = node.args[0].type
+if types.is_var(arg0):
+pass # undetermined yet
+else:
+if types.is_method(arg0):
+fn = types.get_method_function(arg0)
+elif types.is_function(arg0) or types.is_subkernel(arg0):
+fn = arg0
+else:
+diagnose(valid_forms())
+self._unify(node.type, fn.ret,
+node.loc, None)
+else:
+diagnose(valid_forms())
else:
assert False
@@ -954,6 +1381,7 @@ class Inferencer(algorithm.Visitor):
typ_args = typ.args
typ_optargs = typ.optargs
typ_ret = typ.ret
+typ_func = typ
else:
typ_self = types.get_method_self(typ)
typ_func = types.get_method_function(typ)
@@ -991,11 +1419,43 @@ class Inferencer(algorithm.Visitor):
self.engine.process(diag)
return

+# Array broadcasting for functions explicitly marked as such.
+if len(node.args) == typ_arity and types.is_broadcast_across_arrays(typ):
+if typ_arity == 1:
+arg_type = node.args[0].type.find()
+if builtins.is_array(arg_type):
+typ_arg, = typ_args.values()
+self._unify(typ_arg, arg_type["elt"], node.args[0].loc, None)
+self._unify(node.type, builtins.TArray(typ_ret, arg_type["num_dims"]),
+node.loc, None)
+return
+elif typ_arity == 2:
+if any(builtins.is_array(arg.type) for arg in node.args):
+ret, arg0, arg1 = self._coerce_binary_broadcast_op(
+node.args[0], node.args[1], lambda t: typ_ret, node.loc)
+node.args[0] = self._coerce_one(arg0, node.args[0],
+other_node=node.args[1])
+node.args[1] = self._coerce_one(arg1, node.args[1],
+other_node=node.args[0])
+self._unify(node.type, ret, node.loc, None)
+return
+if types.is_subkernel(typ_func) and typ_func.sid not in self.subkernel_arg_types:
+self.subkernel_arg_types[typ_func.sid] = []
+
for actualarg, (formalname, formaltyp) in \
zip(node.args, list(typ_args.items()) + list(typ_optargs.items())):
self._unify(actualarg.type, formaltyp,
actualarg.loc, None)
passed_args[formalname] = actualarg.loc
+if types.is_subkernel(typ_func):
+if types.is_instance(actualarg.type):
+# objects cannot be passed to subkernels, as rpc code doesn't support them
+diag = diagnostic.Diagnostic("error",
+"argument '{name}' of type: {typ} is not supported in subkernels",
+{"name": formalname, "typ": actualarg.type},
+actualarg.loc, [])
+self.engine.process(diag)
+self.subkernel_arg_types[typ_func.sid].append((formalname, formaltyp))

for keyword in node.keywords:
if keyword.arg in passed_args:
@@ -1026,7 +1486,7 @@ class Inferencer(algorithm.Visitor):
passed_args[keyword.arg] = keyword.arg_loc

for formalname in typ_args:
-if formalname not in passed_args:
+if formalname not in passed_args and not node.remote_fn:
note = diagnostic.Diagnostic("note",
"the called function is of type {type}",
{"type": types.TypePrinter().name(node.func.type)},
@@ -1131,9 +1591,7 @@ class Inferencer(algorithm.Visitor):

typ = node.context_expr.type
if (types.is_builtin(typ, "interleave") or types.is_builtin(typ, "sequential") or
-types.is_builtin(typ, "parallel") or
-(isinstance(node.context_expr, asttyped.CallT) and
-types.is_builtin(node.context_expr.func.type, "watchdog"))):
+types.is_builtin(typ, "parallel")):
# builtin context managers
if node.optional_vars is not None:
self._unify(node.optional_vars.type, builtins.TNone(),
@@ -1291,7 +1749,14 @@ class Inferencer(algorithm.Visitor):

def visit_FunctionDefT(self, node):
for index, decorator in enumerate(node.decorator_list):
-if types.is_builtin(decorator.type, "kernel") or \
+def eval_attr(attr):
+if isinstance(attr.value, asttyped.QuoteT):
+return getattr(attr.value.value, attr.attr)
+return getattr(eval_attr(attr.value), attr.attr)
+if isinstance(decorator, asttyped.AttributeT):
+decorator = eval_attr(decorator)
+if id(decorator) == id(kernel) or \
+types.is_builtin(decorator.type, "kernel") or \
isinstance(decorator, asttyped.CallT) and \
types.is_builtin(decorator.func.type, "kernel"):
continue
@@ -26,22 +26,3 @@ class IntMonomorphizer(algorithm.Visitor):
return

node.type["width"].unify(types.TValue(width))
-
-def visit_CallT(self, node):
-self.generic_visit(node)
-
-if types.is_builtin(node.func.type, "int") or \
-types.is_builtin(node.func.type, "round"):
-typ = node.type.find()
-if types.is_var(typ["width"]):
-typ["width"].unify(types.TValue(32))
-
-def visit_CoerceT(self, node):
-if isinstance(node.value, asttyped.NumT) and \
-builtins.is_int(node.type) and \
-builtins.is_int(node.value.type) and \
-not types.is_var(node.type["width"]) and \
-types.is_var(node.value.type["width"]):
-node.value.type.unify(node.type)
-
-self.generic_visit(node)
@@ -280,7 +280,7 @@ class IODelayEstimator(algorithm.Visitor):
context="as an argument for delay_mu()")
call_delay = value
elif not types.is_builtin(typ):
-if types.is_function(typ) or types.is_rpc(typ):
+if types.is_function(typ) or types.is_rpc(typ) or types.is_subkernel(typ):
offset = 0
elif types.is_method(typ):
offset = 1
@@ -288,7 +288,7 @@ class IODelayEstimator(algorithm.Visitor):
else:
assert False

-if types.is_rpc(typ):
+if types.is_rpc(typ) or types.is_subkernel(typ):
call_delay = iodelay.Const(0)
else:
delay = typ.find().delay.find()
@@ -311,13 +311,20 @@ class IODelayEstimator(algorithm.Visitor):
args[arg_name] = arg_node

free_vars = delay.duration.free_vars()
-node.arg_exprs = {
-arg: self.evaluate(args[arg], abort=abort,
-context="in the expression for argument '{}' "
-"that affects I/O delay".format(arg))
-for arg in free_vars
-}
-call_delay = delay.duration.fold(node.arg_exprs)
+try:
+node.arg_exprs = {
+arg: self.evaluate(args[arg], abort=abort,
+context="in the expression for argument '{}' "
+"that affects I/O delay".format(arg))
+for arg in free_vars
+}
+call_delay = delay.duration.fold(node.arg_exprs)
+except KeyError as e:
+if getattr(node, "remote_fn", False):
+note = diagnostic.Diagnostic("note",
+"function called here", {},
+node.loc)
+self.abort("due to arguments passed remotely", node.loc, note)
else:
assert False
else:

(File diff suppressed because it is too large.)
@@ -3,6 +3,7 @@ The :mod:`types` module contains the classes describing the types
in :mod:`asttyped`.
"""

+import builtins
import string
from collections import OrderedDict
from . import iodelay
@@ -55,38 +56,39 @@ class TVar(Type):

def __init__(self):
self.parent = self
+self.rank = 0

def find(self):
-if self.parent is self:
+parent = self.parent
+if parent is self:
return self
else:
# The recursive find() invocation is turned into a loop
# because paths resulting from unification of large arrays
# can easily cause a stack overflow.
root = self
-while root.__class__ == TVar:
-if root is root.parent:
-break
-else:
-root = root.parent
-
-# path compression
-iter = self
-while iter.__class__ == TVar:
-if iter is iter.parent:
-break
-else:
-iter, iter.parent = iter.parent, root
-
-return root
+while parent.__class__ == TVar and root is not parent:
+_, parent = root, root.parent = parent, parent.parent
+return root.parent

def unify(self, other):
-other = other.find()
-if self.parent is self:
-self.parent = other
+if other is self:
+return
+x = other.find()
+y = self.find()
+if x is y:
+return
+if y.__class__ == TVar:
+if x.__class__ == TVar:
+if x.rank < y.rank:
+x, y = y, x
+y.parent = x
+if x.rank == y.rank:
+x.rank += 1
+else:
+y.parent = x
else:
-self.find().unify(other)
+y.unify(x)
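Aside (not part of the diff): the `TVar` changes above turn type-variable unification into textbook union-find with union by rank and path compression. A self-contained sketch of that data structure on generic nodes (not ARTIQ `TVar`s), using path halving in `find()`:

```python
# Generic union-find with union by rank and path halving.
class Node:
    def __init__(self):
        self.parent = self
        self.rank = 0

    def find(self):
        root = self
        while root.parent is not root:
            # Path halving: point each visited node at its grandparent.
            root.parent = root.parent.parent
            root = root.parent
        return root

    def union(self, other):
        x, y = self.find(), other.find()
        if x is y:
            return x
        if x.rank < y.rank:
            x, y = y, x
        y.parent = x              # attach the shallower tree under the deeper one
        if x.rank == y.rank:
            x.rank += 1
        return x

a, b, c = Node(), Node(), Node()
a.union(b)
b.union(c)
assert a.find() is c.find()
```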
def fold(self, accum, fn):
if self.parent is self:
@@ -95,6 +97,8 @@ class TVar(Type):
return self.find().fold(accum, fn)

def __repr__(self):
+if getattr(builtins, "__in_sphinx__", False):
+return str(self)
if self.parent is self:
return "<artiq.compiler.types.TVar %d>" % id(self)
else:
@@ -124,6 +128,8 @@ class TMono(Type):
return self

def unify(self, other):
+if other is self:
+return
if isinstance(other, TMono) and self.name == other.name:
assert self.params.keys() == other.params.keys()
for param in self.params:
@@ -139,6 +145,8 @@ class TMono(Type):
return fn(accum, self)

def __repr__(self):
+if getattr(builtins, "__in_sphinx__", False):
+return str(self)
return "artiq.compiler.types.TMono(%s, %s)" % (repr(self.name), repr(self.params))

def __getitem__(self, param):
@@ -171,6 +179,8 @@ class TTuple(Type):
return self

def unify(self, other):
+if other is self:
+return
if isinstance(other, TTuple) and len(self.elts) == len(other.elts):
for selfelt, otherelt in zip(self.elts, other.elts):
selfelt.unify(otherelt)
@@ -185,6 +195,8 @@ class TTuple(Type):
return fn(accum, self)

def __repr__(self):
+if getattr(builtins, "__in_sphinx__", False):
+return str(self)
return "artiq.compiler.types.TTuple(%s)" % repr(self.elts)

def __eq__(self, other):
@@ -198,8 +210,10 @@ class TTuple(Type):
return hash(tuple(self.elts))

class _TPointer(TMono):
-def __init__(self):
-super().__init__("pointer")
+def __init__(self, elt=None):
+if elt is None:
+elt = TMono("int", {"width": 8}) # i8*
+super().__init__("pointer", params={"elt": elt})

class TFunction(Type):
"""
@@ -237,6 +251,8 @@ class TFunction(Type):
return self

def unify(self, other):
+if other is self:
+return
if isinstance(other, TFunction) and \
self.args.keys() == other.args.keys() and \
self.optargs.keys() == other.optargs.keys():
@@ -259,6 +275,8 @@ class TFunction(Type):
return fn(accum, self)

def __repr__(self):
+if getattr(builtins, "__in_sphinx__", False):
+return str(self)
return "artiq.compiler.types.TFunction({}, {}, {})".format(
repr(self.args), repr(self.optargs), repr(self.ret))

@@ -273,20 +291,29 @@ class TFunction(Type):
def __hash__(self):
return hash((_freeze(self.args), _freeze(self.optargs), self.ret))

-class TCFunction(TFunction):
+class TExternalFunction(TFunction):
"""
-A function type of a runtime-provided C function.
+A type of an externally-provided function.

-:ivar name: (str) C function name
-:ivar flags: (set of str) C function flags.
+This can be any function following the C ABI, such as provided by the
+C/Rust runtime, or a compiler backend intrinsic. The mangled name to link
+against is encoded as part of the type.
+
+:ivar name: (str) external symbol name.
+This will be the symbol linked against (following any extra C name
+mangling rules).
+:ivar flags: (set of str) function flags.
Flag ``nounwind`` means the function never raises an exception.
-Flag ``nowrite`` means the function never writes any memory
+Flag ``nowrite`` means the function never accesses any memory
that the ARTIQ Python code can observe.
+:ivar broadcast_across_arrays: (bool)
+If True, the function is transparently applied element-wise when called
+with TArray arguments.
"""

attributes = OrderedDict()

-def __init__(self, args, ret, name, flags={}):
+def __init__(self, args, ret, name, flags=set(), broadcast_across_arrays=False):
assert isinstance(flags, set)
for flag in flags:
assert flag in {'nounwind', 'nowrite'}
@@ -294,9 +321,12 @@ class TCFunction(TFunction):
self.name = name
self.delay = TFixedDelay(iodelay.Const(0))
self.flags = flags
+self.broadcast_across_arrays = broadcast_across_arrays

def unify(self, other):
-if isinstance(other, TCFunction) and \
+if other is self:
+return
+if isinstance(other, TExternalFunction) and \
self.name == other.name:
super().unify(other)
elif isinstance(other, TVar):
@@ -311,22 +341,24 @@ class TRPC(Type):
:ivar ret: (:class:`Type`)
return type
:ivar service: (int) RPC service number
-:ivar async: (bool) whether the RPC blocks until return
+:ivar is_async: (bool) whether the RPC blocks until return
"""

attributes = OrderedDict()

-def __init__(self, ret, service, async=False):
|
def __init__(self, ret, service, is_async=False):
|
||||||
assert isinstance(ret, Type)
|
assert isinstance(ret, Type)
|
||||||
self.ret, self.service, self.async = ret, service, async
|
self.ret, self.service, self.is_async = ret, service, is_async
|
||||||
|
|
||||||
def find(self):
|
def find(self):
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def unify(self, other):
|
def unify(self, other):
|
||||||
|
if other is self:
|
||||||
|
return
|
||||||
if isinstance(other, TRPC) and \
|
if isinstance(other, TRPC) and \
|
||||||
self.service == other.service and \
|
self.service == other.service and \
|
||||||
self.async == other.async:
|
self.is_async == other.is_async:
|
||||||
self.ret.unify(other.ret)
|
self.ret.unify(other.ret)
|
||||||
elif isinstance(other, TVar):
|
elif isinstance(other, TVar):
|
||||||
other.unify(self)
|
other.unify(self)
|
||||||
|
@ -338,12 +370,14 @@ class TRPC(Type):
|
||||||
return fn(accum, self)
|
return fn(accum, self)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
return "artiq.compiler.types.TRPC({})".format(repr(self.ret))
|
return "artiq.compiler.types.TRPC({})".format(repr(self.ret))
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
return isinstance(other, TRPC) and \
|
return isinstance(other, TRPC) and \
|
||||||
self.service == other.service and \
|
self.service == other.service and \
|
||||||
self.async == other.async
|
self.is_async == other.is_async
|
||||||
|
|
||||||
def __ne__(self, other):
|
def __ne__(self, other):
|
||||||
return not (self == other)
|
return not (self == other)
|
||||||
|
@ -351,6 +385,50 @@ class TRPC(Type):
|
||||||
def __hash__(self):
|
def __hash__(self):
|
||||||
return hash(self.service)
|
return hash(self.service)
|
||||||
|
|
||||||
|
class TSubkernel(TFunction):
|
||||||
|
"""
|
||||||
|
A kernel to be run on a satellite.
|
||||||
|
|
||||||
|
:ivar args: (:class:`collections.OrderedDict` of string to :class:`Type`)
|
||||||
|
function arguments
|
||||||
|
:ivar ret: (:class:`Type`)
|
||||||
|
return type
|
||||||
|
:ivar sid: (int) subkernel ID number
|
||||||
|
:ivar destination: (int) satellite destination number
|
||||||
|
"""
|
||||||
|
|
||||||
|
attributes = OrderedDict()
|
||||||
|
|
||||||
|
def __init__(self, args, optargs, ret, sid, destination):
|
||||||
|
assert isinstance(ret, Type)
|
||||||
|
super().__init__(args, optargs, ret)
|
||||||
|
self.sid, self.destination = sid, destination
|
||||||
|
self.delay = TFixedDelay(iodelay.Const(0))
|
||||||
|
|
||||||
|
def unify(self, other):
|
||||||
|
if other is self:
|
||||||
|
return
|
||||||
|
if isinstance(other, TSubkernel) and \
|
||||||
|
self.sid == other.sid and \
|
||||||
|
self.destination == other.destination:
|
||||||
|
self.ret.unify(other.ret)
|
||||||
|
elif isinstance(other, TVar):
|
||||||
|
other.unify(self)
|
||||||
|
else:
|
||||||
|
raise UnificationError(self, other)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
|
return "artiq.compiler.types.TSubkernel({})".format(repr(self.ret))
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return isinstance(other, TSubkernel) and \
|
||||||
|
self.sid == other.sid
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash(self.sid)
|
||||||
|
|
||||||
class TBuiltin(Type):
|
class TBuiltin(Type):
|
||||||
"""
|
"""
|
||||||
An instance of builtin type. Every instance of a builtin
|
An instance of builtin type. Every instance of a builtin
|
||||||
|
@ -366,6 +444,8 @@ class TBuiltin(Type):
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def unify(self, other):
|
def unify(self, other):
|
||||||
|
if other is self:
|
||||||
|
return
|
||||||
if self != other:
|
if self != other:
|
||||||
raise UnificationError(self, other)
|
raise UnificationError(self, other)
|
||||||
|
|
||||||
|
@ -373,6 +453,8 @@ class TBuiltin(Type):
|
||||||
return fn(accum, self)
|
return fn(accum, self)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
return "artiq.compiler.types.{}({})".format(type(self).__name__, repr(self.name))
|
return "artiq.compiler.types.{}({})".format(type(self).__name__, repr(self.name))
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
|
@ -388,6 +470,11 @@ class TBuiltin(Type):
|
||||||
class TBuiltinFunction(TBuiltin):
|
class TBuiltinFunction(TBuiltin):
|
||||||
"""
|
"""
|
||||||
A type of a builtin function.
|
A type of a builtin function.
|
||||||
|
|
||||||
|
Builtin functions are treated specially throughout all stages of the
|
||||||
|
compilation process according to their name (e.g. calls may not actually
|
||||||
|
lower to a function call). See :class:`TExternalFunction` for externally
|
||||||
|
defined functions that are otherwise regular.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
class TConstructor(TBuiltin):
|
class TConstructor(TBuiltin):
|
||||||
|
@ -428,6 +515,8 @@ class TInstance(TMono):
|
||||||
self.constant_attributes = set()
|
self.constant_attributes = set()
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
return "artiq.compiler.types.TInstance({}, {})".format(
|
return "artiq.compiler.types.TInstance({}, {})".format(
|
||||||
repr(self.name), repr(self.attributes))
|
repr(self.name), repr(self.attributes))
|
||||||
|
|
||||||
|
@ -443,6 +532,8 @@ class TModule(TMono):
|
||||||
self.constant_attributes = set()
|
self.constant_attributes = set()
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
return "artiq.compiler.types.TModule({}, {})".format(
|
return "artiq.compiler.types.TModule({}, {})".format(
|
||||||
repr(self.name), repr(self.attributes))
|
repr(self.name), repr(self.attributes))
|
||||||
|
|
||||||
|
@ -471,6 +562,8 @@ class TValue(Type):
|
||||||
return self
|
return self
|
||||||
|
|
||||||
def unify(self, other):
|
def unify(self, other):
|
||||||
|
if other is self:
|
||||||
|
return
|
||||||
if isinstance(other, TVar):
|
if isinstance(other, TVar):
|
||||||
other.unify(self)
|
other.unify(self)
|
||||||
elif self != other:
|
elif self != other:
|
||||||
|
@ -480,6 +573,8 @@ class TValue(Type):
|
||||||
return fn(accum, self)
|
return fn(accum, self)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
return "artiq.compiler.types.TValue(%s)" % repr(self.value)
|
return "artiq.compiler.types.TValue(%s)" % repr(self.value)
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
|
@ -538,6 +633,8 @@ class TDelay(Type):
|
||||||
return not (self == other)
|
return not (self == other)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
|
if getattr(builtins, "__in_sphinx__", False):
|
||||||
|
return str(self)
|
||||||
if self.duration is None:
|
if self.duration is None:
|
||||||
return "<{}.TIndeterminateDelay>".format(__name__)
|
return "<{}.TIndeterminateDelay>".format(__name__)
|
||||||
elif self.cause is None:
|
elif self.cause is None:
|
||||||
|
@ -561,13 +658,15 @@ def is_mono(typ, name=None, **params):
|
||||||
if not isinstance(typ, TMono):
|
if not isinstance(typ, TMono):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
params_match = True
|
if name is not None and typ.name != name:
|
||||||
|
return False
|
||||||
|
|
||||||
for param in params:
|
for param in params:
|
||||||
if param not in typ.params:
|
if param not in typ.params:
|
||||||
return False
|
return False
|
||||||
params_match = params_match and \
|
if typ.params[param].find() != params[param].find():
|
||||||
typ.params[param].find() == params[param].find()
|
return False
|
||||||
return name is None or (typ.name == name and params_match)
|
return True
|
||||||
|
|
||||||
def is_polymorphic(typ):
|
def is_polymorphic(typ):
|
||||||
return typ.fold(False, lambda accum, typ: accum or is_var(typ))
|
return typ.fold(False, lambda accum, typ: accum or is_var(typ))
|
||||||
|
@ -589,12 +688,15 @@ def is_function(typ):
|
||||||
def is_rpc(typ):
|
def is_rpc(typ):
|
||||||
return isinstance(typ.find(), TRPC)
|
return isinstance(typ.find(), TRPC)
|
||||||
|
|
||||||
def is_c_function(typ, name=None):
|
def is_subkernel(typ):
|
||||||
|
return isinstance(typ.find(), TSubkernel)
|
||||||
|
|
||||||
|
def is_external_function(typ, name=None):
|
||||||
typ = typ.find()
|
typ = typ.find()
|
||||||
if name is None:
|
if name is None:
|
||||||
return isinstance(typ, TCFunction)
|
return isinstance(typ, TExternalFunction)
|
||||||
else:
|
else:
|
||||||
return isinstance(typ, TCFunction) and \
|
return isinstance(typ, TExternalFunction) and \
|
||||||
typ.name == name
|
typ.name == name
|
||||||
|
|
||||||
def is_builtin(typ, name=None):
|
def is_builtin(typ, name=None):
|
||||||
|
@ -613,6 +715,15 @@ def is_builtin_function(typ, name=None):
|
||||||
return isinstance(typ, TBuiltinFunction) and \
|
return isinstance(typ, TBuiltinFunction) and \
|
||||||
typ.name == name
|
typ.name == name
|
||||||
|
|
||||||
|
def is_broadcast_across_arrays(typ):
|
||||||
|
# For now, broadcasting is only exposed to predefined external functions, and
|
||||||
|
# statically selected. Might be extended to user-defined functions if the design
|
||||||
|
# pans out.
|
||||||
|
typ = typ.find()
|
||||||
|
if not isinstance(typ, TExternalFunction):
|
||||||
|
return False
|
||||||
|
return typ.broadcast_across_arrays
|
||||||
|
|
||||||
def is_constructor(typ, name=None):
|
def is_constructor(typ, name=None):
|
||||||
typ = typ.find()
|
typ = typ.find()
|
||||||
if name is not None:
|
if name is not None:
|
||||||
|
@ -717,12 +828,14 @@ class TypePrinter(object):
|
||||||
else:
|
else:
|
||||||
return "%s(%s)" % (typ.name, ", ".join(
|
return "%s(%s)" % (typ.name, ", ".join(
|
||||||
["%s=%s" % (k, self.name(typ.params[k], depth + 1)) for k in typ.params]))
|
["%s=%s" % (k, self.name(typ.params[k], depth + 1)) for k in typ.params]))
|
||||||
|
elif isinstance(typ, _TPointer):
|
||||||
|
return "{}*".format(self.name(typ["elt"], depth + 1))
|
||||||
elif isinstance(typ, TTuple):
|
elif isinstance(typ, TTuple):
|
||||||
if len(typ.elts) == 1:
|
if len(typ.elts) == 1:
|
||||||
return "(%s,)" % self.name(typ.elts[0], depth + 1)
|
return "(%s,)" % self.name(typ.elts[0], depth + 1)
|
||||||
else:
|
else:
|
||||||
return "(%s)" % ", ".join([self.name(typ, depth + 1) for typ in typ.elts])
|
return "(%s)" % ", ".join([self.name(typ, depth + 1) for typ in typ.elts])
|
||||||
elif isinstance(typ, (TFunction, TCFunction)):
|
elif isinstance(typ, (TFunction, TExternalFunction)):
|
||||||
args = []
|
args = []
|
||||||
args += [ "%s:%s" % (arg, self.name(typ.args[arg], depth + 1))
|
args += [ "%s:%s" % (arg, self.name(typ.args[arg], depth + 1))
|
||||||
for arg in typ.args]
|
for arg in typ.args]
|
||||||
|
@ -736,14 +849,18 @@ class TypePrinter(object):
|
||||||
elif not (delay.is_fixed() and iodelay.is_zero(delay.duration)):
|
elif not (delay.is_fixed() and iodelay.is_zero(delay.duration)):
|
||||||
signature += " " + self.name(delay, depth + 1)
|
signature += " " + self.name(delay, depth + 1)
|
||||||
|
|
||||||
if isinstance(typ, TCFunction):
|
if isinstance(typ, TExternalFunction):
|
||||||
return "[ffi {}]{}".format(repr(typ.name), signature)
|
return "[ffi {}]{}".format(repr(typ.name), signature)
|
||||||
elif isinstance(typ, TFunction):
|
elif isinstance(typ, TFunction):
|
||||||
return signature
|
return signature
|
||||||
elif isinstance(typ, TRPC):
|
elif isinstance(typ, TRPC):
|
||||||
return "[rpc{} #{}](...)->{}".format(typ.service,
|
return "[rpc{} #{}](...)->{}".format(typ.service,
|
||||||
" async" if typ.async else "",
|
" async" if typ.is_async else "",
|
||||||
self.name(typ.ret, depth + 1))
|
self.name(typ.ret, depth + 1))
|
||||||
|
elif isinstance(typ, TSubkernel):
|
||||||
|
return "<subkernel{} dest#{}>->{}".format(typ.sid,
|
||||||
|
typ.destination,
|
||||||
|
self.name(typ.ret, depth + 1))
|
||||||
elif isinstance(typ, TBuiltinFunction):
|
elif isinstance(typ, TBuiltinFunction):
|
||||||
return "<function {}>".format(typ.name)
|
return "<function {}>".format(typ.name)
|
||||||
elif isinstance(typ, (TConstructor, TExceptionConstructor)):
|
elif isinstance(typ, (TConstructor, TExceptionConstructor)):
|
||||||
|
|
|
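Reviewer note on the rewritten is_mono() predicate above: the new version short-circuits on the name check and compares parameters one by one instead of accumulating a params_match flag. The following is a toy, self-contained sketch of that control flow, using stand-in classes rather than the real ARTIQ compiler types, just to trace the logic:

    # Toy stand-ins, not the ARTIQ classes: only enough structure to exercise
    # the rewritten is_mono() (early name check, per-parameter comparison).
    class Ty:
        def find(self):
            return self

    class TMono(Ty):
        def __init__(self, name, params=None):
            self.name, self.params = name, params or {}

    def is_mono(typ, name=None, **params):
        if not isinstance(typ, TMono):
            return False
        if name is not None and typ.name != name:
            return False
        for param in params:
            if param not in typ.params:
                return False
            if typ.params[param].find() != params[param].find():
                return False
        return True

    width32 = TMono("width32")
    print(is_mono(TMono("int", {"width": width32}), "int", width=width32))  # True
    print(is_mono(TMono("int", {"width": width32}), "float"))               # False: name check short-circuits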
artiq/compiler/validators/constness.py:

@@ -50,3 +50,9 @@ class ConstnessValidator(algorithm.Visitor):
                 node.loc)
             self.engine.process(diag)
             return
+        if builtins.is_array(typ):
+            diag = diagnostic.Diagnostic("error",
+                "array attributes cannot be assigned to",
+                {}, node.loc)
+            self.engine.process(diag)
+            return
artiq/compiler/validators/escape.py:

@@ -51,10 +51,6 @@ class Region:
                (other.range.begin_pos <= self.range.begin_pos <= other.range.end_pos and \
                    self.range.end_pos > other.range.end_pos)

-    def contract(self, other):
-        if not self.range:
-            self.range = other.range
-
     def outlives(lhs, rhs):
         if not isinstance(lhs, Region): # lhs lives nonlexically
             return True

@@ -69,8 +65,11 @@ class Region:

 class RegionOf(algorithm.Visitor):
     """
-    Visit an expression and return the list of regions that must
-    be alive for the expression to execute.
+    Visit an expression and return the region that must be alive for the
+    expression to execute.
+
+    For expressions involving multiple regions, the shortest-lived one is
+    returned.
     """

     def __init__(self, env_stack, youngest_region):

@@ -100,11 +99,23 @@ class RegionOf(algorithm.Visitor):
     visit_BinOpT = visit_sometimes_allocating

     def visit_CallT(self, node):
-        if types.is_c_function(node.func.type, "cache_get"):
+        if types.is_external_function(node.func.type, "cache_get"):
             # The cache is borrow checked dynamically
             return Global()
-        else:
-            self.visit_sometimes_allocating(node)
+
+        if (types.is_builtin_function(node.func.type, "array")
+                or types.is_builtin_function(node.func.type, "make_array")
+                or types.is_builtin_function(node.func.type, "numpy.transpose")):
+            # While lifetime tracking across function calls in general is currently
+            # broken (see below), these special builtins that allocate an array on
+            # the stack of the caller _always_ allocate regardless of the parameters,
+            # and we can thus handle them without running into the precision issue
+            # mentioned in commit ae999db.
+            return self.visit_allocating(node)
+
+        # FIXME: Return statement missing here, but see m-labs/artiq#1497 and
+        # commit ae999db.
+        self.visit_sometimes_allocating(node)

     # Value lives as long as the object/container, if it's mutable,
     # or else forever

@@ -157,7 +168,7 @@ class RegionOf(algorithm.Visitor):
     visit_NameConstantT = visit_immutable
     visit_NumT = visit_immutable
     visit_EllipsisT = visit_immutable
-    visit_UnaryOpT = visit_immutable
+    visit_UnaryOpT = visit_sometimes_allocating  # possibly array op
     visit_CompareT = visit_immutable

     # Value lives forever

@@ -301,17 +312,20 @@ class EscapeValidator(algorithm.Visitor):
     def visit_assignment(self, target, value):
         value_region = self._region_of(value)

-        # If this is a variable, we might need to contract the live range.
-        if isinstance(value_region, Region):
-            for name in self._names_of(target):
-                region = self._region_of(name)
-                if isinstance(region, Region):
-                    region.contract(value_region)
-
         # If we assign to an attribute of a quoted value, there will be no names
         # in the assignment lhs.
         target_names = self._names_of(target) or []

+        # Adopt the value region for any variables declared on the lhs.
+        for name in target_names:
+            region = self._region_of(name)
+            if isinstance(region, Region) and not region.present():
+                # Find the name's environment to overwrite the region.
+                for env in self.env_stack[::-1]:
+                    if name.id in env:
+                        env[name.id] = value_region
+                        break
+
         # The assigned value should outlive the assignee
         target_regions = [self._region_of(name) for name in target_names]
         for target_region in target_regions:
artiq/coredevice/ad53xx.py:

@@ -80,21 +80,30 @@ def ad53xx_cmd_read_ch(channel, op):
     return AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_READ | (op + (channel << 7))


+# maintain function definition for backward compatibility
 @portable
 def voltage_to_mu(voltage, offset_dacs=0x2000, vref=5.):
-    """Returns the DAC register value required to produce a given output
+    """Returns the 16-bit DAC register value required to produce a given output
     voltage, assuming offset and gain errors have been trimmed out.

+    The 16-bit register value may also be used with 14-bit DACs. The additional
+    bits are disregarded by 14-bit DACs.
+
     Also used to return offset register value required to produce a given
     voltage when the DAC register is set to mid-scale.
     An offset of V can be used to trim out a DAC offset error of -V.

-    :param voltage: Voltage
+    :param voltage: Voltage in SI units.
+        Valid voltages are: [-2*vref, + 2*vref - 1 LSB] + voltage offset.
     :param offset_dacs: Register value for the two offset DACs
         (default: 0x2000)
     :param vref: DAC reference voltage (default: 5.)
+    :return: The 16-bit DAC register value
     """
-    return int(round(0x10000*(voltage/(4.*vref)) + offset_dacs*0x4))
+    code = int(round((1 << 16) * (voltage / (4. * vref)) + offset_dacs * 0x4))
+    if code < 0x0 or code > 0xffff:
+        raise ValueError("Invalid DAC voltage!")
+    return code


 class _DummyTTL:

@@ -118,9 +127,9 @@ class AD53xx:
         transactions (default: 1)
     :param div_write: SPI clock divider for write operations (default: 4,
         50MHz max SPI clock with {t_high, t_low} >=8ns)
-    :param div_read: SPI clock divider for read operations (default: 8, not
-        optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO
-        valid)
+    :param div_read: SPI clock divider for read operations (default: 16, not
+        optimized for speed; datasheet says t22: 25ns min SCLK edge to SDO
+        valid, and suggests the SPI speed for reads should be <=20 MHz)
     :param vref: DAC reference voltage (default: 5.)
     :param offset_dacs: Initial register value for the two offset DACs, device
         dependent and must be set correctly for correct voltage to mu

@@ -169,6 +178,8 @@ class AD53xx:
         self.write_offset_dacs_mu(self.offset_dacs)
         if not blind:
             ctrl = self.read_reg(channel=0, op=AD53XX_READ_CONTROL)
+            if ctrl == 0xffff:
+                raise ValueError("DAC not found")
             if ctrl & 0b10000:
                 raise ValueError("DAC over temperature")
             delay(25*us)

@@ -176,7 +187,7 @@ class AD53xx:
                        (AD53XX_CMD_SPECIAL | AD53XX_SPECIAL_CONTROL | 0b0010) << 8)
         if not blind:
             ctrl = self.read_reg(channel=0, op=AD53XX_READ_CONTROL)
-            if ctrl != 0b0010:
+            if (ctrl & 0b10111) != 0b00010:
                 raise ValueError("DAC CONTROL readback mismatch")
             delay(15*us)

@@ -222,7 +233,7 @@ class AD53xx:
     def write_gain_mu(self, channel, gain=0xffff):
         """Program the gain register for a DAC channel.

-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.

         :param gain: 16-bit gain register value (default: 0xffff)

@@ -234,7 +245,7 @@ class AD53xx:
     def write_offset_mu(self, channel, offset=0x8000):
         """Program the offset register for a DAC channel.

-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.

         :param offset: 16-bit offset register value (default: 0x8000)

@@ -247,7 +258,7 @@ class AD53xx:
         """Program the DAC offset voltage for a channel.

         An offset of +V can be used to trim out a DAC offset error of -V.
-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.

         :param voltage: the offset voltage

@@ -259,7 +270,7 @@ class AD53xx:
     def write_dac_mu(self, channel, value):
         """Program the DAC input register for a channel.

-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
         """
         self.bus.write(

@@ -269,7 +280,7 @@ class AD53xx:
     def write_dac(self, channel, voltage):
         """Program the DAC output voltage for a channel.

-        The DAC output is not updated until LDAC is pulsed (see :meth load:).
+        The DAC output is not updated until LDAC is pulsed (see :meth:`load`).
         This method advances the timeline by the duration of one SPI transfer.
         """
         self.write_dac_mu(channel, voltage_to_mu(voltage, self.offset_dacs,

@@ -302,7 +313,7 @@ class AD53xx:

         If no LDAC device was defined, the LDAC pulse is skipped.

-        See :meth load:.
+        See :meth:`load`.

         :param values: list of DAC values to program
         :param channels: list of DAC channels to program. If not specified,

@@ -344,7 +355,7 @@ class AD53xx:
         """ Two-point calibration of a DAC channel.

         Programs the offset and gain register to trim out DAC errors. Does not
-        take effect until LDAC is pulsed (see :meth load:).
+        take effect until LDAC is pulsed (see :meth:`load`).

         Calibration consists of measuring the DAC output voltage for a channel
         with the DAC set to zero-scale (0x0000) and full-scale (0xffff).

@@ -366,3 +377,17 @@ class AD53xx:
         self.core.break_realtime()
         self.write_offset_mu(channel, 0x8000-offset_err)
         self.write_gain_mu(channel, 0xffff-gain_err)
+
+    @portable
+    def voltage_to_mu(self, voltage):
+        """Returns the 16-bit DAC register value required to produce a given
+        output voltage, assuming offset and gain errors have been trimmed out.
+
+        The 16-bit register value may also be used with 14-bit DACs. The
+        additional bits are disregarded by 14-bit DACs.
+
+        :param voltage: Voltage in SI units.
+            Valid voltages are: [-2*vref, + 2*vref - 1 LSB] + voltage offset.
+        :return: The 16-bit DAC register value
+        """
+        return voltage_to_mu(voltage, self.offset_dacs, self.vref)
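Worked check of the updated voltage_to_mu() formula above: with the defaults offset_dacs=0x2000 and vref=5.0 V, 0 V maps to mid-scale 0x8000, the -2*vref end of the span maps to 0x0000, and anything outside the 16-bit range now raises ValueError instead of silently wrapping. The snippet below is a host-side Python sketch that simply mirrors the diffed helper, no hardware needed:

    def voltage_to_mu(voltage, offset_dacs=0x2000, vref=5.):
        # mirrors the updated coredevice helper shown in the diff above
        code = int(round((1 << 16) * (voltage / (4. * vref)) + offset_dacs * 0x4))
        if code < 0x0 or code > 0xffff:
            raise ValueError("Invalid DAC voltage!")
        return code

    assert voltage_to_mu(0.0) == 0x8000      # mid-scale with the default offset DACs
    assert voltage_to_mu(-10.0) == 0x0000    # -2*vref sits at the bottom of the range
    print(hex(voltage_to_mu(2.5)))           # 0xa000: 2.5 V is 1/8 of full span above mid-scale
    # voltage_to_mu(10.0) would raise ValueError: 0x10000 exceeds the 16-bit range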
(File diff suppressed because it is too large.)

Deleted file (kernel interface to the AD9154 DAC):

@@ -1,23 +0,0 @@
-from artiq.language.core import kernel
-
-
-class AD9154:
-    """Kernel interface to AD9154 registers, using non-realtime SPI."""
-
-    def __init__(self, dmgr, spi_device, chip_select):
-        self.core = dmgr.get("core")
-        self.bus = dmgr.get(spi_device)
-        self.chip_select = chip_select
-
-    @kernel
-    def setup_bus(self, div=16):
-        self.bus.set_config_mu(0, 24, div, self.chip_select)
-
-    @kernel
-    def write(self, addr, data):
-        self.bus.write((addr << 16) | (data<< 8))
-
-    @kernel
-    def read(self, addr):
-        self.write((1 << 15) | addr, 0)
-        return self.bus.read()
(File diff suppressed because it is too large.)

artiq/coredevice/ad9912.py:

@@ -1,7 +1,8 @@
 from numpy import int32, int64

+from artiq.language.types import TInt32, TInt64, TFloat, TTuple, TBool
 from artiq.language.core import kernel, delay, portable
-from artiq.language.units import us, ns
+from artiq.language.units import ms, us, ns
 from artiq.coredevice.ad9912_reg import *

 from artiq.coredevice import spi2 as spi

@@ -21,14 +22,17 @@ class AD9912:
     :param sw_device: Name of the RF switch device. The RF switch is a
         TTLOut channel available as the :attr:`sw` attribute of this instance.
     :param pll_n: DDS PLL multiplier. The DDS sample clock is
-        f_ref*pll_n where f_ref is the reference frequency (set in the parent
-        Urukul CPLD instance).
+        f_ref/clk_div*pll_n where f_ref is the reference frequency and clk_div
+        is the reference clock divider (both set in the parent Urukul CPLD
+        instance).
+    :param pll_en: PLL enable bit, set to 0 to bypass PLL (default: 1).
+        Note that when bypassing the PLL the red front panel LED may remain on.
     """
-    kernel_invariants = {"chip_select", "cpld", "core", "bus",
-                         "ftw_per_hz", "sysclk", "pll_n"}

     def __init__(self, dmgr, chip_select, cpld_device, sw_device=None,
-                 pll_n=10):
+                 pll_n=10, pll_en=1):
+        self.kernel_invariants = {"cpld", "core", "bus", "chip_select",
+                                  "pll_n", "pll_en", "ftw_per_hz"}
         self.cpld = dmgr.get(cpld_device)
         self.core = self.cpld.core
         self.bus = self.cpld.bus

@@ -37,13 +41,17 @@ class AD9912:
         if sw_device:
             self.sw = dmgr.get(sw_device)
             self.kernel_invariants.add("sw")
+        self.pll_en = pll_en
         self.pll_n = pll_n
-        self.sysclk = self.cpld.refclk*pll_n
-        assert self.sysclk <= 1e9
-        self.ftw_per_hz = 1/self.sysclk*(int64(1) << 48)
+        if pll_en:
+            sysclk = self.cpld.refclk / [1, 1, 2, 4][self.cpld.clk_div] * pll_n
+        else:
+            sysclk = self.cpld.refclk
+        assert sysclk <= 1e9
+        self.ftw_per_hz = 1 / sysclk * (int64(1) << 48)

     @kernel
-    def write(self, addr, data, length):
+    def write(self, addr: TInt32, data: TInt32, length: TInt32):
         """Variable length write to a register.
         Up to 4 bytes.

@@ -54,14 +62,14 @@ class AD9912:
         assert length > 0
         assert length <= 4
         self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
                                urukul.SPIT_DDS_WR, self.chip_select)
         self.bus.write((addr | ((length - 1) << 13)) << 16)
-        self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, length*8,
+        self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, length * 8,
                                urukul.SPIT_DDS_WR, self.chip_select)
-        self.bus.write(data << (32 - length*8))
+        self.bus.write(data << (32 - length * 8))

     @kernel
-    def read(self, addr, length):
+    def read(self, addr: TInt32, length: TInt32) -> TInt32:
         """Variable length read from a register.
         Up to 4 bytes.

@@ -72,15 +80,15 @@ class AD9912:
         assert length > 0
         assert length <= 4
         self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
                                urukul.SPIT_DDS_WR, self.chip_select)
         self.bus.write((addr | ((length - 1) << 13) | 0x8000) << 16)
         self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END
-                               | spi.SPI_INPUT, length*8,
+                               | spi.SPI_INPUT, length * 8,
                                urukul.SPIT_DDS_RD, self.chip_select)
         self.bus.write(0)
         data = self.bus.read()
         if length < 4:
-            data &= (1 << (length*8)) - 1
+            data &= (1 << (length * 8)) - 1
         return data

     @kernel

@@ -93,25 +101,30 @@ class AD9912:
         """
         # SPI mode
         self.write(AD9912_SER_CONF, 0x99, length=1)
-        self.cpld.io_update.pulse(2*us)
+        self.cpld.io_update.pulse(2 * us)
         # Verify chip ID and presence
         prodid = self.read(AD9912_PRODIDH, length=2)
         if (prodid != 0x1982) and (prodid != 0x1902):
             raise ValueError("Urukul AD9912 product id mismatch")
-        delay(50*us)
+        delay(50 * us)
         # HSTL power down, CMOS power down
-        self.write(AD9912_PWRCNTRL1, 0x80, length=1)
-        self.cpld.io_update.pulse(2*us)
-        self.write(AD9912_N_DIV, self.pll_n//2 - 2, length=1)
-        self.cpld.io_update.pulse(2*us)
-        # I_cp = 375 µA, VCO high range
-        self.write(AD9912_PLLCFG, 0b00000101, length=1)
-        self.cpld.io_update.pulse(2*us)
+        pwrcntrl1 = 0x80 | ((~self.pll_en & 1) << 4)
+        self.write(AD9912_PWRCNTRL1, pwrcntrl1, length=1)
+        self.cpld.io_update.pulse(2 * us)
+        if self.pll_en:
+            self.write(AD9912_N_DIV, self.pll_n // 2 - 2, length=1)
+            self.cpld.io_update.pulse(2 * us)
+            # I_cp = 375 µA, VCO high range
+            self.write(AD9912_PLLCFG, 0b00000101, length=1)
+            self.cpld.io_update.pulse(2 * us)
+        delay(1 * ms)

     @kernel
-    def set_att_mu(self, att):
+    def set_att_mu(self, att: TInt32):
         """Set digital step attenuator in machine units.

+        This method will write the attenuator settings of all four channels.
+
         .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att_mu`

         :param att: Attenuation setting, 8 bit digital.

@@ -119,9 +132,11 @@ class AD9912:
         self.cpld.set_att_mu(self.chip_select - 4, att)

     @kernel
-    def set_att(self, att):
+    def set_att(self, att: TFloat):
         """Set digital step attenuator in SI units.

+        This method will write the attenuator settings of all four channels.
+
         .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.set_att`

         :param att: Attenuation in dB. Higher values mean more attenuation.

@@ -129,62 +144,125 @@ class AD9912:
         self.cpld.set_att(self.chip_select - 4, att)

     @kernel
-    def set_mu(self, ftw, pow):
+    def get_att_mu(self) -> TInt32:
+        """Get digital step attenuator value in machine units.
+
+        .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att_mu`
+
+        :return: Attenuation setting, 8 bit digital.
+        """
+        return self.cpld.get_channel_att_mu(self.chip_select - 4)
+
+    @kernel
+    def get_att(self) -> TFloat:
+        """Get digital step attenuator value in SI units.
+
+        .. seealso:: :meth:`artiq.coredevice.urukul.CPLD.get_channel_att`
+
+        :return: Attenuation in dB.
+        """
+        return self.cpld.get_channel_att(self.chip_select - 4)
+
+    @kernel
+    def set_mu(self, ftw: TInt64, pow_: TInt32 = 0):
         """Set profile 0 data in machine units.

         After the SPI transfer, the shared IO update pin is pulsed to
         activate the data.

-        :param ftw: Frequency tuning word: 32 bit unsigned.
-        :param pow: Phase tuning word: 16 bit unsigned.
+        :param ftw: Frequency tuning word: 48 bit unsigned.
+        :param pow_: Phase tuning word: 16 bit unsigned.
         """
         # streaming transfer of FTW and POW
         self.bus.set_config_mu(urukul.SPI_CONFIG, 16,
                                urukul.SPIT_DDS_WR, self.chip_select)
         self.bus.write((AD9912_POW1 << 16) | (3 << 29))
         self.bus.set_config_mu(urukul.SPI_CONFIG, 32,
                                urukul.SPIT_DDS_WR, self.chip_select)
-        self.bus.write((pow << 16) | (int32(ftw >> 32) & 0xffff))
+        self.bus.write((pow_ << 16) | (int32(ftw >> 32) & 0xffff))
         self.bus.set_config_mu(urukul.SPI_CONFIG | spi.SPI_END, 32,
                                urukul.SPIT_DDS_WR, self.chip_select)
         self.bus.write(int32(ftw))
-        self.cpld.io_update.pulse(10*ns)
+        self.cpld.io_update.pulse(10 * ns)
+
+    @kernel
+    def get_mu(self) -> TTuple([TInt64, TInt32]):
+        """Get the frequency tuning word and phase offset word.
+
+        .. seealso:: :meth:`get`
+
+        :return: A tuple ``(ftw, pow)``.
+        """
+
+        # Read data
+        high = self.read(AD9912_POW1, 4)
+        self.core.break_realtime()  # Regain slack to perform second read
+        low = self.read(AD9912_FTW3, 4)
+        # Extract and return fields
+        ftw = (int64(high & 0xffff) << 32) | (int64(low) & int64(0xffffffff))
+        pow_ = (high >> 16) & 0x3fff
+        return ftw, pow_

     @portable(flags={"fast-math"})
-    def frequency_to_ftw(self, frequency):
-        """Returns the frequency tuning word corresponding to the given
+    def frequency_to_ftw(self, frequency: TFloat) -> TInt64:
+        """Returns the 48-bit frequency tuning word corresponding to the given
         frequency.
         """
-        return int64(round(self.ftw_per_hz*frequency))
+        return int64(round(self.ftw_per_hz * frequency)) & (
+                (int64(1) << 48) - 1)

     @portable(flags={"fast-math"})
-    def ftw_to_frequency(self, ftw):
+    def ftw_to_frequency(self, ftw: TInt64) -> TFloat:
         """Returns the frequency corresponding to the given
         frequency tuning word.
         """
-        return ftw/self.ftw_per_hz
+        return ftw / self.ftw_per_hz

     @portable(flags={"fast-math"})
-    def turns_to_pow(self, phase):
-        """Returns the phase offset word corresponding to the given
+    def turns_to_pow(self, phase: TFloat) -> TInt32:
+        """Returns the 16-bit phase offset word corresponding to the given
         phase.
         """
-        return int32(round((1 << 14)*phase))
+        return int32(round((1 << 14) * phase)) & 0xffff
+
+    @portable(flags={"fast-math"})
+    def pow_to_turns(self, pow_: TInt32) -> TFloat:
+        """Return the phase in turns corresponding to a given phase offset
+        word.
+
+        :param pow_: Phase offset word.
+        :return: Phase in turns.
+        """
+        return pow_ / (1 << 14)

     @kernel
-    def set(self, frequency, phase=0.0):
+    def set(self, frequency: TFloat, phase: TFloat = 0.0):
         """Set profile 0 data in SI units.

         .. seealso:: :meth:`set_mu`

-        :param ftw: Frequency in Hz
-        :param pow: Phase tuning word in turns
+        :param frequency: Frequency in Hz
+        :param phase: Phase tuning word in turns
         """
         self.set_mu(self.frequency_to_ftw(frequency),
                     self.turns_to_pow(phase))

     @kernel
-    def cfg_sw(self, state):
+    def get(self) -> TTuple([TFloat, TFloat]):
+        """Get the frequency and phase.
+
+        .. seealso:: :meth:`get_mu`
+
+        :return: A tuple ``(frequency, phase)``.
+        """
+
+        # Get values
+        ftw, pow_ = self.get_mu()
+        # Convert and return
+        return self.ftw_to_frequency(ftw), self.pow_to_turns(pow_)
+
+    @kernel
+    def cfg_sw(self, state: TBool):
         """Set CPLD CFG RF switch state. The RF switch is controlled by the
         logical or of the CPLD configuration shift register
         RF switch bit and the SW TTL line (if used).
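Quick arithmetic check of the 48-bit FTW and 14-bit POW conversions introduced above. This is a host-side Python sketch only; it assumes a 1 GHz DDS system clock (a common Urukul/AD9912 configuration) and simply mirrors the diffed conversion helpers:

    from numpy import int64

    sysclk = 1e9                                  # assumed DDS system clock, Hz
    ftw_per_hz = 1 / sysclk * (int64(1) << 48)

    def frequency_to_ftw(frequency):
        # mirrors AD9912.frequency_to_ftw from the diff above
        return int64(round(ftw_per_hz * frequency)) & ((int64(1) << 48) - 1)

    def turns_to_pow(phase):
        # mirrors AD9912.turns_to_pow: 14-bit phase word, masked to 16 bits
        return int(round((1 << 14) * phase)) & 0xffff

    ftw = frequency_to_ftw(100e6)                 # 100 MHz -> 2**48/10, rounded
    print(ftw, ftw / ftw_per_hz)                  # 28147497671066, ~100 MHz back
    print(turns_to_pow(0.25))                     # 4096, a quarter turn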
artiq/coredevice/ad9914.py:

@@ -80,11 +80,16 @@ class AD9914:
         self.set_x_duration_mu = 7 * self.write_duration_mu
         self.exit_x_duration_mu = 3 * self.write_duration_mu

-        self.continuous_phase_comp = 0
+    @staticmethod
+    def get_rtio_channels(bus_channel, channel, **kwargs):
+        # return only first entry, as there are several devices with the same RTIO channel
+        if channel == 0:
+            return [(bus_channel, None)]
+        return []

     @kernel
     def write(self, addr, data):
-        rtio_output(now_mu(), self.bus_channel, addr, data)
+        rtio_output((self.bus_channel << 8) | addr, data)
         delay_mu(self.write_duration_mu)

     @kernel

@@ -175,12 +180,16 @@ class AD9914:
         accumulator is set to the value it would have if the DDS had been
         running at the specified frequency since the start of the
         experiment.
+
+        .. warning:: This setting may become inconsistent when used as part of
+            a DMA recording. When using DMA, it is recommended to specify the
+            phase mode explicitly when calling :meth:`set` or :meth:`set_mu`.
         """
         self.phase_mode = phase_mode

     @kernel
     def set_mu(self, ftw, pow=0, phase_mode=_PHASE_MODE_DEFAULT,
-               asf=0x0fff, ref_time=-1):
+               asf=0x0fff, ref_time_mu=-1):
         """Sets the DDS channel to the specified frequency and phase.

         This uses machine units (FTW and POW). The frequency tuning word width

@@ -194,14 +203,17 @@ class AD9914:
         :param pow: adds an offset to the phase.
         :param phase_mode: if specified, overrides the default phase mode set
             by :meth:`set_phase_mode` for this call.
-        :param ref_time: reference time used to compute phase. Specifying this
+        :param ref_time_mu: reference time used to compute phase. Specifying this
             makes it easier to have a well-defined phase relationship between
             DDSes on the same bus that are updated at a similar time.
+        :return: Resulting phase offset word after application of phase
+            tracking offset. When using :const:`PHASE_MODE_CONTINUOUS` in
+            subsequent calls, use this value as the "current" phase.
         """
         if phase_mode == _PHASE_MODE_DEFAULT:
             phase_mode = self.phase_mode
-        if ref_time < 0:
-            ref_time = now_mu()
+        if ref_time_mu < 0:
+            ref_time_mu = now_mu()
         delay_mu(-self.set_duration_mu)

         self.write(AD9914_GPIO, (1 << self.channel) << 1)

@@ -215,27 +227,26 @@ class AD9914:
             # Do not clear phase accumulator on FUD
             # Disable autoclear phase accumulator and enables OSK.
             self.write(AD9914_REG_CFR1L, 0x0108)
-            pow += self.continuous_phase_comp
         else:
             # Clear phase accumulator on FUD
             # Enable autoclear phase accumulator and enables OSK.
             self.write(AD9914_REG_CFR1L, 0x2108)
             fud_time = now_mu() + 2 * self.write_duration_mu
-            pow -= int32((ref_time - fud_time) * self.sysclk_per_mu * ftw >> (32 - 16))
+            pow -= int32((ref_time_mu - fud_time) * self.sysclk_per_mu * ftw >> (32 - 16))
             if phase_mode == PHASE_MODE_TRACKING:
-                pow += int32(ref_time * self.sysclk_per_mu * ftw >> (32 - 16))
-            self.continuous_phase_comp = pow
+                pow += int32(ref_time_mu * self.sysclk_per_mu * ftw >> (32 - 16))

         self.write(AD9914_REG_POW, pow)
         self.write(AD9914_REG_ASF, asf)
         self.write(AD9914_FUD, 0)
+        return pow

     @portable(flags={"fast-math"})
     def frequency_to_ftw(self, frequency):
-        """Returns the frequency tuning word corresponding to the given
+        """Returns the 32-bit frequency tuning word corresponding to the given
         frequency.
         """
-        return round(float(int64(2)**32*frequency/self.sysclk))
+        return int32(round(float(int64(2)**32*frequency/self.sysclk)))

     @portable(flags={"fast-math"})
     def ftw_to_frequency(self, ftw):

@@ -246,9 +257,9 @@ class AD9914:

     @portable(flags={"fast-math"})
     def turns_to_pow(self, turns):
-        """Returns the phase offset word corresponding to the given phase
-        in turns."""
-        return round(float(turns*2**16))
+        """Returns the 16-bit phase offset word corresponding to the given
+        phase in turns."""
+        return round(float(turns*2**16)) & 0xffff

     @portable(flags={"fast-math"})
     def pow_to_turns(self, pow):

@@ -258,8 +269,12 @@ class AD9914:

     @portable(flags={"fast-math"})
     def amplitude_to_asf(self, amplitude):
-        """Returns amplitude scale factor corresponding to given amplitude."""
-        return round(float(amplitude*0x0fff))
+        """Returns 12-bit amplitude scale factor corresponding to given
+        amplitude."""
+        code = round(float(amplitude * 0x0fff))
+        if code < 0 or code > 0xfff:
+            raise ValueError("Invalid AD9914 amplitude!")
+        return code

     @portable(flags={"fast-math"})
     def asf_to_amplitude(self, asf):

@@ -271,9 +286,10 @@ class AD9914:
     def set(self, frequency, phase=0.0, phase_mode=_PHASE_MODE_DEFAULT,
             amplitude=1.0):
         """Like :meth:`set_mu`, but uses Hz and turns."""
-        self.set_mu(self.frequency_to_ftw(frequency),
+        return self.pow_to_turns(
+            self.set_mu(self.frequency_to_ftw(frequency),
                     self.turns_to_pow(phase), phase_mode,
-                    self.amplitude_to_asf(amplitude))
+                    self.amplitude_to_asf(amplitude)))

     # Extended-resolution functions
     @kernel

@@ -309,10 +325,11 @@ class AD9914:

     @portable(flags={"fast-math"})
     def frequency_to_xftw(self, frequency):
-        """Returns the frequency tuning word corresponding to the given
+        """Returns the 63-bit frequency tuning word corresponding to the given
         frequency (extended resolution mode).
         """
-        return int64(round(2.0*float(int64(2)**62)*frequency/self.sysclk))
+        return int64(round(2.0*float(int64(2)**62)*frequency/self.sysclk)) & (
+                (int64(1) << 63) - 1)

     @portable(flags={"fast-math"})
     def xftw_to_frequency(self, xftw):
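Because the continuous_phase_comp attribute is removed above, the driver no longer tracks the "current" phase itself; the caller keeps the phase offset word returned by set_mu()/set() and passes it back in. A hedged kernel-side sketch of that usage (the "dds" device name is invented for illustration and assumes an AD9914 channel in the device database):

    from artiq.experiment import EnvExperiment, kernel, delay, MHz, ms
    from artiq.coredevice.ad9914 import PHASE_MODE_CONTINUOUS

    class ContinuousPhase(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("dds")    # an AD9914 channel, name assumed

        @kernel
        def run(self):
            self.core.reset()
            # First programming: keep the returned phase offset word.
            pow = self.dds.set_mu(self.dds.frequency_to_ftw(10*MHz),
                                  phase_mode=PHASE_MODE_CONTINUOUS)
            delay(1*ms)
            # Later updates pass the previous word back in, instead of relying
            # on the removed self.continuous_phase_comp attribute.
            pow = self.dds.set_mu(self.dds.frequency_to_ftw(12*MHz),
                                  pow=pow, phase_mode=PHASE_MODE_CONTINUOUS)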
@@ -0,0 +1,598 @@
"""RTIO driver for the Analog Devices ADF[45]35[56] family of GHz PLLs
on Mirny-style prefixed SPI buses.
"""

# https://github.com/analogdevicesinc/linux/blob/master/Documentation/devicetree/bindings/iio/frequency/adf5355.txt
# https://github.com/analogdevicesinc/linux/blob/master/drivers/iio/frequency/adf5355.c
# https://www.analog.com/media/en/technical-documentation/data-sheets/ADF5355.pdf
# https://www.analog.com/media/en/technical-documentation/data-sheets/ADF5355.pdf
# https://www.analog.com/media/en/technical-documentation/user-guides/EV-ADF5355SD1Z-UG-1087.pdf

from artiq.language.core import kernel, portable, delay
from artiq.language.units import us, GHz, MHz
from artiq.language.types import TInt32, TInt64
from artiq.coredevice import spi2 as spi
from artiq.coredevice.adf5356_reg import *

from numpy import int32, int64, floor, ceil


SPI_CONFIG = (
    0 * spi.SPI_OFFLINE
    | 0 * spi.SPI_END
    | 0 * spi.SPI_INPUT
    | 1 * spi.SPI_CS_POLARITY
    | 0 * spi.SPI_CLK_POLARITY
    | 0 * spi.SPI_CLK_PHASE
    | 0 * spi.SPI_LSB_FIRST
    | 0 * spi.SPI_HALF_DUPLEX
)


ADF5356_MIN_VCO_FREQ = int64(3.4 * GHz)
ADF5356_MAX_VCO_FREQ = int64(6.8 * GHz)
ADF5356_MAX_FREQ_PFD = int32(125.0 * MHz)
ADF5356_MODULUS1 = int32(1 << 24)
ADF5356_MAX_MODULUS2 = int32(1 << 28)  # FIXME: ADF5356 has 28 bits MOD2
ADF5356_MAX_R_CNT = int32(1023)


class ADF5356:
    """Analog Devices AD[45]35[56] family of GHz PLLs.

    :param cpld_device: Mirny CPLD device name
    :param sw_device: Mirny RF switch device name
    :param channel: Mirny RF channel index
    :param ref_doubler: enable/disable reference clock doubler
    :param ref_divider: enable/disable reference clock divide-by-2
    :param core_device: Core device name (default: "core")
    """

    kernel_invariants = {"cpld", "sw", "channel", "core", "sysclk"}

    def __init__(
        self,
        dmgr,
        cpld_device,
        sw_device,
        channel,
        ref_doubler=False,
        ref_divider=False,
        core="core",
    ):
        self.cpld = dmgr.get(cpld_device)
        self.sw = dmgr.get(sw_device)
        self.channel = channel
        self.core = dmgr.get(core)

        self.ref_doubler = ref_doubler
        self.ref_divider = ref_divider
        self.sysclk = self.cpld.refclk
        assert 10 <= self.sysclk / 1e6 <= 600

        self._init_registers()

    @staticmethod
    def get_rtio_channels(**kwargs):
        return []

    @kernel
    def init(self, blind=False):
        """
        Initialize and configure the PLL.

        :param blind: Do not attempt to verify presence.
        """
        if not blind:
            # MUXOUT = VDD
            self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 1)
            self.sync()
            delay(1000 * us)
            if not self.read_muxout():
                raise ValueError("MUXOUT not high")
            delay(800 * us)

            # MUXOUT = DGND
            self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 2)
            self.sync()
            delay(1000 * us)
            if self.read_muxout():
                raise ValueError("MUXOUT not low")
            delay(800 * us)

            # MUXOUT = digital lock-detect
            self.regs[4] = ADF5356_REG4_MUXOUT_UPDATE(self.regs[4], 6)
        else:
            self.sync()

    @kernel
    def set_att(self, att):
        """Set digital step attenuator in SI units.

        This method will write the attenuator settings of the channel.

        .. seealso:: :meth:`artiq.coredevice.mirny.Mirny.set_att`

        :param att: Attenuation in dB.
        """
        self.cpld.set_att(self.channel, att)

    @kernel
    def set_att_mu(self, att):
        """Set digital step attenuator in machine units.

        :param att: Attenuation setting, 8 bit digital.
        """
        self.cpld.set_att_mu(self.channel, att)

    @kernel
    def write(self, data):
        self.cpld.write_ext(self.channel | 4, 32, data)

    @kernel
    def read_muxout(self):
        """
        Read the state of the MUXOUT line.

        By default, this is configured to be the digital lock detection.
        """
        return bool(self.cpld.read_reg(0) & (1 << (self.channel + 8)))

    @kernel
    def set_output_power_mu(self, n):
        """
        Set the power level at output A of the PLL chip in machine units.

        This driver defaults to `n = 3` at init.

        :param n: output power setting, 0, 1, 2, or 3 (see ADF5356 datasheet, fig. 44).
        """
        if n not in [0, 1, 2, 3]:
            raise ValueError("invalid power setting")
        self.regs[6] = ADF5356_REG6_RF_OUTPUT_A_POWER_UPDATE(self.regs[6], n)
        self.sync()

    @portable
    def output_power_mu(self):
        """
        Return the power level at output A of the PLL chip in machine units.
        """
        return ADF5356_REG6_RF_OUTPUT_A_POWER_GET(self.regs[6])

    @kernel
    def enable_output(self):
        """
        Enable output A of the PLL chip. This is the default after init.
        """
        self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_ENABLE(1)
        self.sync()

    @kernel
    def disable_output(self):
        """
        Disable output A of the PLL chip.
        """
        self.regs[6] &= ~ADF5356_REG6_RF_OUTPUT_A_ENABLE(1)
        self.sync()

    @kernel
    def set_frequency(self, f):
        """
        Output given frequency on output A.

        :param f: 53.125 MHz <= f <= 6800 MHz
        """
        freq = int64(round(f))

        if freq > ADF5356_MAX_VCO_FREQ:
            raise ValueError("Requested too high frequency")

        # select minimal output divider
        rf_div_sel = 0
        while freq < ADF5356_MIN_VCO_FREQ:
            freq <<= 1
            rf_div_sel += 1

        if (1 << rf_div_sel) > 64:
            raise ValueError("Requested too low frequency")

        # choose reference divider that maximizes PFD frequency
        self.regs[4] = ADF5356_REG4_R_COUNTER_UPDATE(
            self.regs[4], self._compute_reference_counter()
        )
        f_pfd = self.f_pfd()

        # choose prescaler
        if freq > int64(6e9):
            self.regs[0] |= ADF5356_REG0_PRESCALER(1)  # 8/9
            n_min, n_max = 75, 65535

            # adjust reference divider to be able to match n_min constraint
            while n_min * f_pfd > freq:
                r = ADF5356_REG4_R_COUNTER_GET(self.regs[4])
                self.regs[4] = ADF5356_REG4_R_COUNTER_UPDATE(self.regs[4], r + 1)
                f_pfd = self.f_pfd()
        else:
            self.regs[0] &= ~ADF5356_REG0_PRESCALER(1)  # 4/5
            n_min, n_max = 23, 32767

        # calculate PLL parameters
        n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb) = calculate_pll(
            freq, f_pfd
        )

        if not (n_min <= n <= n_max):
            raise ValueError("Invalid INT value")

        # configure PLL
        self.regs[0] = ADF5356_REG0_INT_VALUE_UPDATE(self.regs[0], n)
        self.regs[1] = ADF5356_REG1_MAIN_FRAC_VALUE_UPDATE(self.regs[1], frac1)
        self.regs[2] = ADF5356_REG2_AUX_FRAC_LSB_VALUE_UPDATE(self.regs[2], frac2_lsb)
        self.regs[2] = ADF5356_REG2_AUX_MOD_LSB_VALUE_UPDATE(self.regs[2], mod2_lsb)
        self.regs[13] = ADF5356_REG13_AUX_FRAC_MSB_VALUE_UPDATE(
            self.regs[13], frac2_msb
        )
        self.regs[13] = ADF5356_REG13_AUX_MOD_MSB_VALUE_UPDATE(self.regs[13], mod2_msb)

        self.regs[6] = ADF5356_REG6_RF_DIVIDER_SELECT_UPDATE(self.regs[6], rf_div_sel)
        self.regs[6] = ADF5356_REG6_CP_BLEED_CURRENT_UPDATE(
            self.regs[6], int32(floor(24 * f_pfd / (61.44 * MHz)))
        )
        self.regs[9] = ADF5356_REG9_VCO_BAND_DIVISION_UPDATE(
            self.regs[9], int32(ceil(f_pfd / 160e3))
        )

        # commit
        self.sync()

    @kernel
    def sync(self):
        """
        Write all registers to the device. Attempts to lock the PLL.
        """
        f_pfd = self.f_pfd()
        delay(200 * us)  # Slack

        if f_pfd <= 75.0 * MHz:
            for i in range(13, 0, -1):
                self.write(self.regs[i])
            delay(200 * us)
            self.write(self.regs[0] | ADF5356_REG0_AUTOCAL(1))
        else:
            # AUTOCAL AT HALF PFD FREQUENCY

            # calculate PLL at f_pfd/2
            n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb) = calculate_pll(
                self.f_vco(), f_pfd >> 1
            )
            delay(200 * us)  # Slack

            self.write(
                13
                | ADF5356_REG13_AUX_FRAC_MSB_VALUE(frac2_msb)
                | ADF5356_REG13_AUX_MOD_MSB_VALUE(mod2_msb)
            )

            for i in range(12, 4, -1):
                self.write(self.regs[i])

            self.write(
                ADF5356_REG4_R_COUNTER_UPDATE(self.regs[4], 2 * self.ref_counter())
            )

            self.write(self.regs[3])
            self.write(
                2
                | ADF5356_REG2_AUX_MOD_LSB_VALUE(mod2_lsb)
                | ADF5356_REG2_AUX_FRAC_LSB_VALUE(frac2_lsb)
            )
            self.write(1 | ADF5356_REG1_MAIN_FRAC_VALUE(frac1))

            delay(200 * us)
            self.write(ADF5356_REG0_INT_VALUE(n) | ADF5356_REG0_AUTOCAL(1))

            # RELOCK AT WANTED PFD FREQUENCY

            for i in [4, 2, 1]:
                self.write(self.regs[i])

            # force-disable autocal
            self.write(self.regs[0] & ~ADF5356_REG0_AUTOCAL(1))

    @portable
    def f_pfd(self) -> TInt64:
        """
        Return the PFD frequency for the cached set of registers.
        """
        r = ADF5356_REG4_R_COUNTER_GET(self.regs[4])
        d = ADF5356_REG4_R_DOUBLER_GET(self.regs[4])
        t = ADF5356_REG4_R_DIVIDER_GET(self.regs[4])
        return self._compute_pfd_frequency(r, d, t)

    @portable
    def f_vco(self) -> TInt64:
        """
        Return the VCO frequency for the cached set of registers.
        """
        return int64(
            self.f_pfd()
            * (
                self.pll_n()
                + (self.pll_frac1() + self.pll_frac2() / self.pll_mod2())
                / ADF5356_MODULUS1
            )
        )

    @portable
    def pll_n(self) -> TInt32:
        """
        Return the PLL integer value (INT) for the cached set of registers.
        """
        return ADF5356_REG0_INT_VALUE_GET(self.regs[0])

    @portable
    def pll_frac1(self) -> TInt32:
        """
        Return the main fractional value (FRAC1) for the cached set of registers.
        """
        return ADF5356_REG1_MAIN_FRAC_VALUE_GET(self.regs[1])

    @portable
    def pll_frac2(self) -> TInt32:
        """
        Return the auxiliary fractional value (FRAC2) for the cached set of registers.
        """
        return (
            ADF5356_REG13_AUX_FRAC_MSB_VALUE_GET(self.regs[13]) << 14
        ) | ADF5356_REG2_AUX_FRAC_LSB_VALUE_GET(self.regs[2])

    @portable
    def pll_mod2(self) -> TInt32:
        """
        Return the auxiliary modulus value (MOD2) for the cached set of registers.
        """
        return (
            ADF5356_REG13_AUX_MOD_MSB_VALUE_GET(self.regs[13]) << 14
        ) | ADF5356_REG2_AUX_MOD_LSB_VALUE_GET(self.regs[2])

    @portable
    def ref_counter(self) -> TInt32:
        """
        Return the reference counter value (R) for the cached set of registers.
        """
        return ADF5356_REG4_R_COUNTER_GET(self.regs[4])

    @portable
    def output_divider(self) -> TInt32:
        """
        Return the value of the output A divider.
        """
        return 1 << ADF5356_REG6_RF_DIVIDER_SELECT_GET(self.regs[6])

    def info(self):
        """
        Return a summary of high-level parameters as a dict.
        """
        prescaler = ADF5356_REG0_PRESCALER_GET(self.regs[0])
        return {
            # output
            "f_outA": self.f_vco() / self.output_divider(),
            "f_outB": self.f_vco() * 2,
            "output_divider": self.output_divider(),
            # PLL parameters
            "f_vco": self.f_vco(),
            "pll_n": self.pll_n(),
            "pll_frac1": self.pll_frac1(),
            "pll_frac2": self.pll_frac2(),
            "pll_mod2": self.pll_mod2(),
            "prescaler": "4/5" if prescaler == 0 else "8/9",
            # reference / PFD
            "sysclk": self.sysclk,
            "ref_doubler": self.ref_doubler,
            "ref_divider": self.ref_divider,
            "ref_counter": self.ref_counter(),
            "f_pfd": self.f_pfd(),
        }

    @portable
    def _init_registers(self):
        """
        Initialize cached registers with sensible defaults.
        """
        # fill with control bits
        self.regs = [int32(i) for i in range(ADF5356_NUM_REGS)]

        # REG2
        # ====

        # avoid divide-by-zero
        self.regs[2] |= ADF5356_REG2_AUX_MOD_LSB_VALUE(1)

        # REG4
        # ====

        # single-ended reference mode is recommended
        # for references up to 250 MHz, even if the signal is differential
        if self.sysclk <= 250 * MHz:
            self.regs[4] |= ADF5356_REG4_REF_MODE(0)
        else:
            self.regs[4] |= ADF5356_REG4_REF_MODE(1)

        # phase detector polarity: positive
        self.regs[4] |= ADF5356_REG4_PD_POLARITY(1)

        # charge pump current: 0.94 mA
        self.regs[4] |= ADF5356_REG4_CURRENT_SETTING(2)

        # MUXOUT: digital lock detect
        self.regs[4] |= ADF5356_REG4_MUX_LOGIC(1)  # 3v3 logic
        self.regs[4] |= ADF5356_REG4_MUXOUT(6)

        # setup reference path
        if self.ref_doubler:
            self.regs[4] |= ADF5356_REG4_R_DOUBLER(1)

        if self.ref_divider:
            self.regs[4] |= ADF5356_REG4_R_DIVIDER(1)

        r = self._compute_reference_counter()
        self.regs[4] |= ADF5356_REG4_R_COUNTER(r)

        # REG5
        # ====

        # reserved values
        self.regs[5] = int32(0x800025)

        # REG6
        # ====

        # reserved values
        self.regs[6] = int32(0x14000006)

        # enable negative bleed
        self.regs[6] |= ADF5356_REG6_NEGATIVE_BLEED(1)

        # charge pump bleed current
        self.regs[6] |= ADF5356_REG6_CP_BLEED_CURRENT(
            int32(floor(24 * self.f_pfd() / (61.44 * MHz)))
        )

        # direct feedback from VCO to N counter
        self.regs[6] |= ADF5356_REG6_FB_SELECT(1)

        # mute until the PLL is locked
        self.regs[6] |= ADF5356_REG6_MUTE_TILL_LD(1)

        # enable output A
        self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_ENABLE(1)

        # set output A power to max power, is adjusted by extra attenuator
        self.regs[6] |= ADF5356_REG6_RF_OUTPUT_A_POWER(3)  # +5 dBm

        # REG7
        # ====

        # reserved values
        self.regs[7] = int32(0x10000007)

        # sync load-enable to reference
        self.regs[7] |= ADF5356_REG7_LE_SYNC(1)

        # frac-N lock-detect precision: 12 ns
        self.regs[7] |= ADF5356_REG7_FRAC_N_LD_PRECISION(3)

        # REG8
        # ====

        # reserved values
        self.regs[8] = int32(0x102D0428)

        # REG9
        # ====

        # default timeouts (from eval software)
        self.regs[9] |= (
            ADF5356_REG9_SYNTH_LOCK_TIMEOUT(13)
            | ADF5356_REG9_AUTOCAL_TIMEOUT(31)
            | ADF5356_REG9_TIMEOUT(0x67)
        )

        self.regs[9] |= ADF5356_REG9_VCO_BAND_DIVISION(
            int32(ceil(self.f_pfd() / 160e3))
        )

        # REG10
        # =====

        # reserved values
        self.regs[10] = int32(0xC0000A)

        # ADC defaults (from eval software)
        self.regs[10] |= (
            ADF5356_REG10_ADC_ENABLE(1)
            | ADF5356_REG10_ADC_CLK_DIV(256)
            | ADF5356_REG10_ADC_CONV(1)
        )

        # REG11
        # =====

        # reserved values
        self.regs[11] = int32(0x61200B)

        # REG12
        # =====

        # reserved values
        self.regs[12] = int32(0x15FC)

    @portable
    def _compute_pfd_frequency(self, r, d, t) -> TInt64:
        """
        Calculate the PFD frequency from the given reference path parameters
        """
        return int64(self.sysclk * ((1 + d) / (r * (1 + t))))

    @portable
    def _compute_reference_counter(self) -> TInt32:
        """
        Determine the reference counter R that maximizes the PFD frequency
        """
        d = ADF5356_REG4_R_DOUBLER_GET(self.regs[4])
        t = ADF5356_REG4_R_DIVIDER_GET(self.regs[4])
        r = 1
        while self._compute_pfd_frequency(r, d, t) > ADF5356_MAX_FREQ_PFD:
            r += 1
        return int32(r)


@portable
def gcd(a, b):
    while b:
        a, b = b, a % b
    return a


@portable
def split_msb_lsb_28b(v):
    return int32((v >> 14) & 0x3FFF), int32(v & 0x3FFF)


@portable
def calculate_pll(f_vco: TInt64, f_pfd: TInt64):
    """
    Calculate fractional-N PLL parameters such that

    ``f_vco`` = ``f_pfd`` * (``n`` + (``frac1`` + ``frac2``/``mod2``) / ``mod1``)

    where
        ``mod1 = 2**24`` and ``mod2 <= 2**28``

    :param f_vco: target VCO frequency
    :param f_pfd: PFD frequency
    :return: ``(n, frac1, (frac2_msb, frac2_lsb), (mod2_msb, mod2_lsb))``
    """
    f_pfd = int64(f_pfd)
    f_vco = int64(f_vco)

    # integral part
    n, r = int32(f_vco // f_pfd), f_vco % f_pfd

    # main fractional part
    r *= ADF5356_MODULUS1
    frac1, frac2 = int32(r // f_pfd), r % f_pfd

    # auxiliary fractional part
    mod2 = f_pfd

    while mod2 > ADF5356_MAX_MODULUS2:
        mod2 >>= 1
        frac2 >>= 1

    gcd_div = gcd(frac2, mod2)
    mod2 //= gcd_div
    frac2 //= gcd_div

    return n, frac1, split_msb_lsb_28b(frac2), split_msb_lsb_28b(mod2)
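To make the relation documented in calculate_pll concrete (f_vco = f_pfd * (n + (frac1 + frac2/mod2) / mod1) with mod1 = 2**24), here is a standalone restatement of the decomposition in plain Python integers; it is only an illustration of the arithmetic, and the frequencies are example values, not hardware defaults.

# Host-side restatement of the fractional-N decomposition, no ARTIQ imports.
MOD1 = 1 << 24
MAX_MOD2 = 1 << 28

def decompose(f_vco, f_pfd):
    n, r = divmod(f_vco, f_pfd)          # integer part
    r *= MOD1
    frac1, frac2 = divmod(r, f_pfd)      # main fractional part
    mod2 = f_pfd                         # auxiliary modulus
    while mod2 > MAX_MOD2:
        mod2 >>= 1
        frac2 >>= 1
    return n, frac1, frac2, mod2

f_pfd = 100_000_000                      # example PFD frequency, 100 MHz
f_vco = 5_123_456_789                    # example VCO target, ~5.12 GHz
n, frac1, frac2, mod2 = decompose(f_vco, f_pfd)
f_back = f_pfd * (n + (frac1 + frac2 / mod2) / MOD1)
assert abs(f_back - f_vco) < 1           # reconstruction error well below 1 Hz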
@@ -0,0 +1,642 @@
|
||||||
|
# auto-generated, do not edit
|
||||||
|
from artiq.language.core import portable
|
||||||
|
from artiq.language.types import TInt32
|
||||||
|
from numpy import int32
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_AUTOCAL_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 21) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_AUTOCAL(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 21)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_AUTOCAL_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 21)) | ((x & 0x1) << 21))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_INT_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0xffff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_INT_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xffff) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_INT_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xffff << 4)) | ((x & 0xffff) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_PRESCALER_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 20) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_PRESCALER(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 20)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG0_PRESCALER_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 20)) | ((x & 0x1) << 20))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG1_MAIN_FRAC_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0xffffff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG1_MAIN_FRAC_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xffffff) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG1_MAIN_FRAC_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xffffff << 4)) | ((x & 0xffffff) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_FRAC_LSB_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 18) & 0x3fff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_FRAC_LSB_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3fff) << 18)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_FRAC_LSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3fff << 18)) | ((x & 0x3fff) << 18))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_MOD_LSB_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x3fff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_MOD_LSB_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3fff) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG2_AUX_MOD_LSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3fff << 4)) | ((x & 0x3fff) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_ADJUST_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 28) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_ADJUST(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 28)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_ADJUST_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 28)) | ((x & 0x1) << 28))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_RESYNC_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 29) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_RESYNC(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 29)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_RESYNC_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 29)) | ((x & 0x1) << 29))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0xffffff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xffffff) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_PHASE_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xffffff << 4)) | ((x & 0xffffff) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_SD_LOAD_RESET_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 30) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_SD_LOAD_RESET(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 30)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG3_SD_LOAD_RESET_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 30)) | ((x & 0x1) << 30))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_COUNTER_RESET_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_COUNTER_RESET(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_COUNTER_RESET_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CP_THREE_STATE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 5) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CP_THREE_STATE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 5)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CP_THREE_STATE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 5)) | ((x & 0x1) << 5))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CURRENT_SETTING_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 10) & 0xf)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CURRENT_SETTING(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xf) << 10)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_CURRENT_SETTING_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xf << 10)) | ((x & 0xf) << 10))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_DOUBLE_BUFF_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 14) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_DOUBLE_BUFF(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 14)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_DOUBLE_BUFF_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 14)) | ((x & 0x1) << 14))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUX_LOGIC_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 8) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUX_LOGIC(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 8)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUX_LOGIC_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 8)) | ((x & 0x1) << 8))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUXOUT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 27) & 0x7)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUXOUT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x7) << 27)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_MUXOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x7 << 27)) | ((x & 0x7) << 27))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_PD_POLARITY_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 7) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_PD_POLARITY(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 7)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_PD_POLARITY_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 7)) | ((x & 0x1) << 7))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_POWER_DOWN_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 6) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_POWER_DOWN(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 6)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_POWER_DOWN_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 6)) | ((x & 0x1) << 6))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_COUNTER_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 15) & 0x3ff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_COUNTER(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3ff) << 15)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_COUNTER_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3ff << 15)) | ((x & 0x3ff) << 15))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DIVIDER_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 25) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DIVIDER(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 25)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DIVIDER_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 25)) | ((x & 0x1) << 25))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DOUBLER_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 26) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DOUBLER(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 26)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_R_DOUBLER_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 26)) | ((x & 0x1) << 26))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_REF_MODE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 9) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_REF_MODE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 9)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG4_REF_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 9)) | ((x & 0x1) << 9))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_BLEED_POLARITY_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 31) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_BLEED_POLARITY(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 31)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_BLEED_POLARITY_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 31)) | ((x & 0x1) << 31))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_CP_BLEED_CURRENT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 13) & 0xff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_CP_BLEED_CURRENT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xff) << 13)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_CP_BLEED_CURRENT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xff << 13)) | ((x & 0xff) << 13))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_FB_SELECT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 24) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_FB_SELECT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 24)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_FB_SELECT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 24)) | ((x & 0x1) << 24))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_GATE_BLEED_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 30) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_GATE_BLEED(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 30)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_GATE_BLEED_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 30)) | ((x & 0x1) << 30))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_MUTE_TILL_LD_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 11) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_MUTE_TILL_LD(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 11)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_MUTE_TILL_LD_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 11)) | ((x & 0x1) << 11))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_NEGATIVE_BLEED_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 29) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_NEGATIVE_BLEED(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 29)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_NEGATIVE_BLEED_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 29)) | ((x & 0x1) << 29))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_DIVIDER_SELECT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 21) & 0x7)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_DIVIDER_SELECT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x7) << 21)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_DIVIDER_SELECT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x7 << 21)) | ((x & 0x7) << 21))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_ENABLE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 6) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_ENABLE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 6)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 6)) | ((x & 0x1) << 6))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_POWER_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x3)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_POWER(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_A_POWER_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3 << 4)) | ((x & 0x3) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_B_ENABLE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 10) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_B_ENABLE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 10)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG6_RF_OUTPUT_B_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 10)) | ((x & 0x1) << 10))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_FRAC_N_LD_PRECISION_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 5) & 0x3)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_FRAC_N_LD_PRECISION(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3) << 5)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_FRAC_N_LD_PRECISION_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3 << 5)) | ((x & 0x3) << 5))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_CYCLE_COUNT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 8) & 0x3)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_CYCLE_COUNT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3) << 8)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_CYCLE_COUNT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3 << 8)) | ((x & 0x3) << 8))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_MODE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_MODE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LD_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SEL_SYNC_EDGE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 27) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SEL_SYNC_EDGE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 27)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SEL_SYNC_EDGE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 27)) | ((x & 0x1) << 27))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SYNC_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 25) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SYNC(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 25)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LE_SYNC_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 25)) | ((x & 0x1) << 25))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LOL_MODE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 7) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LOL_MODE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 7)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG7_LOL_MODE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 7)) | ((x & 0x1) << 7))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_AUTOCAL_TIMEOUT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 9) & 0x1f)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_AUTOCAL_TIMEOUT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1f) << 9)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_AUTOCAL_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1f << 9)) | ((x & 0x1f) << 9))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_SYNTH_LOCK_TIMEOUT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x1f)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_SYNTH_LOCK_TIMEOUT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1f) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_SYNTH_LOCK_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1f << 4)) | ((x & 0x1f) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_TIMEOUT_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 14) & 0x3ff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_TIMEOUT(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3ff) << 14)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_TIMEOUT_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3ff << 14)) | ((x & 0x3ff) << 14))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_VCO_BAND_DIVISION_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 24) & 0xff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_VCO_BAND_DIVISION(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xff) << 24)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG9_VCO_BAND_DIVISION_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xff << 24)) | ((x & 0xff) << 24))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CLK_DIV_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 6) & 0xff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CLK_DIV(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xff) << 6)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CLK_DIV_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xff << 6)) | ((x & 0xff) << 6))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CONV_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 5) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CONV(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 5)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_CONV_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 5)) | ((x & 0x1) << 5))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_ENABLE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_ENABLE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG10_ADC_ENABLE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 4)) | ((x & 0x1) << 4))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG11_VCO_BAND_HOLD_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 24) & 0x1)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG11_VCO_BAND_HOLD(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x1) << 24)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG11_VCO_BAND_HOLD_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x1 << 24)) | ((x & 0x1) << 24))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 12) & 0xfffff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0xfffff) << 12)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG12_PHASE_RESYNC_CLK_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0xfffff << 12)) | ((x & 0xfffff) << 12))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_FRAC_MSB_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 18) & 0x3fff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_FRAC_MSB_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3fff) << 18)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_FRAC_MSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3fff << 18)) | ((x & 0x3fff) << 18))
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_MOD_MSB_VALUE_GET(reg: TInt32) -> TInt32:
|
||||||
|
return int32((reg >> 4) & 0x3fff)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_MOD_MSB_VALUE(x: TInt32) -> TInt32:
|
||||||
|
return int32((x & 0x3fff) << 4)
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def ADF5356_REG13_AUX_MOD_MSB_VALUE_UPDATE(reg: TInt32, x: TInt32) -> TInt32:
|
||||||
|
return int32((reg & ~(0x3fff << 4)) | ((x & 0x3fff) << 4))
|
||||||
|
|
||||||
|
ADF5356_NUM_REGS = 14
|
|
@@ -0,0 +1,185 @@
from artiq.language.core import kernel, portable, delay
from artiq.language.units import us

from numpy import int32


# almazny-specific data
ALMAZNY_LEGACY_REG_BASE = 0x0C
ALMAZNY_LEGACY_OE_SHIFT = 12

# higher SPI write divider to match almazny shift register timing
# min SER time before SRCLK rise = 125ns
# -> div=32 gives 125ns for data before clock rise
# works at faster dividers too but could be less reliable
ALMAZNY_LEGACY_SPIT_WR = 32


class AlmaznyLegacy:
    """
    Almazny (High frequency mezzanine board for Mirny)

    This applies to Almazny hardware v1.1 and earlier.
    Use :class:`artiq.coredevice.almazny.AlmaznyChannel` for Almazny v1.2 and later.

    :param host_mirny: Mirny device Almazny is connected to
    """

    def __init__(self, dmgr, host_mirny):
        self.mirny_cpld = dmgr.get(host_mirny)
        self.att_mu = [0x3f] * 4
        self.channel_sw = [0] * 4
        self.output_enable = False

    @kernel
    def init(self):
        self.output_toggle(self.output_enable)

    @kernel
    def att_to_mu(self, att):
        """
        Convert an attenuator setting in dB to machine units.

        :param att: attenuator setting in dB [0-31.5]
        :return: attenuator setting in machine units
        """
        mu = round(att * 2.0)
        if mu > 63 or mu < 0:
            raise ValueError("Invalid Almazny attenuator settings!")
        return mu

    @kernel
    def mu_to_att(self, att_mu):
        """
        Convert a digital attenuator setting to dB.

        :param att_mu: attenuator setting in machine units
        :return: attenuator setting in dB
        """
        return att_mu / 2

    @kernel
    def set_att(self, channel, att, rf_switch=True):
        """
        Sets attenuators on chosen shift register (channel).

        :param channel: index of the register [0-3]
        :param att: attenuation setting in dB [0-31.5]
        :param rf_switch: rf switch (bool)
        """
        self.set_att_mu(channel, self.att_to_mu(att), rf_switch)

    @kernel
    def set_att_mu(self, channel, att_mu, rf_switch=True):
        """
        Sets attenuators on chosen shift register (channel).

        :param channel: index of the register [0-3]
        :param att_mu: attenuation setting in machine units [0-63]
        :param rf_switch: rf switch (bool)
        """
        self.channel_sw[channel] = 1 if rf_switch else 0
        self.att_mu[channel] = att_mu
        self._update_register(channel)

    @kernel
    def output_toggle(self, oe):
        """
        Toggles output on all shift registers on or off.

        :param oe: toggle output enable (bool)
        """
        self.output_enable = oe
        cfg_reg = self.mirny_cpld.read_reg(1)
        en = 1 if self.output_enable else 0
        delay(100 * us)
        new_reg = (en << ALMAZNY_LEGACY_OE_SHIFT) | (cfg_reg & 0x3FF)
        self.mirny_cpld.write_reg(1, new_reg)
        delay(100 * us)

    @kernel
    def _flip_mu_bits(self, mu):
        # in this form MSB is actually 0.5dB attenuator
        # unnatural for users, so we flip the six bits
        return (((mu & 0x01) << 5)
                | ((mu & 0x02) << 3)
                | ((mu & 0x04) << 1)
                | ((mu & 0x08) >> 1)
                | ((mu & 0x10) >> 3)
                | ((mu & 0x20) >> 5))

    @kernel
    def _update_register(self, ch):
        self.mirny_cpld.write_ext(
            ALMAZNY_LEGACY_REG_BASE + ch,
            8,
            self._flip_mu_bits(self.att_mu[ch]) | (self.channel_sw[ch] << 6),
            ALMAZNY_LEGACY_SPIT_WR
        )
        delay(100 * us)

    @kernel
    def _update_all_registers(self):
        for i in range(4):
            self._update_register(i)


class AlmaznyChannel:
    """
    One Almazny channel

    Almazny is a mezzanine for the Quad PLL RF source Mirny that exposes and
    controls the frequency-doubled outputs.
    This driver requires Almazny hardware revision v1.2 or later
    and Mirny CPLD gateware v0.3 or later.
    Use :class:`artiq.coredevice.almazny.AlmaznyLegacy` for Almazny hardware v1.1 and earlier.

    :param host_mirny: Mirny CPLD device name
    :param channel: channel index (0-3)
    """

    def __init__(self, dmgr, host_mirny, channel):
        self.channel = channel
        self.mirny_cpld = dmgr.get(host_mirny)

    @portable
    def to_mu(self, att, enable, led):
        """
        Convert an attenuation in dB, RF switch state and LED state to machine
        units.

        :param att: attenuator setting in dB (0-31.5)
        :param enable: RF switch state (bool)
        :param led: LED state (bool)
        :return: channel setting in machine units
        """
        mu = int32(round(att * 2.))
        if mu >= 64 or mu < 0:
            raise ValueError("Attenuation out of range")
        # unfortunate hardware design: bit reverse
        mu = ((mu & 0x15) << 1) | ((mu >> 1) & 0x15)
        mu = ((mu & 0x03) << 4) | (mu & 0x0c) | ((mu >> 4) & 0x03)
        if enable:
            mu |= 1 << 6
        if led:
            mu |= 1 << 7
        return mu

    @kernel
    def set_mu(self, mu):
        """
        Set channel state (machine units).

        :param mu: channel state in machine units.
        """
        self.mirny_cpld.write_ext(
            addr=0xc + self.channel, length=8, data=mu, ext_div=32)

    @kernel
    def set(self, att, enable, led=False):
        """
        Set attenuation, RF switch, and LED state (SI units).

        :param att: attenuator setting in dB (0-31.5)
        :param enable: RF switch state (bool)
        :param led: LED state (bool)
        """
        self.set_mu(self.to_mu(att, enable, led))
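The two shift-and-mask lines in AlmaznyChannel.to_mu implement a 6-bit reversal (a swap within bit pairs, then a swap of the outer pairs). A small host-side check, independent of the driver, that the composite operation really reverses the attenuator bits and shows where the RF-switch and LED bits land; the 10.5 dB setting is just an example value.

def reverse6(mu):
    # same two-step swap as AlmaznyChannel.to_mu
    mu = ((mu & 0x15) << 1) | ((mu >> 1) & 0x15)
    mu = ((mu & 0x03) << 4) | (mu & 0x0c) | ((mu >> 4) & 0x03)
    return mu

def reverse6_naive(mu):
    # straightforward string-based 6-bit reversal for comparison
    return int("{:06b}".format(mu)[::-1], 2)

assert all(reverse6(m) == reverse6_naive(m) for m in range(64))

# 10.5 dB -> mu = 21 = 0b010101, reversed to 0b101010, then the RF switch
# (bit 6) and LED (bit 7) flags are OR'd on top.
assert reverse6(21) | (1 << 6) | (1 << 7) == 0b11101010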
@@ -2,11 +2,11 @@ from artiq.language.core import *
 from artiq.language.types import *


-@syscall(flags={"nounwind", "nowrite"})
+@syscall(flags={"nounwind"})
 def cache_get(key: TStr) -> TList(TInt32):
     raise NotImplementedError("syscall not simulated")


-@syscall(flags={"nowrite"})
+@syscall
 def cache_put(key: TStr, value: TList(TInt32)) -> TNone:
     raise NotImplementedError("syscall not simulated")
@@ -1,37 +0,0 @@
import sys
import socket
import logging


logger = logging.getLogger(__name__)


def set_keepalive(sock, after_idle, interval, max_fails):
    if sys.platform.startswith("linux"):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
    elif sys.platform.startswith("win") or sys.platform.startswith("cygwin"):
        # setting max_fails is not supported, typically ends up being 5 or 10
        # depending on Windows version
        sock.ioctl(socket.SIO_KEEPALIVE_VALS,
                   (1, after_idle*1000, interval*1000))
    else:
        logger.warning("TCP keepalive not supported on platform '%s', ignored",
                       sys.platform)


def initialize_connection(host, port, ssh_transport=None):
    if ssh_transport is None:
        sock = socket.create_connection((host, port), 5.0)
        sock.settimeout(None)
        set_keepalive(sock, 3, 2, 3)
        logger.debug("connected to %s:%d", host, port)
    else:
        sock = ssh_transport.open_channel("direct-tcpip", (host, port),
                                          ("localhost", 9999), timeout=5.0)
        ssh_transport.set_keepalive(2)
        logger.debug("connected to %s:%d via SSH transport to %s:%d",
                     host, port, *ssh_transport.getpeername())
    return sock
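The removed keepalive helpers are superseded by sipyco's keepalive-aware sockets, which the comm_kernel changes further below import. A minimal sketch, assuming the sipyco.keepalive.create_connection(host, port) helper seen in that import; the address and port are placeholders, not defaults taken from this change.

from sipyco.keepalive import create_connection

# "192.168.1.75" and 1381 are placeholder values for a core device address.
sock = create_connection("192.168.1.75", 1381)
sock.close()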
@@ -90,18 +90,28 @@ DecodedDump = namedtuple(


 def decode_dump(data):
-    parts = struct.unpack(">IQbbb", data[:15])
+    # extract endian byte
+    if data[0] == ord('E'):
+        endian = '>'
+    elif data[0] == ord('e'):
+        endian = '<'
+    else:
+        raise ValueError
+    data = data[1:]
+    # only header is device endian
+    # messages are big endian
+    parts = struct.unpack(endian + "IQbbb", data[:15])
     (sent_bytes, total_byte_count,
-     overflow_occured, log_channel, dds_onehot_sel) = parts
+     error_occurred, log_channel, dds_onehot_sel) = parts

     expected_len = sent_bytes + 15
     if expected_len != len(data):
         raise ValueError("analyzer dump has incorrect length "
                          "(got {}, expected {})".format(
                             len(data), expected_len))
-    if overflow_occured:
-        logger.warning("analyzer FIFO overflow occured, "
-                       "some messages have been lost")
+    if error_occurred:
+        logger.warning("error occurred within the analyzer, "
+                       "data may be corrupted")
     if total_byte_count > sent_bytes:
         logger.info("analyzer ring buffer has wrapped %d times",
                     total_byte_count//sent_bytes)
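For illustration, the new header format (a leading endianness marker byte, then a device-endian IQbbb header) can be round-tripped on the host with struct; the field values below are made up for the example and are not taken from a real dump.

import struct

# Build a little-endian analyzer dump header: marker byte 'e', then
# sent_bytes (I), total_byte_count (Q), error flag, log channel and
# dds_onehot_sel (b, b, b). The numbers are arbitrary example values.
header = b"e" + struct.pack("<IQbbb", 128, 128, 0, 30, 1)

endian = ">" if header[0] == ord("E") else "<"
sent_bytes, total, error, log_ch, onehot = struct.unpack(
    endian + "IQbbb", header[1:16])
assert (sent_bytes, total, error, log_ch, onehot) == (128, 128, 0, 30, 1)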
@@ -360,8 +370,8 @@ class SPIMaster2Handler(WishboneHandler):
     def process_message(self, message):
         self.stb.set_value("1")
         self.stb.set_value("0")
-        data = message.data
         if isinstance(message, OutputMessage):
+            data = message.data
             address = message.address
             if address == 1:
                 logger.debug("SPI config @%d data=0x%08x",
@@ -462,7 +472,7 @@ def get_ref_period(devices):

 def get_dds_sysclk(devices):
     return get_single_device_argument(devices, "artiq.coredevice.ad9914",
-                                      ("ad9914",), "sysclk")
+                                      ("AD9914",), "sysclk")


 def create_channel_handlers(vcd_manager, devices, ref_period,

@@ -485,8 +495,7 @@ def create_channel_handlers(vcd_manager, devices, ref_period,
                 if dds_bus_channel in channel_handlers:
                     dds_handler = channel_handlers[dds_bus_channel]
                 else:
-                    dds_handler = DDSHandler(vcd_manager, desc["class"],
-                        dds_onehot_sel, dds_sysclk)
+                    dds_handler = DDSHandler(vcd_manager, dds_onehot_sel, dds_sysclk)
                     channel_handlers[dds_bus_channel] = dds_handler
                 dds_handler.add_dds_channel(name, dds_channel)
             if (desc["module"] == "artiq.coredevice.spi2" and
@ -501,11 +510,13 @@ def get_message_time(message):
|
||||||
return getattr(message, "timestamp", message.rtio_counter)
|
return getattr(message, "timestamp", message.rtio_counter)
|
||||||
|
|
||||||
|
|
||||||
def decoded_dump_to_vcd(fileobj, devices, dump):
|
def decoded_dump_to_vcd(fileobj, devices, dump, uniform_interval=False):
|
||||||
vcd_manager = VCDManager(fileobj)
|
vcd_manager = VCDManager(fileobj)
|
||||||
ref_period = get_ref_period(devices)
|
ref_period = get_ref_period(devices)
|
||||||
|
|
||||||
if ref_period is not None:
|
if ref_period is not None:
|
||||||
vcd_manager.set_timescale_ps(ref_period*1e12)
|
if not uniform_interval:
|
||||||
|
vcd_manager.set_timescale_ps(ref_period*1e12)
|
||||||
else:
|
else:
|
||||||
logger.warning("unable to determine core device ref_period")
|
logger.warning("unable to determine core device ref_period")
|
||||||
ref_period = 1e-9 # guess
|
ref_period = 1e-9 # guess
|
||||||
|
@ -527,6 +538,12 @@ def decoded_dump_to_vcd(fileobj, devices, dump):
|
||||||
vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages)
|
vcd_log_channels = get_vcd_log_channels(dump.log_channel, messages)
|
||||||
channel_handlers[dump.log_channel] = LogHandler(
|
channel_handlers[dump.log_channel] = LogHandler(
|
||||||
vcd_manager, vcd_log_channels)
|
vcd_manager, vcd_log_channels)
|
||||||
|
if uniform_interval:
|
||||||
|
# RTIO event timestamp in machine units
|
||||||
|
timestamp = vcd_manager.get_channel("timestamp", 64)
|
||||||
|
# RTIO time interval between this and the next timed event
|
||||||
|
# in SI seconds
|
||||||
|
interval = vcd_manager.get_channel("interval", 64)
|
||||||
slack = vcd_manager.get_channel("rtio_slack", 64)
|
slack = vcd_manager.get_channel("rtio_slack", 64)
|
||||||
|
|
||||||
vcd_manager.set_time(0)
|
vcd_manager.set_time(0)
|
||||||
|
@ -536,11 +553,18 @@ def decoded_dump_to_vcd(fileobj, devices, dump):
|
||||||
if start_time:
|
if start_time:
|
||||||
break
|
break
|
||||||
|
|
||||||
for message in messages:
|
t0 = 0
|
||||||
|
for i, message in enumerate(messages):
|
||||||
if message.channel in channel_handlers:
|
if message.channel in channel_handlers:
|
||||||
t = get_message_time(message) - start_time
|
t = get_message_time(message) - start_time
|
||||||
if t >= 0:
|
if t >= 0:
|
||||||
vcd_manager.set_time(t)
|
if uniform_interval:
|
||||||
|
interval.set_value_double((t - t0)*ref_period)
|
||||||
|
vcd_manager.set_time(i)
|
||||||
|
timestamp.set_value("{:064b}".format(t))
|
||||||
|
t0 = t
|
||||||
|
else:
|
||||||
|
vcd_manager.set_time(t)
|
||||||
channel_handlers[message.channel].process_message(message)
|
channel_handlers[message.channel].process_message(message)
|
||||||
if isinstance(message, OutputMessage):
|
if isinstance(message, OutputMessage):
|
||||||
slack.set_value_double(
|
slack.set_value_double(
|
||||||
|
|
|
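For illustration, a minimal usage sketch of the uniform_interval option added above, assuming an installed ARTIQ; the file names and the devices dict are placeholders, not part of this changeset.

import artiq.coredevice.comm_analyzer as comm_analyzer

devices = {}  # placeholder: normally the contents of the device database
with open("rtio.dump", "rb") as fin:
    dump = comm_analyzer.decode_dump(fin.read())
with open("rtio.vcd", "w") as fout:
    # uniform_interval=True spaces events evenly and records the real RTIO
    # interval in a dedicated "interval" channel instead of the VCD timescale.
    comm_analyzer.decoded_dump_to_vcd(fout, devices, dump, uniform_interval=True)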
@@ -2,14 +2,14 @@ import struct
import logging
import traceback
import numpy
+import socket
from enum import Enum
from fractions import Fraction
from collections import namedtuple

from artiq.coredevice import exceptions
-from artiq.coredevice.comm import initialize_connection
from artiq import __version__ as software_version
+from sipyco.keepalive import create_connection


logger = logging.getLogger(__name__)


@@ -23,6 +23,8 @@ class Request(Enum):
    RPCReply = 7
    RPCException = 8

+    SubkernelUpload = 9


class Reply(Enum):
    SystemInfo = 2

@@ -36,16 +38,17 @@ class Reply(Enum):

    RPCRequest = 10

-    WatchdogExpired = 14
    ClockFailure = 15


class UnsupportedDevice(Exception):
    pass


class LoadError(Exception):
    pass


class RPCReturnValueError(ValueError):
    pass

@@ -53,6 +56,105 @@ class RPCReturnValueError(ValueError):
RPCKeyword = namedtuple('RPCKeyword', ['name', 'value'])


+def _receive_fraction(kernel, embedding_map):
+    numerator = kernel._read_int64()
+    denominator = kernel._read_int64()
+    return Fraction(numerator, denominator)
+
+
+def _receive_list(kernel, embedding_map):
+    length = kernel._read_int32()
+    tag = chr(kernel._read_int8())
+    if tag == "b":
+        buffer = kernel._read(length)
+        return list(struct.unpack(kernel.endian + "%s?" % length, buffer))
+    elif tag == "i":
+        buffer = kernel._read(4 * length)
+        return list(struct.unpack(kernel.endian + "%sl" % length, buffer))
+    elif tag == "I":
+        buffer = kernel._read(8 * length)
+        return list(numpy.ndarray((length, ), kernel.endian + 'i8', buffer))
+    elif tag == "f":
+        buffer = kernel._read(8 * length)
+        return list(struct.unpack(kernel.endian + "%sd" % length, buffer))
+    else:
+        fn = receivers[tag]
+        elems = []
+        for _ in range(length):
+            # discard tag, as our device would still send the tag for each
+            # non-primitive elements.
+            kernel._read_int8()
+            item = fn(kernel, embedding_map)
+            elems.append(item)
+        return elems
+
+
+def _receive_array(kernel, embedding_map):
+    num_dims = kernel._read_int8()
+    shape = tuple(kernel._read_int32() for _ in range(num_dims))
+    tag = chr(kernel._read_int8())
+    fn = receivers[tag]
+    length = numpy.prod(shape)
+    if tag == "b":
+        buffer = kernel._read(length)
+        elems = numpy.ndarray((length, ), '?', buffer)
+    elif tag == "i":
+        buffer = kernel._read(4 * length)
+        elems = numpy.ndarray((length, ), kernel.endian + 'i4', buffer)
+    elif tag == "I":
+        buffer = kernel._read(8 * length)
+        elems = numpy.ndarray((length, ), kernel.endian + 'i8', buffer)
+    elif tag == "f":
+        buffer = kernel._read(8 * length)
+        elems = numpy.ndarray((length, ), kernel.endian + 'd', buffer)
+    else:
+        fn = receivers[tag]
+        elems = []
+        for _ in range(numpy.prod(shape)):
+            # discard the tag
+            kernel._read_int8()
+            item = fn(kernel, embedding_map)
+            elems.append(item)
+        elems = numpy.array(elems)
+    return elems.reshape(shape)
+
+
+def _receive_range(kernel, embedding_map):
+    start = kernel._receive_rpc_value(embedding_map)
+    stop = kernel._receive_rpc_value(embedding_map)
+    step = kernel._receive_rpc_value(embedding_map)
+    return range(start, stop, step)
+
+
+def _receive_keyword(kernel, embedding_map):
+    name = kernel._read_string()
+    value = kernel._receive_rpc_value(embedding_map)
+    return RPCKeyword(name, value)
+
+
+receivers = {
+    "\x00": lambda kernel, embedding_map: kernel._rpc_sentinel,
+    "t": lambda kernel, embedding_map:
+        tuple(kernel._receive_rpc_value(embedding_map)
+              for _ in range(kernel._read_int8())),
+    "n": lambda kernel, embedding_map: None,
+    "b": lambda kernel, embedding_map: bool(kernel._read_int8()),
+    "i": lambda kernel, embedding_map: numpy.int32(kernel._read_int32()),
+    "I": lambda kernel, embedding_map: numpy.int64(kernel._read_int64()),
+    "f": lambda kernel, embedding_map: kernel._read_float64(),
+    "s": lambda kernel, embedding_map: kernel._read_string(),
+    "B": lambda kernel, embedding_map: kernel._read_bytes(),
+    "A": lambda kernel, embedding_map: kernel._read_bytes(),
+    "O": lambda kernel, embedding_map:
+        embedding_map.retrieve_object(kernel._read_int32()),
+    "F": _receive_fraction,
+    "l": _receive_list,
+    "a": _receive_array,
+    "r": _receive_range,
+    "k": _receive_keyword
+}
+
+
class CommKernelDummy:
    def __init__(self):
        pass
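For readers skimming the diff: the receivers table above replaces a long if/elif chain with tag-based dispatch, so adding a type means adding one entry. A self-contained toy illustration of the same pattern (tags and names here are illustrative, not the ARTIQ wire format):

import io
import struct

# Toy dispatch table keyed by a one-character tag, mirroring the shape of
# the receivers dict above.
decoders = {
    "i": lambda stream: struct.unpack("<l", stream.read(4))[0],
    "f": lambda stream: struct.unpack("<d", stream.read(8))[0],
    "n": lambda stream: None,
}

def decode_one(stream):
    tag = stream.read(1).decode()
    return decoders[tag](stream)

payload = io.BytesIO(b"i" + struct.pack("<l", 42))
print(decode_one(payload))  # 42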
@@ -70,6 +172,16 @@ class CommKernelDummy:
        pass


+def incompatible_versions(v1, v2):
+    if v1.endswith(".beta") or v2.endswith(".beta"):
+        # Beta branches may introduce breaking changes. Check version strictly.
+        return v1 != v2
+    else:
+        # On stable branches, runtime/software protocol backward compatibility is kept.
+        # Runtime and software with the same major version number are compatible.
+        return v1.split(".", maxsplit=1)[0] != v2.split(".", maxsplit=1)[0]
+
+
class CommKernel:
    warned_of_mismatch = False
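A quick worked illustration of the rule implemented by incompatible_versions() above (the version strings are made up):

# Standalone restatement of the rule, with made-up version strings.
def incompatible_versions(v1, v2):
    if v1.endswith(".beta") or v2.endswith(".beta"):
        return v1 != v2                 # beta: compare strictly
    return v1.split(".", maxsplit=1)[0] != v2.split(".", maxsplit=1)[0]

print(incompatible_versions("7.8100.abc", "7.8150.def"))   # False: same major version
print(incompatible_versions("7.8100.abc", "8.0.def"))      # True: major version differs
print(incompatible_versions("8.0.beta", "8.1.beta"))        # True: beta compares full strings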
@@ -77,12 +189,31 @@ class CommKernel:
        self._read_type = None
        self.host = host
        self.port = port
+        self.read_buffer = bytearray()
+        self.write_buffer = bytearray()

-    def open(self, **kwargs):
+    def open(self):
        if hasattr(self, "socket"):
            return
-        self.socket = initialize_connection(self.host, self.port, **kwargs)
+        self.socket = create_connection(self.host, self.port)
        self.socket.sendall(b"ARTIQ coredev\n")
+        endian = self._read(1)
+        if endian == b"e":
+            self.endian = "<"
+        elif endian == b"E":
+            self.endian = ">"
+        else:
+            raise IOError("Incorrect reply from device: expected e/E.")
+        self.unpack_int32 = struct.Struct(self.endian + "l").unpack
+        self.unpack_int64 = struct.Struct(self.endian + "q").unpack
+        self.unpack_float64 = struct.Struct(self.endian + "d").unpack
+
+        self.pack_header = struct.Struct(self.endian + "lB").pack
+        self.pack_int8 = struct.Struct(self.endian + "B").pack
+        self.pack_int32 = struct.Struct(self.endian + "l").pack
+        self.pack_int64 = struct.Struct(self.endian + "q").pack
+        self.pack_float64 = struct.Struct(self.endian + "d").pack

    def close(self):
        if not hasattr(self, "socket"):
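The endianness byte negotiated in open() above selects the struct prefix used by all the pre-compiled (un)packers; a standalone illustration with a placeholder value for the byte read from the socket:

import struct

device_reply = b"e"   # placeholder for the endianness byte read from the socket
endian = "<" if device_reply == b"e" else ">"

# Pre-compiling the Struct objects once (as open() now does) avoids re-parsing
# the format string on every message.
pack_int32 = struct.Struct(endian + "l").pack
unpack_int32 = struct.Struct(endian + "l").unpack

raw = pack_int32(0x12345678)
print(raw.hex())             # 78563412 for little-endian
print(unpack_int32(raw)[0])  # 305419896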
@@ -91,36 +222,41 @@ class CommKernel:
        del self.socket
        logger.debug("disconnected")

-    def read(self, length):
-        r = bytes()
-        while len(r) < length:
-            rn = self.socket.recv(min(8192, length - len(r)))
-            if not rn:
-                raise ConnectionResetError("Connection closed")
-            r += rn
-        return r
-
-    def write(self, data):
-        self.socket.sendall(data)

    #
    # Reader interface
    #

+    def _read(self, length):
+        # cache the reads to avoid frequent call to recv
+        while len(self.read_buffer) < length:
+            # the number is just the maximum amount
+            # when there is not much data, it would return earlier
+            diff = length - len(self.read_buffer)
+            flag = 0
+            if diff > 8192:
+                flag |= socket.MSG_WAITALL
+            new_buffer = self.socket.recv(8192, flag)
+            if not new_buffer:
+                raise ConnectionResetError("Core device connection closed unexpectedly")
+            self.read_buffer += new_buffer
+        result = self.read_buffer[:length]
+        self.read_buffer = self.read_buffer[length:]
+        return result
+
    def _read_header(self):
        self.open()

        # Wait for a synchronization sequence, 5a 5a 5a 5a.
        sync_count = 0
        while sync_count < 4:
-            (sync_byte, ) = struct.unpack("B", self.read(1))
+            sync_byte = self._read(1)[0]
            if sync_byte == 0x5a:
                sync_count += 1
            else:
                sync_count = 0

        # Read message header.
-        (raw_type, ) = struct.unpack("B", self.read(1))
+        raw_type = self._read(1)[0]
        self._read_type = Reply(raw_type)

        logger.debug("receiving message: type=%r",
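A self-contained sketch of the read-buffering idea used by the new _read() above, over an in-memory stream instead of a socket (names are illustrative):

import io

class BufferedReader:
    # Illustrative only: accumulate chunks until `length` bytes are available,
    # then slice them off the front, as the new _read() does with recv().
    def __init__(self, stream):
        self.stream = stream
        self.buf = bytearray()

    def read_exact(self, length):
        while len(self.buf) < length:
            chunk = self.stream.read(8192)
            if not chunk:
                raise ConnectionResetError("stream closed")
            self.buf += chunk
        result, self.buf = self.buf[:length], self.buf[length:]
        return bytes(result)

r = BufferedReader(io.BytesIO(b"ARTIQ coredev\n"))
print(r.read_exact(5))   # b'ARTIQ'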
@@ -135,30 +271,26 @@ class CommKernel:
        self._read_header()
        self._read_expect(ty)

-    def _read_chunk(self, length):
-        return self.read(length)
-
    def _read_int8(self):
-        (value, ) = struct.unpack("B", self._read_chunk(1))
-        return value
+        return self._read(1)[0]

    def _read_int32(self):
-        (value, ) = struct.unpack(">l", self._read_chunk(4))
+        (value, ) = self.unpack_int32(self._read(4))
        return value

    def _read_int64(self):
-        (value, ) = struct.unpack(">q", self._read_chunk(8))
+        (value, ) = self.unpack_int64(self._read(8))
        return value

    def _read_float64(self):
-        (value, ) = struct.unpack(">d", self._read_chunk(8))
+        (value, ) = self.unpack_float64(self._read(8))
        return value

    def _read_bool(self):
        return True if self._read_int8() else False

    def _read_bytes(self):
-        return self._read_chunk(self._read_int32())
+        return self._read(self._read_int32())

    def _read_string(self):
        return self._read_bytes().decode("utf-8")
@@ -167,38 +299,49 @@ class CommKernel:
    # Writer interface
    #

+    def _write(self, data):
+        self.write_buffer += data
+        # if the buffer is already pretty large, send it
+        # the block size is arbitrary, tuning it may improve performance
+        if len(self.write_buffer) > 4096:
+            self._flush()
+
+    def _flush(self):
+        self.socket.sendall(self.write_buffer)
+        self.write_buffer.clear()
+
    def _write_header(self, ty):
        self.open()

        logger.debug("sending message: type=%r", ty)

        # Write synchronization sequence and header.
-        self.write(struct.pack(">lB", 0x5a5a5a5a, ty.value))
+        self._write(self.pack_header(0x5a5a5a5a, ty.value))

    def _write_empty(self, ty):
        self._write_header(ty)

    def _write_chunk(self, chunk):
-        self.write(chunk)
+        self._write(chunk)

    def _write_int8(self, value):
-        self.write(struct.pack("B", value))
+        self._write(self.pack_int8(value))

    def _write_int32(self, value):
-        self.write(struct.pack(">l", value))
+        self._write(self.pack_int32(value))

    def _write_int64(self, value):
-        self.write(struct.pack(">q", value))
+        self._write(self.pack_int64(value))

    def _write_float64(self, value):
-        self.write(struct.pack(">d", value))
+        self._write(self.pack_float64(value))

    def _write_bool(self, value):
-        self.write(struct.pack("B", value))
+        self._write(b'\x01' if value else b'\x00')

    def _write_bytes(self, value):
        self._write_int32(len(value))
-        self.write(value)
+        self._write(value)

    def _write_string(self, value):
        self._write_bytes(value.encode("utf-8"))
@@ -207,33 +350,47 @@ class CommKernel:
    # Exported APIs
    #

-    def reset_session(self):
-        self.write(struct.pack(">ll", 0x5a5a5a5a, 0))
-
    def check_system_info(self):
        self._write_empty(Request.SystemInfo)
+        self._flush()

        self._read_header()
        self._read_expect(Reply.SystemInfo)
-        runtime_id = self._read_chunk(4)
+        runtime_id = self._read(4)
-        if runtime_id != b"AROR":
+        if runtime_id == b"AROR":
+            gateware_version = self._read_string().split(";")[0]
+            if not self.warned_of_mismatch and incompatible_versions(gateware_version, software_version):
+                logger.warning("Mismatch between gateware (%s) "
+                               "and software (%s) versions",
+                               gateware_version, software_version)
+                CommKernel.warned_of_mismatch = True
+
+            finished_cleanly = self._read_bool()
+            if not finished_cleanly:
+                logger.warning("Previous kernel did not cleanly finish")
+        elif runtime_id == b"ARZQ":
+            pass
+        else:
            raise UnsupportedDevice("Unsupported runtime ID: {}"
                                    .format(runtime_id))

-        gateware_version = self._read_string().split(";")[0]
-        if gateware_version != software_version and not self.warned_of_mismatch:
-            logger.warning("Mismatch between gateware (%s) "
-                           "and software (%s) versions",
-                           gateware_version, software_version)
-            CommKernel.warned_of_mismatch = True
-
-        finished_cleanly = self._read_bool()
-        if not finished_cleanly:
-            logger.warning("Previous kernel did not cleanly finish")

    def load(self, kernel_library):
        self._write_header(Request.LoadKernel)
        self._write_bytes(kernel_library)
+        self._flush()
+
+        self._read_header()
+        if self._read_type == Reply.LoadFailed:
+            raise LoadError(self._read_string())
+        else:
+            self._read_expect(Reply.LoadCompleted)
+
+    def upload_subkernel(self, kernel_library, id, destination):
+        self._write_header(Request.SubkernelUpload)
+        self._write_int32(id)
+        self._write_int8(destination)
+        self._write_bytes(kernel_library)
+        self._flush()

        self._read_header()
        if self._read_type == Reply.LoadFailed:
@@ -243,55 +400,16 @@ class CommKernel:

    def run(self):
        self._write_empty(Request.RunKernel)
+        self._flush()
        logger.debug("running kernel")

    _rpc_sentinel = object()

-    # See session.c:{send,receive}_rpc_value and llvm_ir_generator.py:_rpc_tag.
+    # See rpc_proto.rs and compiler/ir.py:rpc_tag.
    def _receive_rpc_value(self, embedding_map):
        tag = chr(self._read_int8())
-        if tag == "\x00":
-            return self._rpc_sentinel
+        if tag in receivers:
+            return receivers.get(tag)(self, embedding_map)
-        elif tag == "t":
-            length = self._read_int8()
-            return tuple(self._receive_rpc_value(embedding_map) for _ in range(length))
-        elif tag == "n":
-            return None
-        elif tag == "b":
-            return bool(self._read_int8())
-        elif tag == "i":
-            return numpy.int32(self._read_int32())
-        elif tag == "I":
-            return numpy.int64(self._read_int64())
-        elif tag == "f":
-            return self._read_float64()
-        elif tag == "F":
-            numerator = self._read_int64()
-            denominator = self._read_int64()
-            return Fraction(numerator, denominator)
-        elif tag == "s":
-            return self._read_string()
-        elif tag == "B":
-            return self._read_bytes()
-        elif tag == "A":
-            return self._read_bytes()
-        elif tag == "l":
-            length = self._read_int32()
-            return [self._receive_rpc_value(embedding_map) for _ in range(length)]
-        elif tag == "a":
-            length = self._read_int32()
-            return numpy.array([self._receive_rpc_value(embedding_map) for _ in range(length)])
-        elif tag == "r":
-            start = self._receive_rpc_value(embedding_map)
-            stop = self._receive_rpc_value(embedding_map)
-            step = self._receive_rpc_value(embedding_map)
-            return range(start, stop, step)
-        elif tag == "k":
-            name = self._read_string()
-            value = self._receive_rpc_value(embedding_map)
-            return RPCKeyword(name, value)
-        elif tag == "O":
-            return embedding_map.retrieve_object(self._read_int32())
        else:
            raise IOError("Unknown RPC value tag: {}".format(repr(tag)))
@@ -307,7 +425,7 @@ class CommKernel:
            args.append(value)

    def _skip_rpc_value(self, tags):
-        tag = tags.pop(0)
+        tag = chr(tags.pop(0))
        if tag == "t":
            length = tags.pop(0)
            for _ in range(length):

@@ -316,6 +434,9 @@ class CommKernel:
            self._skip_rpc_value(tags)
        elif tag == "r":
            self._skip_rpc_value(tags)
+        elif tag == "a":
+            _ndims = tags.pop(0)
+            self._skip_rpc_value(tags)
        else:
            pass

@@ -341,15 +462,15 @@ class CommKernel:
        elif tag == "b":
            check(isinstance(value, bool),
                  lambda: "bool")
-            self._write_int8(value)
+            self._write_bool(value)
        elif tag == "i":
            check(isinstance(value, (int, numpy.int32)) and
-                  (-2**31 < value < 2**31-1),
+                  (-2**31 <= value < 2**31),
                  lambda: "32-bit int")
            self._write_int32(value)
        elif tag == "I":
            check(isinstance(value, (int, numpy.int32, numpy.int64)) and
-                  (-2**63 < value < 2**63-1),
+                  (-2**63 <= value < 2**63),
                  lambda: "64-bit int")
            self._write_int64(value)
        elif tag == "f":

@@ -358,8 +479,8 @@ class CommKernel:
            self._write_float64(value)
        elif tag == "F":
            check(isinstance(value, Fraction) and
-                  (-2**63 < value.numerator < 2**63-1) and
-                  (-2**63 < value.denominator < 2**63-1),
+                  (-2**63 <= value.numerator < 2**63) and
+                  (-2**63 <= value.denominator < 2**63),
                  lambda: "64-bit Fraction")
            self._write_int64(value.numerator)
            self._write_int64(value.denominator)
@@ -379,9 +500,58 @@ class CommKernel:
            check(isinstance(value, list),
                  lambda: "list")
            self._write_int32(len(value))
-            for elt in value:
-                tags_copy = bytearray(tags)
-                self._send_rpc_value(tags_copy, elt, root, function)
+            tag_element = chr(tags[0])
+            if tag_element == "b":
+                self._write(bytes(value))
+            elif tag_element == "i":
+                try:
+                    self._write(struct.pack(self.endian + "%sl" % len(value), *value))
+                except struct.error:
+                    raise RPCReturnValueError(
+                        "type mismatch: cannot serialize {value} as {type}".format(
+                            value=repr(value), type="32-bit integer list"))
+            elif tag_element == "I":
+                try:
+                    self._write(struct.pack(self.endian + "%sq" % len(value), *value))
+                except struct.error:
+                    raise RPCReturnValueError(
+                        "type mismatch: cannot serialize {value} as {type}".format(
+                            value=repr(value), type="64-bit integer list"))
+            elif tag_element == "f":
+                self._write(struct.pack(self.endian + "%sd" %
+                                        len(value), *value))
+            else:
+                for elt in value:
+                    tags_copy = bytearray(tags)
+                    self._send_rpc_value(tags_copy, elt, root, function)
+            self._skip_rpc_value(tags)
+        elif tag == "a":
+            check(isinstance(value, numpy.ndarray),
+                  lambda: "numpy.ndarray")
+            num_dims = tags.pop(0)
+            check(num_dims == len(value.shape),
+                  lambda: "{}-dimensional numpy.ndarray".format(num_dims))
+            for s in value.shape:
+                self._write_int32(s)
+            tag_element = chr(tags[0])
+            if tag_element == "b":
+                self._write(value.reshape((-1,), order="C").tobytes())
+            elif tag_element == "i":
+                array = value.reshape(
+                    (-1,), order="C").astype(self.endian + 'i4')
+                self._write(array.tobytes())
+            elif tag_element == "I":
+                array = value.reshape(
+                    (-1,), order="C").astype(self.endian + 'i8')
+                self._write(array.tobytes())
+            elif tag_element == "f":
+                array = value.reshape(
+                    (-1,), order="C").astype(self.endian + 'd')
+                self._write(array.tobytes())
+            else:
+                for elt in value.reshape((-1,), order="C"):
+                    tags_copy = bytearray(tags)
+                    self._send_rpc_value(tags_copy, elt, root, function)
            self._skip_rpc_value(tags)
        elif tag == "r":
            check(isinstance(value, range),
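The list fast path added above serializes homogeneous elements with a single struct.pack call; a standalone illustration of that packing and of the error it converts into RPCReturnValueError:

import struct

endian = "<"               # placeholder for the negotiated endianness prefix
values = [1, 2, 3, 4]

# One call packs the whole 32-bit integer list, as in the "i" branch above.
payload = struct.pack(endian + "%sl" % len(values), *values)
print(payload.hex())       # 01000000020000000300000004000000

# An out-of-range element raises struct.error, which the new code converts
# into RPCReturnValueError.
try:
    struct.pack(endian + "1l", 2**40)
except struct.error as exn:
    print("rejected:", exn)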
@@ -403,59 +573,59 @@ class CommKernel:
        return msg

    def _serve_rpc(self, embedding_map):
-        async = self._read_bool()
+        is_async = self._read_bool()
        service_id = self._read_int32()
        args, kwargs = self._receive_rpc_args(embedding_map)
        return_tags = self._read_bytes()

-        if service_id is 0:
+        if service_id == 0:
-            service = lambda obj, attr, value: setattr(obj, attr, value)
+            def service(obj, attr, value): return setattr(obj, attr, value)
        else:
            service = embedding_map.retrieve_object(service_id)
        logger.debug("rpc service: [%d]%r%s %r %r -> %s", service_id, service,
-                     (" (async)" if async else ""), args, kwargs, return_tags)
+                     (" (async)" if is_async else ""), args, kwargs, return_tags)

-        if async:
+        if is_async:
            service(*args, **kwargs)
            return

        try:
            result = service(*args, **kwargs)
-            logger.debug("rpc service: %d %r %r = %r", service_id, args, kwargs, result)
-
-            self._write_header(Request.RPCReply)
-            self._write_bytes(return_tags)
-            self._send_rpc_value(bytearray(return_tags), result, result, service)
        except RPCReturnValueError as exn:
            raise
        except Exception as exn:
-            logger.debug("rpc service: %d %r %r ! %r", service_id, args, kwargs, exn)
+            logger.debug("rpc service: %d %r %r ! %r",
+                         service_id, args, kwargs, exn)

            self._write_header(Request.RPCException)

+            # Note: instead of sending strings, we send object ID
+            # This is to avoid the need of allocatio on the device side
+            # This is a special case: this only applies to exceptions
            if hasattr(exn, "artiq_core_exception"):
                exn = exn.artiq_core_exception
-                self._write_string(exn.name)
+                self._write_int32(embedding_map.store_str(exn.name))
-                self._write_string(self._truncate_message(exn.message))
+                self._write_int32(embedding_map.store_str(self._truncate_message(exn.message)))
                for index in range(3):
                    self._write_int64(exn.param[index])

                filename, line, column, function = exn.traceback[-1]
-                self._write_string(filename)
+                self._write_int32(embedding_map.store_str(filename))
                self._write_int32(line)
                self._write_int32(column)
-                self._write_string(function)
+                self._write_int32(embedding_map.store_str(function))
            else:
                exn_type = type(exn)
                if exn_type in (ZeroDivisionError, ValueError, IndexError, RuntimeError) or \
                        hasattr(exn, "artiq_builtin"):
-                    self._write_string("0:{}".format(exn_type.__name__))
+                    name = "0:{}".format(exn_type.__name__)
                else:
                    exn_id = embedding_map.store_object(exn_type)
-                    self._write_string("{}:{}.{}".format(exn_id,
+                    name = "{}:{}.{}".format(exn_id,
                                             exn_type.__module__,
-                                             exn_type.__qualname__))
+                                             exn_type.__qualname__)
-                self._write_string(self._truncate_message(str(exn)))
+                self._write_int32(embedding_map.store_str(name))
+                self._write_int32(embedding_map.store_str(self._truncate_message(str(exn))))
                for index in range(3):
                    self._write_int64(0)
@@ -466,36 +636,93 @@ class CommKernel:
                ((filename, line, function, _), ) = tb
            else:
                assert False
-            self._write_string(filename)
+            self._write_int32(embedding_map.store_str(filename))
            self._write_int32(line)
            self._write_int32(-1) # column not known
-            self._write_string(function)
+            self._write_int32(embedding_map.store_str(function))
+            self._flush()
+        else:
+            logger.debug("rpc service: %d %r %r = %r",
+                         service_id, args, kwargs, result)
+            self._write_header(Request.RPCReply)
+            self._write_bytes(return_tags)
+            self._send_rpc_value(bytearray(return_tags),
+                                 result, result, service)
+            self._flush()

    def _serve_exception(self, embedding_map, symbolizer, demangler):
-        name = self._read_string()
+        exception_count = self._read_int32()
-        message = self._read_string()
+        nested_exceptions = []
-        params = [self._read_int64() for _ in range(3)]

-        filename = self._read_string()
+        def read_exception_string():
-        line = self._read_int32()
+            # note: if length == -1, the following int32 is the object key
-        column = self._read_int32()
+            length = self._read_int32()
-        function = self._read_string()
+            if length == -1:
+                return embedding_map.retrieve_str(self._read_int32())
+            else:
+                return self._read(length).decode("utf-8")

-        backtrace = [self._read_int32() for _ in range(self._read_int32())]
+        for _ in range(exception_count):
+            name = embedding_map.retrieve_str(self._read_int32())
+            message = read_exception_string()
+            params = [self._read_int64() for _ in range(3)]

-        traceback = list(reversed(symbolizer(backtrace))) + \
-            [(filename, line, column, *demangler([function]), None)]
-        core_exn = exceptions.CoreException(name, message, params, traceback)
+            filename = read_exception_string()
+            line = self._read_int32()
+            column = self._read_int32()
+            function = read_exception_string()
+            nested_exceptions.append([name, message, params,
+                                      filename, line, column, function])

+        demangled_names = demangler([ex[6] for ex in nested_exceptions])
+        for i in range(exception_count):
+            nested_exceptions[i][6] = demangled_names[i]

+        exception_info = []
+        for _ in range(exception_count):
+            sp = self._read_int32()
+            initial_backtrace = self._read_int32()
+            current_backtrace = self._read_int32()
+            exception_info.append((sp, initial_backtrace, current_backtrace))

+        backtrace = []
+        stack_pointers = []
+        for _ in range(self._read_int32()):
+            backtrace.append(self._read_int32())
+            stack_pointers.append(self._read_int32())

+        self._process_async_error()

+        traceback = list(symbolizer(backtrace))
+        core_exn = exceptions.CoreException(nested_exceptions, exception_info,
+                                            traceback, stack_pointers)

        if core_exn.id == 0:
            python_exn_type = getattr(exceptions, core_exn.name.split('.')[-1])
        else:
            python_exn_type = embedding_map.retrieve_object(core_exn.id)

-        python_exn = python_exn_type(message.format(*params))
+        try:
+            python_exn = python_exn_type(
+                nested_exceptions[-1][1].format(*nested_exceptions[0][2]))
+        except Exception as ex:
+            python_exn = RuntimeError(
+                f"Exception type={python_exn_type}, which couldn't be "
+                f"reconstructed ({ex})"
+            )
        python_exn.artiq_core_exception = core_exn
        raise python_exn

+    def _process_async_error(self):
+        errors = self._read_int8()
+        if errors > 0:
+            map_name = lambda y, z: [f"{y}(s)"] if z else []
+            errors = map_name("collision", errors & 2 ** 0) + \
+                     map_name("busy error", errors & 2 ** 1) + \
+                     map_name("sequence error", errors & 2 ** 2)
+            logger.warning(f"{(', '.join(errors[:-1]) + ' and ') if len(errors) > 1 else ''}{errors[-1]} "
+                           f"reported during kernel execution")
+
    def serve(self, embedding_map, symbolizer, demangler):
        while True:
            self._read_header()
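The async-error byte decoded by _process_async_error() above is a three-bit mask (bit 0 collision, bit 1 busy error, bit 2 sequence error); a simplified worked decode of one example value:

# Worked decode of an example async-error byte (the joining of names is simplified).
errors = 0b101        # example: collision (bit 0) and sequence error (bit 2)
names = [name for bit, name in enumerate(("collision", "busy error", "sequence error"))
         if errors & (1 << bit)]
print(", ".join(names) + " reported during kernel execution")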
@@ -503,10 +730,9 @@ class CommKernel:
                self._serve_rpc(embedding_map)
            elif self._read_type == Reply.KernelException:
                self._serve_exception(embedding_map, symbolizer, demangler)
-            elif self._read_type == Reply.WatchdogExpired:
-                raise exceptions.WatchdogExpired
            elif self._read_type == Reply.ClockFailure:
                raise exceptions.ClockFailure
            else:
                self._read_expect(Reply.KernelFinished)
+                self._process_async_error()
                return
@@ -1,10 +1,8 @@
from enum import Enum
import logging
-import socket
import struct

-from artiq.coredevice.comm import initialize_connection
+from sipyco.keepalive import create_connection


logger = logging.getLogger(__name__)


@@ -21,11 +19,6 @@ class Request(Enum):
    ConfigRemove = 14
    ConfigErase = 15

-    StartProfiler = 9
-    StopProfiler = 10
-    GetProfile = 11
-
-    Hotswap = 4
    Reboot = 5

    DebugAllocator = 8

@@ -40,8 +33,6 @@ class Reply(Enum):

    ConfigData = 7

-    Profile = 5
-
    RebootImminent = 3


@@ -59,11 +50,18 @@ class CommMgmt:
        self.host = host
        self.port = port

-    def open(self, **kwargs):
+    def open(self):
        if hasattr(self, "socket"):
            return
-        self.socket = initialize_connection(self.host, self.port, **kwargs)
+        self.socket = create_connection(self.host, self.port)
        self.socket.sendall(b"ARTIQ management\n")
+        endian = self._read(1)
+        if endian == b"e":
+            self.endian = "<"
+        elif endian == b"E":
+            self.endian = ">"
+        else:
+            raise IOError("Incorrect reply from device: expected e/E.")

    def close(self):
        if not hasattr(self, "socket"):

@@ -87,7 +85,7 @@ class CommMgmt:
        self._write(struct.pack("B", value))

    def _write_int32(self, value):
-        self._write(struct.pack(">l", value))
+        self._write(struct.pack(self.endian + "l", value))

    def _write_bytes(self, value):
        self._write_int32(len(value))

@@ -112,12 +110,13 @@ class CommMgmt:
        return ty

    def _read_expect(self, ty):
-        if self._read_header() != ty:
+        header = self._read_header()
+        if header != ty:
            raise IOError("Incorrect reply from device: {} (expected {})".
-                          format(self._read_type, ty))
+                          format(header, ty))

    def _read_int32(self):
-        (value, ) = struct.unpack(">l", self._read(4))
+        (value, ) = struct.unpack(self.endian + "l", self._read(4))
        return value

    def _read_bytes(self):

@@ -161,7 +160,12 @@ class CommMgmt:
    def config_read(self, key):
        self._write_header(Request.ConfigRead)
        self._write_string(key)
-        self._read_expect(Reply.ConfigData)
+        ty = self._read_header()
+        if ty == Reply.Error:
+            raise IOError("Device failed to read config. The key may not exist.")
+        elif ty != Reply.ConfigData:
+            raise IOError("Incorrect reply from device: {} (expected {})".
+                          format(ty, Reply.ConfigData))
        return self._read_string()

    def config_write(self, key, value):

@@ -170,7 +174,7 @@ class CommMgmt:
        self._write_bytes(value)
        ty = self._read_header()
        if ty == Reply.Error:
-            raise IOError("Flash storage is full")
+            raise IOError("Device failed to write config. More information may be available in the log.")
        elif ty != Reply.Success:
            raise IOError("Incorrect reply from device: {} (expected {})".
                          format(ty, Reply.Success))

@@ -184,45 +188,6 @@ class CommMgmt:
        self._write_header(Request.ConfigErase)
        self._read_expect(Reply.Success)

-    def start_profiler(self, interval, edges_size, hits_size):
-        self._write_header(Request.StartProfiler)
-        self._write_int32(interval)
-        self._write_int32(edges_size)
-        self._write_int32(hits_size)
-        self._read_expect(Reply.Success)
-
-    def stop_profiler(self):
-        self._write_header(Request.StopProfiler)
-        self._read_expect(Reply.Success)
-
-    def stop_profiler(self):
-        self._write_header(Request.StopProfiler)
-        self._read_expect(Reply.Success)
-
-    def get_profile(self):
-        self._write_header(Request.GetProfile)
-        self._read_expect(Reply.Profile)
-
-        hits = {}
-        for _ in range(self._read_int32()):
-            addr = self._read_int32()
-            count = self._read_int32()
-            hits[addr] = count
-
-        edges = {}
-        for _ in range(self._read_int32()):
-            caller = self._read_int32()
-            callee = self._read_int32()
-            count = self._read_int32()
-            edges[(caller, callee)] = count
-
-        return hits, edges
-
-    def hotswap(self, firmware):
-        self._write_header(Request.Hotswap)
-        self._write_bytes(firmware)
-        self._read_expect(Reply.RebootImminent)
-
    def reboot(self):
        self._write_header(Request.Reboot)
        self._read_expect(Reply.RebootImminent)
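With the config_read change above, a missing key now surfaces as an IOError instead of a protocol error; a hedged usage sketch (the host name is a placeholder):

# Hedged sketch, assuming an installed ARTIQ; "kasli.lan" is a placeholder host.
from artiq.coredevice.comm_mgmt import CommMgmt

mgmt = CommMgmt("kasli.lan")
try:
    print(mgmt.config_read("idle_kernel"))
except OSError as exn:      # IOError is an alias of OSError
    print("config read failed:", exn)
finally:
    mgmt.close()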
@@ -3,6 +3,7 @@ import logging
import struct
from enum import Enum

+from sipyco.keepalive import async_open_connection

__all__ = ["TTLProbe", "TTLOverride", "CommMonInj"]


@@ -28,7 +29,14 @@ class CommMonInj:
        self.disconnect_cb = disconnect_cb

    async def connect(self, host, port=1383):
-        self._reader, self._writer = await asyncio.open_connection(host, port)
+        self._reader, self._writer = await async_open_connection(
+            host,
+            port,
+            after_idle=1,
+            interval=1,
+            max_fails=3,
+        )

        try:
            self._writer.write(b"ARTIQ moninj\n")
            self._receive_task = asyncio.ensure_future(self._receive_cr())

@@ -38,6 +46,9 @@ class CommMonInj:
            del self._writer
            raise

+    def wait_terminate(self):
+        return self._receive_task
+
    async def close(self):
        self.disconnect_cb = None
        try:

@@ -52,19 +63,19 @@ class CommMonInj:
        del self._writer

    def monitor_probe(self, enable, channel, probe):
-        packet = struct.pack(">bblb", 0, enable, channel, probe)
+        packet = struct.pack("<bblb", 0, enable, channel, probe)
        self._writer.write(packet)

    def monitor_injection(self, enable, channel, overrd):
-        packet = struct.pack(">bblb", 3, enable, channel, overrd)
+        packet = struct.pack("<bblb", 3, enable, channel, overrd)
        self._writer.write(packet)

    def inject(self, channel, override, value):
-        packet = struct.pack(">blbb", 1, channel, override, value)
+        packet = struct.pack("<blbb", 1, channel, override, value)
        self._writer.write(packet)

    def get_injection_status(self, channel, override):
-        packet = struct.pack(">blb", 2, channel, override)
+        packet = struct.pack("<blb", 2, channel, override)
        self._writer.write(packet)

    async def _receive_cr(self):

@@ -74,15 +85,19 @@ class CommMonInj:
                if not ty:
                    return
                if ty == b"\x00":
-                    payload = await self._reader.read(9)
+                    payload = await self._reader.readexactly(13)
-                    channel, probe, value = struct.unpack(">lbl", payload)
+                    channel, probe, value = struct.unpack("<lbq", payload)
                    self.monitor_cb(channel, probe, value)
                elif ty == b"\x01":
-                    payload = await self._reader.read(6)
+                    payload = await self._reader.readexactly(6)
-                    channel, override, value = struct.unpack(">lbb", payload)
+                    channel, override, value = struct.unpack("<lbb", payload)
                    self.injection_status_cb(channel, override, value)
                else:
                    raise ValueError("Unknown packet type", ty)
+        except asyncio.CancelledError:
+            raise
+        except:
+            logger.error("Moninj connection terminating with exception", exc_info=True)
        finally:
            if self.disconnect_cb is not None:
                self.disconnect_cb()
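The moninj packets above switch to explicit little-endian layouts and widen monitor values to 64 bits; the new payload sizes can be checked directly:

import struct

# Monitor-status payload: channel (int32), probe (int8), value (int64), little-endian.
print(struct.calcsize("<lbq"))   # 13, matching readexactly(13) above
# Injection-status payload: channel (int32), override (int8), value (int8).
print(struct.calcsize("<lbb"))   # 6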
@@ -1,5 +1,7 @@
import os, sys
import numpy
+from inspect import getfullargspec
+from functools import wraps

from pythonparser import diagnostic


@@ -11,7 +13,7 @@ from artiq.language.units import *

from artiq.compiler.module import Module
from artiq.compiler.embedding import Stitcher
-from artiq.compiler.targets import OR1KTarget
+from artiq.compiler.targets import RV32IMATarget, RV32GTarget, CortexA9Target

from artiq.coredevice.comm_kernel import CommKernel, CommKernelDummy
# Import for side effects (creating the exception classes).
@@ -44,14 +46,25 @@ def rtio_init() -> TNone:
    raise NotImplementedError("syscall not simulated")

@syscall(flags={"nounwind", "nowrite"})
-def rtio_get_counter() -> TInt64:
+def rtio_get_destination_status(linkno: TInt32) -> TBool:
    raise NotImplementedError("syscall not simulated")

@syscall(flags={"nounwind", "nowrite"})
-def drtio_get_link_status(linkno: TInt32) -> TBool:
+def rtio_get_counter() -> TInt64:
    raise NotImplementedError("syscall not simulated")


+def get_target_cls(target):
+    if target == "rv32g":
+        return RV32GTarget
+    elif target == "rv32ima":
+        return RV32IMATarget
+    elif target == "cortexa9":
+        return CortexA9Target
+    else:
+        raise ValueError("Unsupported target")
+
+
class Core:
    """Core device driver.
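A brief sketch of how the new get_target_cls() helper above might be exercised (assumes an ARTIQ checkout with the listed compiler targets available):

# Hedged sketch: map the device_db "target" string to a compiler target class.
from artiq.coredevice.core import get_target_cls

target_cls = get_target_cls("rv32g")    # same strings as accepted above
print(target_cls.__name__)              # RV32GTarget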
@@ -65,73 +78,164 @@ class Core:
    :param ref_multiplier: ratio between the RTIO fine timestamp frequency
        and the RTIO coarse timestamp frequency (e.g. SERDES multiplication
        factor).
+    :param analyzer_proxy: name of the core device analyzer proxy to trigger
+        (optional).
+    :param analyze_at_run_end: automatically trigger the core device analyzer
+        proxy after the Experiment's run stage finishes.
    """

    kernel_invariants = {
        "core", "ref_period", "coarse_ref_period", "ref_multiplier",
    }

-    def __init__(self, dmgr, host, ref_period, ref_multiplier=8):
+    def __init__(self, dmgr,
+                 host, ref_period,
+                 analyzer_proxy=None, analyze_at_run_end=False,
+                 ref_multiplier=8,
+                 target="rv32g", satellite_cpu_targets={}):
        self.ref_period = ref_period
        self.ref_multiplier = ref_multiplier
+        self.satellite_cpu_targets = satellite_cpu_targets
+        self.target_cls = get_target_cls(target)
        self.coarse_ref_period = ref_period*ref_multiplier
        if host is None:
            self.comm = CommKernelDummy()
        else:
            self.comm = CommKernel(host)
+        self.analyzer_proxy_name = analyzer_proxy
+        self.analyze_at_run_end = analyze_at_run_end

        self.first_run = True
        self.dmgr = dmgr
        self.core = self
        self.comm.core = self
+        self.analyzer_proxy = None
+
+    def notify_run_end(self):
+        if self.analyze_at_run_end:
+            self.trigger_analyzer_proxy()

    def close(self):
        self.comm.close()

    def compile(self, function, args, kwargs, set_result=None,
-                attribute_writeback=True, print_as_rpc=True):
+                attribute_writeback=True, print_as_rpc=True,
+                target=None, destination=0, subkernel_arg_types=[]):
        try:
            engine = _DiagnosticEngine(all_errors_are_fatal=True)

            stitcher = Stitcher(engine=engine, core=self, dmgr=self.dmgr,
-                                print_as_rpc=print_as_rpc)
+                                print_as_rpc=print_as_rpc,
+                                destination=destination, subkernel_arg_types=subkernel_arg_types)
            stitcher.stitch_call(function, args, kwargs, set_result)
            stitcher.finalize()

            module = Module(stitcher,
                            ref_period=self.ref_period,
                            attribute_writeback=attribute_writeback)
-            target = OR1KTarget()
+            target = target if target is not None else self.target_cls()

            library = target.compile_and_link([module])
            stripped_library = target.strip(library)

            return stitcher.embedding_map, stripped_library, \
                lambda addresses: target.symbolize(library, addresses), \
-                lambda symbols: target.demangle(symbols)
+                lambda symbols: target.demangle(symbols), \
+                module.subkernel_arg_types
        except diagnostic.Error as error:
            raise CompileError(error.diagnostic) from error

+    def _run_compiled(self, kernel_library, embedding_map, symbolizer, demangler):
+        if self.first_run:
+            self.comm.check_system_info()
+            self.first_run = False
+        self.comm.load(kernel_library)
+        self.comm.run()
+        self.comm.serve(embedding_map, symbolizer, demangler)
+
    def run(self, function, args, kwargs):
        result = None
        @rpc(flags={"async"})
        def set_result(new_result):
            nonlocal result
            result = new_result

+        embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
-        embedding_map, kernel_library, symbolizer, demangler = \
            self.compile(function, args, kwargs, set_result)
+        self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
-        if self.first_run:
+        self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
-            self.comm.check_system_info()
-            self.first_run = False
-
-        self.comm.load(kernel_library)
-        self.comm.run()
-        self.comm.serve(embedding_map, symbolizer, demangler)

        return result

+    def compile_subkernel(self, sid, subkernel_fn, embedding_map, args, subkernel_arg_types):
+        # pass self to subkernels (if applicable)
+        # assuming the first argument is self
+        subkernel_args = getfullargspec(subkernel_fn.artiq_embedded.function)
+        self_arg = []
+        if len(subkernel_args[0]) > 0:
+            if subkernel_args[0][0] == 'self':
+                self_arg = args[:1]
+        destination = subkernel_fn.artiq_embedded.destination
+        destination_tgt = self.satellite_cpu_targets[destination]
+        target = get_target_cls(destination_tgt)(subkernel_id=sid)
+        object_map, kernel_library, _, _, _ = \
+            self.compile(subkernel_fn, self_arg, {}, attribute_writeback=False,
+                         print_as_rpc=False, target=target, destination=destination,
+                         subkernel_arg_types=subkernel_arg_types.get(sid, []))
+        if object_map.has_rpc_or_subkernel():
+            raise ValueError("Subkernel must not use RPC or subkernels in other destinations")
+        return destination, kernel_library
+
+    def compile_and_upload_subkernels(self, embedding_map, args, subkernel_arg_types):
+        for sid, subkernel_fn in embedding_map.subkernels().items():
+            destination, kernel_library = \
+                self.compile_subkernel(sid, subkernel_fn, embedding_map,
+                                       args, subkernel_arg_types)
+            self.comm.upload_subkernel(kernel_library, sid, destination)
+
+    def precompile(self, function, *args, **kwargs):
+        """Precompile a kernel and return a callable that executes it on the core device
+        at a later time.
+
+        Arguments to the kernel are set at compilation time and passed to this function,
+        as additional positional and keyword arguments.
+        The returned callable accepts no arguments.
+
+        Precompiled kernels may use RPCs and subkernels.
+
+        Object attributes at the beginning of a precompiled kernel execution have the
+        values they had at precompilation time. If up-to-date values are required,
+        use RPC to read them.
+        Similarly, modified values are not written back, and explicit RPC should be used
+        to modify host objects.
+        Carefully review the source code of drivers calls used in precompiled kernels, as
+        they may rely on host object attributes being transfered between kernel calls.
+        Examples include code used to control DDS phase, and Urukul RF switch control
+        via the CPLD register.
+
+        The return value of the callable is the return value of the kernel, if any.
+
+        The callable may be called several times.
+        """
+        if not hasattr(function, "artiq_embedded"):
+            raise ValueError("Argument is not a kernel")
+
+        result = None
+        @rpc(flags={"async"})
+        def set_result(new_result):
+            nonlocal result
+            result = new_result
+
+        embedding_map, kernel_library, symbolizer, demangler, subkernel_arg_types = \
+            self.compile(function, args, kwargs, set_result, attribute_writeback=False)
+        self.compile_and_upload_subkernels(embedding_map, args, subkernel_arg_types)
+
+        @wraps(function)
+        def run_precompiled():
+            nonlocal result
+            self._run_compiled(kernel_library, embedding_map, symbolizer, demangler)
+            return result
+
+        return run_precompiled
+
    @portable
    def seconds_to_mu(self, seconds):
        """Convert seconds to the corresponding number of machine units
@ -174,12 +278,11 @@ class Core:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def get_drtio_link_status(self, linkno):
|
def get_rtio_destination_status(self, destination):
|
||||||
"""Return whether the specified DRTIO link is up.
|
"""Returns whether the specified RTIO destination is up.
|
||||||
|
|
||||||
This is particularly useful in startup kernels to delay
|
This is particularly useful in startup kernels to delay
|
||||||
startup until certain DRTIO links are up."""
|
startup until certain DRTIO destinations are up."""
|
||||||
return drtio_get_link_status(linkno)
|
return rtio_get_destination_status(destination)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def reset(self):
|
def reset(self):
|
||||||
|
@ -199,3 +302,23 @@ class Core:
|
||||||
min_now = rtio_get_counter() + 125000
|
min_now = rtio_get_counter() + 125000
|
||||||
if now_mu() < min_now:
|
if now_mu() < min_now:
|
||||||
at_mu(min_now)
|
at_mu(min_now)
|
||||||
|
|
||||||
|
def trigger_analyzer_proxy(self):
|
||||||
|
"""Causes the core analyzer proxy to retrieve a dump from the device,
|
||||||
|
and distribute it to all connected clients (typically dashboards).
|
||||||
|
|
||||||
|
Returns only after the dump has been retrieved from the device.
|
||||||
|
|
||||||
|
Raises IOError if no analyzer proxy has been configured, or if the
|
||||||
|
analyzer proxy fails. In the latter case, more details would be
|
||||||
|
available in the proxy log.
|
||||||
|
"""
|
||||||
|
if self.analyzer_proxy is None:
|
||||||
|
if self.analyzer_proxy_name is not None:
|
||||||
|
self.analyzer_proxy = self.dmgr.get(self.analyzer_proxy_name)
|
||||||
|
if self.analyzer_proxy is None:
|
||||||
|
raise IOError("No analyzer proxy configured")
|
||||||
|
else:
|
||||||
|
success = self.analyzer_proxy.trigger()
|
||||||
|
if not success:
|
||||||
|
raise IOError("Analyzer proxy reported failure")
|
||||||
|
|
|
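To illustrate the precompile API shown above, here is a minimal host-side sketch. The experiment class, device names and the pulse kernel are hypothetical; only ``Core.precompile`` itself comes from the code above::

    from artiq.experiment import EnvExperiment, kernel, us

    class Flash(EnvExperiment):
        def build(self):
            self.setattr_device("core")
            self.setattr_device("ttl0")   # hypothetical TTL output

        @kernel
        def pulse(self, width):
            self.ttl0.pulse(width)

        def run(self):
            # Compile once on the host; the argument is frozen at compile time.
            flash_10us = self.core.precompile(self.pulse, 10*us)
            # The returned callable takes no arguments and may be reused.
            for _ in range(3):
                flash_10us()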
@@ -0,0 +1,641 @@
{
  "$id": "https://m-labs.hk/kasli_generic.schema.json",
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Kasli variant description",

  "type": "object",
  "properties": {
    "_description": { "type": "string", "description": "Free-form description text" },
    "target": { "type": "string", "description": "Target board" },
    "variant": { "type": "string", "description": "Target board variant name" },
    "min_artiq_version": { "type": "string", "description": "Minimum required ARTIQ version", "default": "0" },
    "hw_rev": { "type": "string", "description": "Hardware revision" },
    "base": {
      "type": "string",
      "enum": ["use_drtio_role", "standalone", "master", "satellite"],
      "description": "Deprecated, use drtio_role instead",
      "default": "use_drtio_role"
    },
    "drtio_role": {
      "type": "string",
      "enum": ["standalone", "master", "satellite"],
      "description": "Role that this device takes in a DRTIO network; 'standalone' means no DRTIO",
      "default": "standalone"
    },
    "ext_ref_frequency": { "type": "number", "exclusiveMinimum": 0, "description": "External reference frequency" },
    "rtio_frequency": { "type": "number", "exclusiveMinimum": 0, "default": 125e6, "description": "RTIO frequency" },
    "core_addr": { "type": "string", "format": "ipv4", "description": "IPv4 address", "default": "192.168.1.70" },
    "vendor": { "type": "string", "description": "Board vendor" },
    "eui48": {
      "type": "array",
      "items": { "type": "string", "pattern": "^([0-9a-f]{2}-){5}[0-9a-f]{2}$", "examples": ["80-1f-12-4c-22-7f"] },
      "description": "Ethernet MAC addresses"
    },
    "enable_sata_drtio": { "type": "boolean", "default": false },
    "sed_lanes": { "type": "number", "minimum": 1, "maximum": 32, "default": 8, "description": "Number of FIFOs in the SED, must be a power of 2" },
    "peripherals": { "type": "array", "items": { "$ref": "#/definitions/peripheral" } }
  },
  "if": {
    "properties": {
      "target": { "const": "kasli" },
      "hw_rev": { "not": { "oneOf": [{ "const": "v1.0" }, { "const": "v1.1" }] } }
    }
  },
  "then": { "properties": { "enable_sata_drtio": { "const": false } } },
  "required": ["target", "variant", "hw_rev", "base", "peripherals"],
  "additionalProperties": false,

  "oneOf": [
    { "properties": { "target": { "type": "string", "const": "kasli" }, "hw_rev": { "type": "string", "enum": ["v1.0", "v1.1", "v2.0"] } } },
    { "properties": { "target": { "type": "string", "const": "kasli_soc" }, "hw_rev": { "type": "string", "enum": ["v1.0", "v1.1"] } } }
  ],

  "definitions": {
    "peripheral": {
      "type": "object",
      "properties": {
        "type": { "type": "string", "enum": ["dio", "dio_spi", "urukul", "novogorny", "sampler", "suservo", "zotino", "grabber", "mirny", "fastino", "phaser", "hvamp", "shuttler"] },
        "board": { "type": "string" },
        "hw_rev": { "type": "string", "pattern": "^v[0-9]+\\.[0-9]+" }
      },
      "required": ["type"],
      "allOf": [{
        "title": "DIO",
        "if": { "properties": { "type": { "const": "dio" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 },
            "edge_counter": { "type": "boolean", "default": false },
            "bank_direction_low": { "type": "string", "enum": ["input", "output", "clkgen"] },
            "bank_direction_high": { "type": "string", "enum": ["input", "output", "clkgen"] }
          },
          "required": ["ports", "bank_direction_low", "bank_direction_high"]
        }
      }, {
        "title": "DIO_SPI",
        "if": { "properties": { "type": { "const": "dio_spi" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 },
            "spi": {
              "type": "array",
              "items": {
                "type": "object",
                "properties": {
                  "name": { "type": "string", "default": "dio_spi" },
                  "clk": { "type": "integer", "minimum": 0, "maximum": 7 },
                  "mosi": { "type": "integer", "minimum": 0, "maximum": 7 },
                  "miso": { "type": "integer", "minimum": 0, "maximum": 7 },
                  "cs": { "type": "array", "items": { "type": "integer", "minimum": 0, "maximum": 7 } }
                },
                "required": ["clk"]
              },
              "minItems": 1
            },
            "ttl": {
              "type": "array",
              "items": {
                "type": "object",
                "properties": {
                  "name": { "type": "string", "default": "ttl" },
                  "pin": { "type": "integer", "minimum": 0, "maximum": 7 },
                  "direction": { "type": "string", "enum": ["input", "output"] },
                  "edge_counter": { "type": "boolean", "default": false }
                },
                "required": ["pin", "direction"]
              },
              "default": []
            }
          },
          "required": ["ports", "spi"]
        }
      }, {
        "title": "Urukul",
        "if": { "properties": { "type": { "const": "urukul" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 2 },
            "synchronization": { "type": "boolean", "default": false },
            "refclk": { "type": "number", "minimum": 0 },
            "clk_sel": { "type": "integer", "minimum": 0, "maximum": 3 },
            "clk_div": { "type": "integer", "minimum": 0, "maximum": 3, "default": 0 },
            "pll_n": { "type": "integer" },
            "pll_en": { "type": "integer", "minimum": 0, "maximum": 1, "default": 1 },
            "pll_vco": { "type": "integer" },
            "dds": { "type": "string", "enum": ["ad9910", "ad9912"], "default": "ad9910" }
          },
          "required": ["ports"]
        }
      }, {
        "title": "Novogorny",
        "if": { "properties": { "type": { "const": "novogorny" } } },
        "then": {
          "properties": { "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 } },
          "required": ["ports"]
        }
      }, {
        "title": "Sampler",
        "if": { "properties": { "type": { "const": "sampler" } } },
        "then": {
          "properties": { "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 2 } },
          "required": ["ports"]
        }
      }, {
        "title": "SUServo",
        "if": { "properties": { "type": { "const": "suservo" } } },
        "then": {
          "properties": {
            "sampler_ports": { "type": "array", "items": { "type": "integer" }, "minItems": 2, "maxItems": 2 },
            "sampler_hw_rev": { "type": "string", "pattern": "^v[0-9]+\\.[0-9]+", "default": "v2.2" },
            "urukul0_ports": { "type": "array", "items": { "type": "integer" }, "minItems": 2, "maxItems": 2 },
            "urukul1_ports": { "type": "array", "items": { "type": "integer" }, "minItems": 2, "maxItems": 2 },
            "refclk": { "type": "number", "minimum": 0 },
            "clk_sel": { "type": "integer", "minimum": 0, "maximum": 3 },
            "pll_n": { "type": "integer", "default": 32 },
            "pll_en": { "type": "integer", "minimum": 0, "maximum": 1, "default": 1 },
            "pll_vco": { "type": "integer" }
          },
          "required": ["sampler_ports", "urukul0_ports"]
        }
      }, {
        "title": "Zotino",
        "if": { "properties": { "type": { "const": "zotino" } } },
        "then": {
          "properties": { "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 } },
          "required": ["ports"]
        }
      }, {
        "title": "Grabber",
        "if": { "properties": { "type": { "const": "grabber" } } },
        "then": {
          "properties": { "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 3 } },
          "required": ["ports"]
        }
      }, {
        "title": "Mirny",
        "if": { "properties": { "type": { "const": "mirny" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 },
            "refclk": { "type": "number", "exclusiveMinimum": 0, "default": 100e6 },
            "clk_sel": {
              "oneOf": [
                { "type": "integer", "minimum": 0, "maximum": 3 },
                { "type": "string", "enum": ["xo", "mmcx", "sma"] }
              ],
              "default": 0
            },
            "almazny": { "type": "boolean", "default": false },
            "almazny_hw_rev": { "type": "string", "pattern": "^v[0-9]+\\.[0-9]+", "default": "v1.2" }
          },
          "required": ["ports"]
        }
      }, {
        "title": "Fastino",
        "if": { "properties": { "type": { "const": "fastino" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 },
            "log2_width": { "type": "integer", "default": 0, "description": "Width of DAC channel group (logarithm base 2)" }
          },
          "required": ["ports"]
        }
      }, {
        "title": "Phaser",
        "if": { "properties": { "type": { "const": "phaser" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 },
            "mode": { "type": "string", "enum": ["base", "miqro"], "default": "base" }
          },
          "required": ["ports"]
        }
      }, {
        "title": "HVAmp",
        "if": { "properties": { "type": { "const": "hvamp" } } },
        "then": {
          "properties": { "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 1 } },
          "required": ["ports"]
        }
      }, {
        "title": "Shuttler",
        "if": { "properties": { "type": { "const": "shuttler" } } },
        "then": {
          "properties": {
            "ports": { "type": "array", "items": { "type": "integer" }, "minItems": 1, "maxItems": 2 },
            "drtio_destination": { "type": "integer" }
          },
          "required": ["ports"]
        }
      }]
    }
  }
}
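A minimal variant description that satisfies the schema above might look like the following. All concrete values (variant name, ports, clock selection) are made-up examples, not a shipped configuration::

    {
        "target": "kasli",
        "variant": "example",
        "hw_rev": "v2.0",
        "base": "standalone",
        "core_addr": "192.168.1.70",
        "peripherals": [
            {
                "type": "dio",
                "ports": [0],
                "bank_direction_low": "input",
                "bank_direction_high": "output"
            },
            {
                "type": "urukul",
                "ports": [1, 2],
                "clk_sel": 2
            }
        ]
    }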
@ -0,0 +1,277 @@
|
||||||
|
class DAC34H84:
|
||||||
|
"""DAC34H84 settings and register map.
|
||||||
|
|
||||||
|
For possible values, documentation, and explanation, see the DAC datasheet
|
||||||
|
at https://www.ti.com/lit/pdf/slas751
|
||||||
|
"""
|
||||||
|
qmc_corr_ena = 0 # msb ab
|
||||||
|
qmc_offset_ena = 0 # msb ab
|
||||||
|
invsinc_ena = 0 # msb ab
|
||||||
|
interpolation = 1 # 2x
|
||||||
|
fifo_ena = 1
|
||||||
|
alarm_out_ena = 1
|
||||||
|
alarm_out_pol = 1
|
||||||
|
clkdiv_sync_ena = 1
|
||||||
|
|
||||||
|
iotest_ena = 0
|
||||||
|
cnt64_ena = 0
|
||||||
|
oddeven_parity = 0 # even
|
||||||
|
single_parity_ena = 1
|
||||||
|
dual_parity_ena = 0
|
||||||
|
rev_interface = 0
|
||||||
|
dac_complement = 0b0000 # msb A
|
||||||
|
alarm_fifo = 0b111 # msb 2-away
|
||||||
|
|
||||||
|
dacclkgone_ena = 1
|
||||||
|
dataclkgone_ena = 1
|
||||||
|
collisiongone_ena = 1
|
||||||
|
sif4_ena = 1
|
||||||
|
mixer_ena = 0
|
||||||
|
mixer_gain = 1
|
||||||
|
nco_ena = 0
|
||||||
|
revbus = 0
|
||||||
|
twos = 1
|
||||||
|
|
||||||
|
coarse_dac = 9 # 18.75 mA, 0-15
|
||||||
|
sif_txenable = 0
|
||||||
|
|
||||||
|
mask_alarm_from_zerochk = 0
|
||||||
|
mask_alarm_fifo_collision = 0
|
||||||
|
mask_alarm_fifo_1away = 0
|
||||||
|
mask_alarm_fifo_2away = 0
|
||||||
|
mask_alarm_dacclk_gone = 0
|
||||||
|
mask_alarm_dataclk_gone = 0
|
||||||
|
mask_alarm_output_gone = 0
|
||||||
|
mask_alarm_from_iotest = 0
|
||||||
|
mask_alarm_from_pll = 0
|
||||||
|
mask_alarm_parity = 0b0000 # msb a
|
||||||
|
|
||||||
|
qmc_offseta = 0 # 12b
|
||||||
|
fifo_offset = 2 # 0-7
|
||||||
|
qmc_offsetb = 0 # 12b
|
||||||
|
|
||||||
|
qmc_offsetc = 0 # 12b
|
||||||
|
|
||||||
|
qmc_offsetd = 0 # 12b
|
||||||
|
|
||||||
|
qmc_gaina = 0 # 11b
|
||||||
|
|
||||||
|
cmix_fs8 = 0
|
||||||
|
cmix_fs4 = 0
|
||||||
|
cmix_fs2 = 0
|
||||||
|
cmix_nfs4 = 0
|
||||||
|
qmc_gainb = 0 # 11b
|
||||||
|
|
||||||
|
qmc_gainc = 0 # 11b
|
||||||
|
|
||||||
|
output_delayab = 0b00
|
||||||
|
output_delaycd = 0b00
|
||||||
|
qmc_gaind = 0 # 11b
|
||||||
|
|
||||||
|
qmc_phaseab = 0 # 12b
|
||||||
|
|
||||||
|
qmc_phasecd = 0 # 12b
|
||||||
|
|
||||||
|
phase_offsetab = 0 # 16b
|
||||||
|
phase_offsetcd = 0 # 16b
|
||||||
|
phase_addab_lsb = 0 # 16b
|
||||||
|
phase_addab_msb = 0 # 16b
|
||||||
|
phase_addcd_lsb = 0 # 16b
|
||||||
|
phase_addcd_msb = 0 # 16b
|
||||||
|
|
||||||
|
pll_reset = 0
|
||||||
|
pll_ndivsync_ena = 1
|
||||||
|
pll_ena = 1
|
||||||
|
pll_cp = 0b01 # single charge pump
|
||||||
|
pll_p = 0b100 # p=4
|
||||||
|
|
||||||
|
pll_m2 = 1 # x2
|
||||||
|
pll_m = 8 # m = 8
|
||||||
|
pll_n = 0b0001 # n = 2
|
||||||
|
pll_vcotune = 0b01
|
||||||
|
|
||||||
|
pll_vco = 0x3f # 4 GHz
|
||||||
|
bias_sleep = 0
|
||||||
|
tsense_sleep = 0
|
||||||
|
pll_sleep = 0
|
||||||
|
clkrecv_sleep = 0
|
||||||
|
dac_sleep = 0b0000 # msb a
|
||||||
|
|
||||||
|
extref_ena = 0
|
||||||
|
fuse_sleep = 1
|
||||||
|
atest = 0b00000 # atest mode
|
||||||
|
|
||||||
|
syncsel_qmcoffsetab = 0b1001 # sif_sync and register write
|
||||||
|
syncsel_qmcoffsetcd = 0b1001 # sif_sync and register write
|
||||||
|
syncsel_qmccorrab = 0b1001 # sif_sync and register write
|
||||||
|
syncsel_qmccorrcd = 0b1001 # sif_sync and register write
|
||||||
|
|
||||||
|
syncsel_mixerab = 0b1001 # sif_sync and register write
|
||||||
|
syncsel_mixercd = 0b1001 # sif_sync and register write
|
||||||
|
syncsel_nco = 0b1000 # sif_sync
|
||||||
|
syncsel_fifo_input = 0b10 # external lvds istr
|
||||||
|
sif_sync = 0
|
||||||
|
|
||||||
|
syncsel_fifoin = 0b0010 # istr
|
||||||
|
syncsel_fifoout = 0b0100 # ostr
|
||||||
|
clkdiv_sync_sel = 0 # ostr
|
||||||
|
|
||||||
|
path_a_sel = 0
|
||||||
|
path_b_sel = 1
|
||||||
|
path_c_sel = 2
|
||||||
|
path_d_sel = 3
|
||||||
|
# swap dac pairs (CDAB) for layout
|
||||||
|
# swap I-Q dacs for spectral inversion
|
||||||
|
dac_a_sel = 3
|
||||||
|
dac_b_sel = 2
|
||||||
|
dac_c_sel = 1
|
||||||
|
dac_d_sel = 0
|
||||||
|
|
||||||
|
dac_sleep_en = 0b1111 # msb a
|
||||||
|
clkrecv_sleep_en = 1
|
||||||
|
pll_sleep_en = 1
|
||||||
|
lvds_data_sleep_en = 1
|
||||||
|
lvds_control_sleep_en = 1
|
||||||
|
temp_sense_sleep_en = 1
|
||||||
|
bias_sleep_en = 1
|
||||||
|
|
||||||
|
data_dly = 2
|
||||||
|
clk_dly = 0
|
||||||
|
|
||||||
|
ostrtodig_sel = 0
|
||||||
|
ramp_ena = 0
|
||||||
|
sifdac_ena = 0
|
||||||
|
|
||||||
|
grp_delaya = 0x00
|
||||||
|
grp_delayb = 0x00
|
||||||
|
|
||||||
|
grp_delayc = 0x00
|
||||||
|
grp_delayd = 0x00
|
||||||
|
|
||||||
|
sifdac = 0
|
||||||
|
|
||||||
|
def __init__(self, updates=None):
|
||||||
|
if updates is None:
|
||||||
|
return
|
||||||
|
for key, value in updates.items():
|
||||||
|
if not hasattr(self, key):
|
||||||
|
raise KeyError("invalid setting", key)
|
||||||
|
setattr(self, key, value)
|
||||||
|
|
||||||
|
def get_mmap(self):
|
||||||
|
mmap = []
|
||||||
|
mmap.append(
|
||||||
|
(0x00 << 16) |
|
||||||
|
(self.qmc_offset_ena << 14) | (self.qmc_corr_ena << 12) |
|
||||||
|
(self.interpolation << 8) | (self.fifo_ena << 7) |
|
||||||
|
(self.alarm_out_ena << 4) | (self.alarm_out_pol << 3) |
|
||||||
|
(self.clkdiv_sync_ena << 2) | (self.invsinc_ena << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x01 << 16) |
|
||||||
|
(self.iotest_ena << 15) | (self.cnt64_ena << 12) |
|
||||||
|
(self.oddeven_parity << 11) | (self.single_parity_ena << 10) |
|
||||||
|
(self.dual_parity_ena << 9) | (self.rev_interface << 8) |
|
||||||
|
(self.dac_complement << 4) | (self.alarm_fifo << 1))
|
||||||
|
mmap.append(
|
||||||
|
(0x02 << 16) |
|
||||||
|
(self.dacclkgone_ena << 14) | (self.dataclkgone_ena << 13) |
|
||||||
|
(self.collisiongone_ena << 12) | (self.sif4_ena << 7) |
|
||||||
|
(self.mixer_ena << 6) | (self.mixer_gain << 5) |
|
||||||
|
(self.nco_ena << 4) | (self.revbus << 3) | (self.twos << 1))
|
||||||
|
mmap.append((0x03 << 16) | (self.coarse_dac << 12) |
|
||||||
|
(self.sif_txenable << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x07 << 16) |
|
||||||
|
(self.mask_alarm_from_zerochk << 15) | (1 << 14) |
|
||||||
|
(self.mask_alarm_fifo_collision << 13) |
|
||||||
|
(self.mask_alarm_fifo_1away << 12) |
|
||||||
|
(self.mask_alarm_fifo_2away << 11) |
|
||||||
|
(self.mask_alarm_dacclk_gone << 10) |
|
||||||
|
(self.mask_alarm_dataclk_gone << 9) |
|
||||||
|
(self.mask_alarm_output_gone << 8) |
|
||||||
|
(self.mask_alarm_from_iotest << 7) | (1 << 6) |
|
||||||
|
(self.mask_alarm_from_pll << 5) | (self.mask_alarm_parity << 1))
|
||||||
|
mmap.append(
|
||||||
|
(0x08 << 16) | (self.qmc_offseta << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x09 << 16) | (self.fifo_offset << 13) | (self.qmc_offsetb << 0))
|
||||||
|
mmap.append((0x0a << 16) | (self.qmc_offsetc << 0))
|
||||||
|
mmap.append((0x0b << 16) | (self.qmc_offsetd << 0))
|
||||||
|
mmap.append((0x0c << 16) | (self.qmc_gaina << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x0d << 16) |
|
||||||
|
(self.cmix_fs8 << 15) | (self.cmix_fs4 << 14) |
|
||||||
|
(self.cmix_fs2 << 13) | (self.cmix_nfs4 << 12) |
|
||||||
|
(self.qmc_gainb << 0))
|
||||||
|
mmap.append((0x0e << 16) | (self.qmc_gainc << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x0f << 16) |
|
||||||
|
(self.output_delayab << 14) | (self.output_delaycd << 12) |
|
||||||
|
(self.qmc_gaind << 0))
|
||||||
|
mmap.append((0x10 << 16) | (self.qmc_phaseab << 0))
|
||||||
|
mmap.append((0x11 << 16) | (self.qmc_phasecd << 0))
|
||||||
|
mmap.append((0x12 << 16) | (self.phase_offsetab << 0))
|
||||||
|
mmap.append((0x13 << 16) | (self.phase_offsetcd << 0))
|
||||||
|
mmap.append((0x14 << 16) | (self.phase_addab_lsb << 0))
|
||||||
|
mmap.append((0x15 << 16) | (self.phase_addab_msb << 0))
|
||||||
|
mmap.append((0x16 << 16) | (self.phase_addcd_lsb << 0))
|
||||||
|
mmap.append((0x17 << 16) | (self.phase_addcd_msb << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x18 << 16) |
|
||||||
|
(0b001 << 13) | (self.pll_reset << 12) |
|
||||||
|
(self.pll_ndivsync_ena << 11) | (self.pll_ena << 10) |
|
||||||
|
(self.pll_cp << 6) | (self.pll_p << 3))
|
||||||
|
mmap.append(
|
||||||
|
(0x19 << 16) |
|
||||||
|
(self.pll_m2 << 15) | (self.pll_m << 8) | (self.pll_n << 4) |
|
||||||
|
(self.pll_vcotune << 2))
|
||||||
|
mmap.append(
|
||||||
|
(0x1a << 16) |
|
||||||
|
(self.pll_vco << 10) | (self.bias_sleep << 7) |
|
||||||
|
(self.tsense_sleep << 6) |
|
||||||
|
(self.pll_sleep << 5) | (self.clkrecv_sleep << 4) |
|
||||||
|
(self.dac_sleep << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x1b << 16) |
|
||||||
|
(self.extref_ena << 15) | (self.fuse_sleep << 11) |
|
||||||
|
(self.atest << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x1e << 16) |
|
||||||
|
(self.syncsel_qmcoffsetab << 12) |
|
||||||
|
(self.syncsel_qmcoffsetcd << 8) |
|
||||||
|
(self.syncsel_qmccorrab << 4) |
|
||||||
|
(self.syncsel_qmccorrcd << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x1f << 16) |
|
||||||
|
(self.syncsel_mixerab << 12) | (self.syncsel_mixercd << 8) |
|
||||||
|
(self.syncsel_nco << 4) | (self.syncsel_fifo_input << 2) |
|
||||||
|
(self.sif_sync << 1))
|
||||||
|
mmap.append(
|
||||||
|
(0x20 << 16) |
|
||||||
|
(self.syncsel_fifoin << 12) | (self.syncsel_fifoout << 8) |
|
||||||
|
(self.clkdiv_sync_sel << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x22 << 16) |
|
||||||
|
(self.path_a_sel << 14) | (self.path_b_sel << 12) |
|
||||||
|
(self.path_c_sel << 10) | (self.path_d_sel << 8) |
|
||||||
|
(self.dac_a_sel << 6) | (self.dac_b_sel << 4) |
|
||||||
|
(self.dac_c_sel << 2) | (self.dac_d_sel << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x23 << 16) |
|
||||||
|
(self.dac_sleep_en << 12) | (self.clkrecv_sleep_en << 11) |
|
||||||
|
(self.pll_sleep_en << 10) | (self.lvds_data_sleep_en << 9) |
|
||||||
|
(self.lvds_control_sleep_en << 8) |
|
||||||
|
(self.temp_sense_sleep_en << 7) | (1 << 6) |
|
||||||
|
(self.bias_sleep_en << 5) | (0x1f << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x24 << 16) | (self.data_dly << 13) | (self.clk_dly << 10))
|
||||||
|
mmap.append(
|
||||||
|
(0x2d << 16) |
|
||||||
|
(self.ostrtodig_sel << 14) | (self.ramp_ena << 13) |
|
||||||
|
(0x002 << 1) | (self.sifdac_ena << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x2e << 16) | (self.grp_delaya << 8) | (self.grp_delayb << 0))
|
||||||
|
mmap.append(
|
||||||
|
(0x2f << 16) | (self.grp_delayc << 8) | (self.grp_delayd << 0))
|
||||||
|
mmap.append((0x30 << 16) | self.sifdac)
|
||||||
|
return mmap
|
|
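To show how the DAC34H84 register map above is meant to be consumed: the constructor takes a dict of overrides (unknown keys raise KeyError), and ``get_mmap()`` packs each register as ``(address << 16) | value``. The ``write_reg`` transport below is a hypothetical stand-in for whatever SPI interface the board code actually provides::

    # Override a few defaults at construction time; invalid names raise KeyError.
    dac = DAC34H84({"fifo_offset": 2, "pll_m": 8})

    for word in dac.get_mmap():
        addr = (word >> 16) & 0xff   # 8-bit register address
        value = word & 0xffff        # 16-bit register value
        write_reg(addr, value)       # hypothetical SPI write helper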
@@ -6,7 +6,7 @@ alone could achieve.
 """

 from artiq.language.core import syscall, kernel
-from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple
+from artiq.language.types import TInt32, TInt64, TStr, TNone, TTuple, TBool
 from artiq.coredevice.exceptions import DMAError

 from numpy import int64

@@ -17,7 +17,7 @@ def dma_record_start(name: TStr) -> TNone:
     raise NotImplementedError("syscall not simulated")

 @syscall
-def dma_record_stop(duration: TInt64) -> TNone:
+def dma_record_stop(duration: TInt64, enable_ddma: TBool) -> TNone:
     raise NotImplementedError("syscall not simulated")

 @syscall

@@ -25,11 +25,11 @@ def dma_erase(name: TStr) -> TNone:
     raise NotImplementedError("syscall not simulated")

 @syscall
-def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32]):
+def dma_retrieve(name: TStr) -> TTuple([TInt64, TInt32, TBool]):
     raise NotImplementedError("syscall not simulated")

 @syscall
-def dma_playback(timestamp: TInt64, ptr: TInt32) -> TNone:
+def dma_playback(timestamp: TInt64, ptr: TInt32, enable_ddma: TBool) -> TNone:
     raise NotImplementedError("syscall not simulated")


@@ -47,6 +47,7 @@ class DMARecordContextManager:
     def __init__(self):
         self.name = ""
         self.saved_now_mu = int64(0)
+        self.enable_ddma = False

     @kernel
     def __enter__(self):

@@ -56,7 +57,7 @@ class DMARecordContextManager:

     @kernel
     def __exit__(self, type, value, traceback):
-        dma_record_stop(now_mu()) # see above
+        dma_record_stop(now_mu(), self.enable_ddma) # see above
         at_mu(self.saved_now_mu)


@@ -74,12 +75,20 @@ class CoreDMA:
         self.epoch = 0

     @kernel
-    def record(self, name):
+    def record(self, name, enable_ddma=False):
         """Returns a context manager that will record a DMA trace called ``name``.
         Any previously recorded trace with the same name is overwritten.
-        The trace will persist across kernel switches."""
+        The trace will persist across kernel switches.
+
+        In DRTIO context, distributed DMA can be toggled with ``enable_ddma``.
+        Enabling it allows running DMA on satellites, rather than sending all
+        events from the master.
+
+        Keeping it disabled may improve performance in some scenarios,
+        e.g. when there are many small satellite buffers."""
         self.epoch += 1
         self.recorder.name = name
+        self.recorder.enable_ddma = enable_ddma
         return self.recorder

     @kernel

@@ -92,24 +101,24 @@ class CoreDMA:
     def playback(self, name):
         """Replays a previously recorded DMA trace. This function blocks until
         the entire trace is submitted to the RTIO FIFOs."""
-        (advance_mu, ptr) = dma_retrieve(name)
-        dma_playback(now_mu(), ptr)
+        (advance_mu, ptr, uses_ddma) = dma_retrieve(name)
+        dma_playback(now_mu(), ptr, uses_ddma)
         delay_mu(advance_mu)

     @kernel
     def get_handle(self, name):
         """Returns a handle to a previously recorded DMA trace. The returned handle
         is only valid until the next call to :meth:`record` or :meth:`erase`."""
-        (advance_mu, ptr) = dma_retrieve(name)
-        return (self.epoch, advance_mu, ptr)
+        (advance_mu, ptr, uses_ddma) = dma_retrieve(name)
+        return (self.epoch, advance_mu, ptr, uses_ddma)

     @kernel
     def playback_handle(self, handle):
         """Replays a handle obtained with :meth:`get_handle`. Using this function
         is much faster than :meth:`playback` for replaying a set of traces repeatedly,
         but puts the overhead of managing the handles onto the programmer."""
-        (epoch, advance_mu, ptr) = handle
+        (epoch, advance_mu, ptr, uses_ddma) = handle
         if self.epoch != epoch:
             raise DMAError("Invalid handle")
-        dma_playback(now_mu(), ptr)
+        dma_playback(now_mu(), ptr, uses_ddma)
         delay_mu(advance_mu)
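A short kernel-side sketch of the record/replay flow with the new ``enable_ddma`` flag. The device attributes (``core_dma``, ``ttl0``) and the trace name are made-up examples; the method calls are the ones defined above::

    @kernel
    def run(self):
        self.core.reset()
        # Record the pulse train once; with enable_ddma=True the satellite
        # parts of the trace are stored and replayed on the satellites.
        with self.core_dma.record("pulses", enable_ddma=True):
            for _ in range(100):
                self.ttl0.pulse(100*ns)
                delay(100*ns)
        # Replay by name (looks the trace up each time)...
        self.core_dma.playback("pulses")
        # ...or fetch a handle once and replay it cheaply in a loop.
        handle = self.core_dma.get_handle("pulses")
        for _ in range(10):
            self.core_dma.playback_handle(handle)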
@@ -1,17 +0,0 @@
-"""
-DRTIO debugging functions.
-
-Those syscalls are intended for ARTIQ developers only.
-"""
-
-from artiq.language.core import syscall
-from artiq.language.types import TTuple, TInt32, TInt64, TNone
-
-
-@syscall(flags={"nounwind", "nowrite"})
-def drtio_get_packet_counts(linkno: TInt32) -> TTuple([TInt32, TInt32]):
-    raise NotImplementedError("syscall not simulated")
-
-
-@syscall(flags={"nounwind", "nowrite"})
-def drtio_get_fifo_space_req_count(linkno: TInt32) -> TInt32:
-    raise NotImplementedError("syscall not simulated")
@@ -0,0 +1,240 @@
"""Driver for RTIO-enabled TTL edge counter.

Like for the TTL input PHYs, sensitivity can be configured over RTIO
(``gate_rising()``, etc.). In contrast to the former, however, the count is
accumulated in gateware, and only a single input event is generated at the end
of each gate period::

    with parallel:
        doppler_cool()
        self.pmt_counter.gate_rising(1 * ms)

    with parallel:
        readout()
        self.pmt_counter.gate_rising(100 * us)

    print("Doppler cooling counts:", self.pmt_counter.fetch_count())
    print("Readout counts:", self.pmt_counter.fetch_count())

For applications where the timestamps of the individual input events are not
required, this has two advantages over ``TTLInOut.count()`` beyond raw
throughput. First, it is easy to count events during multiple separate periods
without blocking to read back counts in between, as illustrated in the above
example. Secondly, as each count total only takes up a single input event, it
is much easier to acquire counts on several channels in parallel without
risking input FIFO overflows::

    # Using the TTLInOut driver, pmt_1 input events are only processed
    # after pmt_0 is done counting. To avoid RTIOOverflows, a round-robin
    # scheme would have to be implemented manually.

    with parallel:
        self.pmt_0.gate_rising(10 * ms)
        self.pmt_1.gate_rising(10 * ms)

    counts_0 = self.pmt_0.count(now_mu()) # blocks
    counts_1 = self.pmt_1.count(now_mu())

    # Using gateware counters, only a single input event each is
    # generated, greatly reducing the load on the input FIFOs:

    with parallel:
        self.pmt_0_counter.gate_rising(10 * ms)
        self.pmt_1_counter.gate_rising(10 * ms)

    counts_0 = self.pmt_0_counter.fetch_count() # blocks
    counts_1 = self.pmt_1_counter.fetch_count()

See :mod:`artiq.gateware.rtio.phy.edge_counter` and
:meth:`artiq.gateware.eem.DIO.add_std` for the gateware components.
"""

from artiq.language.core import *
from artiq.language.types import *
from artiq.coredevice.rtio import (rtio_output, rtio_input_data,
                                   rtio_input_timestamped_data)
from numpy import int32, int64

CONFIG_COUNT_RISING = 0b0001
CONFIG_COUNT_FALLING = 0b0010
CONFIG_SEND_COUNT_EVENT = 0b0100
CONFIG_RESET_TO_ZERO = 0b1000


class CounterOverflow(Exception):
    """Raised when an edge counter value is read which indicates that the
    counter might have overflowed."""
    pass


class EdgeCounter:
    """RTIO TTL edge counter driver.

    Like for regular TTL inputs, timeline periods where the counter is
    sensitive to a chosen set of input transitions can be specified. Unlike the
    former, however, the specified edges do not create individual input events;
    rather, the total count can be requested as a single input event from the
    core (typically at the end of the gate window).

    :param channel: The RTIO channel of the gateware phy.
    :param gateware_width: The width of the gateware counter register, in
        bits. This is only used for overflow handling; to change the size,
        the gateware needs to be rebuilt.
    """

    kernel_invariants = {"core", "channel", "counter_max"}

    def __init__(self, dmgr, channel, gateware_width=31, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel = channel
        self.counter_max = (1 << (gateware_width - 1)) - 1

    @staticmethod
    def get_rtio_channels(channel, **kwargs):
        return [(channel, None)]

    @kernel
    def gate_rising(self, duration):
        """Count rising edges for the given duration and request the total at
        the end.

        The counter is reset at the beginning of the gate period. Use
        :meth:`set_config` directly for more detailed control.

        :param duration: The duration for which the gate is to stay open.

        :return: The timestamp at the end of the gate period, in machine units.
        """
        return self.gate_rising_mu(self.core.seconds_to_mu(duration))

    @kernel
    def gate_falling(self, duration):
        """Count falling edges for the given duration and request the total at
        the end.

        The counter is reset at the beginning of the gate period. Use
        :meth:`set_config` directly for more detailed control.

        :param duration: The duration for which the gate is to stay open.

        :return: The timestamp at the end of the gate period, in machine units.
        """
        return self.gate_falling_mu(self.core.seconds_to_mu(duration))

    @kernel
    def gate_both(self, duration):
        """Count both rising and falling edges for the given duration, and
        request the total at the end.

        The counter is reset at the beginning of the gate period. Use
        :meth:`set_config` directly for more detailed control.

        :param duration: The duration for which the gate is to stay open.

        :return: The timestamp at the end of the gate period, in machine units.
        """
        return self.gate_both_mu(self.core.seconds_to_mu(duration))

    @kernel
    def gate_rising_mu(self, duration_mu):
        """See :meth:`gate_rising`."""
        return self._gate_mu(
            duration_mu, count_rising=True, count_falling=False)

    @kernel
    def gate_falling_mu(self, duration_mu):
        """See :meth:`gate_falling`."""
        return self._gate_mu(
            duration_mu, count_rising=False, count_falling=True)

    @kernel
    def gate_both_mu(self, duration_mu):
        """See :meth:`gate_both_mu`."""
        return self._gate_mu(
            duration_mu, count_rising=True, count_falling=True)

    @kernel
    def _gate_mu(self, duration_mu, count_rising, count_falling):
        self.set_config(
            count_rising=count_rising,
            count_falling=count_falling,
            send_count_event=False,
            reset_to_zero=True)
        delay_mu(duration_mu)
        self.set_config(
            count_rising=False,
            count_falling=False,
            send_count_event=True,
            reset_to_zero=False)
        return now_mu()

    @kernel
    def set_config(self, count_rising: TBool, count_falling: TBool,
                   send_count_event: TBool, reset_to_zero: TBool):
        """Emit an RTIO event at the current timeline position to set the
        gateware configuration.

        For most use cases, the `gate_*` wrappers will be more convenient.

        :param count_rising: Whether to count rising signal edges.
        :param count_falling: Whether to count falling signal edges.
        :param send_count_event: If `True`, an input event with the current
            counter value is generated on the next clock cycle (once).
        :param reset_to_zero: If `True`, the counter value is reset to zero on
            the next clock cycle (once).
        """
        config = int32(0)
        if count_rising:
            config |= CONFIG_COUNT_RISING
        if count_falling:
            config |= CONFIG_COUNT_FALLING
        if send_count_event:
            config |= CONFIG_SEND_COUNT_EVENT
        if reset_to_zero:
            config |= CONFIG_RESET_TO_ZERO
        rtio_output(self.channel << 8, config)

    @kernel
    def fetch_count(self) -> TInt32:
        """Wait for and return count total from previously requested input
        event.

        It is valid to trigger multiple gate periods without immediately
        reading back the count total; the results will be returned in order on
        subsequent fetch calls.

        This function blocks until a result becomes available.
        """
        count = rtio_input_data(self.channel)
        if count == self.counter_max:
            raise CounterOverflow(
                "Input edge counter overflow on RTIO channel {0}",
                int64(self.channel))
        return count

    @kernel
    def fetch_timestamped_count(
            self, timeout_mu=int64(-1)) -> TTuple([TInt64, TInt32]):
        """Wait for and return the timestamp and count total of a previously
        requested input event.

        It is valid to trigger multiple gate periods without immediately
        reading back the count total; the results will be returned in order on
        subsequent fetch calls.

        This function blocks until a result becomes available or the given
        timeout elapses.

        :return: A tuple of timestamp (-1 if timeout elapsed) and counter
            value. (The timestamp is that of the requested input event –
            typically the gate closing time – and not that of any input edges.)
        """
        timestamp, count = rtio_input_timestamped_data(timeout_mu,
                                                       self.channel)
        if count == self.counter_max:
            raise CounterOverflow(
                "Input edge counter overflow on RTIO channel {0}",
                int64(self.channel))
        return timestamp, count
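A small kernel sketch of the timestamped readout path above. The ``pmt_counter`` device name is hypothetical, and it is assumed here (as the ``-1`` default suggests) that ``timeout_mu`` is forwarded to ``rtio_input_timestamped_data`` as an absolute RTIO timestamp up to which to wait::

    @kernel
    def count_with_timeout(self):
        # Open a 10 ms rising-edge gate; gate_rising returns the gate-close time.
        gate_end_mu = self.pmt_counter.gate_rising(10*ms)
        # Wait at most ~1 s past the gate close for the count event (assumption).
        timestamp, counts = self.pmt_counter.fetch_timestamped_count(
            timeout_mu=gate_end_mu + self.core.seconds_to_mu(1.0))
        if timestamp < 0:
            counts = 0   # timeout elapsed before the count event arrived
        return counts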
@@ -11,54 +11,99 @@ ZeroDivisionError = builtins.ZeroDivisionError
 ValueError = builtins.ValueError
 IndexError = builtins.IndexError
 RuntimeError = builtins.RuntimeError
+AssertionError = builtins.AssertionError


 class CoreException:
     """Information about an exception raised or passed through the core device."""
-    def __init__(self, name, message, params, traceback):
+    def __init__(self, exceptions, exception_info, traceback, stack_pointers):
+        self.exceptions = exceptions
+        self.exception_info = exception_info
+        self.traceback = list(traceback)
+        self.stack_pointers = stack_pointers
+
+        first_exception = exceptions[0]
+        name = first_exception[0]
         if ':' in name:
             exn_id, self.name = name.split(':', 2)
             self.id = int(exn_id)
         else:
             self.id, self.name = 0, name
-        self.message, self.params = message, params
-        self.traceback = list(traceback)
+        self.message = first_exception[1]
+        self.params = first_exception[2]
+
+    def append_backtrace(self, record, inlined=False):
+        filename, line, column, function, address = record
+        stub_globals = {"__name__": filename, "__loader__": source_loader}
+        source_line = linecache.getline(filename, line, stub_globals)
+        indentation = re.search(r"^\s*", source_line).end()
+
+        if address is None:
+            formatted_address = ""
+        elif inlined:
+            formatted_address = " (inlined)"
+        else:
+            formatted_address = " (RA=+0x{:x})".format(address)
+
+        filename = filename.replace(artiq_dir, "<artiq>")
+        lines = []
+        if column == -1:
+            lines.append("    {}".format(source_line.strip() if source_line else "<unknown>"))
+            lines.append("  File \"{file}\", line {line}, in {function}{address}".
+                         format(file=filename, line=line, function=function,
+                                address=formatted_address))
+        else:
+            lines.append("    {}^".format(" " * (column - indentation)))
+            lines.append("    {}".format(source_line.strip() if source_line else "<unknown>"))
+            lines.append("  File \"{file}\", line {line}, column {column},"
+                         " in {function}{address}".
+                         format(file=filename, line=line, column=column + 1,
+                                function=function, address=formatted_address))
+        return lines
+
+    def single_traceback(self, exception_index):
+        # note that we insert in reversed order
+        lines = []
+        last_sp = 0
+        start_backtrace_index = self.exception_info[exception_index][1]
+        zipped = list(zip(self.traceback[start_backtrace_index:],
+                          self.stack_pointers[start_backtrace_index:]))
+        exception = self.exceptions[exception_index]
+        name = exception[0]
+        message = exception[1]
+        params = exception[2]
+        if ':' in name:
+            exn_id, name = name.split(':', 2)
+            exn_id = int(exn_id)
+        else:
+            exn_id = 0
+        lines.append("{}({}): {}".format(name, exn_id, message.format(*params)))
+        zipped.append(((exception[3], exception[4], exception[5], exception[6],
+                        None, []), None))
+
+        for ((filename, line, column, function, address, inlined), sp) in zipped:
+            # backtrace of nested exceptions may be discontinuous
+            # but the stack pointer must increase monotonically
+            if sp is not None and sp <= last_sp:
+                continue
+            last_sp = sp
+
+            for record in reversed(inlined):
+                lines += self.append_backtrace(record, True)
+            lines += self.append_backtrace((filename, line, column, function,
+                                            address))
+
+        lines.append("Traceback (most recent call first):")
+
+        return "\n".join(reversed(lines))

     def __str__(self):
-        lines = []
-        lines.append("Core Device Traceback (most recent call last):")
-        last_address = 0
-        for (filename, line, column, function, address) in self.traceback:
-            stub_globals = {"__name__": filename, "__loader__": source_loader}
-            source_line = linecache.getline(filename, line, stub_globals)
-            indentation = re.search(r"^\s*", source_line).end()
-
-            if address is None:
-                formatted_address = ""
-            elif address == last_address:
-                formatted_address = " (inlined)"
-            else:
-                formatted_address = " (RA=+0x{:x})".format(address)
-            last_address = address
-
-            filename = filename.replace(artiq_dir, "<artiq>")
-            if column == -1:
-                lines.append("  File \"{file}\", line {line}, in {function}{address}".
-                             format(file=filename, line=line, function=function,
-                                    address=formatted_address))
-                lines.append("    {}".format(source_line.strip() if source_line else "<unknown>"))
-            else:
-                lines.append("  File \"{file}\", line {line}, column {column},"
-                             " in {function}{address}".
-                             format(file=filename, line=line, column=column + 1,
-                                    function=function, address=formatted_address))
-                lines.append("    {}".format(source_line.strip() if source_line else "<unknown>"))
-                lines.append("    {}^".format(" " * (column - indentation)))
-
-        lines.append("{}({}): {}".format(self.name, self.id,
-                                         self.message.format(*self.params)))
-        return "\n".join(lines)
+        tracebacks = [self.single_traceback(i) for i in range(len(self.exceptions))]
+        traceback_str = ('\n\nDuring handling of the above exception, ' +
+                         'another exception occurred:\n\n').join(tracebacks)
+        return 'Core Device Traceback:\n' +\
+               traceback_str +\
+               '\n\nEnd of Core Device Traceback\n'


@@ -91,7 +136,7 @@ class RTIOOverflow(Exception):
     artiq_builtin = True


-class RTIOLinkError(Exception):
+class RTIODestinationUnreachable(Exception):
     """Raised when an RTIO operation could not be completed due to a DRTIO link
     being down.
     """

@@ -103,8 +148,11 @@ class DMAError(Exception):
     artiq_builtin = True


-class WatchdogExpired(Exception):
-    """Raised when a watchdog expires."""
+class SubkernelError(Exception):
+    """Raised when an operation regarding a subkernel is invalid
+    or cannot be completed.
+    """
+    artiq_builtin = True


 class ClockFailure(Exception):
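For context, a short sketch of how the renamed exception surfaces in user code; ``RTIODestinationUnreachable`` replaces the old ``RTIOLinkError``, and the ``remote_ttl`` device name is made up::

    from artiq.coredevice.exceptions import RTIODestinationUnreachable

    @kernel
    def safe_pulse(self):
        try:
            self.remote_ttl.pulse(1*us)   # channel routed over DRTIO
        except RTIODestinationUnreachable:
            # The satellite holding this channel is down; skip the pulse.
            pass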
@@ -0,0 +1,304 @@ (new file)
"""RTIO driver for the Fastino 32-channel, 16 bit, 2.5 MS/s per channel,
streaming DAC.
"""
from numpy import int32, int64

from artiq.language.core import kernel, portable, delay, delay_mu
from artiq.coredevice.rtio import (rtio_output, rtio_output_wide,
                                   rtio_input_data)
from artiq.language.units import ns
from artiq.language.types import TInt32, TList


class Fastino:
    """Fastino 32-channel, 16-bit, 2.5 MS/s per channel streaming DAC

    The RTIO PHY supports staging DAC data before transmitting them by writing
    to the DAC RTIO addresses. If a channel is not "held" by setting its bit
    using :meth:`set_hold`, the next frame will contain the update. For the
    DACs held, the update is triggered explicitly by setting the corresponding
    bit using :meth:`update`. Update is self-clearing. This enables atomic
    DAC updates synchronized to a frame edge.

    The `log2_width=0` RTIO layout uses one DAC channel per RTIO address and a
    dense RTIO address space. The RTIO words are narrow (32 bit) and
    few-channel updates are efficient. There is the least amount of DAC state
    tracking in kernels, at the cost of more DMA and RTIO data.
    The setting here and in the RTIO PHY (gateware) must match.

    Other `log2_width` (up to `log2_width=5`) settings pack multiple
    (in powers of two) DAC channels into one group and into one RTIO write.
    The RTIO data width increases accordingly. The `log2_width`
    LSBs of the RTIO address for a DAC channel write must be zero and the
    address space is sparse. For `log2_width=5` the RTIO data is 512 bit wide.

    If `log2_width` is zero, the :meth:`set_dac`/:meth:`set_dac_mu` interface
    must be used. If non-zero, the :meth:`set_group`/:meth:`set_group_mu`
    interface must be used.

    :param channel: RTIO channel number
    :param core_device: Core device name (default: "core")
    :param log2_width: Width of DAC channel group (logarithm base 2).
        Value must match the corresponding value in the RTIO PHY (gateware).
    """
    kernel_invariants = {"core", "channel", "width", "t_frame"}

    def __init__(self, dmgr, channel, core_device="core", log2_width=0):
        self.channel = channel << 8
        self.core = dmgr.get(core_device)
        self.width = 1 << log2_width
        # frame duration in mu (14 words each 7 clock cycles each 4 ns)
        # self.core.seconds_to_mu(14*7*4*ns)  # unfortunately this may round wrong
        assert self.core.ref_period == 1*ns
        self.t_frame = int64(14*7*4)

    @staticmethod
    def get_rtio_channels(channel, **kwargs):
        return [(channel, None)]

    @kernel
    def init(self):
        """Initialize the device.

        * disables RESET, DAC_CLR, enables AFE_PWR
        * clears error counters, enables error counting
        * turns LEDs off
        * clears `hold` and `continuous` on all channels
        * clears and resets interpolators to unit rate change on all
          channels

        It does not change the currently set channel voltages and does not
        reset the PLLs or clock domains.

        Note: On Fastino gateware before v0.2 this may lead to 0 voltage being
        emitted transiently.
        """
        self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=1)
        delay_mu(self.t_frame)
        self.set_cfg(reset=0, afe_power_down=0, dac_clr=0, clr_err=0)
        delay_mu(self.t_frame)
        self.set_continuous(0)
        delay_mu(self.t_frame)
        self.stage_cic(1)
        delay_mu(self.t_frame)
        self.apply_cic(0xffffffff)
        delay_mu(self.t_frame)
        self.set_leds(0)
        delay_mu(self.t_frame)
        self.set_hold(0)
        delay_mu(self.t_frame)

    @kernel
    def write(self, addr, data):
        """Write data to a Fastino register.

        :param addr: Address to write to.
        :param data: Data to write.
        """
        rtio_output(self.channel | addr, data)

    @kernel
    def read(self, addr):
        """Read from Fastino register.

        TODO: untested

        :param addr: Address to read from.
        :return: The data read.
        """
        raise NotImplementedError
        # rtio_output(self.channel | addr | 0x80)
        # return rtio_input_data(self.channel >> 8)

    @kernel
    def set_dac_mu(self, dac, data):
        """Write DAC data in machine units.

        :param dac: DAC channel to write to (0-31).
        :param data: DAC word to write, 16 bit unsigned integer, in machine
            units.
        """
        self.write(dac, data)

    @kernel
    def set_group_mu(self, dac: TInt32, data: TList(TInt32)):
        """Write a group of DAC channels in machine units.

        :param dac: First channel in DAC channel group (0-31). The `log2_width`
            LSBs must be zero.
        :param data: List of DAC data pairs (2x16 bit unsigned) to write,
            in machine units. Data exceeding group size is ignored.
            If the list length is less than group size, the remaining
            DAC channels within the group are cleared to 0 (machine units).
        """
        if dac & (self.width - 1):
            raise ValueError("Group index LSBs must be zero")
        rtio_output_wide(self.channel | dac, data)

    @portable
    def voltage_to_mu(self, voltage):
        """Convert SI Volts to DAC machine units.

        :param voltage: Voltage in SI Volts.
        :return: DAC data word in machine units, 16 bit integer.
        """
        data = int32(round((0x8000/10.)*voltage)) + int32(0x8000)
        if data < 0 or data > 0xffff:
            raise ValueError("DAC voltage out of bounds")
        return data

    @portable
    def voltage_group_to_mu(self, voltage, data):
        """Convert SI Volts to packed DAC channel group machine units.

        :param voltage: List of SI Volt voltages.
        :param data: List of DAC channel data pairs to write to.
            Half the length of `voltage`.
        """
        for i in range(len(voltage)):
            v = self.voltage_to_mu(voltage[i])
            if i & 1:
                v = data[i // 2] | (v << 16)
            data[i // 2] = int32(v)

    @kernel
    def set_dac(self, dac, voltage):
        """Set DAC data to given voltage.

        :param dac: DAC channel (0-31).
        :param voltage: Desired output voltage.
        """
        self.write(dac, self.voltage_to_mu(voltage))

    @kernel
    def set_group(self, dac, voltage):
        """Set DAC group data to given voltage.

        :param dac: First DAC channel in the group (0-31).
        :param voltage: List of desired output voltages.
        """
        data = [int32(0)] * (len(voltage) // 2)
        self.voltage_group_to_mu(voltage, data)
        self.set_group_mu(dac, data)

    @kernel
    def update(self, update):
        """Schedule channels for update.

        :param update: Bit mask of channels to update (32 bit).
        """
        self.write(0x20, update)

    @kernel
    def set_hold(self, hold):
        """Set channels to manual update.

        :param hold: Bit mask of channels to hold (32 bit).
        """
        self.write(0x21, hold)

    @kernel
    def set_cfg(self, reset=0, afe_power_down=0, dac_clr=0, clr_err=0):
        """Set configuration bits.

        :param reset: Reset SPI PLL and SPI clock domain.
        :param afe_power_down: Disable AFE power.
        :param dac_clr: Assert all 32 DAC_CLR signals setting all DACs to
            mid-scale (0 V).
        :param clr_err: Clear error counters and PLL reset indicator.
            This clears the sticky red error LED. Must be cleared to enable
            error counting.
        """
        self.write(0x22, (reset << 0) | (afe_power_down << 1) |
                   (dac_clr << 2) | (clr_err << 3))

    @kernel
    def set_leds(self, leds):
        """Set the green user-defined LEDs.

        :param leds: LED status, 8 bit integer, each bit corresponding to one
            green LED.
        """
        self.write(0x23, leds)

    @kernel
    def set_continuous(self, channel_mask):
        """Enable continuous DAC updates on channels regardless of new data
        being submitted.
        """
        self.write(0x25, channel_mask)

    @kernel
    def stage_cic_mu(self, rate_mantissa, rate_exponent, gain_exponent):
        """Stage machine unit CIC interpolator configuration.
        """
        if rate_mantissa < 0 or rate_mantissa >= 1 << 6:
            raise ValueError("rate_mantissa out of bounds")
        if rate_exponent < 0 or rate_exponent >= 1 << 4:
            raise ValueError("rate_exponent out of bounds")
        if gain_exponent < 0 or gain_exponent >= 1 << 6:
            raise ValueError("gain_exponent out of bounds")
        config = rate_mantissa | (rate_exponent << 6) | (gain_exponent << 10)
        self.write(0x26, config)

    @kernel
    def stage_cic(self, rate) -> TInt32:
        """Compute and stage interpolator configuration.

        This method approximates the desired interpolation rate using a 10 bit
        floating point representation (6 bit mantissa, 4 bit exponent) and
        then determines an optimal interpolation gain compensation exponent
        to avoid clipping. Gains for rates that are powers of two are accurately
        compensated. Other rates lead to overall less than unity gain (but more
        than 0.5 gain).

        The overall gain including gain compensation is
        `actual_rate**order/2**ceil(log2(actual_rate**order))`
        where `order = 3`.

        Returns the actual interpolation rate.
        """
        if rate <= 0 or rate > 1 << 16:
            raise ValueError("rate out of bounds")
        rate_mantissa = rate
        rate_exponent = 0
        while rate_mantissa > 1 << 6:
            rate_exponent += 1
            rate_mantissa >>= 1
        order = 3
        gain = 1
        for i in range(order):
            gain *= rate_mantissa
        gain_exponent = 0
        while gain > 1 << gain_exponent:
            gain_exponent += 1
        gain_exponent += order*rate_exponent
        assert gain_exponent <= order*16
        self.stage_cic_mu(rate_mantissa - 1, rate_exponent, gain_exponent)
        return rate_mantissa << rate_exponent

    @kernel
    def apply_cic(self, channel_mask):
        """Apply the staged interpolator configuration on the specified channels.

        Each Fastino channel starting with gateware v0.2 includes a fourth order
        (cubic) CIC interpolator with variable rate change and variable output
        gain compensation (see :meth:`stage_cic`).

        Fastino gateware before v0.2 does not include the interpolators and the
        methods affecting the CICs should not be used.

        Channels using non-unity interpolation rate should have
        continuous DAC updates enabled (see :meth:`set_continuous`) unless
        their output is supposed to be constant.

        This method resets and settles the affected interpolators. There will be
        no output updates for the next `order = 3` input samples.
        Affected channels will only accept one input sample per input sample
        period. This method synchronizes the input sample period to the current
        frame on the affected channels.

        If application of new interpolator settings results in a change of the
        overall gain, there will be a corresponding output step.
        """
        self.write(0x27, channel_mask)
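For orientation, ``voltage_to_mu`` maps -10 V to +10 V onto the 16 bit range: 1.5 V gives round(0x8000/10 * 1.5) + 0x8000 = 37683 (0x9333). The sketch below exercises the hold/update and CIC staging sequences described in the docstrings above; it assumes a ``log2_width=0`` Fastino registered as ``fastino0`` in the device database (a placeholder name), and is an illustration rather than part of the diff:

from artiq.experiment import *

class FastinoSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("fastino0")   # assumed device name, log2_width=0

    @kernel
    def run(self):
        self.core.reset()
        self.fastino0.init()
        delay(1*ms)
        # held channels only step on the frame after update() is written
        self.fastino0.set_hold(0b0011)
        delay_mu(self.fastino0.t_frame)
        self.fastino0.set_dac(0, 1.5)      # stage 1.5 V on channel 0
        delay_mu(self.fastino0.t_frame)
        self.fastino0.set_dac(1, -2.0)     # stage -2 V on channel 1
        delay_mu(self.fastino0.t_frame)
        self.fastino0.update(0b0011)       # both channels step together
        delay_mu(self.fastino0.t_frame)
        # interpolate later updates on channel 2 by ~16x (gateware v0.2+)
        self.fastino0.set_continuous(1 << 2)
        delay_mu(self.fastino0.t_frame)
        self.fastino0.stage_cic(16)
        delay_mu(self.fastino0.t_frame)
        self.fastino0.apply_cic(1 << 2)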
@@ -23,7 +23,11 @@ class Grabber:
        count_width = min(31, 2*res_width + 16 - count_shift)
        # This value is inserted by the gateware to mark the start of a series of
        # ROI engine outputs for one video frame.
-        self.sentinel = int32(2**count_width)
+        self.sentinel = int32(int64(2**count_width))
+
+    @staticmethod
+    def get_rtio_channels(channel_base, **kwargs):
+        return [(channel_base, "ROI coordinates"), (channel_base + 1, "ROI mask")]

    @kernel
    def setup_roi(self, n, x0, y0, x1, y1):
@@ -40,13 +44,13 @@ class Grabber:
        Advances the timeline by 4 coarse RTIO cycles.
        """
        c = int64(self.core.ref_multiplier)
-        rtio_output(now_mu(), self.channel_base, 4*n+0, x0)
+        rtio_output((self.channel_base << 8) | (4*n+0), x0)
        delay_mu(c)
-        rtio_output(now_mu(), self.channel_base, 4*n+1, y0)
+        rtio_output((self.channel_base << 8) | (4*n+1), y0)
        delay_mu(c)
-        rtio_output(now_mu(), self.channel_base, 4*n+2, x1)
+        rtio_output((self.channel_base << 8) | (4*n+2), x1)
        delay_mu(c)
-        rtio_output(now_mu(), self.channel_base, 4*n+3, y1)
+        rtio_output((self.channel_base << 8) | (4*n+3), y1)
        delay_mu(c)

    @kernel
@@ -67,7 +71,7 @@ class Grabber:

        :param mask: bitmask enabling or disabling each ROI engine.
        """
-        rtio_output(now_mu(), self.channel_base+1, 0, mask)
+        rtio_output((self.channel_base + 1) << 8, mask)

    @kernel
    def gate_roi_pulse(self, mask, dt):
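A short usage sketch of the Grabber ROI methods touched by this hunk, assuming a frame grabber registered as ``grabber0`` (placeholder name):

from artiq.experiment import *

class GrabberROI(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("grabber0")   # assumed device name

    @kernel
    def run(self):
        self.core.reset()
        # ROI engine 0 integrates pixels in the rectangle (10, 20)-(100, 200)
        self.grabber0.setup_roi(0, 10, 20, 100, 200)
        # enable engine 0 for 10 ms, then disable it again
        self.grabber0.gate_roi_pulse(0b1, 10*ms)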
@@ -33,6 +33,11 @@ def i2c_read(busno: TInt32, ack: TBool) -> TInt32:
    raise NotImplementedError("syscall not simulated")


+@syscall(flags={"nounwind", "nowrite"})
+def i2c_switch_select(busno: TInt32, address: TInt32, mask: TInt32) -> TNone:
+    raise NotImplementedError("syscall not simulated")
+
+
@kernel
def i2c_poll(busno, busaddr):
    """Poll I2C device at address.

@@ -137,8 +142,10 @@ def i2c_read_many(busno, busaddr, addr, data):
    i2c_stop(busno)


-class PCA9548:
-    """Driver for the PCA9548 I2C bus switch.
+class I2CSwitch:
+    """Driver for the I2C bus switch.
+
+    PCA954X (or other) type detection is done by the CPU during I2C init.

    I2C transactions are not real-time, and are performed by the CPU without
    involving RTIO.

@@ -151,25 +158,19 @@ class PCA9548:
        self.busno = busno
        self.address = address

-    @kernel
-    def select(self, mask):
-        """Enable/disable channels.
-
-        :param mask: Bit mask of enabled channels
-        """
-        i2c_write_byte(self.busno, self.address, mask)
-
    @kernel
    def set(self, channel):
        """Enable one channel.

        :param channel: channel number (0-7)
        """
-        self.select(1 << channel)
+        i2c_switch_select(self.busno, self.address >> 1, 1 << channel)

    @kernel
-    def readback(self):
-        return i2c_read_byte(self.busno, self.address)
+    def unset(self):
+        """Disable output of the I2C switch.
+        """
+        i2c_switch_select(self.busno, self.address >> 1, 0)


class TCA6424A:
@@ -207,3 +208,46 @@ class TCA6424A:

        self._write24(0x8c, 0)  # set all directions to output
        self._write24(0x84, outputs_le)  # set levels
+
+
+class PCF8574A:
+    """Driver for the PCF8574 I2C remote 8-bit I/O expander.
+
+    I2C transactions are not real-time, and are performed by the CPU without
+    involving RTIO.
+    """
+    def __init__(self, dmgr, busno=0, address=0x7c, core_device="core"):
+        self.core = dmgr.get(core_device)
+        self.busno = busno
+        self.address = address
+
+    @kernel
+    def set(self, data):
+        """Drive data on the quasi-bidirectional pins.
+
+        :param data: Pin data. High bits are weakly driven high
+            (and thus inputs), low bits are strongly driven low.
+        """
+        i2c_start(self.busno)
+        try:
+            if not i2c_write(self.busno, self.address):
+                raise I2CError("PCF8574A failed to ack address")
+            if not i2c_write(self.busno, data):
+                raise I2CError("PCF8574A failed to ack data")
+        finally:
+            i2c_stop(self.busno)
+
+    @kernel
+    def get(self):
+        """Retrieve quasi-bidirectional pin input data.
+
+        :return: Pin data
+        """
+        i2c_start(self.busno)
+        ret = 0
+        try:
+            if not i2c_write(self.busno, self.address | 1):
+                raise I2CError("PCF8574A failed to ack address")
+            ret = i2c_read(self.busno, False)
+        finally:
+            i2c_stop(self.busno)
+        return ret
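A minimal sketch of the reworked switch and expander APIs, assuming devices named ``i2c_switch0`` (an ``I2CSwitch``) and ``pcf8574a0`` (a ``PCF8574A``) in the device database; both names are placeholders:

from artiq.experiment import *

class I2CSketch(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("i2c_switch0")   # I2CSwitch instance assumed
        self.setattr_device("pcf8574a0")     # PCF8574A instance assumed

    @kernel
    def run(self):
        self.core.reset()
        self.i2c_switch0.set(3)        # route the bus to switch channel 3
        self.pcf8574a0.set(0xfe)       # drive P0 low, leave P1-P7 as inputs
        pins = self.pcf8574a0.get()    # read the quasi-bidirectional pins
        self.i2c_switch0.unset()       # disconnect all switch channels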
@@ -0,0 +1,38 @@ (new file)
from os import path
import json
from jsonschema import Draft7Validator, validators

def extend_with_default(validator_class):
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        for property, subschema in properties.items():
            if "default" in subschema:
                instance.setdefault(property, subschema["default"])

        for error in validate_properties(
            validator, properties, instance, schema,
        ):
            yield error

    return validators.extend(
        validator_class, {"properties" : set_defaults},
    )

schema_path = path.join(path.dirname(__file__), "coredevice_generic.schema.json")
with open(schema_path, "r") as f:
    schema = json.load(f)

validator = extend_with_default(Draft7Validator)(schema)

def load(description_path):
    with open(description_path, "r") as f:
        result = json.load(f)

    global validator
    validator.validate(result)

    if result["base"] != "use_drtio_role":
        result["drtio_role"] = result["base"]

    return result
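The ``extend_with_default`` wrapper makes validation also fill in defaults declared in the schema, so loaded descriptions always carry the optional keys. A toy sketch of that behaviour; the schema and field names here are invented for illustration and are not the actual ``coredevice_generic.schema.json``:

from jsonschema import Draft7Validator

toy_schema = {
    "type": "object",
    "properties": {
        "core_addr": {"type": "string"},
        "ref_period": {"type": "number", "default": 1e-9},
    },
}

toy_validator = extend_with_default(Draft7Validator)(toy_schema)
description = {"core_addr": "192.168.1.70"}
toy_validator.validate(description)
assert description["ref_period"] == 1e-9   # the default was filled in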
@@ -0,0 +1,77 @@ (new file)
from numpy import int32

from artiq.experiment import *
from artiq.coredevice.i2c import i2c_write_many, i2c_read_many, i2c_poll


port_mapping = {
    "EEM0": 7,
    "EEM1": 5,
    "EEM2": 4,
    "EEM3": 3,
    "EEM4": 2,
    "EEM5": 1,
    "EEM6": 0,
    "EEM7": 6,
    "EEM8": 12,
    "EEM9": 13,
    "EEM10": 15,
    "EEM11": 14,
    "SFP0": 8,
    "SFP1": 9,
    "SFP2": 10,
    "LOC0": 11,
}


class KasliEEPROM:
    def __init__(self, dmgr, port, busno=0,
                 core_device="core", sw0_device="i2c_switch0", sw1_device="i2c_switch1"):
        self.core = dmgr.get(core_device)
        self.sw0 = dmgr.get(sw0_device)
        self.sw1 = dmgr.get(sw1_device)
        self.busno = busno
        self.port = port_mapping[port]
        self.address = 0xa0  # i2c 8 bit

    @kernel
    def select(self):
        mask = 1 << self.port
        if self.port < 8:
            self.sw0.set(self.port)
            self.sw1.unset()
        else:
            self.sw0.unset()
            self.sw1.set(self.port - 8)

    @kernel
    def deselect(self):
        self.sw0.unset()
        self.sw1.unset()

    @kernel
    def write_i32(self, addr, value):
        self.select()
        try:
            data = [0]*4
            for i in range(4):
                data[i] = (value >> 24) & 0xff
                value <<= 8
            i2c_write_many(self.busno, self.address, addr, data)
            i2c_poll(self.busno, self.address)
        finally:
            self.deselect()

    @kernel
    def read_i32(self, addr):
        self.select()
        try:
            data = [0]*4
            i2c_read_many(self.busno, self.address, addr, data)
            value = int32(0)
            for i in range(4):
                value <<= 8
                value |= data[i]
        finally:
            self.deselect()
        return value
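A hedged usage sketch of ``KasliEEPROM``: the device name ``eeprom_eem0`` and the EEPROM address 0x40 are placeholders chosen for illustration, not values taken from this changeset:

from artiq.experiment import *

class EEPROMRoundTrip(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("eeprom_eem0")   # KasliEEPROM bound to port "EEM0"

    @kernel
    def run(self):
        self.core.reset()
        # write a 32 bit word and read it back (address 0x40 is hypothetical)
        self.eeprom_eem0.write_i32(0x40, 0x12345678)
        value = self.eeprom_eem0.read_i32(0x40)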
@@ -0,0 +1,169 @@ (new file)
"""RTIO driver for Mirny (4 channel GHz PLLs)
"""

from artiq.language.core import kernel, delay, portable
from artiq.language.units import us

from numpy import int32

from artiq.coredevice import spi2 as spi


SPI_CONFIG = (
    0 * spi.SPI_OFFLINE
    | 0 * spi.SPI_END
    | 0 * spi.SPI_INPUT
    | 1 * spi.SPI_CS_POLARITY
    | 0 * spi.SPI_CLK_POLARITY
    | 0 * spi.SPI_CLK_PHASE
    | 0 * spi.SPI_LSB_FIRST
    | 0 * spi.SPI_HALF_DUPLEX
)

# SPI clock write and read dividers
SPIT_WR = 4
SPIT_RD = 16

SPI_CS = 1

WE = 1 << 24

# supported CPLD code version
PROTO_REV_MATCH = 0x0


class Mirny:
    """
    Mirny PLL-based RF generator.

    :param spi_device: SPI bus device
    :param refclk: Reference clock (SMA, MMCX or on-board 100 MHz oscillator)
        frequency in Hz
    :param clk_sel: Reference clock selection.
        Valid options are: "XO" - onboard crystal oscillator;
        "SMA" - front-panel SMA connector; "MMCX" - internal MMCX connector.
        Passing an integer writes it as ``clk_sel`` in the CPLD's register 1.
        The effect depends on the hardware revision.
    :param core_device: Core device name (default: "core")
    """

    kernel_invariants = {"bus", "core", "refclk", "clk_sel_hw_rev"}

    def __init__(self, dmgr, spi_device, refclk=100e6, clk_sel="XO", core_device="core"):
        self.core = dmgr.get(core_device)
        self.bus = dmgr.get(spi_device)

        # reference clock frequency
        self.refclk = refclk
        if not (10 <= self.refclk / 1e6 <= 600):
            raise ValueError("Invalid refclk")

        # reference clock selection
        try:
            self.clk_sel_hw_rev = {
                # clk source: [reserved, reserved, v1.1, v1.0]
                "xo": [-1, -1, 0, 0],
                "mmcx": [-1, -1, 3, 2],
                "sma": [-1, -1, 2, 3],
            }[clk_sel.lower()]
        except AttributeError:  # not a string, fall back to int
            if clk_sel & 0x3 != clk_sel:
                raise ValueError("Invalid clk_sel") from None
            self.clk_sel_hw_rev = [clk_sel] * 4
        except KeyError:
            raise ValueError("Invalid clk_sel") from None

        self.clk_sel = -1

        # board hardware revision
        self.hw_rev = 0  # v1.0: 3, v1.1: 2

        # TODO: support clk_div on v1.0 boards

    @kernel
    def read_reg(self, addr):
        """Read a register"""
        self.bus.set_config_mu(
            SPI_CONFIG | spi.SPI_INPUT | spi.SPI_END, 24, SPIT_RD, SPI_CS
        )
        self.bus.write((addr << 25))
        return self.bus.read() & int32(0xFFFF)

    @kernel
    def write_reg(self, addr, data):
        """Write a register"""
        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 24, SPIT_WR, SPI_CS)
        self.bus.write((addr << 25) | WE | ((data & 0xFFFF) << 8))

    @kernel
    def init(self, blind=False):
        """
        Initialize and detect Mirny.

        Select the clock source based on the board's hardware revision.
        Raise ValueError if the board's hardware revision is not supported.

        :param blind: Do not attempt to verify presence and protocol
            compatibility (otherwise a ValueError is raised on failure).
        """
        reg0 = self.read_reg(0)
        self.hw_rev = reg0 & 0x3

        if not blind:
            if (reg0 >> 2) & 0x3 != PROTO_REV_MATCH:
                raise ValueError("Mirny PROTO_REV mismatch")
            delay(100 * us)  # slack

        # select clock source
        self.clk_sel = self.clk_sel_hw_rev[self.hw_rev]

        if self.clk_sel < 0:
            raise ValueError("Hardware revision not supported")

        self.write_reg(1, (self.clk_sel << 4))
        delay(1000 * us)

    @portable(flags={"fast-math"})
    def att_to_mu(self, att):
        """Convert an attenuation setting in dB to machine units.

        :param att: Attenuation setting in dB.
        :return: Digital attenuation setting.
        """
        code = int32(255) - int32(round(att * 8))
        if code < 0 or code > 255:
            raise ValueError("Invalid Mirny attenuation!")
        return code

    @kernel
    def set_att_mu(self, channel, att):
        """Set digital step attenuator in machine units.

        :param att: Attenuation setting, 8 bit digital.
        """
        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 16, SPIT_WR, SPI_CS)
        self.bus.write(((channel | 8) << 25) | (att << 16))

    @kernel
    def set_att(self, channel, att):
        """Set digital step attenuator in SI units.

        This method will write the attenuator settings of the selected channel.

        .. seealso:: :meth:`set_att_mu`

        :param channel: Attenuator channel (0-3).
        :param att: Attenuation setting in dB. Higher value is more
            attenuation. Minimum attenuation is 0*dB, maximum attenuation is
            31.5*dB.
        """
        self.set_att_mu(channel, self.att_to_mu(att))

    @kernel
    def write_ext(self, addr, length, data, ext_div=SPIT_WR):
        """Perform SPI write to a prefixed address"""
        self.bus.set_config_mu(SPI_CONFIG, 8, SPIT_WR, SPI_CS)
        self.bus.write(addr << 25)
        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, length, ext_div, SPI_CS)
        if length < 32:
            data <<= 32 - length
        self.bus.write(data)
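``att_to_mu`` maps 0 dB to 31.5 dB in 0.125 dB steps onto an inverted 8 bit code: 10 dB gives 255 - round(10*8) = 175. A short sketch assuming a Mirny CPLD registered as ``mirny0_cpld`` (placeholder name):

from artiq.experiment import *

class MirnyStartup(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("mirny0_cpld")   # Mirny CPLD device assumed

    @kernel
    def run(self):
        self.core.reset()
        self.mirny0_cpld.init()              # detect board, select clock source
        self.mirny0_cpld.set_att(0, 10.0)    # 10 dB -> attenuator code 175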
@@ -1,47 +0,0 @@ (file removed)
The standalone PCF8574A driver module is deleted; its PCF8574A class is moved verbatim into the I2C driver (see the class added at the end of the I2C diff above).
@@ -1,77 +0,0 @@ (file removed)
from .spr import mtspr, mfspr
from artiq.language.core import kernel


_MAX_SPRS_PER_GRP_BITS = 11
_SPRGROUP_PC = 7 << _MAX_SPRS_PER_GRP_BITS
_SPR_PCMR_CP = 0x00000001  # Counter present
_SPR_PCMR_CISM = 0x00000004  # Count in supervisor mode
_SPR_PCMR_CIUM = 0x00000008  # Count in user mode
_SPR_PCMR_LA = 0x00000010  # Load access event
_SPR_PCMR_SA = 0x00000020  # Store access event
_SPR_PCMR_IF = 0x00000040  # Instruction fetch event
_SPR_PCMR_DCM = 0x00000080  # Data cache miss event
_SPR_PCMR_ICM = 0x00000100  # Insn cache miss event
_SPR_PCMR_IFS = 0x00000200  # Insn fetch stall event
_SPR_PCMR_LSUS = 0x00000400  # LSU stall event
_SPR_PCMR_BS = 0x00000800  # Branch stall event
_SPR_PCMR_DTLBM = 0x00001000  # DTLB miss event
_SPR_PCMR_ITLBM = 0x00002000  # ITLB miss event
_SPR_PCMR_DDS = 0x00004000  # Data dependency stall event
_SPR_PCMR_WPE = 0x03ff8000  # Watchpoint events


@kernel(flags={"nowrite", "nounwind"})
def _PCCR(n):
    return _SPRGROUP_PC + n


@kernel(flags={"nowrite", "nounwind"})
def _PCMR(n):
    return _SPRGROUP_PC + 8 + n


class CorePCU:
    """Core device performance counter unit (PCU) access"""
    def __init__(self, dmgr, core_device="core"):
        self.core = dmgr.get(core_device)

    @kernel
    def start(self):
        """
        Configure and clear the kernel CPU performance counters.

        The eight counters are configured to count the following events:
            * Load or store
            * Instruction fetch
            * Data cache miss
            * Instruction cache miss
            * Instruction fetch stall
            * Load-store-unit stall
            * Branch stall
            * Data dependency stall
        """
        for i in range(8):
            if not mfspr(_PCMR(i)) & _SPR_PCMR_CP:
                raise ValueError("counter not present")
            mtspr(_PCMR(i), 0)
            mtspr(_PCCR(i), 0)
        mtspr(_PCMR(0), _SPR_PCMR_CISM | _SPR_PCMR_LA | _SPR_PCMR_SA)
        mtspr(_PCMR(1), _SPR_PCMR_CISM | _SPR_PCMR_IF)
        mtspr(_PCMR(2), _SPR_PCMR_CISM | _SPR_PCMR_DCM)
        mtspr(_PCMR(3), _SPR_PCMR_CISM | _SPR_PCMR_ICM)
        mtspr(_PCMR(4), _SPR_PCMR_CISM | _SPR_PCMR_IFS)
        mtspr(_PCMR(5), _SPR_PCMR_CISM | _SPR_PCMR_LSUS)
        mtspr(_PCMR(6), _SPR_PCMR_CISM | _SPR_PCMR_BS)
        mtspr(_PCMR(7), _SPR_PCMR_CISM | _SPR_PCMR_DDS)

    @kernel
    def get(self, r):
        """
        Read the performance counters and store the counts in the
        array provided.

        :param list[int] r: array to store the counter values
        """
        for i in range(8):
            r[i] = mfspr(_PCCR(i))
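For reference, the removed CorePCU driver was used roughly as in this sketch: start the counters, run the section of interest, then read the eight counts back into a preallocated list. The device name ``pcu`` is assumed:

from artiq.experiment import *

class ProfileSection(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("pcu")   # CorePCU device assumed

    @kernel
    def run(self):
        counts = [0]*8
        self.pcu.start()
        # ... timed section of interest goes here ...
        self.pcu.get(counts)
        for i in range(8):
            print(counts[i])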
(One file diff is suppressed because it is too large.)
@@ -1,92 +0,0 @@ (file removed)
from collections import defaultdict
import subprocess


class Symbolizer:
    def __init__(self, binary, triple, demangle=True):
        cmdline = [
            triple + "-addr2line", "--exe=" + binary,
            "--addresses", "--functions", "--inlines"
        ]
        if demangle:
            cmdline.append("--demangle=rust")
        self._addr2line = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                           universal_newlines=True)

    def symbolize(self, addr):
        self._addr2line.stdin.write("0x{:08x}\n0\n".format(addr))
        self._addr2line.stdin.flush()
        self._addr2line.stdout.readline()  # 0x[addr]

        result = []
        while True:
            function = self._addr2line.stdout.readline().rstrip()

            # check for end marker
            if function == "0x00000000":  # 0x00000000
                self._addr2line.stdout.readline()  # ??
                self._addr2line.stdout.readline()  # ??:0
                return result

            file, line = self._addr2line.stdout.readline().rstrip().split(":")

            result.append((function, file, line, addr))


class CallgrindWriter:
    def __init__(self, output, binary, triple, compression=True, demangle=True):
        self._output = output
        self._binary = binary
        self._current = defaultdict(lambda: None)
        self._ids = defaultdict(lambda: {})
        self._compression = compression
        self._symbolizer = Symbolizer(binary, triple, demangle=demangle)

    def _write(self, fmt, *args, **kwargs):
        self._output.write(fmt.format(*args, **kwargs))
        self._output.write("\n")

    def _spec(self, spec, value):
        if self._current[spec] == value:
            return
        self._current[spec] = value

        if not self._compression or value == "??":
            self._write("{}={}", spec, value)
            return

        spec_ids = self._ids[spec]
        if value in spec_ids:
            self._write("{}=({})", spec, spec_ids[value])
        else:
            spec_ids[value] = len(spec_ids) + 1
            self._write("{}=({}) {}", spec, spec_ids[value], value)

    def header(self):
        self._write("# callgrind format")
        self._write("version: 1")
        self._write("creator: ARTIQ")
        self._write("positions: instr line")
        self._write("events: Hits")
        self._write("")
        self._spec("ob", self._binary)
        self._spec("cob", self._binary)

    def hit(self, addr, count):
        for function, file, line, addr in self._symbolizer.symbolize(addr):
            self._spec("fl", file)
            self._spec("fn", function)
            self._write("0x{:08x} {} {}", addr, line, count)

    def edge(self, caller, callee, count):
        edges = self._symbolizer.symbolize(callee) + self._symbolizer.symbolize(caller)
        for (callee, caller) in zip(edges, edges[1:]):
            function, file, line, addr = callee
            self._spec("cfl", file)
            self._spec("cfn", function)
            self._write("calls={} 0x{:08x} {}", count, addr, line)

            function, file, line, addr = caller
            self._spec("fl", file)
            self._spec("fn", function)
            self._write("0x{:08x} {} {}", addr, line, count)
|
@ -1,16 +1,14 @@
|
||||||
from artiq.language.core import syscall
|
from artiq.language.core import syscall
|
||||||
from artiq.language.types import TInt64, TInt32, TNone, TList
|
from artiq.language.types import TInt32, TInt64, TList, TNone, TTuple
|
||||||
|
|
||||||
|
|
||||||
@syscall(flags={"nowrite"})
|
@syscall(flags={"nowrite"})
|
||||||
def rtio_output(time_mu: TInt64, channel: TInt32, addr: TInt32, data: TInt32
|
def rtio_output(target: TInt32, data: TInt32) -> TNone:
|
||||||
) -> TNone:
|
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
@syscall(flags={"nowrite"})
|
@syscall(flags={"nowrite"})
|
||||||
def rtio_output_wide(time_mu: TInt64, channel: TInt32, addr: TInt32,
|
def rtio_output_wide(target: TInt32, data: TList(TInt32)) -> TNone:
|
||||||
data: TList(TInt32)) -> TNone:
|
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
|
@ -22,3 +20,12 @@ def rtio_input_timestamp(timeout_mu: TInt64, channel: TInt32) -> TInt64:
|
||||||
@syscall(flags={"nowrite"})
|
@syscall(flags={"nowrite"})
|
||||||
def rtio_input_data(channel: TInt32) -> TInt32:
|
def rtio_input_data(channel: TInt32) -> TInt32:
|
||||||
raise NotImplementedError("syscall not simulated")
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
||||||
|
|
||||||
|
@syscall(flags={"nowrite"})
|
||||||
|
def rtio_input_timestamped_data(timeout_mu: TInt64,
|
||||||
|
channel: TInt32) -> TTuple([TInt64, TInt32]):
|
||||||
|
"""Wait for an input event up to timeout_mu on the given channel, and
|
||||||
|
return a tuple of timestamp and attached data, or (-1, 0) if the timeout is
|
||||||
|
reached."""
|
||||||
|
raise NotImplementedError("syscall not simulated")
|
||||||
|
|
|
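The new ``rtio_output`` signature drops the explicit timestamp (the event is placed at the current RTIO time) and folds channel and address into one ``target`` word; the drivers updated in this diff compute it as ``(channel << 8) | addr``. A minimal porting sketch (the helper name is invented for illustration):

from artiq.language.core import kernel
from artiq.coredevice.rtio import rtio_output

@kernel
def write_phy_register(channel, addr, data):
    # old call sites: rtio_output(now_mu(), channel, addr, data)
    # new call sites pack channel and address into one 32-bit target word
    rtio_output((channel << 8) | addr, data)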
@@ -15,24 +15,26 @@ SPI_CS_PGIA = 1  # separate SPI bus, CS used as RCLK


@portable
-def adc_mu_to_volt(data, gain=0):
+def adc_mu_to_volt(data, gain=0, corrected_fs=True):
    """Convert ADC data in machine units to Volts.

    :param data: 16 bit signed ADC word
    :param gain: PGIA gain setting (0: 1, ..., 3: 1000)
+    :param corrected_fs: use corrected ADC FS reference.
+        Should be True for Sampler revisions after v2.1. False for v2.1 and earlier.
    :return: Voltage in Volts
    """
    if gain == 0:
-        volt_per_lsb = 20./(1 << 16)
+        volt_per_lsb = 20.48 / (1 << 16) if corrected_fs else 20. / (1 << 16)
    elif gain == 1:
-        volt_per_lsb = 2./(1 << 16)
+        volt_per_lsb = 2.048 / (1 << 16) if corrected_fs else 2. / (1 << 16)
    elif gain == 2:
-        volt_per_lsb = .2/(1 << 16)
+        volt_per_lsb = .2048 / (1 << 16) if corrected_fs else .2 / (1 << 16)
    elif gain == 3:
-        volt_per_lsb = .02/(1 << 16)
+        volt_per_lsb = 0.02048 / (1 << 16) if corrected_fs else .02 / (1 << 16)
    else:
        raise ValueError("invalid gain")
-    return data*volt_per_lsb
+    return data * volt_per_lsb


class Sampler:
@@ -48,12 +50,13 @@ class Sampler:
    :param gains: Initial value for PGIA gains shift register
        (default: 0x0000). Knowledge of this state is not transferred
        between experiments.
+    :param hw_rev: Sampler's hardware revision string (default 'v2.2')
    :param core_device: Core device name
    """
-    kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div"}
+    kernel_invariants = {"bus_adc", "bus_pgia", "core", "cnv", "div", "corrected_fs"}

    def __init__(self, dmgr, spi_adc_device, spi_pgia_device, cnv_device,
-                 div=8, gains=0x0000, core_device="core"):
+                 div=8, gains=0x0000, hw_rev="v2.2", core_device="core"):
        self.bus_adc = dmgr.get(spi_adc_device)
        self.bus_adc.update_xfer_duration_mu(div, 32)
        self.bus_pgia = dmgr.get(spi_pgia_device)
@@ -62,6 +65,11 @@ class Sampler:
        self.cnv = dmgr.get(cnv_device)
        self.div = div
        self.gains = gains
+        self.corrected_fs = self.use_corrected_fs(hw_rev)
+
+    @staticmethod
+    def use_corrected_fs(hw_rev):
+        return hw_rev != "v2.1"

    @kernel
    def init(self):
@@ -144,4 +152,4 @@ class Sampler:
        for i in range(n):
            channel = i + 8 - len(data)
            gain = (self.gains >> (channel*2)) & 0b11
-            data[i] = adc_mu_to_volt(adc_data[i], gain)
+            data[i] = adc_mu_to_volt(adc_data[i], gain, self.corrected_fs)
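With the corrected full scale, one LSB at unity gain is 20.48 V / 65536 = 312.5 uV instead of 20 V / 65536 = 305.2 uV. A quick host-side check of the two scale factors, assuming ``adc_mu_to_volt`` is importable from the Sampler coredevice module:

from artiq.coredevice.sampler import adc_mu_to_volt

raw = 3200  # 16 bit signed ADC word, PGIA gain setting 0
print(adc_mu_to_volt(raw, 0, corrected_fs=True))    # 1.0 V   (20.48 V span)
print(adc_mu_to_volt(raw, 0, corrected_fs=False))   # ~0.977 V (20 V span)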
@@ -1,372 +0,0 @@ (file removed)
"""
Driver for the Smart Arbitrary Waveform Generator (SAWG) on RTIO.

The SAWG is an "improved DDS" built in gateware and interfacing to
high-speed DACs.

Output event replacement is supported except on the configuration channel.
"""


from artiq.language.types import TInt32, TFloat
from numpy import int32, int64
from artiq.language.core import kernel, now_mu
from artiq.coredevice.spline import Spline
from artiq.coredevice.rtio import rtio_output


# sawg.Config addresses
_SAWG_DIV = 0
_SAWG_CLR = 1
_SAWG_IQ_EN = 2
# _SAWF_PAD = 3  # reserved
_SAWG_OUT_MIN = 4
_SAWG_OUT_MAX = 5
_SAWG_DUC_MIN = 6
_SAWG_DUC_MAX = 7


class Config:
    """SAWG configuration.

    Exposes the configurable quantities of a single SAWG channel.

    Access to the configuration registers for a SAWG channel can not
    be concurrent. There must be at least :attr:`_rtio_interval` machine
    units of delay between accesses. Replacement is not supported and will
    lead to an ``RTIOCollision`` as this is likely a programming error.
    All methods therefore advance the timeline by the duration of one
    configuration register transfer.

    :param channel: RTIO channel number of the channel.
    :param core: Core device.
    """
    kernel_invariants = {"channel", "core", "_out_scale", "_duc_scale",
                         "_rtio_interval"}

    def __init__(self, channel, core, cordic_gain=1.):
        self.channel = channel
        self.core = core
        # normalized DAC output
        self._out_scale = (1 << 15) - 1.
        # normalized DAC output including DUC cordic gain
        self._duc_scale = self._out_scale/cordic_gain
        # configuration channel access interval
        self._rtio_interval = int64(3*self.core.ref_multiplier)

    @kernel
    def set_div(self, div: TInt32, n: TInt32=0):
        """Set the spline evolution divider and current counter value.

        The divider and the spline evolution are synchronized across all
        spline channels within a SAWG channel. The DDS/DUC phase accumulators
        always evolve at full speed.

        .. note:: The spline evolution divider has not been tested extensively
            and is currently considered a technological preview only.

        :param div: Spline evolution divider, such that
            ``t_sawg_spline/t_rtio_coarse = div + 1``. Default: ``0``.
        :param n: Current value of the counter. Default: ``0``.
        """
        rtio_output(now_mu(), self.channel, _SAWG_DIV, div | (n << 16))
        delay_mu(self._rtio_interval)

    @kernel
    def set_clr(self, clr0: TInt32, clr1: TInt32, clr2: TInt32):
        """Set the accumulator clear mode for the three phase accumulators.

        When the ``clr`` bit for a given DDS/DUC phase accumulator is
        set, that phase accumulator will be cleared with every phase offset
        RTIO command and the output phase of the DDS/DUC will be
        exactly the phase RTIO value ("absolute phase update mode").

        .. math::
            q^\prime(t) = p^\prime + (t - t^\prime) f^\prime

        In turn, when the bit is cleared, the phase RTIO channels
        determine a phase offset to the current (carrier-) value of the
        DDS/DUC phase accumulator. This "relative phase update mode" is
        sometimes also called "continuous phase mode".

        .. math::
            q^\prime(t) = q(t^\prime) + (p^\prime - p) +
                (t - t^\prime) f^\prime

        Where:

            * :math:`q`, :math:`q^\prime`: old/new phase accumulator
            * :math:`p`, :math:`p^\prime`: old/new phase offset
            * :math:`f^\prime`: new frequency
            * :math:`t^\prime`: timestamp of setting new :math:`p`, :math:`f`
            * :math:`t`: running time

        :param clr0: Auto-clear phase accumulator of the ``phase0``/
            ``frequency0`` DUC. Default: ``True``
        :param clr1: Auto-clear phase accumulator of the ``phase1``/
            ``frequency1`` DDS. Default: ``True``
        :param clr2: Auto-clear phase accumulator of the ``phase2``/
            ``frequency2`` DDS. Default: ``True``
        """
        rtio_output(now_mu(), self.channel, _SAWG_CLR, clr0 |
                    (clr1 << 1) | (clr2 << 2))
        delay_mu(self._rtio_interval)

    @kernel
    def set_iq_en(self, i_enable: TInt32, q_enable: TInt32):
        """Enable I/Q data on this DAC channel.

        Every pair of SAWG channels forms a buddy pair.
        The ``iq_en`` configuration controls which DDS data is emitted to the
        DACs.

        Refer to the documentation of :class:`SAWG` for a mathematical
        description of ``i_enable`` and ``q_enable``.

        .. note:: Quadrature data from the buddy channel is currently
            a technological preview only. The data is ignored in the SAWG
            gateware and not added to the DAC output.
            This is equivalent to the ``q_enable`` switch always being ``0``.

        :param i_enable: Controls adding the in-phase
            DUC-DDS data of *this* SAWG channel to *this* DAC channel.
            Default: ``1``.
        :param q_enable: Controls adding the quadrature
            DUC-DDS data of this SAWG's *buddy* channel to *this* DAC
            channel. Default: ``0``.
        """
        rtio_output(now_mu(), self.channel, _SAWG_IQ_EN, i_enable |
                    (q_enable << 1))
        delay_mu(self._rtio_interval)

    @kernel
    def set_duc_max_mu(self, limit: TInt32):
        """Set the digital up-converter (DUC) I and Q data summing junctions
        upper limit. In machine units.

        The default limits are chosen to reach maximum and minimum DAC output
        amplitude.

        For a description of the limiter functions in normalized units see:

        .. seealso:: :meth:`set_duc_max`
        """
        rtio_output(now_mu(), self.channel, _SAWG_DUC_MAX, limit)
        delay_mu(self._rtio_interval)

    @kernel
    def set_duc_min_mu(self, limit: TInt32):
        """.. seealso:: :meth:`set_duc_max_mu`"""
        rtio_output(now_mu(), self.channel, _SAWG_DUC_MIN, limit)
        delay_mu(self._rtio_interval)

    @kernel
    def set_out_max_mu(self, limit: TInt32):
        """.. seealso:: :meth:`set_duc_max_mu`"""
        rtio_output(now_mu(), self.channel, _SAWG_OUT_MAX, limit)
        delay_mu(self._rtio_interval)

    @kernel
    def set_out_min_mu(self, limit: TInt32):
        """.. seealso:: :meth:`set_duc_max_mu`"""
        rtio_output(now_mu(), self.channel, _SAWG_OUT_MIN, limit)
        delay_mu(self._rtio_interval)

    @kernel
    def set_duc_max(self, limit: TFloat):
        """Set the digital up-converter (DUC) I and Q data summing junctions
        upper limit.

        Each of the three summing junctions has a saturating adder with
        configurable upper and lower limits. The three summing junctions are:

        * At the in-phase input to the ``phase0``/``frequency0`` fast DUC,
          after the anti-aliasing FIR filter.
        * At the quadrature input to the ``phase0``/``frequency0``
          fast DUC, after the anti-aliasing FIR filter. The in-phase and
          quadrature data paths both use the same limits.
        * Before the DAC, where the following three data streams
          are added together:

            * the output of the ``offset`` spline,
            * (optionally, depending on ``i_enable``) the in-phase output
              of the ``phase0``/``frequency0`` fast DUC, and
            * (optionally, depending on ``q_enable``) the quadrature
              output of the ``phase0``/``frequency0`` fast DUC of the
              buddy channel.

        Refer to the documentation of :class:`SAWG` for a mathematical
        description of the summing junctions.

        :param limit: Limit value ``[-1, 1]``. The output of the limiter will
            never exceed this limit. The default limits are the full range
            ``[-1, 1]``.

        .. seealso::
            * :meth:`set_duc_max`: Upper limit of the in-phase and quadrature
              inputs to the DUC.
            * :meth:`set_duc_min`: Lower limit of the in-phase and quadrature
              inputs to the DUC.
            * :meth:`set_out_max`: Upper limit of the DAC output.
            * :meth:`set_out_min`: Lower limit of the DAC output.
        """
        self.set_duc_max_mu(int32(round(limit*self._duc_scale)))

    @kernel
    def set_duc_min(self, limit: TFloat):
        """.. seealso:: :meth:`set_duc_max`"""
        self.set_duc_min_mu(int32(round(limit*self._duc_scale)))

    @kernel
    def set_out_max(self, limit: TFloat):
        """.. seealso:: :meth:`set_duc_max`"""
        self.set_out_max_mu(int32(round(limit*self._out_scale)))

    @kernel
    def set_out_min(self, limit: TFloat):
        """.. seealso:: :meth:`set_duc_max`"""
        self.set_out_min_mu(int32(round(limit*self._out_scale)))


class SAWG:
    """Smart arbitrary waveform generator channel.
    The channel is parametrized as: ::

        oscillators = exp(2j*pi*(frequency0*t + phase0))*(
            amplitude1*exp(2j*pi*(frequency1*t + phase1)) +
            amplitude2*exp(2j*pi*(frequency2*t + phase2)))

        output = (offset +
            i_enable*Re(oscillators) +
            q_enable*Im(buddy_oscillators))

    This parametrization can be viewed as two complex (quadrature) oscillators
    (``frequency1``/``phase1`` and ``frequency2``/``phase2``) that are
    executing and sampling at the coarse RTIO frequency. They can represent
    frequencies within the first Nyquist zone from ``-f_rtio_coarse/2`` to
    ``f_rtio_coarse/2``.

    .. note:: The coarse RTIO frequency ``f_rtio_coarse`` is the inverse of
        ``ref_period*multiplier``. Both are arguments of the ``Core`` device,
        specified in the device database ``device_db.py``.

    The sum of their outputs is then interpolated by a factor of
    :attr:`parallelism` (2, 4, 8 depending on the bitstream) using a
    finite-impulse-response (FIR) anti-aliasing filter (more accurately
    a half-band filter).

    The filter is followed by a configurable saturating limiter.

    After the limiter, the data is shifted in frequency using a complex
    digital up-converter (DUC, ``frequency0``/``phase0``) running at
    :attr:`parallelism` times the coarse RTIO frequency. The first Nyquist
    zone of the DUC extends from ``-f_rtio_coarse*parallelism/2`` to
    ``f_rtio_coarse*parallelism/2``. Other Nyquist zones are usable depending
    on the interpolation/modulation options configured in the DAC.

    The real/in-phase data after digital up-conversion can be offset using
    another spline interpolator ``offset``.

    The ``i_enable``/``q_enable`` switches enable emission of quadrature
    signals for later analog quadrature mixing distinguishing upper and lower
    sidebands and thus doubling the bandwidth. They can also be used to emit
    four-tone signals.

    .. note:: Quadrature data from the buddy channel is currently
        ignored in the SAWG gateware and not added to the DAC output.
        This is equivalent to the ``q_enable`` switch always being ``0``.

    The configuration channel and the nine
    :class:`artiq.coredevice.spline.Spline` interpolators are accessible as
    attributes:

    * :attr:`config`: :class:`Config`
    * :attr:`offset`, :attr:`amplitude1`, :attr:`amplitude2`: in units
      of full scale
    * :attr:`phase0`, :attr:`phase1`, :attr:`phase2`: in units of turns
    * :attr:`frequency0`, :attr:`frequency1`, :attr:`frequency2`: in units
      of Hz

    .. note:: The latencies (pipeline depths) of the nine data channels (i.e.
        all except :attr:`config`) are matched. Equivalent channels (e.g.
        :attr:`phase1` and :attr:`phase2`) are exactly matched. Channels of
        different type or functionality (e.g. :attr:`offset` vs
        :attr:`amplitude1`, DDS vs DUC, :attr:`phase0` vs :attr:`phase1`) are
        only matched to within one coarse RTIO cycle.

    :param channel_base: RTIO channel number of the first channel (amplitude).
        The configuration channel and frequency/phase/amplitude channels are
        then assumed to be successive channels.
    :param parallelism: Number of output samples per coarse RTIO clock cycle.
    :param core_device: Name of the core device that this SAWG is on.
    """
    kernel_invariants = {"channel_base", "core", "parallelism",
                         "amplitude1", "frequency1", "phase1",
                         "amplitude2", "frequency2", "phase2",
                         "frequency0", "phase0", "offset"}

    def __init__(self, dmgr, channel_base, parallelism, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel_base = channel_base
        self.parallelism = parallelism
        width = 16
        time_width = 16
        cordic_gain = 1.646760258057163  # Cordic(width=16, guard=None).gain
        head_room = 1.001
        self.config = Config(channel_base, self.core, cordic_gain)
        self.offset = Spline(width, time_width, channel_base + 1,
                             self.core, 2.*head_room)
        self.amplitude1 = Spline(width, time_width, channel_base + 2,
                                 self.core, 2*head_room*cordic_gain**2)
        self.frequency1 = Spline(3*width, time_width, channel_base + 3,
                                 self.core, 1/self.core.coarse_ref_period)
        self.phase1 = Spline(width, time_width, channel_base + 4,
                             self.core, 1.)
        self.amplitude2 = Spline(width, time_width, channel_base + 5,
                                 self.core, 2*head_room*cordic_gain**2)
        self.frequency2 = Spline(3*width, time_width, channel_base + 6,
                                 self.core, 1/self.core.coarse_ref_period)
        self.phase2 = Spline(width, time_width, channel_base + 7,
                             self.core, 1.)
        self.frequency0 = Spline(2*width, time_width, channel_base + 8,
                                 self.core,
                                 parallelism/self.core.coarse_ref_period)
        self.phase0 = Spline(width, time_width, channel_base + 9,
                             self.core, 1.)

    @kernel
    def reset(self):
        """Re-establish initial conditions.

        This clears all spline interpolators, accumulators and configuration
        settings.

        This method advances the timeline by the time required to perform all
        7 writes to the configuration channel, plus 9 coarse RTIO cycles.
        """
        self.config.set_div(0, 0)
        self.config.set_clr(1, 1, 1)
        self.config.set_iq_en(1, 0)
        self.config.set_duc_min(-1.)
        self.config.set_duc_max(1.)
        self.config.set_out_min(-1.)
        self.config.set_out_max(1.)
        self.frequency0.set_mu(0)
        coarse_cycle = int64(self.core.ref_multiplier)
        delay_mu(coarse_cycle)
        self.frequency1.set_mu(0)
        delay_mu(coarse_cycle)
        self.frequency2.set_mu(0)
        delay_mu(coarse_cycle)
        self.phase0.set_mu(0)
        delay_mu(coarse_cycle)
        self.phase1.set_mu(0)
        delay_mu(coarse_cycle)
        self.phase2.set_mu(0)
        delay_mu(coarse_cycle)
        self.amplitude1.set_mu(0)
        delay_mu(coarse_cycle)
        self.amplitude2.set_mu(0)
        delay_mu(coarse_cycle)
        self.offset.set_mu(0)
        delay_mu(coarse_cycle)
|
@ -1,36 +0,0 @@
from artiq.language.core import kernel, delay
from artiq.language.units import us


class ShiftReg:
    """Driver for shift registers/latch combos connected to TTLs"""
    kernel_invariants = {"dt", "n"}

    def __init__(self, dmgr, clk, ser, latch, n=32, dt=10*us):
        self.core = dmgr.get("core")
        self.clk = dmgr.get(clk)
        self.ser = dmgr.get(ser)
        self.latch = dmgr.get(latch)
        self.n = n
        self.dt = dt

    @kernel
    def set(self, data):
        """Sets the values of the latch outputs. This does not
        advance the timeline and the waveform is generated before
        `now`."""
        delay(-2*(self.n + 1)*self.dt)
        for i in range(self.n):
            if (data >> (self.n-i-1)) & 1 == 0:
                self.ser.off()
            else:
                self.ser.on()
            self.clk.off()
            delay(self.dt)
            self.clk.on()
            delay(self.dt)
        self.clk.off()
        self.latch.on()
        delay(self.dt)
        self.latch.off()
        delay(self.dt)

@ -0,0 +1,623 @@
|
||||||
|
from artiq.language.core import *
|
||||||
|
from artiq.language.types import *
|
||||||
|
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||||
|
from artiq.coredevice import spi2 as spi
|
||||||
|
from artiq.language.units import us
|
||||||
|
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def shuttler_volt_to_mu(volt):
|
||||||
|
"""Return the equivalent DAC code. Valid input range is from -10 to
|
||||||
|
10 - LSB.
|
||||||
|
"""
|
||||||
|
return round((1 << 16) * (volt / 20.0)) & 0xffff
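
# Hedged host-side check (a sketch, not part of the driver): since the
# function above is @portable it also runs on the host; it maps the
# -10 V .. +10 V - 1 LSB range onto a 16-bit two's-complement DAC code.
assert shuttler_volt_to_mu(0.0) == 0x0000
assert shuttler_volt_to_mu(5.0) == 0x4000
assert shuttler_volt_to_mu(-10.0) == 0x8000
assert shuttler_volt_to_mu(10.0 - 20.0 / (1 << 16)) == 0x7fff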
|
||||||
|
|
||||||
|
|
||||||
|
class Config:
|
||||||
|
"""Shuttler configuration registers interface.
|
||||||
|
|
||||||
|
The configuration registers control waveform phase auto-clear, and pre-DAC
|
||||||
|
gain & offset values for calibration with ADC on the Shuttler AFE card.
|
||||||
|
|
||||||
|
To find the calibrated DAC code, the Shuttler Core first multiplies the
|
||||||
|
output data with pre-DAC gain, then adds the offset.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The calibrated DAC code is clamped to 0x7fff (maximum) and 0x8000 (minimum).
|
||||||
|
|
||||||
|
:param channel: RTIO channel number of this interface.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariants = {
|
||||||
|
"core", "channel", "target_base", "target_read",
|
||||||
|
"target_gain", "target_offset", "target_clr"
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, channel, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.channel = channel
|
||||||
|
self.target_base = channel << 8
|
||||||
|
self.target_read = 1 << 6
|
||||||
|
self.target_gain = 0 * (1 << 4)
|
||||||
|
self.target_offset = 1 * (1 << 4)
|
||||||
|
self.target_clr = 1 * (1 << 5)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_clr(self, clr):
|
||||||
|
"""Set/Unset waveform phase clear bits.
|
||||||
|
|
||||||
|
Each bit corresponds to a Shuttler waveform generator core. Setting a
|
||||||
|
clear bit forces the Shuttler Core to clear the phase accumulator on
|
||||||
|
waveform trigger (See :class:`Trigger` for the trigger method).
|
||||||
|
Otherwise, the phase accumulator increments from its original value.
|
||||||
|
|
||||||
|
:param clr: Waveform phase clear bits. The MSB corresponds to Channel
|
||||||
|
15, LSB corresponds to Channel 0.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_clr, clr)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_gain(self, channel, gain):
|
||||||
|
"""Set the 16-bits pre-DAC gain register of a Shuttler Core channel.
|
||||||
|
|
||||||
|
The `gain` parameter represents the decimal portion of the gain
|
||||||
|
factor. The MSB represents 0.5 and the sign bit. Hence, the valid
|
||||||
|
total gain value (1 +/- 0.gain) ranges from 0.5 to 1.5 - LSB.
|
||||||
|
|
||||||
|
:param channel: Shuttler Core channel to be configured.
|
||||||
|
:param gain: Shuttler Core channel gain.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_gain | channel, gain)
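
# Hedged host-side helper (hypothetical, not part of the driver): one reading
# of the register encoding described above is a signed 16-bit fraction of the
# deviation from unity gain, so a total pre-DAC gain in
# [0.5, 1.5 - 2**-16] would map to the register value as follows.
def total_gain_to_mu(total_gain):
    # e.g. 1.0 -> 0x0000, 1.25 -> 0x4000, 0.75 -> 0xc000
    assert 0.5 <= total_gain < 1.5
    return round((total_gain - 1.0) * (1 << 16)) & 0xffff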
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def get_gain(self, channel):
|
||||||
|
"""Return the pre-DAC gain value of a Shuttler Core channel.
|
||||||
|
|
||||||
|
:param channel: The Shuttler Core channel.
|
||||||
|
:return: Pre-DAC gain value. See :meth:`set_gain`.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_gain |
|
||||||
|
self.target_read | channel, 0)
|
||||||
|
return rtio_input_data(self.channel)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_offset(self, channel, offset):
|
||||||
|
"""Set the 16-bits pre-DAC offset register of a Shuttler Core channel.
|
||||||
|
|
||||||
|
.. seealso::
|
||||||
|
:meth:`shuttler_volt_to_mu`
|
||||||
|
|
||||||
|
:param channel: Shuttler Core channel to be configured.
|
||||||
|
:param offset: Shuttler Core channel offset.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_offset | channel, offset)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def get_offset(self, channel):
|
||||||
|
"""Return the pre-DAC offset value of a Shuttler Core channel.
|
||||||
|
|
||||||
|
:param channel: The Shuttler Core channel.
|
||||||
|
:return: Pre-DAC offset value. See :meth:`set_offset`.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_base | self.target_offset |
|
||||||
|
self.target_read | channel, 0)
|
||||||
|
return rtio_input_data(self.channel)
|
||||||
|
|
||||||
|
|
||||||
|
class DCBias:
|
||||||
|
"""Shuttler Core cubic DC-bias spline.
|
||||||
|
|
||||||
|
A Shuttler channel can generate a waveform `w(t)` that is the sum of a
|
||||||
|
cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
|
||||||
|
spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
w(t) = a(t) + b(t) * cos(c(t))
|
||||||
|
|
||||||
|
And `t` corresponds to time in seconds.
|
||||||
|
This class controls the cubic spline `a(t)`, in which
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
a(t) = p_0 + p_1t + \\frac{p_2t^2}{2} + \\frac{p_3t^3}{6}
|
||||||
|
|
||||||
|
And `a(t)` is in Volt.
|
||||||
|
|
||||||
|
:param channel: RTIO channel number of this DC-bias spline interface.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariants = {"core", "channel", "target_o"}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, channel, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.channel = channel
|
||||||
|
self.target_o = channel << 8
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_waveform(self, a0: TInt32, a1: TInt32, a2: TInt64, a3: TInt64):
|
||||||
|
"""Set the DC-bias spline waveform.
|
||||||
|
|
||||||
|
Given `a(t)` as defined in :class:`DCBias`, the coefficients should be
|
||||||
|
configured by the following formulae.
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
T &= 8*10^{-9}
|
||||||
|
|
||||||
|
a_0 &= p_0
|
||||||
|
|
||||||
|
a_1 &= p_1T + \\frac{p_2T^2}{2} + \\frac{p_3T^3}{6}
|
||||||
|
|
||||||
|
a_2 &= p_2T^2 + p_3T^3
|
||||||
|
|
||||||
|
a_3 &= p_3T^3
|
||||||
|
|
||||||
|
:math:`a_0`, :math:`a_1`, :math:`a_2` and :math:`a_3` are 16, 32, 48
|
||||||
|
and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
|
||||||
|
machine unit conversion.
|
||||||
|
|
||||||
|
Note: The waveform is not updated to the Shuttler Core until
|
||||||
|
triggered. See :class:`Trigger` for the update triggering mechanism.
|
||||||
|
|
||||||
|
:param a0: The :math:`a_0` coefficient in machine unit.
|
||||||
|
:param a1: The :math:`a_1` coefficient in machine unit.
|
||||||
|
:param a2: The :math:`a_2` coefficient in machine unit.
|
||||||
|
:param a3: The :math:`a_3` coefficient in machine unit.
|
||||||
|
"""
|
||||||
|
coef_words = [
|
||||||
|
a0,
|
||||||
|
a1,
|
||||||
|
a1 >> 16,
|
||||||
|
a2 & 0xFFFF,
|
||||||
|
(a2 >> 16) & 0xFFFF,
|
||||||
|
(a2 >> 32) & 0xFFFF,
|
||||||
|
a3 & 0xFFFF,
|
||||||
|
(a3 >> 16) & 0xFFFF,
|
||||||
|
(a3 >> 32) & 0xFFFF,
|
||||||
|
]
|
||||||
|
|
||||||
|
for i in range(len(coef_words)):
|
||||||
|
rtio_output(self.target_o | i, coef_words[i])
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
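
# Hedged host-side sketch (not part of the driver): evaluate the a_i
# coefficients of the docstring formulae from physical spline coefficients
# p_i given in V, V/s, V/s**2 and V/s**3, with T the 8 ns sample period.
# Conversion of the results to the 16/32/48/48-bit machine-unit words is not
# reproduced here.
def dc_bias_coefficients_volt(p0, p1, p2, p3, t_sample=8e-9):
    a0 = p0
    a1 = p1*t_sample + p2*t_sample**2/2.0 + p3*t_sample**3/6.0
    a2 = p2*t_sample**2 + p3*t_sample**3
    a3 = p3*t_sample**3
    return a0, a1, a2, a3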
|
||||||
|
|
||||||
|
|
||||||
|
class DDS:
|
||||||
|
"""Shuttler Core DDS spline.
|
||||||
|
|
||||||
|
A Shuttler channel can generate a waveform `w(t)` that is the sum of a
|
||||||
|
cubic spline `a(t)` and a sinusoid modulated in amplitude by a cubic
|
||||||
|
spline `b(t)` and in phase/frequency by a quadratic spline `c(t)`, where
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
w(t) = a(t) + b(t) * cos(c(t))
|
||||||
|
|
||||||
|
And `t` corresponds to time in seconds.
|
||||||
|
This class controls the cubic spline `b(t)` and quadratic spline `c(t)`,
|
||||||
|
in which
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
b(t) &= g * (q_0 + q_1t + \\frac{q_2t^2}{2} + \\frac{q_3t^3}{6})
|
||||||
|
|
||||||
|
c(t) &= r_0 + r_1t + \\frac{r_2t^2}{2}
|
||||||
|
|
||||||
|
And `b(t)` is in Volt, `c(t)` is in number of turns. Note that `b(t)`
|
||||||
|
contributes to a constant gain of :math:`g=1.64676`.
|
||||||
|
|
||||||
|
:param channel: RTIO channel number of this DC-bias spline interface.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariants = {"core", "channel", "target_o"}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, channel, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.channel = channel
|
||||||
|
self.target_o = channel << 8
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_waveform(self, b0: TInt32, b1: TInt32, b2: TInt64, b3: TInt64,
|
||||||
|
c0: TInt32, c1: TInt32, c2: TInt32):
|
||||||
|
"""Set the DDS spline waveform.
|
||||||
|
|
||||||
|
Given `b(t)` and `c(t)` as defined in :class:`DDS`, the coefficients
|
||||||
|
should be configured by the following formulae.
|
||||||
|
|
||||||
|
.. math::
|
||||||
|
T &= 8*10^{-9}
|
||||||
|
|
||||||
|
b_0 &= q_0
|
||||||
|
|
||||||
|
b_1 &= q_1T + \\frac{q_2T^2}{2} + \\frac{q_3T^3}{6}
|
||||||
|
|
||||||
|
b_2 &= q_2T^2 + q_3T^3
|
||||||
|
|
||||||
|
b_3 &= q_3T^3
|
||||||
|
|
||||||
|
c_0 &= r_0
|
||||||
|
|
||||||
|
c_1 &= r_1T + \\frac{r_2T^2}{2}
|
||||||
|
|
||||||
|
c_2 &= r_2T^2
|
||||||
|
|
||||||
|
:math:`b_0`, :math:`b_1`, :math:`b_2` and :math:`b_3` are 16, 32, 48
|
||||||
|
and 48 bits in width respectively. See :meth:`shuttler_volt_to_mu` for
|
||||||
|
machine unit conversion. :math:`c_0`, :math:`c_1` and :math:`c_2` are
|
||||||
|
16, 32 and 32 bits in width respectively.
|
||||||
|
|
||||||
|
Note: The waveform is not updated to the Shuttler Core until
|
||||||
|
triggered. See :class:`Trigger` for the update triggering mechanism.
|
||||||
|
|
||||||
|
:param b0: The :math:`b_0` coefficient in machine unit.
|
||||||
|
:param b1: The :math:`b_1` coefficient in machine unit.
|
||||||
|
:param b2: The :math:`b_2` coefficient in machine unit.
|
||||||
|
:param b3: The :math:`b_3` coefficient in machine unit.
|
||||||
|
:param c0: The :math:`c_0` coefficient in machine unit.
|
||||||
|
:param c1: The :math:`c_1` coefficient in machine unit.
|
||||||
|
:param c2: The :math:`c_2` coefficient in machine unit.
|
||||||
|
"""
|
||||||
|
coef_words = [
|
||||||
|
b0,
|
||||||
|
b1,
|
||||||
|
b1 >> 16,
|
||||||
|
b2 & 0xFFFF,
|
||||||
|
(b2 >> 16) & 0xFFFF,
|
||||||
|
(b2 >> 32) & 0xFFFF,
|
||||||
|
b3 & 0xFFFF,
|
||||||
|
(b3 >> 16) & 0xFFFF,
|
||||||
|
(b3 >> 32) & 0xFFFF,
|
||||||
|
c0,
|
||||||
|
c1,
|
||||||
|
c1 >> 16,
|
||||||
|
c2,
|
||||||
|
c2 >> 16,
|
||||||
|
]
|
||||||
|
|
||||||
|
for i in range(len(coef_words)):
|
||||||
|
rtio_output(self.target_o | i, coef_words[i])
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
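
# Hedged host-side sketch (not part of the driver): for a plain tone the
# quadratic phase spline above reduces to r0 = initial phase (turns),
# r1 = frequency (Hz) and r2 = chirp rate (Hz/s), so by the formulae
#   c0 = r0,  c1 = r1*T + r2*T**2/2,  c2 = r2*T**2,  with T = 8 ns.
# E.g. a 1 MHz tone advances by c1 = 1e6 * 8e-9 = 8e-3 turns per sample.
# Conversion to the 16/32/32-bit machine-unit words is not reproduced here.
def dds_phase_coefficients_turns(phase, frequency, chirp=0.0, t_sample=8e-9):
    c0 = phase
    c1 = frequency*t_sample + chirp*t_sample**2/2.0
    c2 = chirp*t_sample**2
    return c0, c1, c2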
|
||||||
|
|
||||||
|
|
||||||
|
class Trigger:
|
||||||
|
"""Shuttler Core spline coefficients update trigger.
|
||||||
|
|
||||||
|
:param channel: RTIO channel number of the trigger interface.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariants = {"core", "channel", "target_o"}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, channel, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.channel = channel
|
||||||
|
self.target_o = channel << 8
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def trigger(self, trig_out):
|
||||||
|
"""Triggers coefficient update of (a) Shuttler Core channel(s).
|
||||||
|
|
||||||
|
Each bit corresponds to a Shuttler waveform generator core. Setting
|
||||||
|
`trig_out` bits commits the pending coefficient update (from
|
||||||
|
`set_waveform` in :class:`DCBias` and :class:`DDS`) to the Shuttler Core
|
||||||
|
synchronously.
|
||||||
|
|
||||||
|
:param trig_out: Coefficient update trigger bits. The MSB corresponds
|
||||||
|
to Channel 15, LSB corresponds to Channel 0.
|
||||||
|
"""
|
||||||
|
rtio_output(self.target_o, trig_out)
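
# Hedged kernel-side usage sketch: program two DC-bias splines and commit
# both pending updates on the same trigger. "shuttler0_dcbias0",
# "shuttler0_dcbias1" and "shuttler0_trigger" are hypothetical device
# database entries for the DCBias and Trigger drivers in this file.
#
#     @kernel
#     def run(self):
#         self.core.break_realtime()
#         self.shuttler0_dcbias0.set_waveform(shuttler_volt_to_mu(1.0), 0, 0, 0)
#         self.shuttler0_dcbias1.set_waveform(shuttler_volt_to_mu(-1.0), 0, 0, 0)
#         self.shuttler0_trigger.trigger(0b11)  # commit channels 0 and 1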
|
||||||
|
|
||||||
|
|
||||||
|
RELAY_SPI_CONFIG = (0*spi.SPI_OFFLINE | 1*spi.SPI_END |
|
||||||
|
0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
|
||||||
|
0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE |
|
||||||
|
0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
|
||||||
|
|
||||||
|
ADC_SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END |
|
||||||
|
0*spi.SPI_INPUT | 0*spi.SPI_CS_POLARITY |
|
||||||
|
1*spi.SPI_CLK_POLARITY | 1*spi.SPI_CLK_PHASE |
|
||||||
|
0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
|
||||||
|
|
||||||
|
# SPI clock write and read dividers
|
||||||
|
# CS should assert at least 9.5 ns after clk pulse
|
||||||
|
SPIT_RELAY_WR = 4
|
||||||
|
# 25 ns high/low pulse hold (limiting for write)
|
||||||
|
SPIT_ADC_WR = 4
|
||||||
|
SPIT_ADC_RD = 16
|
||||||
|
|
||||||
|
# SPI CS line
|
||||||
|
CS_RELAY = 1 << 0
|
||||||
|
CS_LED = 1 << 1
|
||||||
|
CS_ADC = 1 << 0
|
||||||
|
|
||||||
|
# Referenced AD4115 registers
|
||||||
|
_AD4115_REG_STATUS = 0x00
|
||||||
|
_AD4115_REG_ADCMODE = 0x01
|
||||||
|
_AD4115_REG_DATA = 0x04
|
||||||
|
_AD4115_REG_ID = 0x07
|
||||||
|
_AD4115_REG_CH0 = 0x10
|
||||||
|
_AD4115_REG_SETUPCON0 = 0x20
|
||||||
|
|
||||||
|
|
||||||
|
class Relay:
|
||||||
|
"""Shuttler AFE relay switches.
|
||||||
|
|
||||||
|
It controls the AFE relay switches and the LEDs. Switching a relay on
enables the corresponding AFE output; switching it off disables the output.
The LEDs indicate the relay status.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The relay does not disable ADC measurements. Voltage of any channels
|
||||||
|
can still be read by the ADC even after switching off the relays.
|
||||||
|
|
||||||
|
:param spi_device: SPI bus device name.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariant = {"core", "bus"}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, spi_device, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.bus = dmgr.get(spi_device)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def init(self):
|
||||||
|
"""Initialize SPI device.
|
||||||
|
|
||||||
|
Configures the SPI bus for 16-bit, write-only transfers that drive the
relay switches and the LEDs simultaneously.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
RELAY_SPI_CONFIG, 16, SPIT_RELAY_WR, CS_RELAY | CS_LED)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def enable(self, en: TInt32):
|
||||||
|
"""Enable/Disable relay switches of corresponding channels.
|
||||||
|
|
||||||
|
Each bit corresponds to the relay switch of a channel. Asserting a bit
turns on the corresponding relay switch; deasserting it turns the switch
off.
|
||||||
|
|
||||||
|
:param en: Switch enable bits. The MSB corresponds to Channel 15, LSB
|
||||||
|
corresponds to Channel 0.
|
||||||
|
"""
|
||||||
|
self.bus.write(en << 16)
|
||||||
|
|
||||||
|
|
||||||
|
class ADC:
|
||||||
|
"""Shuttler AFE ADC (AD4115) driver.
|
||||||
|
|
||||||
|
:param spi_device: SPI bus device name.
|
||||||
|
:param core_device: Core device name.
|
||||||
|
"""
|
||||||
|
kernel_invariant = {"core", "bus"}
|
||||||
|
|
||||||
|
def __init__(self, dmgr, spi_device, core_device="core"):
|
||||||
|
self.core = dmgr.get(core_device)
|
||||||
|
self.bus = dmgr.get(spi_device)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def read_id(self) -> TInt32:
|
||||||
|
"""Read the product ID of the ADC.
|
||||||
|
|
||||||
|
The expected return value is 0x38DX, the 4 LSbs are don't cares.
|
||||||
|
|
||||||
|
:return: The read-back product ID.
|
||||||
|
"""
|
||||||
|
return self.read16(_AD4115_REG_ID)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def reset(self):
|
||||||
|
"""AD4115 reset procedure.
|
||||||
|
|
||||||
|
This performs a write operation of 96 serial clock cycles with DIN
|
||||||
|
held at high. It resets the entire device, including the register
|
||||||
|
contents.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The datasheet only requires 64 cycles, but reasserting `CS_n` right
|
||||||
|
after the transfer appears to interrupt the start-up sequence.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(ADC_SPI_CONFIG, 32, SPIT_ADC_WR, CS_ADC)
|
||||||
|
self.bus.write(0xffffffff)
|
||||||
|
self.bus.write(0xffffffff)
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
|
||||||
|
self.bus.write(0xffffffff)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def read8(self, addr: TInt32) -> TInt32:
|
||||||
|
"""Read from 8 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:return: Read-back register content.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||||
|
16, SPIT_ADC_RD, CS_ADC)
|
||||||
|
self.bus.write((addr | 0x40) << 24)
|
||||||
|
return self.bus.read() & 0xff
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def read16(self, addr: TInt32) -> TInt32:
|
||||||
|
"""Read from 16 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:return: Read-back register content.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||||
|
24, SPIT_ADC_RD, CS_ADC)
|
||||||
|
self.bus.write((addr | 0x40) << 24)
|
||||||
|
return self.bus.read() & 0xffff
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def read24(self, addr: TInt32) -> TInt32:
|
||||||
|
"""Read from 24 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:return: Read-back register content.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT,
|
||||||
|
32, SPIT_ADC_RD, CS_ADC)
|
||||||
|
self.bus.write((addr | 0x40) << 24)
|
||||||
|
return self.bus.read() & 0xffffff
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def write8(self, addr: TInt32, data: TInt32):
|
||||||
|
"""Write to 8 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:param data: Data to be written.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END, 16, SPIT_ADC_WR, CS_ADC)
|
||||||
|
self.bus.write(addr << 24 | (data & 0xff) << 16)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def write16(self, addr: TInt32, data: TInt32):
|
||||||
|
"""Write to 16 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:param data: Data to be written.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END, 24, SPIT_ADC_WR, CS_ADC)
|
||||||
|
self.bus.write(addr << 24 | (data & 0xffff) << 8)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def write24(self, addr: TInt32, data: TInt32):
|
||||||
|
"""Write to 24 bit register.
|
||||||
|
|
||||||
|
:param addr: Register address.
|
||||||
|
:param data: Data to be written.
|
||||||
|
"""
|
||||||
|
self.bus.set_config_mu(
|
||||||
|
ADC_SPI_CONFIG | spi.SPI_END, 32, SPIT_ADC_WR, CS_ADC)
|
||||||
|
self.bus.write(addr << 24 | (data & 0xffffff))
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def read_ch(self, channel: TInt32) -> TFloat:
|
||||||
|
"""Sample a Shuttler channel on the AFE.
|
||||||
|
|
||||||
|
It performs a single conversion using profile 0 and setup 0, on the
|
||||||
|
selected channel. The sample is then recovered and converted to volt.
|
||||||
|
|
||||||
|
:param channel: Shuttler channel to be sampled.
|
||||||
|
:return: Voltage sample in volt.
|
||||||
|
"""
|
||||||
|
# Always configure Profile 0 for single conversion
|
||||||
|
self.write16(_AD4115_REG_CH0, 0x8000 | ((channel * 2 + 1) << 4))
|
||||||
|
self.write16(_AD4115_REG_SETUPCON0, 0x1300)
|
||||||
|
self.single_conversion()
|
||||||
|
|
||||||
|
delay(100*us)
|
||||||
|
adc_code = self.read24(_AD4115_REG_DATA)
|
||||||
|
return ((adc_code / (1 << 23)) - 1) * 2.5 / 0.1
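
# Worked example of the conversion above (the 24-bit code is read as offset
# binary; the 2.5 and 0.1 factors are presumably the ADC reference voltage
# and the AFE input attenuation):
#   adc_code = 0x800000  ->  ((0x800000 / 2**23) - 1) * 2.5 / 0.1 =  0.00 V
#   adc_code = 0xa00000  ->  ((0xa00000 / 2**23) - 1) * 2.5 / 0.1 = +6.25 V
#   adc_code = 0x600000  ->  ((0x600000 / 2**23) - 1) * 2.5 / 0.1 = -6.25 V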
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def single_conversion(self):
|
||||||
|
"""Place the ADC in single conversion mode.
|
||||||
|
|
||||||
|
The ADC returns to standby mode after the conversion is complete.
|
||||||
|
"""
|
||||||
|
self.write16(_AD4115_REG_ADCMODE, 0x8010)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def standby(self):
|
||||||
|
"""Place the ADC in standby mode and disables power down the clock.
|
||||||
|
|
||||||
|
The ADC can be returned to single conversion mode by calling
|
||||||
|
:meth:`single_conversion`.
|
||||||
|
"""
|
||||||
|
# Selecting internal XO (0b00) also disables clock during standby
|
||||||
|
self.write16(_AD4115_REG_ADCMODE, 0x8020)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def power_down(self):
|
||||||
|
"""Place the ADC in power-down mode.
|
||||||
|
|
||||||
|
The ADC must be reset before returning to other modes.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The AD4115 datasheet suggests placing the ADC in standby mode
|
||||||
|
before power-down. This is to prevent accidental entry into the
|
||||||
|
power-down mode.
|
||||||
|
|
||||||
|
.. seealso::
|
||||||
|
:meth:`standby`
|
||||||
|
|
||||||
|
:meth:`power_up`
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.write16(_AD4115_REG_ADCMODE, 0x8030)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def power_up(self):
|
||||||
|
"""Exit the ADC power-down mode.
|
||||||
|
|
||||||
|
The ADC should be in power-down mode before calling this method.
|
||||||
|
|
||||||
|
.. seealso::
|
||||||
|
:meth:`power_down`
|
||||||
|
"""
|
||||||
|
self.reset()
|
||||||
|
# Although the datasheet claims a 500 us reset wait time, waiting only
# ~500 us can leave the DOUT pin stuck high
|
||||||
|
delay(2500*us)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def calibrate(self, volts, trigger, config, samples=[-5.0, 0.0, 5.0]):
|
||||||
|
"""Calibrate the Shuttler waveform generator using the ADC on the AFE.
|
||||||
|
|
||||||
|
It finds the average slope and offset from the ADC samples, and
compensates for them by writing the pre-DAC gain and offset configuration
registers.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
If the pre-calibration slope rate < 1, the calibration procedure
|
||||||
|
will introduce a pre-DAC gain compensation. However, this may
|
||||||
|
saturate the pre-DAC voltage code. (See :class:`Config` notes).
|
||||||
|
Shuttler cannot cover the entire +/- 10 V range in this case.
|
||||||
|
|
||||||
|
.. seealso::
|
||||||
|
:meth:`Config.set_gain`
|
||||||
|
|
||||||
|
:meth:`Config.set_offset`
|
||||||
|
|
||||||
|
:param volts: A list of all 16 cubic DC-bias splines.
|
||||||
|
(See :class:`DCBias`)
|
||||||
|
:param trigger: The Shuttler spline coefficient update trigger.
|
||||||
|
:param config: The Shuttler Core configuration registers.
|
||||||
|
:param samples: A list of sample voltages for calibration. There must
|
||||||
|
be at least 2 samples to perform slope rate calculation.
|
||||||
|
"""
|
||||||
|
assert len(volts) == 16
|
||||||
|
assert len(samples) > 1
|
||||||
|
|
||||||
|
measurements = [0.0] * len(samples)
|
||||||
|
|
||||||
|
for ch in range(16):
|
||||||
|
# Find the average slope rate and offset
|
||||||
|
for i in range(len(samples)):
|
||||||
|
self.core.break_realtime()
|
||||||
|
volts[ch].set_waveform(
|
||||||
|
shuttler_volt_to_mu(samples[i]), 0, 0, 0)
|
||||||
|
trigger.trigger(1 << ch)
|
||||||
|
measurements[i] = self.read_ch(ch)
|
||||||
|
|
||||||
|
# Find the average output slope
|
||||||
|
slope_sum = 0.0
|
||||||
|
for i in range(len(samples) - 1):
|
||||||
|
slope_sum += (measurements[i+1] - measurements[i])/(samples[i+1] - samples[i])
|
||||||
|
slope_avg = slope_sum / (len(samples) - 1)
|
||||||
|
|
||||||
|
gain_code = int32(1 / slope_avg * (2 ** 16)) & 0xffff
|
||||||
|
|
||||||
|
# Scale the measurements by 1/slope, find average offset
|
||||||
|
offset_sum = 0.0
|
||||||
|
for i in range(len(samples)):
|
||||||
|
offset_sum += (measurements[i] / slope_avg) - samples[i]
|
||||||
|
offset_avg = offset_sum / len(samples)
|
||||||
|
|
||||||
|
offset_code = shuttler_volt_to_mu(-offset_avg)
|
||||||
|
|
||||||
|
self.core.break_realtime()
|
||||||
|
config.set_gain(ch, gain_code)
|
||||||
|
|
||||||
|
delay_mu(int64(self.core.ref_multiplier))
|
||||||
|
config.set_offset(ch, offset_code)
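
# Hedged usage sketch (not part of the driver): a typical calibration pass
# from a kernel, where self.shuttler0_dcbias is a host-built list of the 16
# DCBias drivers and the other names are hypothetical device database
# entries.
#
#     @kernel
#     def run(self):
#         self.core.reset()
#         self.shuttler0_relay.init()
#         self.shuttler0_relay.enable(0xffff)
#         self.shuttler0_adc.power_up()
#         self.shuttler0_adc.calibrate(self.shuttler0_dcbias,
#                                      self.shuttler0_trigger,
#                                      self.shuttler0_config)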
|
|
@ -7,7 +7,7 @@ Output event replacement is not supported and issuing commands at the same
|
||||||
time is an error.
|
time is an error.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from artiq.language.core import syscall, kernel, portable, now_mu, delay_mu
|
from artiq.language.core import syscall, kernel, portable, delay_mu
|
||||||
from artiq.language.types import TInt32, TNone
|
from artiq.language.types import TInt32, TNone
|
||||||
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||||
|
|
||||||
|
@ -72,6 +72,10 @@ class SPIMaster:
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
self.update_xfer_duration_mu(div, length)
|
self.update_xfer_duration_mu(div, length)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel, **kwargs):
|
||||||
|
return [(channel, None)]
|
||||||
|
|
||||||
@portable
|
@portable
|
||||||
def frequency_to_div(self, f):
|
def frequency_to_div(self, f):
|
||||||
"""Convert a SPI clock frequency to the closest SPI clock divider."""
|
"""Convert a SPI clock frequency to the closest SPI clock divider."""
|
||||||
|
@ -166,7 +170,7 @@ class SPIMaster:
|
||||||
raise ValueError("Invalid SPI transfer length")
|
raise ValueError("Invalid SPI transfer length")
|
||||||
if div > 257 or div < 2:
|
if div > 257 or div < 2:
|
||||||
raise ValueError("Invalid SPI clock divider")
|
raise ValueError("Invalid SPI clock divider")
|
||||||
rtio_output(now_mu(), self.channel, SPI_CONFIG_ADDR, flags |
|
rtio_output((self.channel << 8) | SPI_CONFIG_ADDR, flags |
|
||||||
((length - 1) << 8) | ((div - 2) << 16) | (cs << 24))
|
((length - 1) << 8) | ((div - 2) << 16) | (cs << 24))
|
||||||
self.update_xfer_duration_mu(div, length)
|
self.update_xfer_duration_mu(div, length)
|
||||||
delay_mu(self.ref_period_mu)
|
delay_mu(self.ref_period_mu)
|
||||||
|
@ -186,6 +190,12 @@ class SPIMaster:
|
||||||
This method is portable and can also be called from e.g.
|
This method is portable and can also be called from e.g.
|
||||||
:meth:`__init__`.
|
:meth:`__init__`.
|
||||||
|
|
||||||
|
.. warning:: If this method is called while recording a DMA
|
||||||
|
sequence, the playback of the sequence will not update the
|
||||||
|
driver state.
|
||||||
|
When required, update the driver state manually (by calling
|
||||||
|
this method) after playing back a DMA sequence.
|
||||||
|
|
||||||
:param div: SPI clock divider (see: :meth:`set_config_mu`)
|
:param div: SPI clock divider (see: :meth:`set_config_mu`)
|
||||||
:param length: SPI transfer length (see: :meth:`set_config_mu`)
|
:param length: SPI transfer length (see: :meth:`set_config_mu`)
|
||||||
"""
|
"""
|
||||||
|
@ -216,7 +226,7 @@ class SPIMaster:
|
||||||
|
|
||||||
:param data: SPI output data to be written.
|
:param data: SPI output data to be written.
|
||||||
"""
|
"""
|
||||||
rtio_output(now_mu(), self.channel, SPI_DATA_ADDR, data)
|
rtio_output((self.channel << 8) | SPI_DATA_ADDR, data)
|
||||||
delay_mu(self.xfer_duration_mu)
|
delay_mu(self.xfer_duration_mu)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -267,9 +277,8 @@ class NRTSPIMaster:
|
||||||
def set_config_mu(self, flags=0, length=8, div=6, cs=1):
|
def set_config_mu(self, flags=0, length=8, div=6, cs=1):
|
||||||
"""Set the ``config`` register.
|
"""Set the ``config`` register.
|
||||||
|
|
||||||
Note that the non-realtime SPI cores are usually clocked by the system
|
In many cases, the SPI configuration is already set by the firmware
|
||||||
clock and not the RTIO clock. In many cases, the SPI configuration is
|
and you do not need to call this method.
|
||||||
already set by the firmware and you do not need to call this method.
|
|
||||||
"""
|
"""
|
||||||
spi_set_config(self.busno, flags, length, div, cs)
|
spi_set_config(self.busno, flags, length, div, cs)
|
||||||
|
|
||||||
|
|
|
@ -1,228 +0,0 @@
|
||||||
from numpy import int32, int64
|
|
||||||
from artiq.language.core import kernel, now_mu, portable, delay
|
|
||||||
from artiq.coredevice.rtio import rtio_output, rtio_output_wide
|
|
||||||
from artiq.language.types import TInt32, TInt64, TFloat
|
|
||||||
|
|
||||||
|
|
||||||
class Spline:
|
|
||||||
r"""Spline interpolating RTIO channel.
|
|
||||||
|
|
||||||
One knot of a polynomial basis spline (B-spline) :math:`u(t)`
|
|
||||||
is defined by the coefficients :math:`u_n` up to order :math:`n = k`.
|
|
||||||
If the coefficients are evaluated starting at time :math:`t_0`,
|
|
||||||
the output :math:`u(t)` for :math:`t > t_0, t_0` is:
|
|
||||||
|
|
||||||
.. math::
|
|
||||||
u(t) &= \sum_{n=0}^k \frac{u_n}{n!} (t - t_0)^n \\
|
|
||||||
&= u_0 + u_1 (t - t_0) + \frac{u_2}{2} (t - t_0)^2 + \dots
|
|
||||||
|
|
||||||
This class contains multiple methods to convert spline knot data from SI
|
|
||||||
to machine units and multiple methods that set the current spline
|
|
||||||
coefficient data. None of these advance the timeline. The :meth:`smooth`
|
|
||||||
method is the only method that advances the timeline.
|
|
||||||
|
|
||||||
:param width: Width in bits of the quantity that this spline controls
|
|
||||||
:param time_width: Width in bits of the time counter of this spline
|
|
||||||
:param channel: RTIO channel number
|
|
||||||
:param core_device: Core device that this spline is attached to
|
|
||||||
:param scale: Scale for conversion between machine units and physical
|
|
||||||
units; to be given as the "full scale physical value".
|
|
||||||
"""
|
|
||||||
|
|
||||||
kernel_invariants = {"channel", "core", "scale", "width",
|
|
||||||
"time_width", "time_scale"}
|
|
||||||
|
|
||||||
def __init__(self, width, time_width, channel, core_device, scale=1.):
|
|
||||||
self.core = core_device
|
|
||||||
self.channel = channel
|
|
||||||
self.width = width
|
|
||||||
self.scale = float((int64(1) << width) / scale)
|
|
||||||
self.time_width = time_width
|
|
||||||
self.time_scale = float((1 << time_width) *
|
|
||||||
core_device.coarse_ref_period)
|
|
||||||
|
|
||||||
@portable(flags={"fast-math"})
|
|
||||||
def to_mu(self, value: TFloat) -> TInt32:
|
|
||||||
"""Convert floating point ``value`` from physical units to 32 bit
|
|
||||||
integer machine units."""
|
|
||||||
return int32(round(value*self.scale))
|
|
||||||
|
|
||||||
@portable(flags={"fast-math"})
|
|
||||||
def from_mu(self, value: TInt32) -> TFloat:
|
|
||||||
"""Convert 32 bit integer ``value`` from machine units to floating
|
|
||||||
point physical units."""
|
|
||||||
return value/self.scale
|
|
||||||
|
|
||||||
@portable(flags={"fast-math"})
|
|
||||||
def to_mu64(self, value: TFloat) -> TInt64:
|
|
||||||
"""Convert floating point ``value`` from physical units to 64 bit
|
|
||||||
integer machine units."""
|
|
||||||
return int64(round(value*self.scale))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_mu(self, value: TInt32):
|
|
||||||
"""Set spline value (machine units).
|
|
||||||
|
|
||||||
:param value: Spline value in integer machine units.
|
|
||||||
"""
|
|
||||||
rtio_output(now_mu(), self.channel, 0, value)
|
|
||||||
|
|
||||||
@kernel(flags={"fast-math"})
|
|
||||||
def set(self, value: TFloat):
|
|
||||||
"""Set spline value.
|
|
||||||
|
|
||||||
:param value: Spline value relative to full-scale.
|
|
||||||
"""
|
|
||||||
if self.width > 32:
|
|
||||||
l = [int32(0)] * 2
|
|
||||||
self.pack_coeff_mu([self.to_mu64(value)], l)
|
|
||||||
rtio_output_wide(now_mu(), self.channel, 0, l)
|
|
||||||
else:
|
|
||||||
rtio_output(now_mu(), self.channel, 0, self.to_mu(value))
|
|
||||||
|
|
||||||
@kernel
|
|
||||||
def set_coeff_mu(self, value): # TList(TInt32)
|
|
||||||
"""Set spline raw values.
|
|
||||||
|
|
||||||
:param value: Spline packed raw values.
|
|
||||||
"""
|
|
||||||
rtio_output_wide(now_mu(), self.channel, 0, value)
|
|
||||||
|
|
||||||
@portable(flags={"fast-math"})
|
|
||||||
def pack_coeff_mu(self, coeff, packed): # TList(TInt64), TList(TInt32)
|
|
||||||
"""Pack coefficients into RTIO data
|
|
||||||
|
|
||||||
:param coeff: TList(TInt64) list of machine units spline coefficients.
|
|
||||||
Lowest (zeroth) order first. The coefficient list is zero-extended
|
|
||||||
by the RTIO gateware.
|
|
||||||
:param packed: TList(TInt32) list for packed RTIO data. Must be
|
|
||||||
pre-allocated. Length in bits is
|
|
||||||
``n*width + (n - 1)*n//2*time_width``
|
|
||||||
"""
|
|
||||||
pos = 0
|
|
||||||
for i in range(len(coeff)):
|
|
||||||
wi = self.width + i*self.time_width
|
|
||||||
ci = coeff[i]
|
|
||||||
while wi != 0:
|
|
||||||
j = pos//32
|
|
||||||
used = pos - 32*j
|
|
||||||
avail = 32 - used
|
|
||||||
if avail > wi:
|
|
||||||
avail = wi
|
|
||||||
cij = int32(ci)
|
|
||||||
if avail != 32:
|
|
||||||
cij &= (1 << avail) - 1
|
|
||||||
packed[j] |= cij << used
|
|
||||||
ci >>= avail
|
|
||||||
wi -= avail
|
|
||||||
pos += avail
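
# Worked example of the packed size (derived from the loop above and from
# coeff_as_packed_mu() below): a width=16, time_width=16 spline with n=4
# coefficients packs into
#   n*width + (n - 1)*n//2*time_width = 4*16 + 6*16 = 160 bits,
# i.e. (160 + 31)//32 = 5 32-bit RTIO words, with coefficient i occupying
# width + i*time_width bits.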
|
|
||||||
|
|
||||||
@portable(flags={"fast-math"})
|
|
||||||
def coeff_to_mu(self, coeff, coeff64): # TList(TFloat), TList(TInt64)
|
|
||||||
"""Convert a floating point list of coefficients into a 64 bit
|
|
||||||
integer (preallocated).
|
|
||||||
|
|
||||||
:param coeff: TList(TFloat) list of coefficients in physical units.
|
|
||||||
:param coeff64: TList(TInt64) preallocated list of coefficients in
|
|
||||||
machine units.
|
|
||||||
"""
|
|
||||||
for i in range(len(coeff)):
|
|
||||||
vi = coeff[i] * self.scale
|
|
||||||
for j in range(i):
|
|
||||||
vi *= self.time_scale
|
|
||||||
ci = int64(round(vi))
|
|
||||||
coeff64[i] = ci
|
|
||||||
# artiq.wavesynth.coefficients.discrete_compensate:
|
|
||||||
if i == 2:
|
|
||||||
coeff64[1] += ci >> self.time_width + 1
|
|
||||||
elif i == 3:
|
|
||||||
coeff64[2] += ci >> self.time_width
|
|
||||||
coeff64[1] += ci // 6 >> 2*self.time_width
|
|
||||||
|
|
||||||
def coeff_as_packed_mu(self, coeff64):
|
|
||||||
"""Pack 64 bit integer machine units coefficients into 32 bit integer
|
|
||||||
RTIO data list.
|
|
||||||
|
|
||||||
This is a host-only method that can be used to generate packed
|
|
||||||
spline coefficient data to be frozen into kernels at compile time.
|
|
||||||
"""
|
|
||||||
n = len(coeff64)
|
|
||||||
width = n*self.width + (n - 1)*n//2*self.time_width
|
|
||||||
packed = [int32(0)] * ((width + 31)//32)
|
|
||||||
self.pack_coeff_mu(coeff64, packed)
|
|
||||||
return packed
|
|
||||||
|
|
||||||
def coeff_as_packed(self, coeff):
|
|
||||||
"""Convert floating point spline coefficients into 32 bit integer
|
|
||||||
packed data.
|
|
||||||
|
|
||||||
This is a host-only method that can be used to generate packed
|
|
||||||
spline coefficient data to be frozen into kernels at compile time.
|
|
||||||
"""
|
|
||||||
coeff64 = [int64(0)] * len(coeff)
|
|
||||||
self.coeff_to_mu(coeff, coeff64)
|
|
||||||
return self.coeff_as_packed_mu(coeff64)
|
|
||||||
|
|
||||||
@kernel(flags={"fast-math"})
|
|
||||||
def set_coeff(self, coeff): # TList(TFloat)
|
|
||||||
"""Set spline coefficients.
|
|
||||||
|
|
||||||
Missing coefficients (high order) are zero-extended by the RTIO
gateware.
|
|
||||||
|
|
||||||
If more coefficients are supplied than the gateware supports the extra
|
|
||||||
coefficients are ignored.
|
|
||||||
|
|
||||||
:param coeff: List of floating point spline coefficients,
|
|
||||||
lowest order (constant) coefficient first. Units are the
|
|
||||||
unit of this spline's value times increasing powers of 1/s.
|
|
||||||
"""
|
|
||||||
n = len(coeff)
|
|
||||||
coeff64 = [int64(0)] * n
|
|
||||||
self.coeff_to_mu(coeff, coeff64)
|
|
||||||
width = n*self.width + (n - 1)*n//2*self.time_width
|
|
||||||
packed = [int32(0)] * ((width + 31)//32)
|
|
||||||
self.pack_coeff_mu(coeff64, packed)
|
|
||||||
self.set_coeff_mu(packed)
|
|
||||||
|
|
||||||
@kernel(flags={"fast-math"})
|
|
||||||
def smooth(self, start: TFloat, stop: TFloat, duration: TFloat,
|
|
||||||
order: TInt32):
|
|
||||||
"""Initiate an interpolated value change.
|
|
||||||
|
|
||||||
For zeroth order (step) interpolation, the step is at
|
|
||||||
``start + duration/2``.
|
|
||||||
|
|
||||||
First order interpolation corresponds to a linear value ramp from
|
|
||||||
``start`` to ``stop`` over ``duration``.
|
|
||||||
|
|
||||||
The third order interpolation is constrained to have zero first
|
|
||||||
order derivative at both `start` and `stop`.
|
|
||||||
|
|
||||||
For first order and third order interpolation (linear and cubic)
|
|
||||||
the interpolator needs to be stopped explicitly at the stop time
|
|
||||||
(e.g. by setting spline coefficient data or starting a new
|
|
||||||
:meth:`smooth` interpolation).
|
|
||||||
|
|
||||||
This method advances the timeline by ``duration``.
|
|
||||||
|
|
||||||
:param start: Initial value of the change. In physical units.
|
|
||||||
:param stop: Final value of the change. In physical units.
|
|
||||||
:param duration: Duration of the interpolation. In physical units.
|
|
||||||
:param order: Order of the interpolation. Only 0, 1,
|
|
||||||
and 3 are valid: step, linear, cubic.
|
|
||||||
"""
|
|
||||||
if order == 0:
|
|
||||||
delay(duration/2.)
|
|
||||||
self.set_coeff([stop])
|
|
||||||
delay(duration/2.)
|
|
||||||
elif order == 1:
|
|
||||||
self.set_coeff([start, (stop - start)/duration])
|
|
||||||
delay(duration)
|
|
||||||
elif order == 3:
|
|
||||||
v2 = 6.*(stop - start)/(duration*duration)
|
|
||||||
self.set_coeff([start, 0., v2, -2.*v2/duration])
|
|
||||||
delay(duration)
|
|
||||||
else:
|
|
||||||
raise ValueError("Invalid interpolation order. "
|
|
||||||
"Supported orders are: 0, 1, 3.")
|
|
|
@ -1,12 +0,0 @@
|
||||||
from artiq.language.core import syscall
|
|
||||||
from artiq.language.types import TInt32, TNone
|
|
||||||
|
|
||||||
|
|
||||||
@syscall(flags={"nounwind", "nowrite"})
|
|
||||||
def mfspr(spr: TInt32) -> TInt32:
|
|
||||||
raise NotImplementedError("syscall not simulated")
|
|
||||||
|
|
||||||
|
|
||||||
@syscall(flags={"nowrite", "nowrite"})
|
|
||||||
def mtspr(spr: TInt32, value: TInt32) -> TNone:
|
|
||||||
raise NotImplementedError("syscall not simulated")
|
|
|
@ -1,4 +1,4 @@
|
||||||
from artiq.language.core import kernel, delay, now_mu, delay_mu, portable
|
from artiq.language.core import kernel, delay, delay_mu, portable
|
||||||
from artiq.language.units import us, ns
|
from artiq.language.units import us, ns
|
||||||
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
from artiq.coredevice.rtio import rtio_output, rtio_input_data
|
||||||
from artiq.coredevice import spi2 as spi
|
from artiq.coredevice import spi2 as spi
|
||||||
|
@ -6,28 +6,29 @@ from artiq.coredevice import urukul, sampler
|
||||||
|
|
||||||
|
|
||||||
COEFF_WIDTH = 18
|
COEFF_WIDTH = 18
|
||||||
|
Y_FULL_SCALE_MU = (1 << (COEFF_WIDTH - 1)) - 1
|
||||||
COEFF_DEPTH = 10 + 1
|
COEFF_DEPTH = 10 + 1
|
||||||
WE = 1 << COEFF_DEPTH + 1
|
WE = 1 << COEFF_DEPTH + 1
|
||||||
STATE_SEL = 1 << COEFF_DEPTH
|
STATE_SEL = 1 << COEFF_DEPTH
|
||||||
CONFIG_SEL = 1 << COEFF_DEPTH - 1
|
CONFIG_SEL = 1 << COEFF_DEPTH - 1
|
||||||
CONFIG_ADDR = CONFIG_SEL | STATE_SEL
|
CONFIG_ADDR = CONFIG_SEL | STATE_SEL
|
||||||
T_CYCLE = (2*(8 + 64) + 2 + 1)*8*ns
|
T_CYCLE = (2*(8 + 64) + 2)*8*ns # Must match gateware Servo.t_cycle.
|
||||||
COEFF_SHIFT = 11
|
COEFF_SHIFT = 11
|
||||||
|
|
||||||
|
|
||||||
@portable
|
@portable
|
||||||
def y_mu_to_full_scale(y):
|
def y_mu_to_full_scale(y):
|
||||||
"""Convert servo Y data from machine units to units of full scale."""
|
"""Convert servo Y data from machine units to units of full scale."""
|
||||||
return y*(1./(1 << COEFF_WIDTH - 1))
|
return y / Y_FULL_SCALE_MU
|
||||||
|
|
||||||
|
|
||||||
@portable
|
@portable
|
||||||
def adc_mu_to_volts(x, gain):
|
def adc_mu_to_volts(x, gain, corrected_fs=True):
|
||||||
"""Convert servo ADC data from machine units to Volt."""
|
"""Convert servo ADC data from machine units to Volt."""
|
||||||
val = (x >> 1) & 0xffff
|
val = (x >> 1) & 0xffff
|
||||||
mask = 1 << 15
|
mask = 1 << 15
|
||||||
val = -(val & mask) + (val & ~mask)
|
val = -(val & mask) + (val & ~mask)
|
||||||
return sampler.adc_mu_to_volt(val, gain)
|
return sampler.adc_mu_to_volt(val, gain, corrected_fs)
|
||||||
|
|
||||||
|
|
||||||
class SUServo:
|
class SUServo:
|
||||||
|
@ -56,38 +57,38 @@ class SUServo:
|
||||||
|
|
||||||
:param channel: RTIO channel number
|
:param channel: RTIO channel number
|
||||||
:param pgia_device: Name of the Sampler PGIA gain setting SPI bus
|
:param pgia_device: Name of the Sampler PGIA gain setting SPI bus
|
||||||
:param cpld0_device: Name of the first Urukul CPLD SPI bus
|
:param cpld_devices: Names of the Urukul CPLD SPI buses
|
||||||
:param cpld1_device: Name of the second Urukul CPLD SPI bus
|
:param dds_devices: Names of the AD9910 devices
|
||||||
:param dds0_device: Name of the AD9910 device for the DDS on the first
|
|
||||||
Urukul
|
|
||||||
:param dds1_device: Name of the AD9910 device for the DDS on the second
|
|
||||||
Urukul
|
|
||||||
:param gains: Initial value for PGIA gains shift register
|
:param gains: Initial value for PGIA gains shift register
|
||||||
(default: 0x0000). Knowledge of this state is not transferred
|
(default: 0x0000). Knowledge of this state is not transferred
|
||||||
between experiments.
|
between experiments.
|
||||||
|
:param sampler_hw_rev: Sampler's revision string
|
||||||
:param core_device: Core device name
|
:param core_device: Core device name
|
||||||
"""
|
"""
|
||||||
kernel_invariants = {"channel", "core", "pgia", "cpld0", "cpld1",
|
kernel_invariants = {"channel", "core", "pgia", "cplds", "ddses",
|
||||||
"dds0", "dds1", "ref_period_mu"}
|
"ref_period_mu", "corrected_fs"}
|
||||||
|
|
||||||
def __init__(self, dmgr, channel, pgia_device,
|
def __init__(self, dmgr, channel, pgia_device,
|
||||||
cpld0_device, cpld1_device,
|
cpld_devices, dds_devices,
|
||||||
dds0_device, dds1_device,
|
gains=0x0000, sampler_hw_rev="v2.2", core_device="core"):
|
||||||
gains=0x0000, core_device="core"):
|
|
||||||
|
|
||||||
self.core = dmgr.get(core_device)
|
self.core = dmgr.get(core_device)
|
||||||
self.pgia = dmgr.get(pgia_device)
|
self.pgia = dmgr.get(pgia_device)
|
||||||
self.pgia.update_xfer_duration_mu(div=4, length=16)
|
self.pgia.update_xfer_duration_mu(div=4, length=16)
|
||||||
self.dds0 = dmgr.get(dds0_device)
|
assert len(dds_devices) == len(cpld_devices)
|
||||||
self.dds1 = dmgr.get(dds1_device)
|
self.ddses = [dmgr.get(dds) for dds in dds_devices]
|
||||||
self.cpld0 = dmgr.get(cpld0_device)
|
self.cplds = [dmgr.get(cpld) for cpld in cpld_devices]
|
||||||
self.cpld1 = dmgr.get(cpld1_device)
|
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
self.gains = gains
|
self.gains = gains
|
||||||
self.ref_period_mu = self.core.seconds_to_mu(
|
self.ref_period_mu = self.core.seconds_to_mu(
|
||||||
self.core.coarse_ref_period)
|
self.core.coarse_ref_period)
|
||||||
|
self.corrected_fs = sampler.Sampler.use_corrected_fs(sampler_hw_rev)
|
||||||
assert self.ref_period_mu == self.core.ref_multiplier
|
assert self.ref_period_mu == self.core.ref_multiplier
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel, **kwargs):
|
||||||
|
return [(channel, None)]
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def init(self):
|
def init(self):
|
||||||
"""Initialize the servo, Sampler and both Urukuls.
|
"""Initialize the servo, Sampler and both Urukuls.
|
||||||
|
@ -108,17 +109,15 @@ class SUServo:
|
||||||
sampler.SPI_CONFIG | spi.SPI_END,
|
sampler.SPI_CONFIG | spi.SPI_END,
|
||||||
16, 4, sampler.SPI_CS_PGIA)
|
16, 4, sampler.SPI_CS_PGIA)
|
||||||
|
|
||||||
self.cpld0.init(blind=True)
|
for i in range(len(self.cplds)):
|
||||||
cfg0 = self.cpld0.cfg_reg
|
cpld = self.cplds[i]
|
||||||
self.cpld0.cfg_write(cfg0 | (0xf << urukul.CFG_MASK_NU))
|
dds = self.ddses[i]
|
||||||
self.dds0.init(blind=True)
|
|
||||||
self.cpld0.cfg_write(cfg0)
|
|
||||||
|
|
||||||
self.cpld1.init(blind=True)
|
cpld.init(blind=True)
|
||||||
cfg1 = self.cpld1.cfg_reg
|
prev_cpld_cfg = cpld.cfg_reg
|
||||||
self.cpld1.cfg_write(cfg1 | (0xf << urukul.CFG_MASK_NU))
|
cpld.cfg_write(prev_cpld_cfg | (0xf << urukul.CFG_MASK_NU))
|
||||||
self.dds1.init(blind=True)
|
dds.init(blind=True)
|
||||||
self.cpld1.cfg_write(cfg1)
|
cpld.cfg_write(prev_cpld_cfg)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def write(self, addr, value):
|
def write(self, addr, value):
|
||||||
|
@ -129,7 +128,11 @@ class SUServo:
|
||||||
:param addr: Memory location address.
|
:param addr: Memory location address.
|
||||||
:param value: Data to be written.
|
:param value: Data to be written.
|
||||||
"""
|
"""
|
||||||
rtio_output(now_mu(), self.channel, addr | WE, value)
|
addr |= WE
|
||||||
|
value &= (1 << COEFF_WIDTH) - 1
|
||||||
|
value |= (addr >> 8) << COEFF_WIDTH
|
||||||
|
addr = addr & 0xff
|
||||||
|
rtio_output((self.channel << 8) | addr, value)
|
||||||
delay_mu(self.ref_period_mu)
|
delay_mu(self.ref_period_mu)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -140,7 +143,9 @@ class SUServo:
|
||||||
|
|
||||||
:param addr: Memory location address.
|
:param addr: Memory location address.
|
||||||
"""
|
"""
|
||||||
rtio_output(now_mu(), self.channel, addr, 0)
|
value = (addr >> 8) << COEFF_WIDTH
|
||||||
|
addr = addr & 0xff
|
||||||
|
rtio_output((self.channel << 8) | addr, value)
|
||||||
return rtio_input_data(self.channel)
|
return rtio_input_data(self.channel)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -184,10 +189,14 @@ class SUServo:
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def get_adc_mu(self, adc):
|
def get_adc_mu(self, adc):
|
||||||
"""Get an ADC reading (IIR filter input X0) in machine units.
|
"""Get the latest ADC reading (IIR filter input X0) in machine units.
|
||||||
|
|
||||||
This method does not advance the timeline but consumes all slack.
|
This method does not advance the timeline but consumes all slack.
|
||||||
|
|
||||||
|
If reading servo state through this method collides with the servo
|
||||||
|
writing that same data, the data can become invalid. To ensure
|
||||||
|
consistent and valid data, stop the servo before using this method.
|
||||||
|
|
||||||
:param adc: ADC channel number (0-7)
|
:param adc: ADC channel number (0-7)
|
||||||
:return: 17 bit signed X0
|
:return: 17 bit signed X0
|
||||||
"""
|
"""
|
||||||
|
@ -214,10 +223,14 @@ class SUServo:
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def get_adc(self, channel):
|
def get_adc(self, channel):
|
||||||
"""Get an ADC reading (IIR filter input X0).
|
"""Get the latest ADC reading (IIR filter input X0).
|
||||||
|
|
||||||
This method does not advance the timeline but consumes all slack.
|
This method does not advance the timeline but consumes all slack.
|
||||||
|
|
||||||
|
If reading servo state through this method collides with the servo
|
||||||
|
writing that same data, the data can become invalid. To ensure
|
||||||
|
consistent and valid data, stop the servo before using this method.
|
||||||
|
|
||||||
The PGIA gain setting must be known prior to using this method, either
|
The PGIA gain setting must be known prior to using this method, either
|
||||||
by setting the gain (:meth:`set_pgia_mu`) or by supplying it
|
by setting the gain (:meth:`set_pgia_mu`) or by supplying it
|
||||||
(:attr:`gains` or via the constructor/device database).
|
(:attr:`gains` or via the constructor/device database).
|
||||||
|
@ -227,7 +240,7 @@ class SUServo:
|
||||||
"""
|
"""
|
||||||
val = self.get_adc_mu(channel)
|
val = self.get_adc_mu(channel)
|
||||||
gain = (self.gains >> (channel*2)) & 0b11
|
gain = (self.gains >> (channel*2)) & 0b11
|
||||||
return adc_mu_to_volts(val, gain)
|
return adc_mu_to_volts(val, gain, self.corrected_fs)
|
||||||
|
|
||||||
|
|
||||||
class Channel:
|
class Channel:
|
||||||
|
@ -242,9 +255,15 @@ class Channel:
|
||||||
self.servo = dmgr.get(servo_device)
|
self.servo = dmgr.get(servo_device)
|
||||||
self.core = self.servo.core
|
self.core = self.servo.core
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
# FIXME: this assumes the mem channel is right after the control
|
# This assumes the mem channel is right after the control channels
|
||||||
# channels
|
# Make sure this is always the case in eem.py
|
||||||
self.servo_channel = self.channel + 8 - self.servo.channel
|
self.servo_channel = (self.channel + 4 * len(self.servo.cplds) -
|
||||||
|
self.servo.channel)
|
||||||
|
self.dds = self.servo.ddses[self.servo_channel // 4]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_rtio_channels(channel, **kwargs):
|
||||||
|
return [(channel, None)]
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def set(self, en_out, en_iir=0, profile=0):
|
def set(self, en_out, en_iir=0, profile=0):
|
||||||
|
@ -253,16 +272,16 @@ class Channel:
|
||||||
This method does not advance the timeline. Output RF switch setting
|
This method does not advance the timeline. Output RF switch setting
|
||||||
takes effect immediately and is independent of any other activity
|
takes effect immediately and is independent of any other activity
|
||||||
(profile settings, other channels). The RF switch behaves like
|
(profile settings, other channels). The RF switch behaves like
|
||||||
:class:`artiq.coredevice.ttl.TTLOut`. RTIO event replacement is supported. IIR updates take place
|
:class:`artiq.coredevice.ttl.TTLOut`. RTIO event replacement is
|
||||||
once the RF switch has been enabled for the configured delay and the
|
supported. IIR updates take place once the RF switch has been enabled
|
||||||
profile setting has been stable. Profile changes take between one and
|
for the configured delay and the profile setting has been stable.
|
||||||
two servo cycles to reach the DDS.
|
Profile changes take between one and two servo cycles to reach the DDS.
|
||||||
|
|
||||||
:param en_out: RF switch enable
|
:param en_out: RF switch enable
|
||||||
:param en_iir: IIR updates enable
|
:param en_iir: IIR updates enable
|
||||||
:param profile: Active profile (0-31)
|
:param profile: Active profile (0-31)
|
||||||
"""
|
"""
|
||||||
rtio_output(now_mu(), self.channel, 0,
|
rtio_output(self.channel << 8,
|
||||||
en_out | (en_iir << 1) | (profile << 2))
|
en_out | (en_iir << 1) | (profile << 2))
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -278,8 +297,8 @@ class Channel:
|
||||||
"""
|
"""
|
||||||
base = (self.servo_channel << 8) | (profile << 3)
|
base = (self.servo_channel << 8) | (profile << 3)
|
||||||
self.servo.write(base + 0, ftw >> 16)
|
self.servo.write(base + 0, ftw >> 16)
|
||||||
self.servo.write(base + 6, ftw)
|
self.servo.write(base + 6, (ftw & 0xffff))
|
||||||
self.servo.write(base + 4, offs)
|
self.set_dds_offset_mu(profile, offs)
|
||||||
self.servo.write(base + 2, pow_)
|
self.servo.write(base + 2, pow_)
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
|
@ -292,21 +311,49 @@ class Channel:
|
||||||
|
|
||||||
:param profile: Profile number (0-31)
|
:param profile: Profile number (0-31)
|
||||||
:param frequency: DDS frequency in Hz
|
:param frequency: DDS frequency in Hz
|
||||||
:param offset: IIR offset (negative setpoint) in units of full scale.
|
:param offset: IIR offset (negative setpoint) in units of full scale,
|
||||||
For positive ADC voltages as setpoints, this should be negative.
|
see :meth:`dds_offset_to_mu`
|
||||||
Due to rounding and representation as two's complement,
|
|
||||||
``offset=1`` can not be represented while ``offset=-1`` can.
|
|
||||||
:param phase: DDS phase in turns
|
:param phase: DDS phase in turns
|
||||||
"""
|
"""
|
||||||
if self.servo_channel < 4:
|
ftw = self.dds.frequency_to_ftw(frequency)
|
||||||
dds = self.servo.dds0
|
pow_ = self.dds.turns_to_pow(phase)
|
||||||
else:
|
offs = self.dds_offset_to_mu(offset)
|
||||||
dds = self.servo.dds1
|
|
||||||
ftw = dds.frequency_to_ftw(frequency)
|
|
||||||
pow_ = dds.turns_to_pow(phase)
|
|
||||||
offs = int(round(offset*(1 << COEFF_WIDTH - 1)))
|
|
||||||
self.set_dds_mu(profile, ftw, offs, pow_)
|
self.set_dds_mu(profile, ftw, offs, pow_)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_dds_offset_mu(self, profile, offs):
|
||||||
|
"""Set only IIR offset in DDS coefficient profile.
|
||||||
|
|
||||||
|
See :meth:`set_dds_mu` for setting the complete DDS profile.
|
||||||
|
|
||||||
|
:param profile: Profile number (0-31)
|
||||||
|
:param offs: IIR offset (17 bit signed)
|
||||||
|
"""
|
||||||
|
base = (self.servo_channel << 8) | (profile << 3)
|
||||||
|
self.servo.write(base + 4, offs)
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def set_dds_offset(self, profile, offset):
|
||||||
|
"""Set only IIR offset in DDS coefficient profile.
|
||||||
|
|
||||||
|
See :meth:`set_dds` for setting the complete DDS profile.
|
||||||
|
|
||||||
|
:param profile: Profile number (0-31)
|
||||||
|
:param offset: IIR offset (negative setpoint) in units of full scale
|
||||||
|
"""
|
||||||
|
self.set_dds_offset_mu(profile, self.dds_offset_to_mu(offset))
|
||||||
|
|
||||||
|
@portable
|
||||||
|
def dds_offset_to_mu(self, offset):
|
||||||
|
"""Convert IIR offset (negative setpoint) from units of full scale to
|
||||||
|
machine units (see :meth:`set_dds_mu`, :meth:`set_dds_offset_mu`).
|
||||||
|
|
||||||
|
For positive ADC voltages as setpoints, this should be negative. Due to
|
||||||
|
rounding and representation as two's complement, ``offset=1`` can not
|
||||||
|
be represented while ``offset=-1`` can.
|
||||||
|
"""
|
||||||
|
return int(round(offset * (1 << COEFF_WIDTH - 1)))
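
# Worked example of the conversion above (COEFF_WIDTH = 18, scale 1 << 17):
#   self.dds_offset_to_mu(-1.0) == -131072
#   self.dds_offset_to_mu(-0.5) ==  -65536
#   self.dds_offset_to_mu(0.25) ==   32768
# offset = 1.0 would give +131072, which no longer fits the coefficient
# word; this is the two's-complement asymmetry mentioned above.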
|
||||||
|
|
||||||
@kernel
|
@kernel
|
||||||
def set_iir_mu(self, profile, adc, a1, b0, b1, dly=0):
|
def set_iir_mu(self, profile, adc, a1, b0, b1, dly=0):
|
||||||
"""Set profile IIR coefficients in machine units.
|
"""Set profile IIR coefficients in machine units.
|
||||||
|
@ -443,12 +490,16 @@ class Channel:
|
||||||
"""Get a profile's IIR state (filter output, Y0) in machine units.
|
"""Get a profile's IIR state (filter output, Y0) in machine units.
|
||||||
|
|
||||||
The IIR state is also known as the "integrator", or the DDS amplitude
The IIR state is also known as the "integrator", or the DDS amplitude
|
||||||
scale factor. It is 18 bits wide and unsigned.
|
scale factor. It is 17 bits wide and unsigned.
|
||||||
|
|
||||||
This method does not advance the timeline but consumes all slack.
|
This method does not advance the timeline but consumes all slack.
|
||||||
|
|
||||||
|
If reading servo state through this method collides with the servo
|
||||||
|
writing that same data, the data can become invalid. To ensure
|
||||||
|
consistent and valid data, stop the servo before using this method.
|
||||||
|
|
||||||
:param profile: Profile number (0-31)
|
:param profile: Profile number (0-31)
|
||||||
:return: 18 bit unsigned Y0
|
:return: 17 bit unsigned Y0
|
||||||
"""
|
"""
|
||||||
return self.servo.read(STATE_SEL | (self.servo_channel << 5) | profile)
|
return self.servo.read(STATE_SEL | (self.servo_channel << 5) | profile)
|
||||||
|
|
||||||
|
@ -457,10 +508,14 @@ class Channel:
|
||||||
"""Get a profile's IIR state (filter output, Y0).
|
"""Get a profile's IIR state (filter output, Y0).
|
||||||
|
|
||||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||||
scale factor. It is 18 bits wide and unsigned.
|
scale factor. It is 17 bits wide and unsigned.
|
||||||
|
|
||||||
This method does not advance the timeline but consumes all slack.
|
This method does not advance the timeline but consumes all slack.
|
||||||
|
|
||||||
|
If reading servo state through this method collides with the servo
|
||||||
|
writing that same data, the data can become invalid. To ensure
|
||||||
|
consistent and valid data, stop the servo before using this method.
|
||||||
|
|
||||||
:param profile: Profile number (0-31)
|
:param profile: Profile number (0-31)
|
||||||
:return: IIR filter output in Y0 units of full scale
|
:return: IIR filter output in Y0 units of full scale
|
||||||
"""
|
"""
|
||||||
|
@ -471,7 +526,7 @@ class Channel:
|
||||||
"""Set a profile's IIR state (filter output, Y0) in machine units.
|
"""Set a profile's IIR state (filter output, Y0) in machine units.
|
||||||
|
|
||||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||||
scale factor. It is 18 bits wide and unsigned.
|
scale factor. It is 17 bits wide and unsigned.
|
||||||
|
|
||||||
This method must not be used when the servo could be writing to the
|
This method must not be used when the servo could be writing to the
|
||||||
same location. Either deactivate the profile, or deactivate IIR
|
same location. Either deactivate the profile, or deactivate IIR
|
||||||
|
@ -491,7 +546,7 @@ class Channel:
|
||||||
"""Set a profile's IIR state (filter output, Y0).
|
"""Set a profile's IIR state (filter output, Y0).
|
||||||
|
|
||||||
The IIR state is also know as the "integrator", or the DDS amplitude
|
The IIR state is also know as the "integrator", or the DDS amplitude
|
||||||
scale factor. It is 18 bits wide and unsigned.
|
scale factor. It is 17 bits wide and unsigned.
|
||||||
|
|
||||||
This method must not be used when the servo could be writing to the
|
This method must not be used when the servo could be writing to the
|
||||||
same location. Either deactivate the profile, or deactivate IIR
|
same location. Either deactivate the profile, or deactivate IIR
|
||||||
|
@ -502,4 +557,8 @@ class Channel:
|
||||||
:param profile: Profile number (0-31)
|
:param profile: Profile number (0-31)
|
||||||
:param y: IIR state in units of full scale
|
:param y: IIR state in units of full scale
|
||||||
"""
|
"""
|
||||||
self.set_y_mu(profile, int(round((1 << COEFF_WIDTH - 1)*y)))
|
y_mu = int(round(y * Y_FULL_SCALE_MU))
|
||||||
|
if y_mu < 0 or y_mu > (1 << 17) - 1:
|
||||||
|
raise ValueError("Invalid SUServo y-value!")
|
||||||
|
self.set_y_mu(profile, y_mu)
|
||||||
|
return y_mu
|
||||||
|
|
|
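The new ``set_y`` path makes the 17-bit Y0 range explicit before writing. A small host-side sketch of the same conversion and clamp check, assuming ``Y_FULL_SCALE_MU`` is the 17-bit full-scale value (131071) used by the driver:

# Hypothetical standalone version of the y -> machine-units conversion;
# Y_FULL_SCALE_MU = (1 << 17) - 1 is an assumption matching the 17-bit
# unsigned Y0 state described above.
Y_FULL_SCALE_MU = (1 << 17) - 1

def y_to_mu(y):
    y_mu = int(round(y * Y_FULL_SCALE_MU))
    if y_mu < 0 or y_mu > (1 << 17) - 1:
        raise ValueError("Invalid SUServo y-value!")
    return y_mu

assert y_to_mu(0.0) == 0
assert y_to_mu(1.0) == 131071
# y_to_mu(-0.1) or y_to_mu(1.1) would raise ValueError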
@@ -0,0 +1,136 @@ (new file)
+class TRF372017:
+    """TRF372017 settings and register map.
+
+    For possible values, documentation, and explanation, see the datasheet.
+    https://www.ti.com/lit/gpn/trf372017
+    """
+    rdiv = 2  # 13b - highest valid f_PFD
+    ref_inv = 0
+    neg_vco = 1
+    icp = 0  # 1.94 mA, 5b
+    icp_double = 0
+    cal_clk_sel = 0b1110  # div64, 4b
+
+    # default f_vco is 2.875 GHz
+    nint = 23  # 16b - lowest value suitable for fractional & integer mode
+    pll_div_sel = 0b01  # div2, 2b
+    prsc_sel = 0  # 4/5
+    vco_sel = 2  # 2b
+    vcosel_mode = 0
+    cal_acc = 0b00  # 2b
+    en_cal = 0  # leave at 0 - calibration is performed in `Phaser.init()`
+
+    nfrac = 0  # 25b
+
+    pwd_pll = 0
+    pwd_cp = 0
+    pwd_vco = 0
+    pwd_vcomux = 0
+    pwd_div124 = 0
+    pwd_presc = 0
+    pwd_out_buff = 1  # leave at 1 - only enable outputs after calibration
+    pwd_lo_div = 1  # leave at 1 - only enable outputs after calibration
+    pwd_tx_div = 1  # leave at 1 - only enable outputs after calibration
+    pwd_bb_vcm = 0
+    pwd_dc_off = 0
+    en_extvco = 0
+    en_isource = 0
+    ld_ana_prec = 0  # 2b
+    cp_tristate = 0  # 2b
+    speedup = 0
+    ld_dig_prec = 1
+    en_dith = 1
+    mod_ord = 2  # 3rd order, 2b
+    dith_sel = 0
+    del_sd_clk = 2  # 2b
+    en_frac = 0
+
+    vcobias_rtrim = 4  # 3b
+    pllbias_rtrim = 2  # 2b
+    vco_bias = 8  # 460 µA, 4b
+    vcobuf_bias = 2  # 2b
+    vcomux_bias = 3  # 2b
+    bufout_bias = 0  # 300 µA, 2b
+    vco_cal_ib = 0  # PTAT
+    vco_cal_ref = 2  # 1.04 V, 2b
+    vco_ampl_ctrl = 3  # 2b
+    vco_vb_ctrl = 0  # 1.2 V, 2b
+    en_ld_isource = 0
+
+    ioff = 0x80  # 8b
+    qoff = 0x80  # 8b
+    vref_sel = 4  # 0.85 V, 3b
+    tx_div_sel = 0  # div1, 2b
+    lo_div_sel = 0  # div1, 2b
+    tx_div_bias = 1  # 37.5 µA, 2b
+    lo_div_bias = 2  # 50 µA, 2b
+
+    vco_trim = 0x20  # 6b
+    vco_test_mode = 0
+    cal_bypass = 0
+    mux_ctrl = 1  # lock detect, 3b
+    isource_sink = 0
+    isource_trim = 4  # 3b
+    pd_tc = 0  # 2b
+    ib_vcm_sel = 0  # ptat
+    dcoffset_i = 2  # 150 µA, 2b
+    vco_bias_sel = 1  # spi
+
+    def __init__(self, updates=None):
+        if updates is None:
+            return
+        for key, value in updates.items():
+            if not hasattr(self, key):
+                raise KeyError("invalid setting", key)
+            setattr(self, key, value)
+
+    def get_mmap(self):
+        """Memory map for TRF372017"""
+        mmap = []
+        mmap.append(
+            0x9 |
+            (self.rdiv << 5) | (self.ref_inv << 19) | (self.neg_vco << 20) |
+            (self.icp << 21) | (self.icp_double << 26) |
+            (self.cal_clk_sel << 27))
+        mmap.append(
+            0xa |
+            (self.nint << 5) | (self.pll_div_sel << 21) |
+            (self.prsc_sel << 23) | (self.vco_sel << 26) |
+            (self.vcosel_mode << 28) | (self.cal_acc << 29) |
+            (self.en_cal << 31))
+        mmap.append(0xb | (self.nfrac << 5))
+        mmap.append(
+            0xc |
+            (self.pwd_pll << 5) | (self.pwd_cp << 6) | (self.pwd_vco << 7) |
+            (self.pwd_vcomux << 8) | (self.pwd_div124 << 9) |
+            (self.pwd_presc << 10) | (self.pwd_out_buff << 12) |
+            (self.pwd_lo_div << 13) | (self.pwd_tx_div << 14) |
+            (self.pwd_bb_vcm << 15) | (self.pwd_dc_off << 16) |
+            (self.en_extvco << 17) | (self.en_isource << 18) |
+            (self.ld_ana_prec << 19) | (self.cp_tristate << 21) |
+            (self.speedup << 23) | (self.ld_dig_prec << 24) |
+            (self.en_dith << 25) | (self.mod_ord << 26) |
+            (self.dith_sel << 28) | (self.del_sd_clk << 29) |
+            (self.en_frac << 31))
+        mmap.append(
+            0xd |
+            (self.vcobias_rtrim << 5) | (self.pllbias_rtrim << 8) |
+            (self.vco_bias << 10) | (self.vcobuf_bias << 14) |
+            (self.vcomux_bias << 16) | (self.bufout_bias << 18) |
+            (1 << 21) | (self.vco_cal_ib << 22) | (self.vco_cal_ref << 23) |
+            (self.vco_ampl_ctrl << 26) | (self.vco_vb_ctrl << 28) |
+            (self.en_ld_isource << 31))
+        mmap.append(
+            0xe |
+            (self.ioff << 5) | (self.qoff << 13) | (self.vref_sel << 21) |
+            (self.tx_div_sel << 24) | (self.lo_div_sel << 26) |
+            (self.tx_div_bias << 28) | (self.lo_div_bias << 30))
+        mmap.append(
+            0xf |
+            (self.vco_trim << 7) | (self.vco_test_mode << 14) |
+            (self.cal_bypass << 15) | (self.mux_ctrl << 16) |
+            (self.isource_sink << 19) | (self.isource_trim << 20) |
+            (self.pd_tc << 23) | (self.ib_vcm_sel << 25) |
+            (1 << 28) | (self.dcoffset_i << 29) |
+            (self.vco_bias_sel << 31))
+        return mmap
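To see how the register map above is consumed, here is a hedged usage sketch. The actual SPI writes and calibration are performed elsewhere (the comments above point to ``Phaser.init()``); the loop below only prints the seven register words. The override values are hypothetical.

# Sketch only: instantiate with overrides and inspect the register words.
trf = TRF372017({"pll_div_sel": 0b10, "nint": 46})  # hypothetical overrides
for word in trf.get_mmap():
    print("0x{:08x}".format(word))
# Unknown attribute names are rejected by the constructor:
# TRF372017({"not_a_register": 1})  -> KeyError("invalid setting", ...)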
@@ -29,11 +29,16 @@ class TTLOut:

     :param channel: channel number
     """
-    kernel_invariants = {"core", "channel"}
+    kernel_invariants = {"core", "channel", "target_o"}

     def __init__(self, dmgr, channel, core_device="core"):
         self.core = dmgr.get(core_device)
         self.channel = channel
+        self.target_o = channel << 8
+
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]

     @kernel
     def output(self):

@@ -41,11 +46,11 @@ class TTLOut:

     @kernel
     def set_o(self, o):
-        rtio_output(now_mu(), self.channel, 0, 1 if o else 0)
+        rtio_output(self.target_o, 1 if o else 0)

     @kernel
     def on(self):
-        """Sets the output to a logic high state at the current position
+        """Set the output to a logic high state at the current position
         of the time cursor.

         The time cursor is not modified by this function."""
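The change above replaces the old ``rtio_output(now_mu(), channel, address, data)`` form with a precomputed target word. The encoding is simply ``(channel << 8) | address``; a plain-Python illustration with a hypothetical channel number:

# Illustration of the target word packing used by the new rtio_output()
# calls; the low 8 bits select the sub-register (address) of the channel.
def rtio_target(channel, address=0):
    return (channel << 8) | address

ttl_channel = 17  # hypothetical channel number
assert rtio_target(ttl_channel) == ttl_channel << 8          # TTLOut.target_o
# TTLInOut (below) uses addresses 0..3 for o, oe, sensitivity and sample:
o, oe, sens, sample = (rtio_target(ttl_channel, a) for a in range(4))
assert (o, oe, sens, sample) == (4352, 4353, 4354, 4355)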
@@ -106,15 +111,34 @@ class TTLInOut:

     :param channel: channel number
     """
-    kernel_invariants = {"core", "channel"}
+    kernel_invariants = {"core", "channel", "gate_latency_mu",
+                         "target_o", "target_oe", "target_sens", "target_sample"}

-    def __init__(self, dmgr, channel, core_device="core"):
+    def __init__(self, dmgr, channel, gate_latency_mu=None,
+                 core_device="core"):
         self.core = dmgr.get(core_device)
         self.channel = channel

+        # With TTLs inputs, the gate control is connected to a high-latency
+        # path through SED. When looking at the RTIO counter to determine if
+        # the gate has closed, we need to take this latency into account.
+        # See: https://github.com/m-labs/artiq/issues/1137
+        if gate_latency_mu is None:
+            gate_latency_mu = 13*self.core.ref_multiplier
+        self.gate_latency_mu = gate_latency_mu
+
+        self.target_o = (channel << 8) + 0
+        self.target_oe = (channel << 8) + 1
+        self.target_sens = (channel << 8) + 2
+        self.target_sample = (channel << 8) + 3
+
+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]

     @kernel
     def set_oe(self, oe):
-        rtio_output(now_mu(), self.channel, 1, 1 if oe else 0)
+        rtio_output(self.target_oe, 1 if oe else 0)

     @kernel
     def output(self):
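The added ``gate_latency_mu`` gives the margin used when comparing input timestamps against the gate-close time further below. A hedged sketch of that bookkeeping; the ``ref_multiplier`` value is hypothetical, the 13-cycle figure is the default from the constructor above:

# Hedged sketch of the latency margin applied to timestamp queries.
ref_multiplier = 8                       # hypothetical coarse/fine ratio
gate_latency_mu = 13 * ref_multiplier    # default from the constructor above

def count_events(timestamps_mu, gate_close_mu):
    # Mirrors count(): events are accepted up to the gate-close timestamp
    # plus the SED latency, like rtio_input_timestamp(up_to + latency, ...).
    deadline = gate_close_mu + gate_latency_mu
    return sum(1 for t in timestamps_mu if t <= deadline)

print(count_events([100, 150, 210], gate_close_mu=200))  # 3: 210 <= 200 + 104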
@@ -122,7 +146,11 @@ class TTLInOut:
         cursor.

         There must be a delay of at least one RTIO clock cycle before any
-        other command can be issued."""
+        other command can be issued.
+
+        This method only configures the direction at the FPGA. When using
+        buffered I/O interfaces, such as the Sinara TTL cards, the buffer
+        direction must be configured separately in the hardware."""
         self.set_oe(True)

     @kernel

@@ -131,12 +159,16 @@ class TTLInOut:
         cursor.

         There must be a delay of at least one RTIO clock cycle before any
-        other command can be issued."""
+        other command can be issued.
+
+        This method only configures the direction at the FPGA. When using
+        buffered I/O interfaces, such as the Sinara TTL cards, the buffer
+        direction must be configured separately in the hardware."""
         self.set_oe(False)

     @kernel
     def set_o(self, o):
-        rtio_output(now_mu(), self.channel, 0, 1 if o else 0)
+        rtio_output(self.target_o, 1 if o else 0)

     @kernel
     def on(self):

@@ -160,7 +192,7 @@ class TTLInOut:

     @kernel
     def pulse_mu(self, duration):
-        """Pulses the output high for the specified duration
+        """Pulse the output high for the specified duration
         (in machine units).

         The time cursor is advanced by the specified duration."""

@@ -170,7 +202,7 @@ class TTLInOut:

     @kernel
     def pulse(self, duration):
-        """Pulses the output high for the specified duration
+        """Pulse the output high for the specified duration
         (in seconds).

         The time cursor is advanced by the specified duration."""

@@ -181,7 +213,7 @@ class TTLInOut:
     # Input API: gating
     @kernel
     def _set_sensitivity(self, value):
-        rtio_output(now_mu(), self.channel, 2, value)
+        rtio_output(self.target_sens, value)

     @kernel
     def gate_rising_mu(self, duration):

@@ -323,7 +355,7 @@ class TTLInOut:
            ttl_input.count(ttl_input.gate_rising(100 * us))
         """
         count = 0
-        while rtio_input_timestamp(up_to_timestamp_mu, self.channel) >= 0:
+        while rtio_input_timestamp(up_to_timestamp_mu + self.gate_latency_mu, self.channel) >= 0:
             count += 1
         return count

@@ -346,7 +378,7 @@ class TTLInOut:
         :return: The timestamp (in machine units) of the first event received;
             -1 on timeout.
         """
-        return rtio_input_timestamp(up_to_timestamp_mu, self.channel)
+        return rtio_input_timestamp(up_to_timestamp_mu + self.gate_latency_mu, self.channel)

     # Input API: sampling
     @kernel

@@ -355,7 +387,7 @@ class TTLInOut:
         position of the time cursor.

         The time cursor is not modified by this function."""
-        rtio_output(now_mu(), self.channel, 3, 0)
+        rtio_output(self.target_sample, 0)

     @kernel
     def sample_get(self):

@@ -392,13 +424,13 @@ class TTLInOut:

         The time cursor is not modified by this function.
         """
-        rtio_output(now_mu(), self.channel, 3, 2)  # gate falling
+        rtio_output(self.target_sample, 2)  # gate falling
         return rtio_input_data(self.channel) == 1

     @kernel
     def watch_stay_off(self):
         """Like :meth:`watch_stay_on`, but for low levels."""
-        rtio_output(now_mu(), self.channel, 3, 1)  # gate rising
+        rtio_output(self.target_sample, 1)  # gate rising
         return rtio_input_data(self.channel) == 0

     @kernel

@@ -411,10 +443,10 @@ class TTLInOut:
         The time cursor is not modified by this function. This function
         always makes the slack negative.
         """
-        rtio_output(now_mu(), self.channel, 2, 0)
+        rtio_output(self.target_sens, 0)
         success = True
         try:
-            while rtio_input_timestamp(now_mu(), self.channel) != -1:
+            while rtio_input_timestamp(now_mu() + self.gate_latency_mu, self.channel) != -1:
                 success = False
         except RTIOOverflow:
             success = False
@@ -432,14 +464,19 @@ class TTLClockGen:
     :param channel: channel number
     :param acc_width: accumulator width in bits
     """
-    kernel_invariants = {"core", "channel", "acc_width"}
+    kernel_invariants = {"core", "channel", "target", "acc_width"}

     def __init__(self, dmgr, channel, acc_width=24, core_device="core"):
         self.core = dmgr.get(core_device)
         self.channel = channel
+        self.target = channel << 8
+
         self.acc_width = numpy.int64(acc_width)

+    @staticmethod
+    def get_rtio_channels(channel, **kwargs):
+        return [(channel, None)]
+
     @portable
     def frequency_to_ftw(self, frequency):
         """Returns the frequency tuning word corresponding to the given

@@ -472,7 +509,7 @@ class TTLClockGen:
         Due to the way the clock generator operates, frequency tuning words
         that are not powers of two cause jitter of one RTIO clock cycle at the
         output."""
-        rtio_output(now_mu(), self.channel, 0, frequency)
+        rtio_output(self.target, frequency)

     @kernel
     def set(self, frequency):
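The jitter remark in the docstring follows from the phase accumulator: output edges occur when the accumulator wraps, and only power-of-two tuning words divide the accumulator range evenly. A toy model (not the gateware, just an illustration of the arithmetic):

# Toy phase-accumulator model illustrating the jitter note above.
acc_width = 24

def edge_spacings(ftw, cycles=64):
    acc, last_edge, spacings = 0, 0, []
    for cycle in range(1, cycles + 1):
        acc = (acc + ftw) % (1 << acc_width)
        if acc < ftw:                   # accumulator wrapped -> output edge
            spacings.append(cycle - last_edge)
            last_edge = cycle
    return set(spacings)

print(edge_spacings(1 << 20))   # {16}: power of two, evenly spaced edges
print(edge_spacings(3 << 20))   # {5, 6}: spacing alternates, 1-cycle jitter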
@@ -1,15 +1,15 @@
+from numpy import int32, int64
+
 from artiq.language.core import kernel, delay, portable, at_mu, now_mu
 from artiq.language.units import us, ms
+from artiq.language.types import TInt32, TFloat, TBool
-from numpy import int32

 from artiq.coredevice import spi2 as spi

-SPI_CONFIG = (0*spi.SPI_OFFLINE | 0*spi.SPI_END |
-              0*spi.SPI_INPUT | 1*spi.SPI_CS_POLARITY |
-              0*spi.SPI_CLK_POLARITY | 0*spi.SPI_CLK_PHASE |
-              0*spi.SPI_LSB_FIRST | 0*spi.SPI_HALF_DUPLEX)
+SPI_CONFIG = (0 * spi.SPI_OFFLINE | 0 * spi.SPI_END |
+              0 * spi.SPI_INPUT | 1 * spi.SPI_CS_POLARITY |
+              0 * spi.SPI_CLK_POLARITY | 0 * spi.SPI_CLK_PHASE |
+              0 * spi.SPI_LSB_FIRST | 0 * spi.SPI_HALF_DUPLEX)

 # SPI clock write and read dividers
 SPIT_CFG_WR = 2

@@ -31,6 +31,7 @@ CFG_CLK_SEL1 = 21
 CFG_SYNC_SEL = 18
 CFG_RST = 19
 CFG_IO_RST = 20
+CFG_CLK_DIV = 22

 # STA status register bit offsets
 STA_RF_SW = 0

@@ -51,10 +52,13 @@ CS_DDS_CH1 = 5
 CS_DDS_CH2 = 6
 CS_DDS_CH3 = 7

+# Default profile
+DEFAULT_PROFILE = 7
+

 @portable
 def urukul_cfg(rf_sw, led, profile, io_update, mask_nu,
-               clk_sel, sync_sel, rst, io_rst):
+               clk_sel, sync_sel, rst, io_rst, clk_div):
     """Build Urukul CPLD configuration register"""
     return ((rf_sw << CFG_RF_SW) |
             (led << CFG_LED) |

@@ -65,7 +69,8 @@ def urukul_cfg(rf_sw, led, profile, io_update, mask_nu,
             ((clk_sel & 0x02) << (CFG_CLK_SEL1 - 1)) |
             (sync_sel << CFG_SYNC_SEL) |
             (rst << CFG_RST) |
-            (io_rst << CFG_IO_RST))
+            (io_rst << CFG_IO_RST) |
+            (clk_div << CFG_CLK_DIV))


 @portable
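The new ``clk_div`` field simply lands at bit offset 22 of the configuration word. A hedged sketch of the packing, using only the offsets visible in the diff above (the full register also carries ``rf_sw``, ``led``, ``profile`` and other fields at offsets not shown here):

# Partial re-implementation for illustration; only the bit offsets that
# appear in the diff above are used, the remaining fields are omitted.
CFG_SYNC_SEL = 18
CFG_RST = 19
CFG_IO_RST = 20
CFG_CLK_DIV = 22

def cfg_tail(sync_sel, rst, io_rst, clk_div):
    return ((sync_sel << CFG_SYNC_SEL) |
            (rst << CFG_RST) |
            (io_rst << CFG_IO_RST) |
            (clk_div << CFG_CLK_DIV))

assert cfg_tail(0, 0, 0, 3) == 3 << 22   # divide-by-4 sets both CLK_DIV bits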
@@ -103,7 +108,7 @@ class _RegIOUpdate:
         self.cpld = cpld

     @kernel
-    def pulse(self, t):
+    def pulse(self, t: TFloat):
         cfg = self.cpld.cfg_reg
         self.cpld.cfg_write(cfg | (1 << CFG_IO_UPDATE))
         delay(t)

@@ -115,7 +120,7 @@ class _DummySync:
         self.cpld = cpld

     @kernel
-    def set_mu(self, ftw):
+    def set_mu(self, ftw: TInt32):
         pass


@@ -133,28 +138,42 @@ class CPLD:
         internal MMCX. For hardware revision <= v1.2 valid options are: 0 -
         either XO or MMCX dependent on component population; 1 SMA. Unsupported
         clocking options are silently ignored.
-    :param sync_sel: SYNC_IN selection. 0 corresponds to SYNC_IN over EEM
-        from FPGA. 1 corresponds to SYNC_IN from DDS0.
+    :param clk_div: Reference clock divider. Valid options are 0: variant
+        dependent default (divide-by-4 for AD9910 and divide-by-1 for AD9912);
+        1: divide-by-1; 2: divide-by-2; 3: divide-by-4.
+        On Urukul boards with CPLD gateware before v1.3.1 only the default
+        (0, i.e. variant dependent divider) is valid.
+    :param sync_sel: SYNC (multi-chip synchronisation) signal source selection.
+        0 corresponds to SYNC_IN being supplied by the FPGA via the EEM
+        connector. 1 corresponds to SYNC_OUT from DDS0 being distributed to the
+        other chips.
     :param rf_sw: Initial CPLD RF switch register setting (default: 0x0).
         Knowledge of this state is not transferred between experiments.
     :param att: Initial attenuator setting shift register (default:
-        0x00000000). See also: :meth:`set_all_att_mu`. Knowledge of this state
-        is not transferred between experiments.
+        0x00000000). See also :meth:`get_att_mu` which retrieves the hardware
+        state without side effects. Knowledge of this state is not transferred
+        between experiments.
     :param sync_div: SYNC_IN generator divider. The ratio between the coarse
         RTIO frequency and the SYNC_IN generator frequency (default: 2 if
        `sync_device` was specified).
     :param core_device: Core device name
+
+    If the clocking is incorrect (for example, setting ``clk_sel`` to the
+    front panel SMA with no clock connected), then the ``init()`` method of
+    the DDS channels can fail with the error message ``PLL lock timeout``.
     """
-    kernel_invariants = {"refclk", "bus", "core", "io_update"}
+    kernel_invariants = {"refclk", "bus", "core", "io_update", "clk_div"}

     def __init__(self, dmgr, spi_device, io_update_device=None,
                  dds_reset_device=None, sync_device=None,
-                 sync_sel=0, clk_sel=0, rf_sw=0,
+                 sync_sel=0, clk_sel=0, clk_div=0, rf_sw=0,
                  refclk=125e6, att=0x00000000, sync_div=None,
                  core_device="core"):

         self.core = dmgr.get(core_device)
         self.refclk = refclk
+        assert 0 <= clk_div <= 3
+        self.clk_div = clk_div

         self.bus = dmgr.get(spi_device)
         if io_update_device is not None:
@@ -172,19 +191,20 @@ class CPLD:
             assert sync_div is None
             sync_div = 0

-        self.cfg_reg = urukul_cfg(rf_sw=rf_sw, led=0, profile=0,
+        self.cfg_reg = urukul_cfg(rf_sw=rf_sw, led=0, profile=DEFAULT_PROFILE,
                                   io_update=0, mask_nu=0, clk_sel=clk_sel,
-                                  sync_sel=sync_sel, rst=0, io_rst=0)
-        self.att_reg = int32(att)
+                                  sync_sel=sync_sel,
+                                  rst=0, io_rst=0, clk_div=clk_div)
+        self.att_reg = int32(int64(att))
         self.sync_div = sync_div

     @kernel
-    def cfg_write(self, cfg):
+    def cfg_write(self, cfg: TInt32):
         """Write to the configuration register.

         See :func:`urukul_cfg` for possible flags.

-        :param data: 24 bit data to be written. Will be stored at
+        :param cfg: 24 bit data to be written. Will be stored at
             :attr:`cfg_reg`.
         """
         self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 24,

@@ -193,7 +213,7 @@ class CPLD:
         self.cfg_reg = cfg

     @kernel
-    def sta_read(self):
+    def sta_read(self) -> TInt32:
         """Read the status register.

         Use any of the following functions to extract values:

@@ -212,7 +232,7 @@ class CPLD:
         return self.bus.read()

     @kernel
-    def init(self, blind=False):
+    def init(self, blind: TBool = False):
         """Initialize and detect Urukul.

         Resets the DDS I/O interface and verifies correct CPLD gateware

@@ -230,12 +250,12 @@ class CPLD:
             proto_rev = urukul_sta_proto_rev(self.sta_read())
             if proto_rev != STA_PROTO_REV_MATCH:
                 raise ValueError("Urukul proto_rev mismatch")
-        delay(100*us)  # reset, slack
+        delay(100 * us)  # reset, slack
         self.cfg_write(cfg)
         if self.sync_div:
             at_mu(now_mu() & ~0xf)  # align to RTIO/2
             self.set_sync_div(self.sync_div)  # 125 MHz/2 = 1 GHz/16
-        delay(1*ms)  # DDS wake up
+        delay(1 * ms)  # DDS wake up

     @kernel
     def io_rst(self):

@@ -244,7 +264,7 @@ class CPLD:
         self.cfg_write(self.cfg_reg & ~(1 << CFG_IO_RST))

     @kernel
-    def cfg_sw(self, channel, on):
+    def cfg_sw(self, channel: TInt32, on: TBool):
         """Configure the RF switches through the configuration register.

         These values are logically OR-ed with the LVDS lines on EEM1.
@@ -260,21 +280,44 @@ class CPLD:
         self.cfg_write(c)

     @kernel
-    def cfg_switches(self, state):
+    def cfg_switches(self, state: TInt32):
         """Configure all four RF switches through the configuration register.

         :param state: RF switch state as a 4 bit integer.
         """
         self.cfg_write((self.cfg_reg & ~0xf) | state)

+    @portable(flags={"fast-math"})
+    def mu_to_att(self, att_mu: TInt32) -> TFloat:
+        """Convert a digital attenuation setting to dB.
+
+        :param att_mu: Digital attenuation setting.
+        :return: Attenuation setting in dB.
+        """
+        return (255 - (att_mu & 0xff)) / 8
+
+    @portable(flags={"fast-math"})
+    def att_to_mu(self, att: TFloat) -> TInt32:
+        """Convert an attenuation setting in dB to machine units.
+
+        :param att: Attenuation setting in dB.
+        :return: Digital attenuation setting.
+        """
+        code = int32(255) - int32(round(att * 8))
+        if code < 0 or code > 255:
+            raise ValueError("Invalid urukul.CPLD attenuation!")
+        return code
+
     @kernel
-    def set_att_mu(self, channel, att):
+    def set_att_mu(self, channel: TInt32, att: TInt32):
         """Set digital step attenuator in machine units.

-        This method will write the attenuator settings of all four channels.
+        This method will also write the attenuator settings of the three
+        other channels. Use :meth:`get_att_mu` to retrieve the hardware
+        state set in previous experiments.

         :param channel: Attenuator channel (0-3).
-        :param att: Digital attenuation setting:
+        :param att: 8-bit digital attenuation setting:
             255 minimum attenuation, 0 maximum attenuation (31.5 dB)
         """
         a = self.att_reg & ~(0xff << (channel * 8))

@@ -282,7 +325,7 @@ class CPLD:
         self.set_all_att_mu(a)

     @kernel
-    def set_all_att_mu(self, att_reg):
+    def set_all_att_mu(self, att_reg: TInt32):
         """Set all four digital step attenuators (in machine units).

         .. seealso:: :meth:`set_att_mu`
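The attenuator conversions added above are pure arithmetic: 0.125 dB steps, with code 255 meaning 0 dB (minimum attenuation). A plain-Python mirror of ``att_to_mu`` and ``mu_to_att``, for illustration only:

# Host-side mirror of the conversions above, no hardware involved.
def att_to_mu(att_db):
    code = 255 - int(round(att_db * 8))
    if code < 0 or code > 255:
        raise ValueError("Invalid urukul.CPLD attenuation!")
    return code

def mu_to_att(att_mu):
    return (255 - (att_mu & 0xff)) / 8

assert att_to_mu(0.0) == 255                  # minimum attenuation
assert mu_to_att(att_to_mu(10.5)) == 10.5     # exact on the 0.125 dB grid
# att_to_mu(32.0) would raise ValueError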
@@ -295,49 +338,88 @@ class CPLD:
         self.att_reg = att_reg

     @kernel
-    def set_att(self, channel, att):
+    def set_att(self, channel: TInt32, att: TFloat):
         """Set digital step attenuator in SI units.

+        This method will write the attenuator settings of all four channels.
+
+        .. seealso:: :meth:`set_att_mu`
+
         :param channel: Attenuator channel (0-3).
         :param att: Attenuation setting in dB. Higher value is more
             attenuation. Minimum attenuation is 0*dB, maximum attenuation is
             31.5*dB.
         """
-        self.set_att_mu(channel, 255 - int32(round(att*8)))
+        self.set_att_mu(channel, self.att_to_mu(att))

     @kernel
-    def get_att_mu(self):
+    def get_att_mu(self) -> TInt32:
         """Return the digital step attenuator settings in machine units.

-        This method will also (as a side effect) write the attenuator
-        settings of all four channels.
+        The result is stored and will be used in future calls of
+        :meth:`set_att_mu` and :meth:`set_att`.
+
+        .. seealso:: :meth:`get_channel_att_mu`

         :return: 32 bit attenuator settings
         """
-        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END | spi.SPI_INPUT, 32,
+        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_INPUT, 32,
                                SPIT_ATT_RD, CS_ATT)
-        self.bus.write(self.att_reg)
-        return self.bus.read()
+        self.bus.write(0)  # shift in zeros, shift out current value
+        self.bus.set_config_mu(SPI_CONFIG | spi.SPI_END, 32,
+                               SPIT_ATT_WR, CS_ATT)
+        delay(10 * us)
+        self.att_reg = self.bus.read()
+        self.bus.write(self.att_reg)  # shift in current value again and latch
+        return self.att_reg

     @kernel
-    def set_sync_div(self, div):
+    def get_channel_att_mu(self, channel: TInt32) -> TInt32:
+        """Get digital step attenuator value for a channel in machine units.
+
+        The result is stored and will be used in future calls of
+        :meth:`set_att_mu` and :meth:`set_att`.
+
+        .. seealso:: :meth:`get_att_mu`
+
+        :param channel: Attenuator channel (0-3).
+        :return: 8-bit digital attenuation setting:
+            255 minimum attenuation, 0 maximum attenuation (31.5 dB)
+        """
+        return int32((self.get_att_mu() >> (channel * 8)) & 0xff)
+
+    @kernel
+    def get_channel_att(self, channel: TInt32) -> TFloat:
+        """Get digital step attenuator value for a channel in SI units.
+
+        .. seealso:: :meth:`get_channel_att_mu`
+
+        :param channel: Attenuator channel (0-3).
+        :return: Attenuation setting in dB. Higher value is more
+            attenuation. Minimum attenuation is 0*dB, maximum attenuation is
+            31.5*dB.
+        """
+        return self.mu_to_att(self.get_channel_att_mu(channel))
+
+    @kernel
+    def set_sync_div(self, div: TInt32):
         """Set the SYNC_IN AD9910 pulse generator frequency
         and align it to the current RTIO timestamp.

         The SYNC_IN signal is derived from the coarse RTIO clock
-        and the divider must be a power of two two.
+        and the divider must be a power of two.
         Configure ``sync_sel == 0``.

         :param div: SYNC_IN frequency divider. Must be a power of two.
             Minimum division ratio is 2. Maximum division ratio is 16.
         """
         ftw_max = 1 << 4
-        ftw = ftw_max//div
-        assert ftw*div == ftw_max
+        ftw = ftw_max // div
+        assert ftw * div == ftw_max
         self.sync.set_mu(ftw)

     @kernel
-    def set_profile(self, profile):
+    def set_profile(self, profile: TInt32):
         """Set the PROFILE pins.

         The PROFILE pins are common to all four DDS channels.
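``get_channel_att_mu`` just slices one byte out of the 32-bit shift-register image, channel 0 in the least significant byte. A byte-slicing sketch with a made-up register value:

# Illustration of the per-channel extraction; att_reg is hypothetical.
att_reg = 0x0A33FFAB

def channel_att_mu(att_reg, channel):
    return (att_reg >> (channel * 8)) & 0xff

assert channel_att_mu(att_reg, 0) == 0xAB
assert channel_att_mu(att_reg, 3) == 0x0A
# Converting to dB then follows mu_to_att(): (255 - code) / 8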
@@ -27,15 +27,15 @@ class Zotino(AD53xx):
     :param clr_device: CLR RTIO TTLOut channel name.
     :param div_write: SPI clock divider for write operations (default: 4,
         50MHz max SPI clock)
-    :param div_read: SPI clock divider for read operations (default: 8, not
-        optimized for speed, but cf data sheet t22: 25ns min SCLK edge to SDO
-        valid)
+    :param div_read: SPI clock divider for read operations (default: 16, not
+        optimized for speed; datasheet says t22: 25ns min SCLK edge to SDO
+        valid, and suggests the SPI speed for reads should be <=20 MHz)
     :param vref: DAC reference voltage (default: 5.)
     :param core_device: Core device name (default: "core")
     """

     def __init__(self, dmgr, spi_device, ldac_device=None, clr_device=None,
-                 div_write=4, div_read=8, vref=5., core="core"):
+                 div_write=4, div_read=16, vref=5., core="core"):
         AD53xx.__init__(self, dmgr=dmgr, spi_device=spi_device,
                         ldac_device=ldac_device, clr_device=clr_device,
                         chip_select=_SPI_CS_DAC, div_write=div_write,
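A rough sanity check of the new ``div_read`` default. This assumes (hypothetically) a 125 MHz RTIO clock and an SPI bit clock of roughly f_rtio / div, which is not stated in the diff itself:

# Back-of-the-envelope only; f_rtio and the divider relation are assumptions.
f_rtio = 125e6
for div in (4, 8, 16):
    print(div, round(f_rtio / div / 1e6, 2), "MHz")
# div_write=4  -> ~31 MHz, below the 50 MHz write limit quoted above
# div_read=16  -> ~7.8 MHz, comfortably under the ~20 MHz suggested for reads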
@@ -1,3 +1,4 @@
+import asyncio
 import logging

 from PyQt5 import QtCore, QtWidgets

@@ -44,6 +45,7 @@ class AppletsCCBDock(applets.AppletsDock):
         self.ccbp_group_action.setMenu(ccbp_group_menu)
         self.table.addAction(self.ccbp_group_action)
         self.table.itemSelectionChanged.connect(self.update_group_ccbp_menu)
+        self.update_group_ccbp_menu()

         ccbp_global_menu = QtWidgets.QMenu()
         actiongroup = QtWidgets.QActionGroup(self.table)

@@ -149,15 +151,16 @@ class AppletsCCBDock(applets.AppletsDock):
         corresponds to a single group. If ``group`` is ``None`` or an empty
         list, it corresponds to the root.

-        ``command`` gives the command line used to run the applet, as if it
-        was started from a shell. The dashboard substitutes variables such as
-        ``$python`` that gives the complete file name of the Python
-        interpreter running the dashboard.
+        ``command`` gives the command line used to run the applet, as if it was
+        started from a shell. The dashboard substitutes variables such as
+        ``$python`` that gives the complete file name of the Python interpreter
+        running the dashboard.

         If the name already exists (after following any specified groups), the
         command or code of the existing applet with that name is replaced, and
-        the applet is shown at its previous position. If not, a new applet
-        entry is created and the applet is shown at any position on the screen.
+        the applet is restarted and shown at its previous position. If not, a
+        new applet entry is created and the applet is shown at any position on
+        the screen.

         If the group(s) do not exist, they are created.

@@ -181,9 +184,17 @@ class AppletsCCBDock(applets.AppletsDock):
         else:
             spec = {"ty": "code", "code": code, "command": command}
         if applet is None:
+            logger.debug("Applet %s does not exist: creating", name)
             applet = self.new(name=name, spec=spec, parent=parent)
         else:
-            self.set_spec(applet, spec)
+            if spec != self.get_spec(applet):
+                logger.debug("Applet %s already exists: updating existing spec", name)
+                self.set_spec(applet, spec)
+                if applet.applet_dock:
+                    asyncio.ensure_future(applet.applet_dock.restart())
+            else:
+                logger.debug("Applet %s already exists and no update required", name)

         if ccbp == "enable":
             applet.setCheckState(0, QtCore.Qt.Checked)
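The new branch only touches a running applet when its spec actually changed. A compact sketch of that update-only-if-changed pattern, with hypothetical names and no Qt involved:

# Minimal sketch of the idempotent update logic used above.
def apply_spec(registry, name, spec, restart):
    existing = registry.get(name)
    if existing is None:
        registry[name] = spec        # create a new entry
    elif existing != spec:
        registry[name] = spec        # update the stored spec...
        restart(name)                # ...and restart the running applet
    # else: spec unchanged, leave the applet alone

restarted = []
reg = {"plot": {"command": "old"}}
apply_spec(reg, "plot", {"command": "new"}, restarted.append)
assert restarted == ["plot"]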
@@ -3,8 +3,9 @@ import logging

 import numpy as np
 from PyQt5 import QtCore, QtWidgets
+from sipyco import pyon

-from artiq.tools import short_format
+from artiq.tools import scale_from_metadata, short_format, exc_to_warning
 from artiq.gui.tools import LayoutWidget, QRecursiveFilterProxyModel
 from artiq.gui.models import DictSyncTreeSepModel
 from artiq.gui.scientific_spinbox import ScientificSpinBox

@@ -13,73 +14,152 @@ from artiq.gui.scientific_spinbox import ScientificSpinBox
 logger = logging.getLogger(__name__)


-class Editor(QtWidgets.QDialog):
-    def __init__(self, parent, dataset_ctl, key, value):
+async def rename(key, new_key, value, metadata, persist, dataset_ctl):
+    if key != new_key:
+        await dataset_ctl.delete(key)
+    await dataset_ctl.set(new_key, value, metadata=metadata, persist=persist)
+
+
+class CreateEditDialog(QtWidgets.QDialog):
+    def __init__(self, parent, dataset_ctl, key=None, value=None, metadata=None, persist=False):
         QtWidgets.QDialog.__init__(self, parent=parent)
         self.dataset_ctl = dataset_ctl
-        self.key = key
-        self.initial_type = type(value)

-        self.setWindowTitle("Edit dataset")
+        self.setWindowTitle("Create dataset" if key is None else "Edit dataset")
         grid = QtWidgets.QGridLayout()
+        grid.setRowMinimumHeight(1, 40)
+        grid.setColumnMinimumWidth(2, 60)
         self.setLayout(grid)

         grid.addWidget(QtWidgets.QLabel("Name:"), 0, 0)
-        grid.addWidget(QtWidgets.QLabel(key), 0, 1)
+        self.name_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.name_widget, 0, 1)

         grid.addWidget(QtWidgets.QLabel("Value:"), 1, 0)
-        grid.addWidget(self.get_edit_widget(value), 1, 1)
+        self.value_widget = QtWidgets.QLineEdit()
+        self.value_widget.setPlaceholderText('PYON (Python)')
+        grid.addWidget(self.value_widget, 1, 1)
+        self.data_type = QtWidgets.QLabel("data type")
+        grid.addWidget(self.data_type, 1, 2)
+        self.value_widget.textChanged.connect(self.dtype)

-        buttons = QtWidgets.QDialogButtonBox(
-            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
-        grid.setRowStretch(2, 1)
-        grid.addWidget(buttons, 3, 0, 1, 2)
-        buttons.accepted.connect(self.accept)
-        buttons.rejected.connect(self.reject)
+        grid.addWidget(QtWidgets.QLabel("Unit:"), 2, 0)
+        self.unit_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.unit_widget, 2, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Scale:"), 3, 0)
+        self.scale_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.scale_widget, 3, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Precision:"), 4, 0)
+        self.precision_widget = QtWidgets.QLineEdit()
+        grid.addWidget(self.precision_widget, 4, 1)
+
+        grid.addWidget(QtWidgets.QLabel("Persist:"), 5, 0)
+        self.box_widget = QtWidgets.QCheckBox()
+        grid.addWidget(self.box_widget, 5, 1)
+
+        self.ok = QtWidgets.QPushButton('&Ok')
+        self.ok.setEnabled(False)
+        self.cancel = QtWidgets.QPushButton('&Cancel')
+        self.buttons = QtWidgets.QDialogButtonBox(self)
+        self.buttons.addButton(
+            self.ok, QtWidgets.QDialogButtonBox.AcceptRole)
+        self.buttons.addButton(
+            self.cancel, QtWidgets.QDialogButtonBox.RejectRole)
+        grid.setRowStretch(6, 1)
+        grid.addWidget(self.buttons, 7, 0, 1, 3, alignment=QtCore.Qt.AlignHCenter)
+        self.buttons.accepted.connect(self.accept)
+        self.buttons.rejected.connect(self.reject)
+
+        self.key = key
+        self.name_widget.setText(key)
+
+        value_edit_string = self.value_to_edit_string(value)
+        if metadata is not None:
+            scale = scale_from_metadata(metadata)
+            t = value.dtype if value is np.ndarray else type(value)
+            if scale != 1 and np.issubdtype(t, np.number):
+                # degenerates to float type
+                value_edit_string = self.value_to_edit_string(
+                    np.float64(value / scale))
+            self.unit_widget.setText(metadata.get('unit', ''))
+            self.scale_widget.setText(str(metadata.get('scale', '')))
+            self.precision_widget.setText(str(metadata.get('precision', '')))
+
+        self.value_widget.setText(value_edit_string)
+        self.box_widget.setChecked(persist)

     def accept(self):
-        value = self.initial_type(self.get_edit_widget_value())
-        asyncio.ensure_future(self.dataset_ctl.set(self.key, value))
+        key = self.name_widget.text()
+        value = self.value_widget.text()
+        persist = self.box_widget.isChecked()
+        unit = self.unit_widget.text()
+        scale = self.scale_widget.text()
+        precision = self.precision_widget.text()
+        metadata = {}
+        if unit != "":
+            metadata['unit'] = unit
+        if scale != "":
+            metadata['scale'] = float(scale)
+        if precision != "":
+            metadata['precision'] = int(precision)
+        scale = scale_from_metadata(metadata)
+        value = self.parse_edit_string(value)
+        t = value.dtype if value is np.ndarray else type(value)
+        if scale != 1 and np.issubdtype(t, np.number):
+            # degenerates to float type
+            value = np.float64(value * scale)
+        if self.key and self.key != key:
+            asyncio.ensure_future(exc_to_warning(rename(self.key, key, value, metadata, persist, self.dataset_ctl)))
+        else:
+            asyncio.ensure_future(exc_to_warning(self.dataset_ctl.set(key, value, metadata=metadata, persist=persist)))
+        self.key = key
         QtWidgets.QDialog.accept(self)

-    def get_edit_widget(self, initial_value):
-        raise NotImplementedError
+    def dtype(self):
+        txt = self.value_widget.text()
+        try:
+            result = self.parse_edit_string(txt)
+            # ensure only pyon compatible types are permissable
+            pyon.encode(result)
+        except:
+            pixmap = self.style().standardPixmap(
+                QtWidgets.QStyle.SP_MessageBoxWarning)
+            self.data_type.setPixmap(pixmap)
+            self.ok.setEnabled(False)
+        else:
+            self.data_type.setText(type(result).__name__)
+            self.ok.setEnabled(True)

-    def get_edit_widget_value(self):
-        raise NotImplementedError
+    @staticmethod
+    def parse_edit_string(s):
+        if s == "":
+            raise TypeError
+        _eval_dict = {
+            "__builtins__": {},
+            "array": np.array,
+            "null": np.nan,
+            "inf": np.inf
+        }
+        for t_ in pyon._numpy_scalar:
+            _eval_dict[t_] = eval("np.{}".format(t_), {"np": np})
+        return eval(s, _eval_dict, {})

-class NumberEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = ScientificSpinBox()
-        self.edit_widget.setDecimals(13)
-        self.edit_widget.setPrecision()
-        self.edit_widget.setRelativeStep()
-        self.edit_widget.setValue(float(initial_value))
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.value()
-
-
-class BoolEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = QtWidgets.QCheckBox()
-        self.edit_widget.setChecked(bool(initial_value))
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.isChecked()
-
-
-class StringEditor(Editor):
-    def get_edit_widget(self, initial_value):
-        self.edit_widget = QtWidgets.QLineEdit()
-        self.edit_widget.setText(initial_value)
-        return self.edit_widget
-
-    def get_edit_widget_value(self):
-        return self.edit_widget.text()
+    @staticmethod
+    def value_to_edit_string(v):
+        t = type(v)
+        r = ""
+        if isinstance(v, np.generic):
+            r += t.__name__
+            r += "("
+            r += repr(v)
+            r += ")"
+        elif v is None:
+            return r
+        else:
+            r += repr(v)
+        return r


 class Model(DictSyncTreeSepModel):
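The dialog round-trips dataset values through an editable string: ``value_to_edit_string`` renders them, ``parse_edit_string`` re-evaluates them in a restricted namespace, and ``pyon.encode`` guards against non-PYON types. An illustrative usage sketch (values are hypothetical; it relies on the two staticmethods defined above, so it only runs where that module and its imports are available):

import numpy as np

examples = [42, 3.5, np.float64(1.25), "text", [1, 2, 3]]
for v in examples:
    s = CreateEditDialog.value_to_edit_string(v)
    restored = CreateEditDialog.parse_edit_string(s)
    print(repr(s), "->", repr(restored))
# An empty string is rejected (parse_edit_string raises TypeError), which is
# what keeps the Ok button disabled until the value field parses.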
@@ -92,13 +172,13 @@ class Model(DictSyncTreeSepModel):
         if column == 1:
             return "Y" if v[0] else "N"
         elif column == 2:
-            return short_format(v[1])
+            return short_format(v[1], v[2])
         else:
             raise ValueError


 class DatasetsDock(QtWidgets.QDockWidget):
-    def __init__(self, datasets_sub, dataset_ctl):
+    def __init__(self, dataset_sub, dataset_ctl):
         QtWidgets.QDockWidget.__init__(self, "Datasets")
         self.setObjectName("Datasets")
         self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |

@@ -120,6 +200,11 @@ class DatasetsDock(QtWidgets.QDockWidget):
         grid.addWidget(self.table, 1, 0)

         self.table.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
+        create_action = QtWidgets.QAction("New dataset", self.table)
+        create_action.triggered.connect(self.create_clicked)
+        create_action.setShortcut("CTRL+N")
+        create_action.setShortcutContext(QtCore.Qt.WidgetShortcut)
+        self.table.addAction(create_action)
         edit_action = QtWidgets.QAction("Edit dataset", self.table)
         edit_action.triggered.connect(self.edit_clicked)
         edit_action.setShortcut("RETURN")

@@ -133,7 +218,7 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table.addAction(delete_action)

         self.table_model = Model(dict())
-        datasets_sub.add_setmodel_callback(self.set_model)
+        dataset_sub.add_setmodel_callback(self.set_model)

     def _search_datasets(self):
         if hasattr(self, "table_model_filter"):

@@ -146,25 +231,17 @@ class DatasetsDock(QtWidgets.QDockWidget):
         self.table_model_filter.setSourceModel(self.table_model)
         self.table.setModel(self.table_model_filter)

+    def create_clicked(self):
+        CreateEditDialog(self, self.dataset_ctl).open()
+
     def edit_clicked(self):
         idx = self.table.selectedIndexes()
         if idx:
             idx = self.table_model_filter.mapToSource(idx[0])
             key = self.table_model.index_to_key(idx)
             if key is not None:
-                persist, value = self.table_model.backing_store[key]
-                t = type(value)
-                if np.issubdtype(t, np.number):
-                    dialog_cls = NumberEditor
-                elif np.issubdtype(t, np.bool_):
-                    dialog_cls = BoolEditor
-                elif np.issubdtype(t, np.unicode_):
-                    dialog_cls = StringEditor
-                else:
-                    logger.error("Cannot edit dataset %s: "
-                                 "type %s is not supported", key, t)
-                    return
-                dialog_cls(self, self.dataset_ctl, key, value).open()
+                persist, value, metadata = self.table_model.backing_store[key]
+                CreateEditDialog(self, self.dataset_ctl, key, value, metadata, persist).open()

     def delete_clicked(self):
         idx = self.table.selectedIndexes()
@ -7,9 +7,13 @@ from collections import OrderedDict
|
||||||
from PyQt5 import QtCore, QtGui, QtWidgets
|
from PyQt5 import QtCore, QtGui, QtWidgets
|
||||||
import h5py
|
import h5py
|
||||||
|
|
||||||
from artiq.gui.tools import LayoutWidget, log_level_to_name, get_open_file_name
|
from sipyco import pyon
|
||||||
|
|
||||||
from artiq.gui.entries import procdesc_to_entry, ScanEntry
|
from artiq.gui.entries import procdesc_to_entry, ScanEntry
|
||||||
from artiq.protocols import pyon
|
from artiq.gui.fuzzy_select import FuzzySelectWidget
|
||||||
|
from artiq.gui.tools import (LayoutWidget, WheelFilter,
|
||||||
|
log_level_to_name, get_open_file_name)
|
||||||
|
from artiq.tools import parse_devarg_override, unparse_devarg_override
|
||||||
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
@@ -21,15 +25,6 @@ logger = logging.getLogger(__name__)
 # 2. file:<class name>@<file name>


-class _WheelFilter(QtCore.QObject):
-def eventFilter(self, obj, event):
-if (event.type() == QtCore.QEvent.Wheel and
-event.modifiers() != QtCore.Qt.NoModifier):
-event.ignore()
-return True
-return False


 class _ArgumentEditor(QtWidgets.QTreeWidget):
 def __init__(self, manager, dock, expurl):
 self.manager = manager
@@ -53,7 +48,7 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
 self.setStyleSheet("QTreeWidget {background: " +
 self.palette().midlight().color().name() + " ;}")

-self.viewport().installEventFilter(_WheelFilter(self.viewport()))
+self.viewport().installEventFilter(WheelFilter(self.viewport(), True))

 self._groups = dict()
 self._arg_to_widgets = dict()
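The local `_WheelFilter` is dropped in favour of the shared `WheelFilter` from artiq.gui.tools, installed with an extra flag. The sketch below re-implements the same idea generically for illustration; the class name and the `ignore_all` flag are placeholders and do not claim to match the artiq signature:

from PyQt5 import QtCore

class IgnoreWheelFilter(QtCore.QObject):
    # Swallow wheel events so stray scrolling does not edit argument widgets.
    # ignore_all=True drops every wheel event; otherwise only modified scrolls
    # are dropped (the behaviour of the removed _WheelFilter).
    def __init__(self, parent, ignore_all=False):
        super().__init__(parent)
        self.ignore_all = ignore_all

    def eventFilter(self, obj, event):
        if event.type() != QtCore.QEvent.Wheel:
            return False
        if self.ignore_all or event.modifiers() != QtCore.Qt.NoModifier:
            event.ignore()
            return True
        return False

# usage sketch: tree.viewport().installEventFilter(IgnoreWheelFilter(tree.viewport(), True))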
@ -157,31 +152,15 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
self._groups[name] = group
|
self._groups[name] = group
|
||||||
return group
|
return group
|
||||||
|
|
||||||
def _recompute_argument_clicked(self, name):
|
def update_argument(self, name, argument):
|
||||||
asyncio.ensure_future(self._recompute_argument(name))
|
widgets = self._arg_to_widgets[name]
|
||||||
|
|
||||||
async def _recompute_argument(self, name):
|
|
||||||
try:
|
|
||||||
arginfo = await self.manager.compute_arginfo(self.expurl)
|
|
||||||
except:
|
|
||||||
logger.error("Could not recompute argument '%s' of '%s'",
|
|
||||||
name, self.expurl, exc_info=True)
|
|
||||||
return
|
|
||||||
argument = self.manager.get_submission_arguments(self.expurl)[name]
|
|
||||||
|
|
||||||
procdesc = arginfo[name][0]
|
|
||||||
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
|
||||||
argument["desc"] = procdesc
|
|
||||||
argument["state"] = state
|
|
||||||
|
|
||||||
# Qt needs a setItemWidget() to handle layout correctly,
|
# Qt needs a setItemWidget() to handle layout correctly,
|
||||||
# simply replacing the entry inside the LayoutWidget
|
# simply replacing the entry inside the LayoutWidget
|
||||||
# results in a bug.
|
# results in a bug.
|
||||||
|
|
||||||
widgets = self._arg_to_widgets[name]
|
|
||||||
|
|
||||||
widgets["entry"].deleteLater()
|
widgets["entry"].deleteLater()
|
||||||
widgets["entry"] = procdesc_to_entry(procdesc)(argument)
|
widgets["entry"] = procdesc_to_entry(argument["desc"])(argument)
|
||||||
widgets["disable_other_scans"].setVisible(
|
widgets["disable_other_scans"].setVisible(
|
||||||
isinstance(widgets["entry"], ScanEntry))
|
isinstance(widgets["entry"], ScanEntry))
|
||||||
widgets["fix_layout"].deleteLater()
|
widgets["fix_layout"].deleteLater()
|
||||||
|
@ -190,6 +169,24 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
|
self.setItemWidget(widgets["widget_item"], 1, widgets["fix_layout"])
|
||||||
self.updateGeometries()
|
self.updateGeometries()
|
||||||
|
|
||||||
|
def _recompute_argument_clicked(self, name):
|
||||||
|
asyncio.ensure_future(self._recompute_argument(name))
|
||||||
|
|
||||||
|
async def _recompute_argument(self, name):
|
||||||
|
try:
|
||||||
|
expdesc, _ = await self.manager.compute_expdesc(self.expurl)
|
||||||
|
except:
|
||||||
|
logger.error("Could not recompute argument '%s' of '%s'",
|
||||||
|
name, self.expurl, exc_info=True)
|
||||||
|
return
|
||||||
|
argument = self.manager.get_submission_arguments(self.expurl)[name]
|
||||||
|
|
||||||
|
procdesc = expdesc["arginfo"][name][0]
|
||||||
|
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
||||||
|
argument["desc"] = procdesc
|
||||||
|
argument["state"] = state
|
||||||
|
self.update_argument(name, argument)
|
||||||
|
|
||||||
def _disable_other_scans(self, current_name):
|
def _disable_other_scans(self, current_name):
|
||||||
for name, widgets in self._arg_to_widgets.items():
|
for name, widgets in self._arg_to_widgets.items():
|
||||||
if (name != current_name
|
if (name != current_name
|
||||||
|
@ -214,6 +211,15 @@ class _ArgumentEditor(QtWidgets.QTreeWidget):
|
||||||
pass
|
pass
|
||||||
self.verticalScrollBar().setValue(state["scroll"])
|
self.verticalScrollBar().setValue(state["scroll"])
|
||||||
|
|
||||||
|
# Hooks that allow user-supplied argument editors to react to imminent user
|
||||||
|
# actions. Here, we always keep the manager-stored submission arguments
|
||||||
|
# up-to-date, so no further action is required.
|
||||||
|
def about_to_submit(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def about_to_close(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
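The two no-op hooks above define the contract a user-supplied argument editor can implement: flush any pending widget state back into the manager-held submission arguments just before submission or close. A hedged sketch of such an editor, given that the built-in editor already keeps the arguments up to date (everything except the two hook names and get_submission_arguments() is assumed for illustration):

from PyQt5 import QtWidgets

class FlushOnSubmitEditor(QtWidgets.QWidget):
    # Sketch of a custom argument editor honouring the hooks above.
    def __init__(self, manager, dock, expurl):
        QtWidgets.QWidget.__init__(self)
        self.manager = manager
        self.expurl = expurl
        self._pending = {}          # argument name -> state edited in custom widgets

    def about_to_submit(self):
        # Push pending edits into the manager-stored submission arguments.
        args = self.manager.get_submission_arguments(self.expurl)
        for name, state in self._pending.items():
            args[name]["state"] = state

    def about_to_close(self):
        # Same flush on close, so the state survives reopening the dock.
        self.about_to_submit()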
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
|
log_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
|
||||||
|
|
||||||
|
@ -239,7 +245,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
self.manager = manager
|
self.manager = manager
|
||||||
self.expurl = expurl
|
self.expurl = expurl
|
||||||
|
|
||||||
self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
|
editor_class = self.manager.get_argument_editor_class(expurl)
|
||||||
|
self.argeditor = editor_class(self.manager, self, self.expurl)
|
||||||
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
||||||
self.layout.setRowStretch(0, 1)
|
self.layout.setRowStretch(0, 1)
|
||||||
|
|
||||||
|
@ -256,7 +263,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
datetime.setDate(QtCore.QDate.currentDate())
|
datetime.setDate(QtCore.QDate.currentDate())
|
||||||
else:
|
else:
|
||||||
datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch(
|
datetime.setDateTime(QtCore.QDateTime.fromMSecsSinceEpoch(
|
||||||
scheduling["due_date"]*1000))
|
int(scheduling["due_date"]*1000)))
|
||||||
datetime_en.setChecked(scheduling["due_date"] is not None)
|
datetime_en.setChecked(scheduling["due_date"] is not None)
|
||||||
|
|
||||||
def update_datetime(dt):
|
def update_datetime(dt):
|
||||||
|
@ -272,7 +279,8 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
scheduling["due_date"] = due_date
|
scheduling["due_date"] = due_date
|
||||||
datetime_en.stateChanged.connect(update_datetime_en)
|
datetime_en.stateChanged.connect(update_datetime_en)
|
||||||
|
|
||||||
pipeline_name = QtWidgets.QLineEdit()
|
self.pipeline_name = QtWidgets.QLineEdit()
|
||||||
|
pipeline_name = self.pipeline_name
|
||||||
self.layout.addWidget(QtWidgets.QLabel("Pipeline:"), 1, 2)
|
self.layout.addWidget(QtWidgets.QLabel("Pipeline:"), 1, 2)
|
||||||
self.layout.addWidget(pipeline_name, 1, 3)
|
self.layout.addWidget(pipeline_name, 1, 3)
|
||||||
|
|
||||||
|
@ -280,9 +288,10 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
|
|
||||||
def update_pipeline_name(text):
|
def update_pipeline_name(text):
|
||||||
scheduling["pipeline_name"] = text
|
scheduling["pipeline_name"] = text
|
||||||
pipeline_name.textEdited.connect(update_pipeline_name)
|
pipeline_name.textChanged.connect(update_pipeline_name)
|
||||||
|
|
||||||
priority = QtWidgets.QSpinBox()
|
self.priority = QtWidgets.QSpinBox()
|
||||||
|
priority = self.priority
|
||||||
priority.setRange(-99, 99)
|
priority.setRange(-99, 99)
|
||||||
self.layout.addWidget(QtWidgets.QLabel("Priority:"), 2, 0)
|
self.layout.addWidget(QtWidgets.QLabel("Priority:"), 2, 0)
|
||||||
self.layout.addWidget(priority, 2, 1)
|
self.layout.addWidget(priority, 2, 1)
|
||||||
|
@ -293,10 +302,11 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
scheduling["priority"] = value
|
scheduling["priority"] = value
|
||||||
priority.valueChanged.connect(update_priority)
|
priority.valueChanged.connect(update_priority)
|
||||||
|
|
||||||
flush = QtWidgets.QCheckBox("Flush")
|
self.flush = QtWidgets.QCheckBox("Flush")
|
||||||
|
flush = self.flush
|
||||||
flush.setToolTip("Flush the pipeline (of current- and higher-priority "
|
flush.setToolTip("Flush the pipeline (of current- and higher-priority "
|
||||||
"experiments) before starting the experiment")
|
"experiments) before starting the experiment")
|
||||||
self.layout.addWidget(flush, 2, 2, 1, 2)
|
self.layout.addWidget(flush, 2, 2)
|
||||||
|
|
||||||
flush.setChecked(scheduling["flush"])
|
flush.setChecked(scheduling["flush"])
|
||||||
|
|
||||||
|
@ -304,6 +314,20 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
scheduling["flush"] = bool(checked)
|
scheduling["flush"] = bool(checked)
|
||||||
flush.stateChanged.connect(update_flush)
|
flush.stateChanged.connect(update_flush)
|
||||||
|
|
||||||
|
devarg_override = QtWidgets.QComboBox()
|
||||||
|
devarg_override.setEditable(True)
|
||||||
|
devarg_override.lineEdit().setPlaceholderText("Override device arguments")
|
||||||
|
devarg_override.lineEdit().setClearButtonEnabled(True)
|
||||||
|
devarg_override.insertItem(0, "core:analyze_at_run_end=True")
|
||||||
|
self.layout.addWidget(devarg_override, 2, 3)
|
||||||
|
|
||||||
|
devarg_override.setCurrentText(options["devarg_override"])
|
||||||
|
|
||||||
|
def update_devarg_override(text):
|
||||||
|
options["devarg_override"] = text
|
||||||
|
devarg_override.editTextChanged.connect(update_devarg_override)
|
||||||
|
self.devarg_override = devarg_override
|
||||||
|
|
||||||
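The combo box stores a device-argument override string such as core:analyze_at_run_end=True; at submission time it is run through parse_devarg_override from artiq.tools. The stand-in parser below only illustrates the kind of mapping such a string is expected to produce - it is not the artiq implementation, and the space-separated multi-override syntax is an assumption:

import ast

def parse_devarg_override_sketch(text):
    # Hypothetical stand-in for artiq.tools.parse_devarg_override, shown only to
    # illustrate the override syntax used by the combo box above.
    # "core:analyze_at_run_end=True" -> {"core": {"analyze_at_run_end": True}}
    overrides = {}
    for token in text.split():
        device, assignment = token.split(":", 1)
        key, value = assignment.split("=", 1)
        overrides.setdefault(device, {})[key] = ast.literal_eval(value)
    return overrides

print(parse_devarg_override_sketch("core:analyze_at_run_end=True"))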
log_level = QtWidgets.QComboBox()
|
log_level = QtWidgets.QComboBox()
|
||||||
log_level.addItems(log_levels)
|
log_level.addItems(log_levels)
|
||||||
log_level.setCurrentIndex(1)
|
log_level.setCurrentIndex(1)
|
||||||
|
@ -324,6 +348,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
if "repo_rev" in options:
|
if "repo_rev" in options:
|
||||||
repo_rev = QtWidgets.QLineEdit()
|
repo_rev = QtWidgets.QLineEdit()
|
||||||
repo_rev.setPlaceholderText("current")
|
repo_rev.setPlaceholderText("current")
|
||||||
|
repo_rev.setClearButtonEnabled(True)
|
||||||
repo_rev_label = QtWidgets.QLabel("Revision:")
|
repo_rev_label = QtWidgets.QLabel("Revision:")
|
||||||
repo_rev_label.setToolTip("Experiment repository revision "
|
repo_rev_label.setToolTip("Experiment repository revision "
|
||||||
"(commit ID) to use")
|
"(commit ID) to use")
|
||||||
|
@ -364,6 +389,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
self.hdf5_load_directory = os.path.expanduser("~")
|
self.hdf5_load_directory = os.path.expanduser("~")
|
||||||
|
|
||||||
def submit_clicked(self):
|
def submit_clicked(self):
|
||||||
|
self.argeditor.about_to_submit()
|
||||||
try:
|
try:
|
||||||
self.manager.submit(self.expurl)
|
self.manager.submit(self.expurl)
|
||||||
except:
|
except:
|
||||||
|
@ -386,11 +412,12 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
|
|
||||||
async def _recompute_arguments_task(self, overrides=dict()):
|
async def _recompute_arguments_task(self, overrides=dict()):
|
||||||
try:
|
try:
|
||||||
arginfo = await self.manager.compute_arginfo(self.expurl)
|
expdesc, ui_name = await self.manager.compute_expdesc(self.expurl)
|
||||||
except:
|
except:
|
||||||
logger.error("Could not recompute arguments of '%s'",
|
logger.error("Could not recompute experiment description of '%s'",
|
||||||
self.expurl, exc_info=True)
|
self.expurl, exc_info=True)
|
||||||
return
|
return
|
||||||
|
arginfo = expdesc["arginfo"]
|
||||||
for k, v in overrides.items():
|
for k, v in overrides.items():
|
||||||
# Some values (e.g. scans) may have multiple defaults in a list
|
# Some values (e.g. scans) may have multiple defaults in a list
|
||||||
if ("default" in arginfo[k][0]
|
if ("default" in arginfo[k][0]
|
||||||
|
@ -398,15 +425,38 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
arginfo[k][0]["default"].insert(0, v)
|
arginfo[k][0]["default"].insert(0, v)
|
||||||
else:
|
else:
|
||||||
arginfo[k][0]["default"] = v
|
arginfo[k][0]["default"] = v
|
||||||
self.manager.initialize_submission_arguments(self.expurl, arginfo)
|
self.manager.initialize_submission_arguments(self.expurl, arginfo, ui_name)
|
||||||
|
|
||||||
argeditor_state = self.argeditor.save_state()
|
argeditor_state = self.argeditor.save_state()
|
||||||
self.argeditor.deleteLater()
|
self.argeditor.deleteLater()
|
||||||
|
|
||||||
self.argeditor = _ArgumentEditor(self.manager, self, self.expurl)
|
editor_class = self.manager.get_argument_editor_class(self.expurl)
|
||||||
|
self.argeditor = editor_class(self.manager, self, self.expurl)
|
||||||
self.argeditor.restore_state(argeditor_state)
|
self.argeditor.restore_state(argeditor_state)
|
||||||
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
self.layout.addWidget(self.argeditor, 0, 0, 1, 5)
|
||||||
|
|
||||||
|
def contextMenuEvent(self, event):
|
||||||
|
menu = QtWidgets.QMenu(self)
|
||||||
|
reset_sched = menu.addAction("Reset scheduler settings")
|
||||||
|
action = menu.exec_(self.mapToGlobal(event.pos()))
|
||||||
|
if action == reset_sched:
|
||||||
|
asyncio.ensure_future(self._recompute_sched_options_task())
|
||||||
|
|
||||||
|
async def _recompute_sched_options_task(self):
|
||||||
|
try:
|
||||||
|
expdesc, _ = await self.manager.compute_expdesc(self.expurl)
|
||||||
|
except:
|
||||||
|
logger.error("Could not recompute experiment description of '%s'",
|
||||||
|
self.expurl, exc_info=True)
|
||||||
|
return
|
||||||
|
sched_defaults = expdesc["scheduler_defaults"]
|
||||||
|
|
||||||
|
scheduling = self.manager.get_submission_scheduling(self.expurl)
|
||||||
|
scheduling.update(sched_defaults)
|
||||||
|
self.priority.setValue(scheduling["priority"])
|
||||||
|
self.pipeline_name.setText(scheduling["pipeline_name"])
|
||||||
|
self.flush.setChecked(scheduling["flush"])
|
||||||
|
|
||||||
def _load_hdf5_clicked(self):
|
def _load_hdf5_clicked(self):
|
||||||
asyncio.ensure_future(self._load_hdf5_task())
|
asyncio.ensure_future(self._load_hdf5_task())
|
||||||
|
|
||||||
|
@ -431,6 +481,9 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
return
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
if "devarg_override" in expid:
|
||||||
|
self.devarg_override.setCurrentText(
|
||||||
|
unparse_devarg_override(expid["devarg_override"]))
|
||||||
self.log_level.setCurrentIndex(log_levels.index(
|
self.log_level.setCurrentIndex(log_levels.index(
|
||||||
log_level_to_name(expid["log_level"])))
|
log_level_to_name(expid["log_level"])))
|
||||||
if ("repo_rev" in expid and
|
if ("repo_rev" in expid and
|
||||||
|
@ -445,6 +498,7 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
await self._recompute_arguments_task(arguments)
|
await self._recompute_arguments_task(arguments)
|
||||||
|
|
||||||
def closeEvent(self, event):
|
def closeEvent(self, event):
|
||||||
|
self.argeditor.about_to_close()
|
||||||
self.sigClosed.emit()
|
self.sigClosed.emit()
|
||||||
QtWidgets.QMdiSubWindow.closeEvent(self, event)
|
QtWidgets.QMdiSubWindow.closeEvent(self, event)
|
||||||
|
|
||||||
|
@ -461,8 +515,68 @@ class _ExperimentDock(QtWidgets.QMdiSubWindow):
|
||||||
self.hdf5_load_directory = state["hdf5_load_directory"]
|
self.hdf5_load_directory = state["hdf5_load_directory"]
|
||||||
|
|
||||||
|
|
||||||
|
class _QuickOpenDialog(QtWidgets.QDialog):
|
||||||
|
"""Modal dialog for opening/submitting experiments from a
|
||||||
|
FuzzySelectWidget."""
|
||||||
|
closed = QtCore.pyqtSignal()
|
||||||
|
|
||||||
|
def __init__(self, manager):
|
||||||
|
super().__init__(manager.main_window)
|
||||||
|
self.setModal(True)
|
||||||
|
|
||||||
|
self.manager = manager
|
||||||
|
|
||||||
|
self.setWindowTitle("Quick open...")
|
||||||
|
|
||||||
|
layout = QtWidgets.QGridLayout(self)
|
||||||
|
layout.setSpacing(0)
|
||||||
|
layout.setContentsMargins(0, 0, 0, 0)
|
||||||
|
self.setLayout(layout)
|
||||||
|
|
||||||
|
# Find matching experiment names. Open experiments are preferred to
|
||||||
|
# matches from the repository to ease quick window switching.
|
||||||
|
open_exps = list(self.manager.open_experiments.keys())
|
||||||
|
repo_exps = set("repo:" + k
|
||||||
|
for k in self.manager.explist.keys()) - set(open_exps)
|
||||||
|
choices = [(o, 100) for o in open_exps] + [(r, 0) for r in repo_exps]
|
||||||
|
|
||||||
|
self.select_widget = FuzzySelectWidget(choices)
|
||||||
|
layout.addWidget(self.select_widget)
|
||||||
|
self.select_widget.aborted.connect(self.close)
|
||||||
|
self.select_widget.finished.connect(self._open_experiment)
|
||||||
|
|
||||||
|
font_metrics = QtGui.QFontMetrics(self.select_widget.line_edit.font())
|
||||||
|
self.select_widget.setMinimumWidth(font_metrics.averageCharWidth() * 70)
|
||||||
|
|
||||||
|
def done(self, r):
|
||||||
|
if self.select_widget:
|
||||||
|
self.select_widget.abort()
|
||||||
|
self.closed.emit()
|
||||||
|
QtWidgets.QDialog.done(self, r)
|
||||||
|
|
||||||
|
def _open_experiment(self, exp_name, modifiers):
|
||||||
|
if modifiers & QtCore.Qt.ControlModifier:
|
||||||
|
try:
|
||||||
|
self.manager.submit(exp_name)
|
||||||
|
except:
|
||||||
|
# Not all open_experiments necessarily still exist in the explist
|
||||||
|
# (e.g. if the repository has been re-scanned since).
|
||||||
|
logger.warning("failed to submit experiment '%s'",
|
||||||
|
exp_name,
|
||||||
|
exc_info=True)
|
||||||
|
else:
|
||||||
|
self.manager.open_experiment(exp_name)
|
||||||
|
self.close()
|
||||||
|
|
||||||
|
|
||||||
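The quick-open dialog ranks candidates by feeding FuzzySelectWidget (label, weight) pairs, with open experiments biased far above repository matches. A small sketch of that choice list (the experiment names are invented; only the tuple shape and the bias come from the diff):

# Open docks rank ahead of repository matches of equal textual match quality.
open_exps = ["repo:RamseyScan"]
repo_exps = set("repo:" + k for k in ["RamseyScan", "PhotonCounting"]) - set(open_exps)
choices = [(o, 100) for o in open_exps] + [(r, 0) for r in sorted(repo_exps)]
print(choices)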
class ExperimentManager:
|
class ExperimentManager:
|
||||||
def __init__(self, main_window,
|
#: Global registry for custom argument editor classes, indexed by the experiment
|
||||||
|
#: `argument_ui` string; can be populated by dashboard plugins such as ndscan.
|
||||||
|
#: If no handler for a requested UI name is found, the default built-in argument
|
||||||
|
#: editor will be used.
|
||||||
|
argument_ui_classes = dict()
|
||||||
|
|
||||||
|
def __init__(self, main_window, dataset_sub,
|
||||||
explist_sub, schedule_sub,
|
explist_sub, schedule_sub,
|
||||||
schedule_ctl, experiment_db_ctl):
|
schedule_ctl, experiment_db_ctl):
|
||||||
self.main_window = main_window
|
self.main_window = main_window
|
||||||
|
@ -473,7 +587,10 @@ class ExperimentManager:
|
||||||
self.submission_scheduling = dict()
|
self.submission_scheduling = dict()
|
||||||
self.submission_options = dict()
|
self.submission_options = dict()
|
||||||
self.submission_arguments = dict()
|
self.submission_arguments = dict()
|
||||||
|
self.argument_ui_names = dict()
|
||||||
|
|
||||||
|
self.datasets = dict()
|
||||||
|
dataset_sub.add_setmodel_callback(self.set_dataset_model)
|
||||||
self.explist = dict()
|
self.explist = dict()
|
||||||
explist_sub.add_setmodel_callback(self.set_explist_model)
|
explist_sub.add_setmodel_callback(self.set_explist_model)
|
||||||
self.schedule = dict()
|
self.schedule = dict()
|
||||||
|
@ -481,6 +598,16 @@ class ExperimentManager:
|
||||||
|
|
||||||
self.open_experiments = dict()
|
self.open_experiments = dict()
|
||||||
|
|
||||||
|
self.is_quick_open_shown = False
|
||||||
|
quick_open_shortcut = QtWidgets.QShortcut(
|
||||||
|
QtCore.Qt.CTRL + QtCore.Qt.Key_P,
|
||||||
|
main_window)
|
||||||
|
quick_open_shortcut.setContext(QtCore.Qt.ApplicationShortcut)
|
||||||
|
quick_open_shortcut.activated.connect(self.show_quick_open)
|
||||||
|
|
||||||
|
def set_dataset_model(self, model):
|
||||||
|
self.datasets = model
|
||||||
|
|
||||||
def set_explist_model(self, model):
|
def set_explist_model(self, model):
|
||||||
self.explist = model.backing_store
|
self.explist = model.backing_store
|
||||||
|
|
||||||
|
@ -497,6 +624,17 @@ class ExperimentManager:
|
||||||
else:
|
else:
|
||||||
raise ValueError("Malformed experiment URL")
|
raise ValueError("Malformed experiment URL")
|
||||||
|
|
||||||
|
def get_argument_editor_class(self, expurl):
|
||||||
|
ui_name = self.argument_ui_names.get(expurl, None)
|
||||||
|
if not ui_name and expurl[:5] == "repo:":
|
||||||
|
ui_name = self.explist.get(expurl[5:], {}).get("argument_ui", None)
|
||||||
|
if ui_name:
|
||||||
|
result = self.argument_ui_classes.get(ui_name, None)
|
||||||
|
if result:
|
||||||
|
return result
|
||||||
|
logger.warning("Ignoring unknown argument UI '%s'", ui_name)
|
||||||
|
return _ArgumentEditor
|
||||||
|
|
||||||
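get_argument_editor_class falls back to the built-in editor unless a plugin has registered a class for the experiment's argument_ui name. A hedged sketch of how a dashboard plugin might register one - the "ndscan" name and the subclass are placeholders, and the module path assumes this file is artiq/dashboard/experiments.py as in upstream ARTIQ:

from artiq.dashboard.experiments import ExperimentManager, _ArgumentEditor

class PluginArgumentEditor(_ArgumentEditor):
    pass    # a real plugin would build its own widget tree here

# Experiments whose examine() description carries argument_ui == "ndscan"
# now get PluginArgumentEditor; unknown names fall back to _ArgumentEditor.
ExperimentManager.argument_ui_classes["ndscan"] = PluginArgumentEditor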
def get_submission_scheduling(self, expurl):
|
def get_submission_scheduling(self, expurl):
|
||||||
if expurl in self.submission_scheduling:
|
if expurl in self.submission_scheduling:
|
||||||
return self.submission_scheduling[expurl]
|
return self.submission_scheduling[expurl]
|
||||||
|
@ -508,6 +646,8 @@ class ExperimentManager:
|
||||||
"due_date": None,
|
"due_date": None,
|
||||||
"flush": False
|
"flush": False
|
||||||
}
|
}
|
||||||
|
if expurl[:5] == "repo:":
|
||||||
|
scheduling.update(self.explist[expurl[5:]]["scheduler_defaults"])
|
||||||
self.submission_scheduling[expurl] = scheduling
|
self.submission_scheduling[expurl] = scheduling
|
||||||
return scheduling
|
return scheduling
|
||||||
|
|
||||||
|
@ -517,14 +657,15 @@ class ExperimentManager:
|
||||||
else:
|
else:
|
||||||
# mutated by _ExperimentDock
|
# mutated by _ExperimentDock
|
||||||
options = {
|
options = {
|
||||||
"log_level": logging.WARNING
|
"log_level": logging.WARNING,
|
||||||
|
"devarg_override": ""
|
||||||
}
|
}
|
||||||
if expurl[:5] == "repo:":
|
if expurl[:5] == "repo:":
|
||||||
options["repo_rev"] = None
|
options["repo_rev"] = None
|
||||||
self.submission_options[expurl] = options
|
self.submission_options[expurl] = options
|
||||||
return options
|
return options
|
||||||
|
|
||||||
def initialize_submission_arguments(self, expurl, arginfo):
|
def initialize_submission_arguments(self, expurl, arginfo, ui_name):
|
||||||
arguments = OrderedDict()
|
arguments = OrderedDict()
|
||||||
for name, (procdesc, group, tooltip) in arginfo.items():
|
for name, (procdesc, group, tooltip) in arginfo.items():
|
||||||
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
state = procdesc_to_entry(procdesc).default_state(procdesc)
|
||||||
|
@ -535,7 +676,22 @@ class ExperimentManager:
|
||||||
"state": state, # mutated by entries
|
"state": state, # mutated by entries
|
||||||
}
|
}
|
||||||
self.submission_arguments[expurl] = arguments
|
self.submission_arguments[expurl] = arguments
|
||||||
|
self.argument_ui_names[expurl] = ui_name
|
||||||
return arguments
|
return arguments
|
||||||
|
|
||||||
|
def set_argument_value(self, expurl, name, value):
|
||||||
|
try:
|
||||||
|
argument = self.submission_arguments[expurl][name]
|
||||||
|
if argument["desc"]["ty"] == "Scannable":
|
||||||
|
ty = value["ty"]
|
||||||
|
argument["state"]["selected"] = ty
|
||||||
|
argument["state"][ty] = value
|
||||||
|
else:
|
||||||
|
argument["state"] = value
|
||||||
|
if expurl in self.open_experiments.keys():
|
||||||
|
self.open_experiments[expurl].argeditor.update_argument(name, argument)
|
||||||
|
except:
|
||||||
|
logger.warn("Failed to set value for argument \"{}\" in experiment: {}.".format(name, expurl), exc_info=1)
|
||||||
|
|
||||||
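set_argument_value overwrites the stored state of one argument and, if the experiment dock is open, pushes the new state into its editor; Scannable values are expected to carry their own "ty" tag. A usage sketch, given an ExperimentManager instance named manager (the expurl, argument names and scan state fields are illustrative, not authoritative):

# Usage sketch only; names and field layout are invented examples.
manager.set_argument_value("repo:RamseyScan", "detuning", 1250.0)
manager.set_argument_value("repo:RamseyScan", "delay_scan", {
    "ty": "RangeScan",            # Scannable values carry their own type tag
    "start": 0.0, "stop": 1e-3, "npoints": 64,
})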
def get_submission_arguments(self, expurl):
|
def get_submission_arguments(self, expurl):
|
||||||
if expurl in self.submission_arguments:
|
if expurl in self.submission_arguments:
|
||||||
|
@ -544,9 +700,9 @@ class ExperimentManager:
|
||||||
if expurl[:5] != "repo:":
|
if expurl[:5] != "repo:":
|
||||||
raise ValueError("Submission arguments must be preinitialized "
|
raise ValueError("Submission arguments must be preinitialized "
|
||||||
"when not using repository")
|
"when not using repository")
|
||||||
arginfo = self.explist[expurl[5:]]["arginfo"]
|
class_desc = self.explist[expurl[5:]]
|
||||||
arguments = self.initialize_submission_arguments(expurl, arginfo)
|
return self.initialize_submission_arguments(expurl,
|
||||||
return arguments
|
class_desc["arginfo"], class_desc.get("argument_ui", None))
|
||||||
|
|
||||||
def open_experiment(self, expurl):
|
def open_experiment(self, expurl):
|
||||||
if expurl in self.open_experiments:
|
if expurl in self.open_experiments:
|
||||||
|
@ -597,7 +753,14 @@ class ExperimentManager:
|
||||||
entry_cls = procdesc_to_entry(argument["desc"])
|
entry_cls = procdesc_to_entry(argument["desc"])
|
||||||
argument_values[name] = entry_cls.state_to_value(argument["state"])
|
argument_values[name] = entry_cls.state_to_value(argument["state"])
|
||||||
|
|
||||||
|
try:
|
||||||
|
devarg_override = parse_devarg_override(options["devarg_override"])
|
||||||
|
except:
|
||||||
|
logger.error("Failed to parse device argument overrides for %s", expurl)
|
||||||
|
return
|
||||||
|
|
||||||
expid = {
|
expid = {
|
||||||
|
"devarg_override": devarg_override,
|
||||||
"log_level": options["log_level"],
|
"log_level": options["log_level"],
|
||||||
"file": file,
|
"file": file,
|
||||||
"class_name": class_name,
|
"class_name": class_name,
|
||||||
|
@ -635,12 +798,12 @@ class ExperimentManager:
|
||||||
else:
|
else:
|
||||||
repo_match = "repo_rev" not in expid
|
repo_match = "repo_rev" not in expid
|
||||||
if (repo_match and
|
if (repo_match and
|
||||||
expid["file"] == file and
|
("file" in expid and expid["file"] == file) and
|
||||||
expid["class_name"] == class_name):
|
expid["class_name"] == class_name):
|
||||||
rids.append(rid)
|
rids.append(rid)
|
||||||
asyncio.ensure_future(self._request_term_multiple(rids))
|
asyncio.ensure_future(self._request_term_multiple(rids))
|
||||||
|
|
||||||
async def compute_arginfo(self, expurl):
|
async def compute_expdesc(self, expurl):
|
||||||
file, class_name, use_repository = self.resolve_expurl(expurl)
|
file, class_name, use_repository = self.resolve_expurl(expurl)
|
||||||
if use_repository:
|
if use_repository:
|
||||||
revision = self.get_submission_options(expurl)["repo_rev"]
|
revision = self.get_submission_options(expurl)["repo_rev"]
|
||||||
|
@ -648,13 +811,15 @@ class ExperimentManager:
|
||||||
revision = None
|
revision = None
|
||||||
description = await self.experiment_db_ctl.examine(
|
description = await self.experiment_db_ctl.examine(
|
||||||
file, use_repository, revision)
|
file, use_repository, revision)
|
||||||
return description[class_name]["arginfo"]
|
class_desc = description[class_name]
|
||||||
|
return class_desc, class_desc.get("argument_ui", None)
|
||||||
|
|
||||||
async def open_file(self, file):
|
async def open_file(self, file):
|
||||||
description = await self.experiment_db_ctl.examine(file, False)
|
description = await self.experiment_db_ctl.examine(file, False)
|
||||||
for class_name, class_desc in description.items():
|
for class_name, class_desc in description.items():
|
||||||
expurl = "file:{}@{}".format(class_name, file)
|
expurl = "file:{}@{}".format(class_name, file)
|
||||||
self.initialize_submission_arguments(expurl, class_desc["arginfo"])
|
self.initialize_submission_arguments(expurl, class_desc["arginfo"],
|
||||||
|
class_desc.get("argument_ui", None))
|
||||||
if expurl in self.open_experiments:
|
if expurl in self.open_experiments:
|
||||||
self.open_experiments[expurl].close()
|
self.open_experiments[expurl].close()
|
||||||
self.open_experiment(expurl)
|
self.open_experiment(expurl)
|
||||||
|
@ -667,6 +832,7 @@ class ExperimentManager:
|
||||||
"options": self.submission_options,
|
"options": self.submission_options,
|
||||||
"arguments": self.submission_arguments,
|
"arguments": self.submission_arguments,
|
||||||
"docks": self.dock_states,
|
"docks": self.dock_states,
|
||||||
|
"argument_uis": self.argument_ui_names,
|
||||||
"open_docks": set(self.open_experiments.keys())
|
"open_docks": set(self.open_experiments.keys())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -677,5 +843,17 @@ class ExperimentManager:
|
||||||
self.submission_scheduling = state["scheduling"]
|
self.submission_scheduling = state["scheduling"]
|
||||||
self.submission_options = state["options"]
|
self.submission_options = state["options"]
|
||||||
self.submission_arguments = state["arguments"]
|
self.submission_arguments = state["arguments"]
|
||||||
|
self.argument_ui_names = state.get("argument_uis", {})
|
||||||
for expurl in state["open_docks"]:
|
for expurl in state["open_docks"]:
|
||||||
self.open_experiment(expurl)
|
self.open_experiment(expurl)
|
||||||
|
|
||||||
|
def show_quick_open(self):
|
||||||
|
if self.is_quick_open_shown:
|
||||||
|
return
|
||||||
|
|
||||||
|
self.is_quick_open_shown = True
|
||||||
|
dialog = _QuickOpenDialog(self)
|
||||||
|
def closed():
|
||||||
|
self.is_quick_open_shown = False
|
||||||
|
dialog.closed.connect(closed)
|
||||||
|
dialog.show()
|
||||||
|
|
|
@@ -159,7 +159,7 @@ class WaitingPanel(LayoutWidget):
 class ExplorerDock(QtWidgets.QDockWidget):
 def __init__(self, exp_manager, d_shortcuts,
 explist_sub, explist_status_sub,
-schedule_ctl, experiment_db_ctl):
+schedule_ctl, experiment_db_ctl, device_db_ctl):
 QtWidgets.QDockWidget.__init__(self, "Explorer")
 self.setObjectName("Explorer")
 self.setFeatures(QtWidgets.QDockWidget.DockWidgetMovable |
@@ -251,6 +251,12 @@ class ExplorerDock(QtWidgets.QDockWidget):
 scan_repository_action.triggered.connect(scan_repository)
 self.el.addAction(scan_repository_action)

+scan_ddb_action = QtWidgets.QAction("Scan device database", self.el)
+def scan_ddb():
+asyncio.ensure_future(device_db_ctl.scan())
+scan_ddb_action.triggered.connect(scan_ddb)
+self.el.addAction(scan_ddb_action)

 self.current_directory = ""
 open_file_action = QtWidgets.QAction("Open file outside repository",
 self.el)
@@ -1,17 +1,34 @@
 import asyncio
 import logging
+import textwrap
 from collections import namedtuple

 from PyQt5 import QtCore, QtWidgets, QtGui

-from artiq.protocols.sync_struct import Subscriber
+from sipyco.sync_struct import Subscriber

 from artiq.coredevice.comm_moninj import *
+from artiq.coredevice.ad9910 import (
+_AD9910_REG_PROFILE0, _AD9910_REG_PROFILE7,
+_AD9910_REG_FTW, _AD9910_REG_CFR1
+)
+from artiq.coredevice.ad9912_reg import AD9912_POW1, AD9912_SER_CONF
 from artiq.gui.tools import LayoutWidget
 from artiq.gui.flowlayout import FlowLayout


 logger = logging.getLogger(__name__)

+class _CancellableLineEdit(QtWidgets.QLineEdit):
+def escapePressedConnect(self, cb):
+self.esc_cb = cb
+
+def keyPressEvent(self, event):
+key = event.key()
+if key == QtCore.Qt.Key_Escape:
+self.esc_cb(event)
+QtWidgets.QLineEdit.keyPressEvent(self, event)


 class _TTLWidget(QtWidgets.QFrame):
 def __init__(self, dm, channel, force_out, title):
@@ -167,15 +184,172 @@ class _SimpleDisplayWidget(QtWidgets.QFrame):
 raise NotImplementedError


-class _DDSWidget(_SimpleDisplayWidget):
+class _DDSModel:
-def __init__(self, dm, bus_channel, channel, title):
+def __init__(self, dds_type, ref_clk, cpld=None, pll=1, clk_div=0):
+self.cpld = cpld
+self.cur_frequency = 0
+self.cur_reg = 0
+self.dds_type = dds_type
+self.is_urukul = dds_type in ["AD9910", "AD9912"]
+
+if dds_type == "AD9914":
+self.ftw_per_hz = 2**32 / ref_clk
+else:
+if dds_type == "AD9910":
+max_freq = 1 << 32
+clk_mult = [4, 1, 2, 4]
+elif dds_type == "AD9912": # AD9912
+max_freq = 1 << 48
+clk_mult = [1, 1, 2, 4]
+else:
+raise NotImplementedError
+sysclk = ref_clk / clk_mult[clk_div] * pll
+self.ftw_per_hz = 1 / sysclk * max_freq
+
+def monitor_update(self, probe, value):
+if self.dds_type == "AD9912":
+value = value << 16
+self.cur_frequency = self._ftw_to_freq(value)
+
+def _ftw_to_freq(self, ftw):
+return ftw / self.ftw_per_hz

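_DDSModel converts a monitored frequency tuning word to Hz via ftw_per_hz, derived from the reference clock, the clock-divider table and the PLL multiplier; for the AD9912 only the upper 32 of the 48 FTW bits are monitored, hence the << 16 shift. A worked example with assumed parameters:

# Worked example with assumed parameters: an AD9910 behind an Urukul with a
# 125 MHz reference clock, pll_n = 32 and clk_div = 0 (divider table [4, 1, 2, 4]).
ref_clk = 125e6
pll = 32
clk_mult = [4, 1, 2, 4]
clk_div = 0
sysclk = ref_clk / clk_mult[clk_div] * pll      # 1 GHz DDS system clock
ftw_per_hz = (1 << 32) / sysclk                 # ~4.295 FTW counts per Hz

ftw = 0x19999999                                # monitored tuning word (example)
print(ftw / ftw_per_hz / 1e6, "MHz")            # ~100 MHz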
|
class _DDSWidget(QtWidgets.QFrame):
|
||||||
|
def __init__(self, dm, title, bus_channel=0, channel=0, dds_model=None):
|
||||||
|
self.dm = dm
|
||||||
self.bus_channel = bus_channel
|
self.bus_channel = bus_channel
|
||||||
self.channel = channel
|
self.channel = channel
|
||||||
|
self.dds_name = title
|
||||||
self.cur_frequency = 0
|
self.cur_frequency = 0
|
||||||
_SimpleDisplayWidget.__init__(self, title)
|
self.dds_model = dds_model
|
||||||
|
|
||||||
|
QtWidgets.QFrame.__init__(self)
|
||||||
|
|
||||||
|
self.setFrameShape(QtWidgets.QFrame.Box)
|
||||||
|
self.setFrameShadow(QtWidgets.QFrame.Raised)
|
||||||
|
|
||||||
|
grid = QtWidgets.QGridLayout()
|
||||||
|
grid.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid.setHorizontalSpacing(0)
|
||||||
|
grid.setVerticalSpacing(0)
|
||||||
|
self.setLayout(grid)
|
||||||
|
label = QtWidgets.QLabel(title)
|
||||||
|
label.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid.addWidget(label, 1, 1)
|
||||||
|
|
||||||
|
# FREQ DATA/EDIT FIELD
|
||||||
|
self.data_stack = QtWidgets.QStackedWidget()
|
||||||
|
|
||||||
|
# page 1: display data
|
||||||
|
grid_disp = LayoutWidget()
|
||||||
|
grid_disp.layout.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid_disp.layout.setHorizontalSpacing(0)
|
||||||
|
grid_disp.layout.setVerticalSpacing(0)
|
||||||
|
|
||||||
|
self.value_label = QtWidgets.QLabel()
|
||||||
|
self.value_label.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_disp.addWidget(self.value_label, 0, 1, 1, 2)
|
||||||
|
|
||||||
|
unit = QtWidgets.QLabel("MHz")
|
||||||
|
unit.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_disp.addWidget(unit, 0, 3, 1, 1)
|
||||||
|
|
||||||
|
self.data_stack.addWidget(grid_disp)
|
||||||
|
|
||||||
|
# page 2: edit data
|
||||||
|
grid_edit = LayoutWidget()
|
||||||
|
grid_edit.layout.setContentsMargins(0, 0, 0, 0)
|
||||||
|
grid_edit.layout.setHorizontalSpacing(0)
|
||||||
|
grid_edit.layout.setVerticalSpacing(0)
|
||||||
|
|
||||||
|
self.value_edit = _CancellableLineEdit(self)
|
||||||
|
self.value_edit.setAlignment(QtCore.Qt.AlignRight)
|
||||||
|
grid_edit.addWidget(self.value_edit, 0, 1, 1, 2)
|
||||||
|
unit = QtWidgets.QLabel("MHz")
|
||||||
|
unit.setAlignment(QtCore.Qt.AlignCenter)
|
||||||
|
grid_edit.addWidget(unit, 0, 3, 1, 1)
|
||||||
|
self.data_stack.addWidget(grid_edit)
|
||||||
|
|
||||||
|
grid.addWidget(self.data_stack, 2, 1)
|
||||||
|
|
||||||
|
# BUTTONS
|
||||||
|
self.button_stack = QtWidgets.QStackedWidget()
|
||||||
|
|
||||||
|
# page 1: SET button
|
||||||
|
set_grid = LayoutWidget()
|
||||||
|
|
||||||
|
set_btn = QtWidgets.QToolButton()
|
||||||
|
set_btn.setText("Set")
|
||||||
|
set_btn.setToolTip("Set frequency")
|
||||||
|
set_grid.addWidget(set_btn, 0, 1, 1, 1)
|
||||||
|
|
||||||
|
# for urukuls also allow switching off RF
|
||||||
|
if self.dds_model.is_urukul:
|
||||||
|
off_btn = QtWidgets.QToolButton()
|
||||||
|
off_btn.setText("Off")
|
||||||
|
off_btn.setToolTip("Switch off the output")
|
||||||
|
set_grid.addWidget(off_btn, 0, 2, 1, 1)
|
||||||
|
|
||||||
|
self.button_stack.addWidget(set_grid)
|
||||||
|
|
||||||
|
# page 2: apply/cancel buttons
|
||||||
|
apply_grid = LayoutWidget()
|
||||||
|
apply = QtWidgets.QToolButton()
|
||||||
|
apply.setText("Apply")
|
||||||
|
apply.setToolTip("Apply changes")
|
||||||
|
apply_grid.addWidget(apply, 0, 1, 1, 1)
|
||||||
|
cancel = QtWidgets.QToolButton()
|
||||||
|
cancel.setText("Cancel")
|
||||||
|
cancel.setToolTip("Cancel changes")
|
||||||
|
apply_grid.addWidget(cancel, 0, 2, 1, 1)
|
||||||
|
self.button_stack.addWidget(apply_grid)
|
||||||
|
grid.addWidget(self.button_stack, 3, 1)
|
||||||
|
|
||||||
|
grid.setRowStretch(1, 1)
|
||||||
|
grid.setRowStretch(2, 1)
|
||||||
|
grid.setRowStretch(3, 1)
|
||||||
|
|
||||||
|
set_btn.clicked.connect(self.set_clicked)
|
||||||
|
apply.clicked.connect(self.apply_changes)
|
||||||
|
if self.dds_model.is_urukul:
|
||||||
|
off_btn.clicked.connect(self.off_clicked)
|
||||||
|
off_btn.setToolTip(textwrap.dedent(
|
||||||
|
"""Note: If TTL RTIO sw for the channel is switched high,
|
||||||
|
this button will not disable the channel.
|
||||||
|
Use the TTL override instead."""))
|
||||||
|
self.value_edit.returnPressed.connect(lambda: self.apply_changes(None))
|
||||||
|
self.value_edit.escapePressedConnect(self.cancel_changes)
|
||||||
|
cancel.clicked.connect(self.cancel_changes)
|
||||||
|
|
||||||
|
self.refresh_display()
|
||||||
|
|
||||||
|
def set_clicked(self, set):
|
||||||
|
self.data_stack.setCurrentIndex(1)
|
||||||
|
self.button_stack.setCurrentIndex(1)
|
||||||
|
self.value_edit.setText("{:.7f}"
|
||||||
|
.format(self.cur_frequency/1e6))
|
||||||
|
self.value_edit.setFocus()
|
||||||
|
self.value_edit.selectAll()
|
||||||
|
|
||||||
|
def off_clicked(self, set):
|
||||||
|
self.dm.dds_channel_toggle(self.dds_name, self.dds_model, sw=False)
|
||||||
|
|
||||||
|
def apply_changes(self, apply):
|
||||||
|
self.data_stack.setCurrentIndex(0)
|
||||||
|
self.button_stack.setCurrentIndex(0)
|
||||||
|
frequency = float(self.value_edit.text())*1e6
|
||||||
|
self.dm.dds_set_frequency(self.dds_name, self.dds_model, frequency)
|
||||||
|
|
||||||
|
def cancel_changes(self, cancel):
|
||||||
|
self.data_stack.setCurrentIndex(0)
|
||||||
|
self.button_stack.setCurrentIndex(0)
|
||||||
|
|
||||||
def refresh_display(self):
|
def refresh_display(self):
|
||||||
self.value.setText("<font size=\"4\">{:.7f}</font><font size=\"2\"> MHz</font>"
|
self.cur_frequency = self.dds_model.cur_frequency
|
||||||
|
self.value_label.setText("<font size=\"4\">{:.7f}</font>"
|
||||||
|
.format(self.cur_frequency/1e6))
|
||||||
|
self.value_edit.setText("{:.7f}"
|
||||||
.format(self.cur_frequency/1e6))
|
.format(self.cur_frequency/1e6))
|
||||||
|
|
||||||
def sort_key(self):
|
def sort_key(self):
|
||||||
|
@ -201,51 +375,74 @@ _WidgetDesc = namedtuple("_WidgetDesc", "uid comment cls arguments")
|
||||||
|
|
||||||
|
|
||||||
def setup_from_ddb(ddb):
|
def setup_from_ddb(ddb):
|
||||||
core_addr = None
|
mi_addr = None
|
||||||
|
mi_port = None
|
||||||
dds_sysclk = None
|
dds_sysclk = None
|
||||||
description = set()
|
description = set()
|
||||||
|
|
||||||
for k, v in ddb.items():
|
for k, v in ddb.items():
|
||||||
comment = None
|
|
||||||
if "comment" in v:
|
|
||||||
comment = v["comment"]
|
|
||||||
try:
|
try:
|
||||||
if isinstance(v, dict) and v["type"] == "local":
|
if isinstance(v, dict):
|
||||||
if k == "core":
|
comment = v.get("comment")
|
||||||
core_addr = v["arguments"]["host"]
|
if v["type"] == "local":
|
||||||
elif v["module"] == "artiq.coredevice.ttl":
|
if v["module"] == "artiq.coredevice.ttl":
|
||||||
channel = v["arguments"]["channel"]
|
if "ttl_urukul" in k:
|
||||||
force_out = v["class"] == "TTLOut"
|
continue
|
||||||
widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
|
channel = v["arguments"]["channel"]
|
||||||
description.add(widget)
|
force_out = v["class"] == "TTLOut"
|
||||||
elif (v["module"] == "artiq.coredevice.ad9914"
|
widget = _WidgetDesc(k, comment, _TTLWidget, (channel, force_out, k))
|
||||||
and v["class"] == "AD9914"):
|
|
||||||
bus_channel = v["arguments"]["bus_channel"]
|
|
||||||
channel = v["arguments"]["channel"]
|
|
||||||
dds_sysclk = v["arguments"]["sysclk"]
|
|
||||||
widget = _WidgetDesc(k, comment, _DDSWidget, (bus_channel, channel, k))
|
|
||||||
description.add(widget)
|
|
||||||
elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53XX")
|
|
||||||
or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
|
|
||||||
spi_device = v["arguments"]["spi_device"]
|
|
||||||
spi_device = ddb[spi_device]
|
|
||||||
while isinstance(spi_device, str):
|
|
||||||
spi_device = ddb[spi_device]
|
|
||||||
spi_channel = spi_device["arguments"]["channel"]
|
|
||||||
for channel in range(32):
|
|
||||||
widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
|
|
||||||
description.add(widget)
|
description.add(widget)
|
||||||
|
elif (v["module"] == "artiq.coredevice.ad9914"
|
||||||
|
and v["class"] == "AD9914"):
|
||||||
|
bus_channel = v["arguments"]["bus_channel"]
|
||||||
|
channel = v["arguments"]["channel"]
|
||||||
|
dds_sysclk = v["arguments"]["sysclk"]
|
||||||
|
model = _DDSModel(v["class"], dds_sysclk)
|
||||||
|
widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
|
||||||
|
description.add(widget)
|
||||||
|
elif (v["module"] == "artiq.coredevice.ad9910"
|
||||||
|
and v["class"] == "AD9910") or \
|
||||||
|
(v["module"] == "artiq.coredevice.ad9912"
|
||||||
|
and v["class"] == "AD9912"):
|
||||||
|
channel = v["arguments"]["chip_select"] - 4
|
||||||
|
if channel < 0:
|
||||||
|
continue
|
||||||
|
dds_cpld = v["arguments"]["cpld_device"]
|
||||||
|
spi_dev = ddb[dds_cpld]["arguments"]["spi_device"]
|
||||||
|
bus_channel = ddb[spi_dev]["arguments"]["channel"]
|
||||||
|
pll = v["arguments"]["pll_n"]
|
||||||
|
refclk = ddb[dds_cpld]["arguments"]["refclk"]
|
||||||
|
clk_div = v["arguments"].get("clk_div", 0)
|
||||||
|
model = _DDSModel( v["class"], refclk, dds_cpld, pll, clk_div)
|
||||||
|
widget = _WidgetDesc(k, comment, _DDSWidget, (k, bus_channel, channel, model))
|
||||||
|
description.add(widget)
|
||||||
|
elif ( (v["module"] == "artiq.coredevice.ad53xx" and v["class"] == "AD53xx")
|
||||||
|
or (v["module"] == "artiq.coredevice.zotino" and v["class"] == "Zotino")):
|
||||||
|
spi_device = v["arguments"]["spi_device"]
|
||||||
|
spi_device = ddb[spi_device]
|
||||||
|
while isinstance(spi_device, str):
|
||||||
|
spi_device = ddb[spi_device]
|
||||||
|
spi_channel = spi_device["arguments"]["channel"]
|
||||||
|
for channel in range(32):
|
||||||
|
widget = _WidgetDesc((k, channel), comment, _DACWidget, (spi_channel, channel, k))
|
||||||
|
description.add(widget)
|
||||||
|
elif v["type"] == "controller" and k == "core_moninj":
|
||||||
|
mi_addr = v["host"]
|
||||||
|
mi_port = v.get("port_proxy", 1383)
|
||||||
except KeyError:
|
except KeyError:
|
||||||
pass
|
pass
|
||||||
return core_addr, dds_sysclk, description
|
return mi_addr, mi_port, description
|
||||||
|
|
||||||
|
|
||||||
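setup_from_ddb now looks for a controller entry named core_moninj and reads its "host" and optional "port_proxy" (default 1383) instead of connecting straight to the core device. A sketch of the matching device_db entry - only those two keys come from the diff; the address and the extra "port" field are assumptions:

# device_db.py sketch -- only "host" and "port_proxy" are read by setup_from_ddb();
# the address and the remaining fields are illustrative.
device_db = {
    "core_moninj": {
        "type": "controller",
        "host": "192.168.1.70",
        "port_proxy": 1383,   # moninj proxy port, defaults to 1383 if omitted
        "port": 1384,         # assumed RPC control port, not used by this code path
    },
}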
class _DeviceManager:
|
class _DeviceManager:
|
||||||
def __init__(self):
|
def __init__(self, schedule_ctl):
|
||||||
self.core_addr = None
|
self.mi_addr = None
|
||||||
self.new_core_addr = asyncio.Event()
|
self.mi_port = None
|
||||||
self.core_connection = None
|
self.reconnect_mi = asyncio.Event()
|
||||||
self.core_connector_task = asyncio.ensure_future(self.core_connector())
|
self.mi_connection = None
|
||||||
|
self.mi_connector_task = asyncio.ensure_future(self.mi_connector())
|
||||||
|
|
||||||
|
self.schedule_ctl = schedule_ctl
|
||||||
|
|
||||||
self.ddb = dict()
|
self.ddb = dict()
|
||||||
self.description = set()
|
self.description = set()
|
||||||
|
@ -264,13 +461,12 @@ class _DeviceManager:
|
||||||
return ddb
|
return ddb
|
||||||
|
|
||||||
def notify(self, mod):
|
def notify(self, mod):
|
||||||
core_addr, dds_sysclk, description = setup_from_ddb(self.ddb)
|
mi_addr, mi_port, description = setup_from_ddb(self.ddb)
|
||||||
|
|
||||||
if core_addr != self.core_addr:
|
if (mi_addr, mi_port) != (self.mi_addr, self.mi_port):
|
||||||
self.core_addr = core_addr
|
self.mi_addr = mi_addr
|
||||||
self.new_core_addr.set()
|
self.mi_port = mi_port
|
||||||
|
self.reconnect_mi.set()
|
||||||
self.dds_sysclk = dds_sysclk
|
|
||||||
|
|
||||||
for to_remove in self.description - description:
|
for to_remove in self.description - description:
|
||||||
widget = self.widgets_by_uid[to_remove.uid]
|
widget = self.widgets_by_uid[to_remove.uid]
|
||||||
|
@ -318,44 +514,172 @@ class _DeviceManager:
|
||||||
self.description = description
|
self.description = description
|
||||||
|
|
||||||
def ttl_set_mode(self, channel, mode):
|
def ttl_set_mode(self, channel, mode):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
widget = self.ttl_widgets[channel]
|
widget = self.ttl_widgets[channel]
|
||||||
if mode == "0":
|
if mode == "0":
|
||||||
widget.cur_override = True
|
widget.cur_override = True
|
||||||
widget.cur_level = False
|
widget.cur_level = False
|
||||||
self.core_connection.inject(channel, TTLOverride.level.value, 0)
|
self.mi_connection.inject(channel, TTLOverride.level.value, 0)
|
||||||
self.core_connection.inject(channel, TTLOverride.oe.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 1)
|
||||||
elif mode == "1":
|
elif mode == "1":
|
||||||
widget.cur_override = True
|
widget.cur_override = True
|
||||||
widget.cur_level = True
|
widget.cur_level = True
|
||||||
self.core_connection.inject(channel, TTLOverride.level.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.level.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.oe.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.oe.value, 1)
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 1)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 1)
|
||||||
elif mode == "exp":
|
elif mode == "exp":
|
||||||
widget.cur_override = False
|
widget.cur_override = False
|
||||||
self.core_connection.inject(channel, TTLOverride.en.value, 0)
|
self.mi_connection.inject(channel, TTLOverride.en.value, 0)
|
||||||
else:
|
else:
|
||||||
raise ValueError
|
raise ValueError
|
||||||
# override state may have changed
|
# override state may have changed
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
|
||||||
|
async def _submit_by_content(self, content, class_name, title):
|
||||||
|
expid = {
|
||||||
|
"log_level": logging.WARNING,
|
||||||
|
"content": content,
|
||||||
|
"class_name": class_name,
|
||||||
|
"arguments": {}
|
||||||
|
}
|
||||||
|
scheduling = {
|
||||||
|
"pipeline_name": "main",
|
||||||
|
"priority": 0,
|
||||||
|
"due_date": None,
|
||||||
|
"flush": False
|
||||||
|
}
|
||||||
|
rid = await self.schedule_ctl.submit(
|
||||||
|
scheduling["pipeline_name"],
|
||||||
|
expid,
|
||||||
|
scheduling["priority"], scheduling["due_date"],
|
||||||
|
scheduling["flush"])
|
||||||
|
logger.info("Submitted '%s', RID is %d", title, rid)
|
||||||
|
|
||||||
|
def _dds_faux_injection(self, dds_channel, dds_model, action, title, log_msg):
|
||||||
|
# create a kernel, fill in the template and submit it by content
|
||||||
|
|
||||||
|
# initialize CPLD (if applicable)
|
||||||
|
if dds_model.is_urukul:
|
||||||
|
# urukuls need CPLD init and switch to on
|
||||||
|
cpld_dev = """self.setattr_device("core_cache")
|
||||||
|
self.setattr_device("{}")""".format(dds_model.cpld)
|
||||||
|
|
||||||
|
# `sta`/`rf_sw` variables are guaranteed for urukuls
|
||||||
|
# so {action} can use them
|
||||||
|
# if no RF is enabled, the CPLD may not have been initialized
|
||||||
|
# but if there is, it has already been initialized - no need to do it again
|
||||||
|
cpld_init = """delay(15*ms)
|
||||||
|
was_init = self.core_cache.get("_{cpld}_init")
|
||||||
|
sta = self.{cpld}.sta_read()
|
||||||
|
rf_sw = urukul_sta_rf_sw(sta)
|
||||||
|
if rf_sw == 0 and len(was_init) == 0:
|
||||||
|
delay(15*ms)
|
||||||
|
self.{cpld}.init()
|
||||||
|
self.core_cache.put("_{cpld}_init", [1])
|
||||||
|
""".format(cpld=dds_model.cpld)
|
||||||
|
else:
|
||||||
|
cpld_dev = ""
|
||||||
|
cpld_init = ""
|
||||||
|
|
||||||
|
# AD9912/9910: init channel (if uninitialized)
|
||||||
|
if dds_model.dds_type == "AD9912":
|
||||||
|
# 0xFF before init, 0x99 after
|
||||||
|
channel_init = """
|
||||||
|
if self.{dds_channel}.read({cfgreg}, length=1) == 0xFF:
|
||||||
|
delay(10*ms)
|
||||||
|
self.{dds_channel}.init()
|
||||||
|
""".format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
|
||||||
|
elif dds_model.dds_type == "AD9910":
|
||||||
|
# -1 before init, 2 after
|
||||||
|
channel_init = """
|
||||||
|
if self.{dds_channel}.read32({cfgreg}) == -1:
|
||||||
|
delay(10*ms)
|
||||||
|
self.{dds_channel}.init()
|
||||||
|
""".format(dds_channel=dds_channel, cfgreg=AD9912_SER_CONF)
|
||||||
|
else:
|
||||||
|
channel_init = "self.{dds_channel}.init()".format(dds_channel=dds_channel)
|
||||||
|
|
||||||
|
dds_exp = textwrap.dedent("""
|
||||||
|
from artiq.experiment import *
|
||||||
|
from artiq.coredevice.urukul import *
|
||||||
|
|
||||||
|
class {title}(EnvExperiment):
|
||||||
|
def build(self):
|
||||||
|
self.setattr_device("core")
|
||||||
|
self.setattr_device("{dds_channel}")
|
||||||
|
{cpld_dev}
|
||||||
|
|
||||||
|
@kernel
|
||||||
|
def run(self):
|
||||||
|
self.core.break_realtime()
|
||||||
|
{cpld_init}
|
||||||
|
delay(10*ms)
|
||||||
|
{channel_init}
|
||||||
|
delay(15*ms)
|
||||||
|
{action}
|
||||||
|
""".format(title=title, action=action,
|
||||||
|
dds_channel=dds_channel,
|
||||||
|
cpld_dev=cpld_dev, cpld_init=cpld_init,
|
||||||
|
channel_init=channel_init))
|
||||||
|
asyncio.ensure_future(
|
||||||
|
self._submit_by_content(
|
||||||
|
dds_exp,
|
||||||
|
title,
|
||||||
|
log_msg))
|
||||||
|
|
||||||
|
def dds_set_frequency(self, dds_channel, dds_model, freq):
|
||||||
|
action = "self.{ch}.set({freq})".format(
|
||||||
|
freq=freq, ch=dds_channel)
|
||||||
|
if dds_model.is_urukul:
|
||||||
|
action += """
|
||||||
|
ch_no = self.{ch}.chip_select - 4
|
||||||
|
self.{cpld}.cfg_switches(rf_sw | 1 << ch_no)
|
||||||
|
""".format(ch=dds_channel, cpld=dds_model.cpld)
|
||||||
|
self._dds_faux_injection(
|
||||||
|
dds_channel,
|
||||||
|
dds_model,
|
||||||
|
action,
|
||||||
|
"SetDDS",
|
||||||
|
"Set DDS {} {}MHz".format(dds_channel, freq/1e6))
|
||||||
|
|
||||||
|
def dds_channel_toggle(self, dds_channel, dds_model, sw=True):
|
||||||
|
# urukul only
|
||||||
|
if sw:
|
||||||
|
switch = "| 1 << ch_no"
|
||||||
|
else:
|
||||||
|
switch = "& ~(1 << ch_no)"
|
||||||
|
action = """
|
||||||
|
ch_no = self.{dds_channel}.chip_select - 4
|
||||||
|
self.{cpld}.cfg_switches(rf_sw {switch})
|
||||||
|
""".format(
|
||||||
|
dds_channel=dds_channel,
|
||||||
|
cpld=dds_model.cpld,
|
||||||
|
switch=switch
|
||||||
|
)
|
||||||
|
self._dds_faux_injection(
|
||||||
|
dds_channel,
|
||||||
|
dds_model,
|
||||||
|
action,
|
||||||
|
"ToggleDDS",
|
||||||
|
"Toggle DDS {} {}".format(dds_channel, "on" if sw else "off"))
|
||||||
|
|
||||||
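For a non-Urukul AD9914 channel the CPLD sections of the template collapse to empty strings, so the generated experiment is quite small. Rendered by hand below for an assumed channel named dds0 and a 100 MHz set point (illustrative, not captured dashboard output):

# Hand-rendered example of the generated "SetDDS" experiment for an assumed
# AD9914 channel "dds0" (cpld_dev/cpld_init empty, frequency 100 MHz).
from artiq.experiment import *
from artiq.coredevice.urukul import *

class SetDDS(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("dds0")

    @kernel
    def run(self):
        self.core.break_realtime()
        delay(10*ms)
        self.dds0.init()
        delay(15*ms)
        self.dds0.set(100000000.0)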
def setup_ttl_monitoring(self, enable, channel):
|
def setup_ttl_monitoring(self, enable, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, channel, TTLProbe.level.value)
|
self.mi_connection.monitor_probe(enable, channel, TTLProbe.level.value)
|
||||||
self.core_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
|
self.mi_connection.monitor_probe(enable, channel, TTLProbe.oe.value)
|
||||||
self.core_connection.monitor_injection(enable, channel, TTLOverride.en.value)
|
self.mi_connection.monitor_injection(enable, channel, TTLOverride.en.value)
|
||||||
self.core_connection.monitor_injection(enable, channel, TTLOverride.level.value)
|
self.mi_connection.monitor_injection(enable, channel, TTLOverride.level.value)
|
||||||
if enable:
|
if enable:
|
||||||
self.core_connection.get_injection_status(channel, TTLOverride.en.value)
|
self.mi_connection.get_injection_status(channel, TTLOverride.en.value)
|
||||||
|
|
||||||
def setup_dds_monitoring(self, enable, bus_channel, channel):
|
def setup_dds_monitoring(self, enable, bus_channel, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, bus_channel, channel)
|
self.mi_connection.monitor_probe(enable, bus_channel, channel)
|
||||||
|
|
||||||
def setup_dac_monitoring(self, enable, spi_channel, channel):
|
def setup_dac_monitoring(self, enable, spi_channel, channel):
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
self.core_connection.monitor_probe(enable, spi_channel, channel)
|
self.mi_connection.monitor_probe(enable, spi_channel, channel)
|
||||||
|
|
||||||
def monitor_cb(self, channel, probe, value):
|
def monitor_cb(self, channel, probe, value):
|
||||||
if channel in self.ttl_widgets:
|
if channel in self.ttl_widgets:
|
||||||
|
@ -365,11 +689,11 @@ class _DeviceManager:
|
||||||
elif probe == TTLProbe.oe.value:
|
elif probe == TTLProbe.oe.value:
|
||||||
widget.cur_oe = bool(value)
|
widget.cur_oe = bool(value)
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
if (channel, probe) in self.dds_widgets:
|
elif (channel, probe) in self.dds_widgets:
|
||||||
widget = self.dds_widgets[(channel, probe)]
|
widget = self.dds_widgets[(channel, probe)]
|
||||||
widget.cur_frequency = value*self.dds_sysclk/2**32
|
widget.dds_model.monitor_update(probe, value)
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
if (channel, probe) in self.dac_widgets:
|
elif (channel, probe) in self.dac_widgets:
|
||||||
widget = self.dac_widgets[(channel, probe)]
|
widget = self.dac_widgets[(channel, probe)]
|
||||||
widget.cur_value = value
|
widget.cur_value = value
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
@ -383,21 +707,32 @@ class _DeviceManager:
|
||||||
widget.cur_override_level = bool(value)
|
widget.cur_override_level = bool(value)
|
||||||
widget.refresh_display()
|
widget.refresh_display()
|
||||||
|
|
||||||
async def core_connector(self):
|
def disconnect_cb(self):
|
||||||
|
logger.error("lost connection to moninj")
|
||||||
|
self.reconnect_mi.set()
|
||||||
|
|
||||||
|
async def mi_connector(self):
|
||||||
while True:
|
while True:
|
||||||
await self.new_core_addr.wait()
|
await self.reconnect_mi.wait()
|
||||||
self.new_core_addr.clear()
|
self.reconnect_mi.clear()
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
await self.core_connection.close()
|
await self.mi_connection.close()
|
||||||
self.core_connection = None
|
self.mi_connection = None
|
||||||
new_core_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
|
new_mi_connection = CommMonInj(self.monitor_cb, self.injection_status_cb,
|
||||||
lambda: logger.error("lost connection to core device moninj"))
|
self.disconnect_cb)
|
||||||
try:
|
try:
|
||||||
await new_core_connection.connect(self.core_addr, 1383)
|
await new_mi_connection.connect(self.mi_addr, self.mi_port)
|
||||||
|
except asyncio.CancelledError:
|
||||||
|
logger.info("cancelled connection to moninj")
|
||||||
|
break
|
||||||
except:
|
except:
|
||||||
logger.error("failed to connect to core device moninj", exc_info=True)
|
logger.error("failed to connect to moninj. Is aqctl_moninj_proxy running?", exc_info=True)
|
||||||
|
await asyncio.sleep(10.)
|
||||||
|
self.reconnect_mi.set()
|
||||||
else:
|
else:
|
||||||
self.core_connection = new_core_connection
|
logger.info("ARTIQ dashboard connected to moninj (%s)",
|
||||||
|
self.mi_addr)
|
||||||
|
self.mi_connection = new_mi_connection
|
||||||
for ttl_channel in self.ttl_widgets.keys():
|
for ttl_channel in self.ttl_widgets.keys():
|
||||||
self.setup_ttl_monitoring(True, ttl_channel)
|
self.setup_ttl_monitoring(True, ttl_channel)
|
||||||
for bus_channel, channel in self.dds_widgets.keys():
|
for bus_channel, channel in self.dds_widgets.keys():
|
||||||
|
@ -406,13 +741,13 @@ class _DeviceManager:
|
||||||
self.setup_dac_monitoring(True, spi_channel, channel)
|
self.setup_dac_monitoring(True, spi_channel, channel)
|
||||||
|
|
||||||
async def close(self):
|
async def close(self):
|
||||||
self.core_connector_task.cancel()
|
self.mi_connector_task.cancel()
|
||||||
try:
|
try:
|
||||||
await asyncio.wait_for(self.core_connector_task, None)
|
await asyncio.wait_for(self.mi_connector_task, None)
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
pass
|
pass
|
||||||
if self.core_connection is not None:
|
if self.mi_connection is not None:
|
||||||
await self.core_connection.close()
|
await self.mi_connection.close()
|
||||||
|
|
||||||
|
|
||||||
class _MonInjDock(QtWidgets.QDockWidget):
|
class _MonInjDock(QtWidgets.QDockWidget):
|
||||||
|
@ -438,12 +773,12 @@ class _MonInjDock(QtWidgets.QDockWidget):
|
||||||
|
|
||||||
|
|
||||||
class MonInj:
|
class MonInj:
|
||||||
def __init__(self):
|
def __init__(self, schedule_ctl):
|
||||||
self.ttl_dock = _MonInjDock("TTL")
|
self.ttl_dock = _MonInjDock("TTL")
|
||||||
self.dds_dock = _MonInjDock("DDS")
|
self.dds_dock = _MonInjDock("DDS")
|
||||||
self.dac_dock = _MonInjDock("DAC")
|
self.dac_dock = _MonInjDock("DAC")
|
||||||
|
|
||||||
self.dm = _DeviceManager()
|
self.dm = _DeviceManager(schedule_ctl)
|
||||||
self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(
|
self.dm.ttl_cb = lambda: self.ttl_dock.layout_widgets(
|
||||||
self.dm.ttl_widgets.values())
|
self.dm.ttl_widgets.values())
|
||||||
self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(
|
self.dm.dds_cb = lambda: self.dds_dock.layout_widgets(
|
||||||
|
|
|
@@ -48,7 +48,7 @@ class Model(DictSyncModel):
 else:
 return "Outside repo."
 elif column == 6:
-return v["expid"]["file"]
+return v["expid"].get("file", "<none>")
 elif column == 7:
 if v["expid"]["class_name"] is None:
 return ""