forked from M-Labs/artiq-zynq
Compare commits
24 Commits
master...idle_kernel
Author | SHA1 | Date |
---|---|---|
Simon Renblad | 7105a49ddc | |
Sebastien Bourdeauducq | cf0b83c3f9 | |
mwojcik | bfb582f99b | |
mwojcik | 52e64fb2f9 | |
mwojcik | facc98058c | |
mwojcik | f0f81dbf8a | |
mwojcik | 30e6bf4a3a | |
mwojcik | 8f4e30dd9c | |
mwojcik | e31a31c4ff | |
Sebastien Bourdeauducq | d044bbd8bb | |
Sebastien Bourdeauducq | 33cf924805 | |
Sebastien Bourdeauducq | f7887b14f6 | |
Sebastien Bourdeauducq | 3e3e23918e | |
mwojcik | 6ca1719033 | |
mwojcik | aebc739c1e | |
linuswck | e1b2c45813 | |
linuswck | e6372b9766 | |
linuswck | 07044752b6 | |
linuswck | 79fc5a7789 | |
Sebastien Bourdeauducq | d3f4602361 | |
mwojcik | 6c8346ca5f | |
mwojcik | b76f634686 | |
morgan | 4a34777b97 | |
morgan | 43e4527392 | |
flake.lock (68 changed lines)
|
@ -11,11 +11,11 @@
|
||||||
"src-pythonparser": "src-pythonparser"
|
"src-pythonparser": "src-pythonparser"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1697537883,
|
"lastModified": 1706785107,
|
||||||
"narHash": "sha256-GfadmYHFkczltX+rPf08YpAHjYa/31ZmmVD578BcFow=",
|
"narHash": "sha256-Uj72tqigiOCdewSSBBMg6zUpVKhwjAo1HeLJgvyZ3oc=",
|
||||||
"ref": "refs/heads/master",
|
"ref": "refs/heads/master",
|
||||||
"rev": "b168f0bb4be1697ff100475c20ee304dcc31fcc2",
|
"rev": "3aaa7e04f26a495e8847e47424bfc16d76d82bf8",
|
||||||
"revCount": 8573,
|
"revCount": 8672,
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "https://github.com/m-labs/artiq.git"
|
"url": "https://github.com/m-labs/artiq.git"
|
||||||
},
|
},
|
||||||
|
@ -37,11 +37,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1693473687,
|
"lastModified": 1701573753,
|
||||||
"narHash": "sha256-BdLddCWbvoEyakcGwhph9b5dIU1iA0hCQV7KYgU8nos=",
|
"narHash": "sha256-vhEtXjb9AM6/HnsgfVmhJQeqQ9JqysUm7iWNzTIbexs=",
|
||||||
"owner": "m-labs",
|
"owner": "m-labs",
|
||||||
"repo": "artiq-comtools",
|
"repo": "artiq-comtools",
|
||||||
"rev": "f522ef3dbc65961f17b2d3d41e927409d970fd79",
|
"rev": "199bdabf4de49cb7ada8a4ac7133008e0f8434b7",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -55,11 +55,11 @@
|
||||||
"systems": "systems"
|
"systems": "systems"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1692799911,
|
"lastModified": 1694529238,
|
||||||
"narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=",
|
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "flake-utils",
|
"repo": "flake-utils",
|
||||||
"rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44",
|
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -71,11 +71,11 @@
|
||||||
"mozilla-overlay": {
|
"mozilla-overlay": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1695805681,
|
"lastModified": 1704373101,
|
||||||
"narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=",
|
"narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=",
|
||||||
"owner": "mozilla",
|
"owner": "mozilla",
|
||||||
"repo": "nixpkgs-mozilla",
|
"repo": "nixpkgs-mozilla",
|
||||||
"rev": "6eabade97bc28d707a8b9d82ad13ef143836736e",
|
"rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -87,11 +87,11 @@
|
||||||
"mozilla-overlay_2": {
|
"mozilla-overlay_2": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1695805681,
|
"lastModified": 1704373101,
|
||||||
"narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=",
|
"narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=",
|
||||||
"owner": "mozilla",
|
"owner": "mozilla",
|
||||||
"repo": "nixpkgs-mozilla",
|
"repo": "nixpkgs-mozilla",
|
||||||
"rev": "6eabade97bc28d707a8b9d82ad13ef143836736e",
|
"rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -118,16 +118,16 @@
|
||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1697226376,
|
"lastModified": 1706515015,
|
||||||
"narHash": "sha256-cumLLb1QOUtWieUnLGqo+ylNt3+fU8Lcv5Zl+tYbRUE=",
|
"narHash": "sha256-eFfY5A7wlYy3jD/75lx6IJRueg4noE+jowl0a8lIlVo=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "898cb2064b6e98b8c5499f37e81adbdf2925f7c5",
|
"rev": "f4a8d6d5324c327dcc2d863eb7f3cc06ad630df4",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"ref": "nixos-23.05",
|
"ref": "nixos-23.11",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
|
@ -147,11 +147,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1697528004,
|
"lastModified": 1701572254,
|
||||||
"narHash": "sha256-FFa2MbhAJEjwY58uOs0swvgymfjubHyWba6Q0X6CbB0=",
|
"narHash": "sha256-ixq8dlpyOytDr+d/OmW8v1Ioy9V2G2ibOlNj8GFDSq4=",
|
||||||
"owner": "m-labs",
|
"owner": "m-labs",
|
||||||
"repo": "sipyco",
|
"repo": "sipyco",
|
||||||
"rev": "c0a7ed350ccfb85474217057fc47b3f258ca8d99",
|
"rev": "cceac0df537887135f99aa6b1bdd82853f16b4d6",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -163,11 +163,11 @@
|
||||||
"src-migen": {
|
"src-migen": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1697013661,
|
"lastModified": 1702942348,
|
||||||
"narHash": "sha256-qNCqgWyE4vTDmyjE2XMJqW1djuBxT25A36AzQfZqluU=",
|
"narHash": "sha256-gKIfHZxsv+jcgDFRW9mPqmwqbZXuRvXefkZcSFjOGHw=",
|
||||||
"owner": "m-labs",
|
"owner": "m-labs",
|
||||||
"repo": "migen",
|
"repo": "migen",
|
||||||
"rev": "aadc19df93b7aa9ca761aaebbb98a11e0cf2d7c7",
|
"rev": "50934ad10a87ade47219b796535978b9bdf24023",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -179,11 +179,11 @@
|
||||||
"src-misoc": {
|
"src-misoc": {
|
||||||
"flake": false,
|
"flake": false,
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1693709836,
|
"lastModified": 1699352904,
|
||||||
"narHash": "sha256-YiCk05RYLzZu1CYkQ2r7XtjwVEqkUGTQn388uOls9tI=",
|
"narHash": "sha256-SglyTmXOPv8jJOjwAjJrj/WhAkItQfUbvKfUqrynwRg=",
|
||||||
"ref": "refs/heads/master",
|
"ref": "refs/heads/master",
|
||||||
"rev": "58dc4ee60d165ce9145cf3d904241fc154b6407f",
|
"rev": "a53859f2167c31ab5225b6c09f30cf05527b94f4",
|
||||||
"revCount": 2448,
|
"revCount": 2452,
|
||||||
"submodules": true,
|
"submodules": true,
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "https://github.com/m-labs/misoc.git"
|
"url": "https://github.com/m-labs/misoc.git"
|
||||||
|
@ -234,11 +234,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1697795161,
|
"lastModified": 1702982463,
|
||||||
"narHash": "sha256-p89w9eoFJ2VFTDZ5Mrv5vsH0E1Ko9z1C6Ett281hCHg=",
|
"narHash": "sha256-jKR3drE2rsTaYGEgIdv5kUo2LOb1JyIb4tJhVuCXTTc=",
|
||||||
"ref": "refs/heads/master",
|
"ref": "refs/heads/master",
|
||||||
"rev": "be672ab662d8134ee11412a651864824f6483d4a",
|
"rev": "4168eb63a7e846863331ae4e656cfd82a867cca8",
|
||||||
"revCount": 630,
|
"revCount": 636,
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "https://git.m-labs.hk/m-labs/zynq-rs"
|
"url": "https://git.m-labs.hk/m-labs/zynq-rs"
|
||||||
},
|
},
|
||||||
|
|
|
@ -78,6 +78,7 @@
|
||||||
|
|
||||||
# migen/misoc version checks are broken with pyproject for some reason
|
# migen/misoc version checks are broken with pyproject for some reason
|
||||||
postPatch = ''
|
postPatch = ''
|
||||||
|
sed -i "1,4d" pyproject.toml
|
||||||
substituteInPlace pyproject.toml \
|
substituteInPlace pyproject.toml \
|
||||||
--replace '"migen@git+https://github.com/m-labs/migen",' ""
|
--replace '"migen@git+https://github.com/m-labs/migen",' ""
|
||||||
substituteInPlace pyproject.toml \
|
substituteInPlace pyproject.toml \
|
||||||
|
@ -122,6 +123,9 @@
|
||||||
src = ./src;
|
src = ./src;
|
||||||
cargoLock = {
|
cargoLock = {
|
||||||
lockFile = src/Cargo.lock;
|
lockFile = src/Cargo.lock;
|
||||||
|
outputHashes = {
|
||||||
|
"tar-no-std-0.1.8" = "sha256-xm17108v4smXOqxdLvHl9CxTCJslmeogjm4Y87IXFuM=";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
nativeBuildInputs = [
|
nativeBuildInputs = [
|
||||||
|
@ -149,6 +153,7 @@
|
||||||
|
|
||||||
doCheck = false;
|
doCheck = false;
|
||||||
dontFixup = true;
|
dontFixup = true;
|
||||||
|
auditable = false;
|
||||||
};
|
};
|
||||||
gateware = pkgs.runCommand "${target}-${variant}-gateware"
|
gateware = pkgs.runCommand "${target}-${variant}-gateware"
|
||||||
{
|
{
|
||||||
|
|
|
@ -2,6 +2,12 @@
|
||||||
# It is not intended for manual editing.
|
# It is not intended for manual editing.
|
||||||
version = 3
|
version = 3
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "arrayvec"
|
||||||
|
version = "0.7.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-recursion"
|
name = "async-recursion"
|
||||||
version = "0.3.2"
|
version = "0.3.2"
|
||||||
|
@ -474,6 +480,7 @@ dependencies = [
|
||||||
"log_buffer",
|
"log_buffer",
|
||||||
"num-derive",
|
"num-derive",
|
||||||
"num-traits",
|
"num-traits",
|
||||||
|
"tar-no-std",
|
||||||
"unwind",
|
"unwind",
|
||||||
"vcell",
|
"vcell",
|
||||||
"void",
|
"void",
|
||||||
|
@ -538,6 +545,16 @@ dependencies = [
|
||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tar-no-std"
|
||||||
|
version = "0.1.8"
|
||||||
|
source = "git+https://git.m-labs.hk/M-Labs/tar-no-std?rev=2ab6dc5#2ab6dc58e5249c59c4eb03eaf3a119bcdd678d32"
|
||||||
|
dependencies = [
|
||||||
|
"arrayvec",
|
||||||
|
"bitflags",
|
||||||
|
"log",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-ident"
|
name = "unicode-ident"
|
||||||
version = "1.0.5"
|
version = "1.0.5"
|
||||||
|
|
|
@ -237,12 +237,16 @@ class GenericMaster(SoCCore):
|
||||||
gtx0 = self.gt_drtio.gtxs[0]
|
gtx0 = self.gt_drtio.gtxs[0]
|
||||||
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
||||||
|
|
||||||
|
ext_async_rst = Signal()
|
||||||
|
|
||||||
self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
|
self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
|
||||||
self.submodules.sys_crg = zynq_clocking.SYSCRG(
|
self.submodules.sys_crg = zynq_clocking.SYSCRG(
|
||||||
self.platform,
|
self.platform,
|
||||||
self.ps7,
|
self.ps7,
|
||||||
txout_buf,
|
txout_buf,
|
||||||
clk_sw=gtx0.tx_init.done)
|
clk_sw=self.gt_drtio.stable_clkin.storage,
|
||||||
|
clk_sw_status=gtx0.tx_init.done,
|
||||||
|
ext_async_rst=ext_async_rst)
|
||||||
self.csr_devices.append("sys_crg")
|
self.csr_devices.append("sys_crg")
|
||||||
self.crg = self.ps7 # HACK for eem_7series to find the clock
|
self.crg = self.ps7 # HACK for eem_7series to find the clock
|
||||||
self.crg.cd_sys = self.sys_crg.cd_sys
|
self.crg.cd_sys = self.sys_crg.cd_sys
|
||||||
|
@ -250,6 +254,9 @@ class GenericMaster(SoCCore):
|
||||||
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
||||||
fix_serdes_timing_path(platform)
|
fix_serdes_timing_path(platform)
|
||||||
|
|
||||||
|
self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
|
||||||
|
self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
|
||||||
|
|
||||||
self.config["HAS_SI5324"] = None
|
self.config["HAS_SI5324"] = None
|
||||||
self.config["SI5324_SOFT_RESET"] = None
|
self.config["SI5324_SOFT_RESET"] = None
|
||||||
|
|
||||||
|
@ -419,12 +426,16 @@ class GenericSatellite(SoCCore):
|
||||||
gtx0 = self.gt_drtio.gtxs[0]
|
gtx0 = self.gt_drtio.gtxs[0]
|
||||||
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
||||||
|
|
||||||
|
ext_async_rst = Signal()
|
||||||
|
|
||||||
self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
|
self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
|
||||||
self.submodules.sys_crg = zynq_clocking.SYSCRG(
|
self.submodules.sys_crg = zynq_clocking.SYSCRG(
|
||||||
self.platform,
|
self.platform,
|
||||||
self.ps7,
|
self.ps7,
|
||||||
txout_buf,
|
txout_buf,
|
||||||
clk_sw=gtx0.tx_init.done)
|
clk_sw=self.gt_drtio.stable_clkin.storage,
|
||||||
|
clk_sw_status=gtx0.tx_init.done,
|
||||||
|
ext_async_rst=ext_async_rst)
|
||||||
platform.add_false_path_constraints(
|
platform.add_false_path_constraints(
|
||||||
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
||||||
self.csr_devices.append("sys_crg")
|
self.csr_devices.append("sys_crg")
|
||||||
|
@ -433,6 +444,9 @@ class GenericSatellite(SoCCore):
|
||||||
|
|
||||||
fix_serdes_timing_path(platform)
|
fix_serdes_timing_path(platform)
|
||||||
|
|
||||||
|
self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
|
||||||
|
self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
|
||||||
|
|
||||||
self.rtio_channels = []
|
self.rtio_channels = []
|
||||||
has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"])
|
has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"])
|
||||||
if has_grabber:
|
if has_grabber:
|
||||||
|
|
|
@ -226,6 +226,7 @@ class _MasterBase(SoCCore):
|
||||||
self.csr_devices.append("gt_drtio")
|
self.csr_devices.append("gt_drtio")
|
||||||
|
|
||||||
self.submodules.rtio_tsc = rtio.TSC(glbl_fine_ts_width=3)
|
self.submodules.rtio_tsc = rtio.TSC(glbl_fine_ts_width=3)
|
||||||
|
ext_async_rst = Signal()
|
||||||
txout_buf = Signal()
|
txout_buf = Signal()
|
||||||
gtx0 = self.gt_drtio.gtxs[0]
|
gtx0 = self.gt_drtio.gtxs[0]
|
||||||
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
|
||||||
|
@ -234,12 +235,17 @@ class _MasterBase(SoCCore):
|
||||||
self.platform,
|
self.platform,
|
||||||
self.ps7,
|
self.ps7,
|
||||||
txout_buf,
|
txout_buf,
|
||||||
clk_sw=gtx0.tx_init.done,
|
clk_sw=self.gt_drtio.stable_clkin.storage,
|
||||||
|
clk_sw_status=gtx0.tx_init.done,
|
||||||
|
ext_async_rst=ext_async_rst,
|
||||||
freq=clk_freq)
|
freq=clk_freq)
|
||||||
platform.add_false_path_constraints(
|
platform.add_false_path_constraints(
|
||||||
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
||||||
self.csr_devices.append("sys_crg")
|
self.csr_devices.append("sys_crg")
|
||||||
|
|
||||||
|
self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
|
||||||
|
self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
|
||||||
|
|
||||||
drtio_csr_group = []
|
drtio_csr_group = []
|
||||||
drtioaux_csr_group = []
|
drtioaux_csr_group = []
|
||||||
drtioaux_memory_group = []
|
drtioaux_memory_group = []
|
||||||
|
@ -361,6 +367,7 @@ class _SatelliteBase(SoCCore):
|
||||||
clk_freq=clk_freq)
|
clk_freq=clk_freq)
|
||||||
self.csr_devices.append("gt_drtio")
|
self.csr_devices.append("gt_drtio")
|
||||||
|
|
||||||
|
ext_async_rst = Signal()
|
||||||
txout_buf = Signal()
|
txout_buf = Signal()
|
||||||
txout_buf.attr.add("keep")
|
txout_buf.attr.add("keep")
|
||||||
gtx0 = self.gt_drtio.gtxs[0]
|
gtx0 = self.gt_drtio.gtxs[0]
|
||||||
|
@ -373,12 +380,17 @@ class _SatelliteBase(SoCCore):
|
||||||
self.platform,
|
self.platform,
|
||||||
self.ps7,
|
self.ps7,
|
||||||
txout_buf,
|
txout_buf,
|
||||||
clk_sw=gtx0.tx_init.done,
|
clk_sw=self.gt_drtio.stable_clkin.storage,
|
||||||
|
clk_sw_status=gtx0.tx_init.done,
|
||||||
|
ext_async_rst=ext_async_rst,
|
||||||
freq=clk_freq)
|
freq=clk_freq)
|
||||||
platform.add_false_path_constraints(
|
platform.add_false_path_constraints(
|
||||||
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
|
||||||
self.csr_devices.append("sys_crg")
|
self.csr_devices.append("sys_crg")
|
||||||
|
|
||||||
|
self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
|
||||||
|
self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
|
||||||
|
|
||||||
drtioaux_csr_group = []
|
drtioaux_csr_group = []
|
||||||
drtioaux_memory_group = []
|
drtioaux_memory_group = []
|
||||||
drtiorep_csr_group = []
|
drtiorep_csr_group = []
|
||||||
|
|
|
@ -65,7 +65,7 @@ class ClockSwitchFSM(Module):
|
||||||
|
|
||||||
|
|
||||||
class SYSCRG(Module, AutoCSR):
|
class SYSCRG(Module, AutoCSR):
|
||||||
def __init__(self, platform, ps7, main_clk, clk_sw=None, freq=125e6):
|
def __init__(self, platform, ps7, main_clk, clk_sw=None, clk_sw_status=None, freq=125e6, ext_async_rst=None, ):
|
||||||
# assumes bootstrap clock is same freq as main and sys output
|
# assumes bootstrap clock is same freq as main and sys output
|
||||||
self.clock_domains.cd_sys = ClockDomain()
|
self.clock_domains.cd_sys = ClockDomain()
|
||||||
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
|
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
|
||||||
|
@ -88,7 +88,7 @@ class SYSCRG(Module, AutoCSR):
|
||||||
else:
|
else:
|
||||||
self.comb += self.clk_sw_fsm.i_clk_sw.eq(clk_sw)
|
self.comb += self.clk_sw_fsm.i_clk_sw.eq(clk_sw)
|
||||||
|
|
||||||
mmcm_locked = Signal()
|
self.mmcm_locked = Signal()
|
||||||
mmcm_sys = Signal()
|
mmcm_sys = Signal()
|
||||||
mmcm_sys4x = Signal()
|
mmcm_sys4x = Signal()
|
||||||
mmcm_sys5x = Signal()
|
mmcm_sys5x = Signal()
|
||||||
|
@ -96,7 +96,7 @@ class SYSCRG(Module, AutoCSR):
|
||||||
mmcm_fb_clk = Signal()
|
mmcm_fb_clk = Signal()
|
||||||
self.specials += [
|
self.specials += [
|
||||||
Instance("MMCME2_ADV",
|
Instance("MMCME2_ADV",
|
||||||
p_STARTUP_WAIT="FALSE", o_LOCKED=mmcm_locked,
|
p_STARTUP_WAIT="FALSE", o_LOCKED=self.mmcm_locked,
|
||||||
p_BANDWIDTH="HIGH",
|
p_BANDWIDTH="HIGH",
|
||||||
p_REF_JITTER1=0.001,
|
p_REF_JITTER1=0.001,
|
||||||
p_CLKIN1_PERIOD=period, i_CLKIN1=main_clk,
|
p_CLKIN1_PERIOD=period, i_CLKIN1=main_clk,
|
||||||
|
@ -125,8 +125,17 @@ class SYSCRG(Module, AutoCSR):
|
||||||
Instance("BUFG", i_I=mmcm_sys, o_O=self.cd_sys.clk),
|
Instance("BUFG", i_I=mmcm_sys, o_O=self.cd_sys.clk),
|
||||||
Instance("BUFG", i_I=mmcm_sys4x, o_O=self.cd_sys4x.clk),
|
Instance("BUFG", i_I=mmcm_sys4x, o_O=self.cd_sys4x.clk),
|
||||||
Instance("BUFG", i_I=mmcm_clk208, o_O=self.cd_clk200.clk),
|
Instance("BUFG", i_I=mmcm_clk208, o_O=self.cd_clk200.clk),
|
||||||
AsyncResetSynchronizer(self.cd_sys, ~mmcm_locked),
|
]
|
||||||
AsyncResetSynchronizer(self.cd_clk200, ~mmcm_locked),
|
|
||||||
|
if ext_async_rst is not None:
|
||||||
|
self.specials += [
|
||||||
|
AsyncResetSynchronizer(self.cd_sys, ~self.mmcm_locked | ext_async_rst),
|
||||||
|
AsyncResetSynchronizer(self.cd_clk200, ~self.mmcm_locked | ext_async_rst),
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
self.specials += [
|
||||||
|
AsyncResetSynchronizer(self.cd_sys, ~self.mmcm_locked),
|
||||||
|
AsyncResetSynchronizer(self.cd_clk200, ~self.mmcm_locked),
|
||||||
]
|
]
|
||||||
|
|
||||||
reset_counter = Signal(4, reset=15)
|
reset_counter = Signal(4, reset=15)
|
||||||
|
@ -139,4 +148,7 @@ class SYSCRG(Module, AutoCSR):
|
||||||
)
|
)
|
||||||
self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset)
|
self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset)
|
||||||
|
|
||||||
|
if clk_sw_status is None:
|
||||||
self.comb += self.current_clock.status.eq(self.clk_sw_fsm.o_clk_sw)
|
self.comb += self.current_clock.status.eq(self.clk_sw_fsm.o_clk_sw)
|
||||||
|
else:
|
||||||
|
self.comb += self.current_clock.status.eq(clk_sw_status)
|
||||||
|
|
|
@ -5,7 +5,7 @@ use io::proto::{ProtoRead, ProtoWrite};
|
||||||
// used by satellite -> master analyzer, subkernel exceptions
|
// used by satellite -> master analyzer, subkernel exceptions
|
||||||
pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
|
pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
|
||||||
// used by DDMA, subkernel program data (need to provide extra ID and destination)
|
// used by DDMA, subkernel program data (need to provide extra ID and destination)
|
||||||
pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4;
|
pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4;
|
||||||
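For reference, the size arithmetic behind the two constants above works out to 504 and 498 bytes once the extra source byte is reserved (it was 499 before). A minimal sketch restating that arithmetic, with illustrative constant names only:

// Sketch restating the payload-size arithmetic from the definitions above.
// SAT:    512 - 4 (CRC) - 1 (packet ID) - 1 (last/status) - 2 (length) = 504
// MASTER: 504 - 1 (source, new) - 1 (destination) - 4 (ID)             = 498 (was 499)
const SAT_PAYLOAD_SKETCH: usize = 512 - 4 - 1 - 1 - 2;
const MASTER_PAYLOAD_SKETCH: usize = SAT_PAYLOAD_SKETCH - 1 - 1 - 4;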
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
|
@ -19,6 +19,46 @@ impl From<IoError> for Error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(PartialEq, Clone, Copy, Debug)]
|
||||||
|
#[repr(u8)]
|
||||||
|
pub enum PayloadStatus {
|
||||||
|
Middle = 0,
|
||||||
|
First = 1,
|
||||||
|
Last = 2,
|
||||||
|
FirstAndLast = 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<u8> for PayloadStatus {
|
||||||
|
fn from(value: u8) -> PayloadStatus {
|
||||||
|
match value {
|
||||||
|
0 => PayloadStatus::Middle,
|
||||||
|
1 => PayloadStatus::First,
|
||||||
|
2 => PayloadStatus::Last,
|
||||||
|
3 => PayloadStatus::FirstAndLast,
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PayloadStatus {
|
||||||
|
pub fn is_first(self) -> bool {
|
||||||
|
self == PayloadStatus::First || self == PayloadStatus::FirstAndLast
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_last(self) -> bool {
|
||||||
|
self == PayloadStatus::Last || self == PayloadStatus::FirstAndLast
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn from_status(first: bool, last: bool) -> PayloadStatus {
|
||||||
|
match (first, last) {
|
||||||
|
(true, true) => PayloadStatus::FirstAndLast,
|
||||||
|
(true, false) => PayloadStatus::First,
|
||||||
|
(false, true) => PayloadStatus::Last,
|
||||||
|
(false, false) => PayloadStatus::Middle,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Debug)]
|
#[derive(PartialEq, Debug)]
|
||||||
pub enum Packet {
|
pub enum Packet {
|
||||||
EchoRequest,
|
EchoRequest,
|
||||||
|
@ -49,6 +89,8 @@ pub enum Packet {
|
||||||
RoutingSetRank {
|
RoutingSetRank {
|
||||||
rank: u8,
|
rank: u8,
|
||||||
},
|
},
|
||||||
|
RoutingRetrievePackets,
|
||||||
|
RoutingNoPackets,
|
||||||
RoutingAck,
|
RoutingAck,
|
||||||
|
|
||||||
MonitorRequest {
|
MonitorRequest {
|
||||||
|
@ -157,31 +199,40 @@ pub enum Packet {
|
||||||
},
|
},
|
||||||
|
|
||||||
DmaAddTraceRequest {
|
DmaAddTraceRequest {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
last: bool,
|
status: PayloadStatus,
|
||||||
length: u16,
|
length: u16,
|
||||||
trace: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
trace: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
||||||
},
|
},
|
||||||
DmaAddTraceReply {
|
DmaAddTraceReply {
|
||||||
|
source: u8,
|
||||||
|
destination: u8,
|
||||||
|
id: u32,
|
||||||
succeeded: bool,
|
succeeded: bool,
|
||||||
},
|
},
|
||||||
DmaRemoveTraceRequest {
|
DmaRemoveTraceRequest {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
},
|
},
|
||||||
DmaRemoveTraceReply {
|
DmaRemoveTraceReply {
|
||||||
|
destination: u8,
|
||||||
succeeded: bool,
|
succeeded: bool,
|
||||||
},
|
},
|
||||||
DmaPlaybackRequest {
|
DmaPlaybackRequest {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
timestamp: u64,
|
timestamp: u64,
|
||||||
},
|
},
|
||||||
DmaPlaybackReply {
|
DmaPlaybackReply {
|
||||||
|
destination: u8,
|
||||||
succeeded: bool,
|
succeeded: bool,
|
||||||
},
|
},
|
||||||
DmaPlaybackStatus {
|
DmaPlaybackStatus {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
error: u8,
|
error: u8,
|
||||||
|
@ -192,7 +243,7 @@ pub enum Packet {
|
||||||
SubkernelAddDataRequest {
|
SubkernelAddDataRequest {
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
last: bool,
|
status: PayloadStatus,
|
||||||
length: u16,
|
length: u16,
|
||||||
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
||||||
},
|
},
|
||||||
|
@ -200,22 +251,20 @@ pub enum Packet {
|
||||||
succeeded: bool,
|
succeeded: bool,
|
||||||
},
|
},
|
||||||
SubkernelLoadRunRequest {
|
SubkernelLoadRunRequest {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
run: bool,
|
run: bool,
|
||||||
},
|
},
|
||||||
SubkernelLoadRunReply {
|
SubkernelLoadRunReply {
|
||||||
succeeded: bool,
|
|
||||||
},
|
|
||||||
SubkernelStopRequest {
|
|
||||||
destination: u8,
|
destination: u8,
|
||||||
},
|
|
||||||
SubkernelStopReply {
|
|
||||||
succeeded: bool,
|
succeeded: bool,
|
||||||
},
|
},
|
||||||
SubkernelFinished {
|
SubkernelFinished {
|
||||||
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
with_exception: bool,
|
with_exception: bool,
|
||||||
|
exception_src: u8,
|
||||||
},
|
},
|
||||||
SubkernelExceptionRequest {
|
SubkernelExceptionRequest {
|
||||||
destination: u8,
|
destination: u8,
|
||||||
|
@ -226,9 +275,10 @@ pub enum Packet {
|
||||||
data: [u8; SAT_PAYLOAD_MAX_SIZE],
|
data: [u8; SAT_PAYLOAD_MAX_SIZE],
|
||||||
},
|
},
|
||||||
SubkernelMessage {
|
SubkernelMessage {
|
||||||
|
source: u8,
|
||||||
destination: u8,
|
destination: u8,
|
||||||
id: u32,
|
id: u32,
|
||||||
last: bool,
|
status: PayloadStatus,
|
||||||
length: u16,
|
length: u16,
|
||||||
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
data: [u8; MASTER_PAYLOAD_MAX_SIZE],
|
||||||
},
|
},
|
||||||
|
@ -275,6 +325,8 @@ impl Packet {
|
||||||
rank: reader.read_u8()?,
|
rank: reader.read_u8()?,
|
||||||
},
|
},
|
||||||
0x32 => Packet::RoutingAck,
|
0x32 => Packet::RoutingAck,
|
||||||
|
0x33 => Packet::RoutingRetrievePackets,
|
||||||
|
0x34 => Packet::RoutingNoPackets,
|
||||||
|
|
||||||
0x40 => Packet::MonitorRequest {
|
0x40 => Packet::MonitorRequest {
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
|
@ -389,39 +441,49 @@ impl Packet {
|
||||||
}
|
}
|
||||||
|
|
||||||
0xb0 => {
|
0xb0 => {
|
||||||
|
let source = reader.read_u8()?;
|
||||||
let destination = reader.read_u8()?;
|
let destination = reader.read_u8()?;
|
||||||
let id = reader.read_u32()?;
|
let id = reader.read_u32()?;
|
||||||
let last = reader.read_bool()?;
|
let status = reader.read_u8()?;
|
||||||
let length = reader.read_u16()?;
|
let length = reader.read_u16()?;
|
||||||
let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||||
reader.read_exact(&mut trace[0..length as usize])?;
|
reader.read_exact(&mut trace[0..length as usize])?;
|
||||||
Packet::DmaAddTraceRequest {
|
Packet::DmaAddTraceRequest {
|
||||||
|
source: source,
|
||||||
destination: destination,
|
destination: destination,
|
||||||
id: id,
|
id: id,
|
||||||
last: last,
|
status: PayloadStatus::from(status),
|
||||||
length: length as u16,
|
length: length as u16,
|
||||||
trace: trace,
|
trace: trace,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
0xb1 => Packet::DmaAddTraceReply {
|
0xb1 => Packet::DmaAddTraceReply {
|
||||||
|
source: reader.read_u8()?,
|
||||||
|
destination: reader.read_u8()?,
|
||||||
|
id: reader.read_u32()?,
|
||||||
succeeded: reader.read_bool()?,
|
succeeded: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xb2 => Packet::DmaRemoveTraceRequest {
|
0xb2 => Packet::DmaRemoveTraceRequest {
|
||||||
|
source: reader.read_u8()?,
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
id: reader.read_u32()?,
|
id: reader.read_u32()?,
|
||||||
},
|
},
|
||||||
0xb3 => Packet::DmaRemoveTraceReply {
|
0xb3 => Packet::DmaRemoveTraceReply {
|
||||||
|
destination: reader.read_u8()?,
|
||||||
succeeded: reader.read_bool()?,
|
succeeded: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xb4 => Packet::DmaPlaybackRequest {
|
0xb4 => Packet::DmaPlaybackRequest {
|
||||||
|
source: reader.read_u8()?,
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
id: reader.read_u32()?,
|
id: reader.read_u32()?,
|
||||||
timestamp: reader.read_u64()?,
|
timestamp: reader.read_u64()?,
|
||||||
},
|
},
|
||||||
0xb5 => Packet::DmaPlaybackReply {
|
0xb5 => Packet::DmaPlaybackReply {
|
||||||
|
destination: reader.read_u8()?,
|
||||||
succeeded: reader.read_bool()?,
|
succeeded: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xb6 => Packet::DmaPlaybackStatus {
|
0xb6 => Packet::DmaPlaybackStatus {
|
||||||
|
source: reader.read_u8()?,
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
id: reader.read_u32()?,
|
id: reader.read_u32()?,
|
||||||
error: reader.read_u8()?,
|
error: reader.read_u8()?,
|
||||||
|
@ -432,14 +494,14 @@ impl Packet {
|
||||||
0xc0 => {
|
0xc0 => {
|
||||||
let destination = reader.read_u8()?;
|
let destination = reader.read_u8()?;
|
||||||
let id = reader.read_u32()?;
|
let id = reader.read_u32()?;
|
||||||
let last = reader.read_bool()?;
|
let status = PayloadStatus::from(reader.read_u8()?);
|
||||||
let length = reader.read_u16()?;
|
let length = reader.read_u16()?;
|
||||||
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||||
reader.read_exact(&mut data[0..length as usize])?;
|
reader.read_exact(&mut data[0..length as usize])?;
|
||||||
Packet::SubkernelAddDataRequest {
|
Packet::SubkernelAddDataRequest {
|
||||||
destination: destination,
|
destination: destination,
|
||||||
id: id,
|
id: id,
|
||||||
last: last,
|
status: status,
|
||||||
length: length as u16,
|
length: length as u16,
|
||||||
data: data,
|
data: data,
|
||||||
}
|
}
|
||||||
|
@ -448,22 +510,20 @@ impl Packet {
|
||||||
succeeded: reader.read_bool()?,
|
succeeded: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xc4 => Packet::SubkernelLoadRunRequest {
|
0xc4 => Packet::SubkernelLoadRunRequest {
|
||||||
|
source: reader.read_u8()?,
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
id: reader.read_u32()?,
|
id: reader.read_u32()?,
|
||||||
run: reader.read_bool()?,
|
run: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xc5 => Packet::SubkernelLoadRunReply {
|
0xc5 => Packet::SubkernelLoadRunReply {
|
||||||
succeeded: reader.read_bool()?,
|
|
||||||
},
|
|
||||||
0xc6 => Packet::SubkernelStopRequest {
|
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
},
|
|
||||||
0xc7 => Packet::SubkernelStopReply {
|
|
||||||
succeeded: reader.read_bool()?,
|
succeeded: reader.read_bool()?,
|
||||||
},
|
},
|
||||||
0xc8 => Packet::SubkernelFinished {
|
0xc8 => Packet::SubkernelFinished {
|
||||||
|
destination: reader.read_u8()?,
|
||||||
id: reader.read_u32()?,
|
id: reader.read_u32()?,
|
||||||
with_exception: reader.read_bool()?,
|
with_exception: reader.read_bool()?,
|
||||||
|
exception_src: reader.read_u8()?,
|
||||||
},
|
},
|
||||||
0xc9 => Packet::SubkernelExceptionRequest {
|
0xc9 => Packet::SubkernelExceptionRequest {
|
||||||
destination: reader.read_u8()?,
|
destination: reader.read_u8()?,
|
||||||
|
@ -480,16 +540,18 @@ impl Packet {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
0xcb => {
|
0xcb => {
|
||||||
|
let source = reader.read_u8()?;
|
||||||
let destination = reader.read_u8()?;
|
let destination = reader.read_u8()?;
|
||||||
let id = reader.read_u32()?;
|
let id = reader.read_u32()?;
|
||||||
let last = reader.read_bool()?;
|
let status = reader.read_u8()?;
|
||||||
let length = reader.read_u16()?;
|
let length = reader.read_u16()?;
|
||||||
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||||
reader.read_exact(&mut data[0..length as usize])?;
|
reader.read_exact(&mut data[0..length as usize])?;
|
||||||
Packet::SubkernelMessage {
|
Packet::SubkernelMessage {
|
||||||
|
source: source,
|
||||||
destination: destination,
|
destination: destination,
|
||||||
id: id,
|
id: id,
|
||||||
last: last,
|
status: PayloadStatus::from(status),
|
||||||
length: length as u16,
|
length: length as u16,
|
||||||
data: data,
|
data: data,
|
||||||
}
|
}
|
||||||
|
@ -540,6 +602,8 @@ impl Packet {
|
||||||
writer.write_u8(rank)?;
|
writer.write_u8(rank)?;
|
||||||
}
|
}
|
||||||
Packet::RoutingAck => writer.write_u8(0x32)?,
|
Packet::RoutingAck => writer.write_u8(0x32)?,
|
||||||
|
Packet::RoutingRetrievePackets => writer.write_u8(0x33)?,
|
||||||
|
Packet::RoutingNoPackets => writer.write_u8(0x34)?,
|
||||||
|
|
||||||
Packet::MonitorRequest {
|
Packet::MonitorRequest {
|
||||||
destination,
|
destination,
|
||||||
|
@ -711,49 +775,69 @@ impl Packet {
|
||||||
}
|
}
|
||||||
|
|
||||||
Packet::DmaAddTraceRequest {
|
Packet::DmaAddTraceRequest {
|
||||||
|
source,
|
||||||
destination,
|
destination,
|
||||||
id,
|
id,
|
||||||
last,
|
status,
|
||||||
trace,
|
trace,
|
||||||
length,
|
length,
|
||||||
} => {
|
} => {
|
||||||
writer.write_u8(0xb0)?;
|
writer.write_u8(0xb0)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(last)?;
|
writer.write_u8(status as u8)?;
|
||||||
// trace may be broken down to fit within drtio aux memory limit
|
// trace may be broken down to fit within drtio aux memory limit
|
||||||
// will be reconstructed by satellite
|
// will be reconstructed by satellite
|
||||||
writer.write_u16(length)?;
|
writer.write_u16(length)?;
|
||||||
writer.write_all(&trace[0..length as usize])?;
|
writer.write_all(&trace[0..length as usize])?;
|
||||||
}
|
}
|
||||||
Packet::DmaAddTraceReply { succeeded } => {
|
Packet::DmaAddTraceReply {
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
id,
|
||||||
|
succeeded,
|
||||||
|
} => {
|
||||||
writer.write_u8(0xb1)?;
|
writer.write_u8(0xb1)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
}
|
}
|
||||||
Packet::DmaRemoveTraceRequest { destination, id } => {
|
Packet::DmaRemoveTraceRequest {
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
id,
|
||||||
|
} => {
|
||||||
writer.write_u8(0xb2)?;
|
writer.write_u8(0xb2)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
}
|
}
|
||||||
Packet::DmaRemoveTraceReply { succeeded } => {
|
Packet::DmaRemoveTraceReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xb3)?;
|
writer.write_u8(0xb3)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
}
|
}
|
||||||
Packet::DmaPlaybackRequest {
|
Packet::DmaPlaybackRequest {
|
||||||
|
source,
|
||||||
destination,
|
destination,
|
||||||
id,
|
id,
|
||||||
timestamp,
|
timestamp,
|
||||||
} => {
|
} => {
|
||||||
writer.write_u8(0xb4)?;
|
writer.write_u8(0xb4)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_u64(timestamp)?;
|
writer.write_u64(timestamp)?;
|
||||||
}
|
}
|
||||||
Packet::DmaPlaybackReply { succeeded } => {
|
Packet::DmaPlaybackReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xb5)?;
|
writer.write_u8(0xb5)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
}
|
}
|
||||||
Packet::DmaPlaybackStatus {
|
Packet::DmaPlaybackStatus {
|
||||||
|
source,
|
||||||
destination,
|
destination,
|
||||||
id,
|
id,
|
||||||
error,
|
error,
|
||||||
|
@ -761,6 +845,7 @@ impl Packet {
|
||||||
timestamp,
|
timestamp,
|
||||||
} => {
|
} => {
|
||||||
writer.write_u8(0xb6)?;
|
writer.write_u8(0xb6)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_u8(error)?;
|
writer.write_u8(error)?;
|
||||||
|
@ -771,14 +856,14 @@ impl Packet {
|
||||||
Packet::SubkernelAddDataRequest {
|
Packet::SubkernelAddDataRequest {
|
||||||
destination,
|
destination,
|
||||||
id,
|
id,
|
||||||
last,
|
status,
|
||||||
data,
|
data,
|
||||||
length,
|
length,
|
||||||
} => {
|
} => {
|
||||||
writer.write_u8(0xc0)?;
|
writer.write_u8(0xc0)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(last)?;
|
writer.write_u8(status as u8)?;
|
||||||
writer.write_u16(length)?;
|
writer.write_u16(length)?;
|
||||||
writer.write_all(&data[0..length as usize])?;
|
writer.write_all(&data[0..length as usize])?;
|
||||||
}
|
}
|
||||||
|
@ -786,28 +871,34 @@ impl Packet {
|
||||||
writer.write_u8(0xc1)?;
|
writer.write_u8(0xc1)?;
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
}
|
}
|
||||||
Packet::SubkernelLoadRunRequest { destination, id, run } => {
|
Packet::SubkernelLoadRunRequest {
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
id,
|
||||||
|
run,
|
||||||
|
} => {
|
||||||
writer.write_u8(0xc4)?;
|
writer.write_u8(0xc4)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(run)?;
|
writer.write_bool(run)?;
|
||||||
}
|
}
|
||||||
Packet::SubkernelLoadRunReply { succeeded } => {
|
Packet::SubkernelLoadRunReply { destination, succeeded } => {
|
||||||
writer.write_u8(0xc5)?;
|
writer.write_u8(0xc5)?;
|
||||||
writer.write_bool(succeeded)?;
|
|
||||||
}
|
|
||||||
Packet::SubkernelStopRequest { destination } => {
|
|
||||||
writer.write_u8(0xc6)?;
|
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
}
|
|
||||||
Packet::SubkernelStopReply { succeeded } => {
|
|
||||||
writer.write_u8(0xc7)?;
|
|
||||||
writer.write_bool(succeeded)?;
|
writer.write_bool(succeeded)?;
|
||||||
}
|
}
|
||||||
Packet::SubkernelFinished { id, with_exception } => {
|
Packet::SubkernelFinished {
|
||||||
|
destination,
|
||||||
|
id,
|
||||||
|
with_exception,
|
||||||
|
exception_src,
|
||||||
|
} => {
|
||||||
writer.write_u8(0xc8)?;
|
writer.write_u8(0xc8)?;
|
||||||
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(with_exception)?;
|
writer.write_bool(with_exception)?;
|
||||||
|
writer.write_u8(exception_src)?;
|
||||||
}
|
}
|
||||||
Packet::SubkernelExceptionRequest { destination } => {
|
Packet::SubkernelExceptionRequest { destination } => {
|
||||||
writer.write_u8(0xc9)?;
|
writer.write_u8(0xc9)?;
|
||||||
|
@ -820,16 +911,18 @@ impl Packet {
|
||||||
writer.write_all(&data[0..length as usize])?;
|
writer.write_all(&data[0..length as usize])?;
|
||||||
}
|
}
|
||||||
Packet::SubkernelMessage {
|
Packet::SubkernelMessage {
|
||||||
|
source,
|
||||||
destination,
|
destination,
|
||||||
id,
|
id,
|
||||||
last,
|
status,
|
||||||
data,
|
data,
|
||||||
length,
|
length,
|
||||||
} => {
|
} => {
|
||||||
writer.write_u8(0xcb)?;
|
writer.write_u8(0xcb)?;
|
||||||
|
writer.write_u8(source)?;
|
||||||
writer.write_u8(destination)?;
|
writer.write_u8(destination)?;
|
||||||
writer.write_u32(id)?;
|
writer.write_u32(id)?;
|
||||||
writer.write_bool(last)?;
|
writer.write_u8(status as u8)?;
|
||||||
writer.write_u16(length)?;
|
writer.write_u16(length)?;
|
||||||
writer.write_all(&data[0..length as usize])?;
|
writer.write_all(&data[0..length as usize])?;
|
||||||
}
|
}
|
||||||
|
@ -840,4 +933,39 @@ impl Packet {
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn routable_destination(&self) -> Option<u8> {
|
||||||
|
// only for packets that could be re-routed, not only forwarded
|
||||||
|
match self {
|
||||||
|
Packet::DmaAddTraceRequest { destination, .. } => Some(*destination),
|
||||||
|
Packet::DmaAddTraceReply { destination, .. } => Some(*destination),
|
||||||
|
Packet::DmaRemoveTraceRequest { destination, .. } => Some(*destination),
|
||||||
|
Packet::DmaRemoveTraceReply { destination, .. } => Some(*destination),
|
||||||
|
Packet::DmaPlaybackRequest { destination, .. } => Some(*destination),
|
||||||
|
Packet::DmaPlaybackReply { destination, .. } => Some(*destination),
|
||||||
|
Packet::SubkernelLoadRunRequest { destination, .. } => Some(*destination),
|
||||||
|
Packet::SubkernelLoadRunReply { destination, .. } => Some(*destination),
|
||||||
|
Packet::SubkernelMessage { destination, .. } => Some(*destination),
|
||||||
|
Packet::SubkernelMessageAck { destination } => Some(*destination),
|
||||||
|
Packet::DmaPlaybackStatus { destination, .. } => Some(*destination),
|
||||||
|
Packet::SubkernelFinished { destination, .. } => Some(*destination),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn expects_response(&self) -> bool {
|
||||||
|
// returns true if the routable packet should elicit a response
|
||||||
|
// e.g. reply, ACK packets end a conversation,
|
||||||
|
// and firmware should not wait for response
|
||||||
|
match self {
|
||||||
|
Packet::DmaAddTraceReply { .. }
|
||||||
|
| Packet::DmaRemoveTraceReply { .. }
|
||||||
|
| Packet::DmaPlaybackReply { .. }
|
||||||
|
| Packet::SubkernelLoadRunReply { .. }
|
||||||
|
| Packet::SubkernelMessageAck { .. }
|
||||||
|
| Packet::DmaPlaybackStatus { .. }
|
||||||
|
| Packet::SubkernelFinished { .. } => false,
|
||||||
|
_ => true,
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
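A minimal sketch of how the two helpers added above could be combined by routing code; the function below is illustrative and not part of this branch.

// Illustrative only: pick a routing destination for an incoming aux packet and
// report whether the sender should still expect a reply after it is forwarded.
fn route_decision(packet: &Packet) -> Option<(u8, bool)> {
    packet
        .routable_destination()
        .map(|destination| (destination, packet.expects_response()))
}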
|
|
|
@ -19,7 +19,7 @@ pub mod drtioaux;
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
pub mod drtioaux_async;
|
pub mod drtioaux_async;
|
||||||
pub mod drtioaux_proto;
|
pub mod drtioaux_proto;
|
||||||
#[cfg(feature = "target_kasli_soc")]
|
#[cfg(all(feature = "target_kasli_soc", has_drtio))]
|
||||||
pub mod io_expander;
|
pub mod io_expander;
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
|
|
@ -170,6 +170,7 @@ pub extern "C" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
|
||||||
csr::rtio_dma::base_address_write(ptr as u32);
|
csr::rtio_dma::base_address_write(ptr as u32);
|
||||||
csr::rtio_dma::time_offset_write(timestamp as u64);
|
csr::rtio_dma::time_offset_write(timestamp as u64);
|
||||||
|
|
||||||
|
let old_cri_master = csr::cri_con::selected_read();
|
||||||
csr::cri_con::selected_write(1);
|
csr::cri_con::selected_write(1);
|
||||||
csr::rtio_dma::enable_write(1);
|
csr::rtio_dma::enable_write(1);
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
@ -183,7 +184,7 @@ pub extern "C" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
while csr::rtio_dma::enable_read() != 0 {}
|
while csr::rtio_dma::enable_read() != 0 {}
|
||||||
csr::cri_con::selected_write(0);
|
csr::cri_con::selected_write(old_cri_master);
|
||||||
|
|
||||||
let error = csr::rtio_dma::error_read();
|
let error = csr::rtio_dma::error_read();
|
||||||
if error != 0 {
|
if error != 0 {
|
||||||
|
|
|
@ -77,6 +77,7 @@ pub enum Message {
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelLoadRunRequest {
|
SubkernelLoadRunRequest {
|
||||||
id: u32,
|
id: u32,
|
||||||
|
destination: u8,
|
||||||
run: bool,
|
run: bool,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
@ -86,7 +87,7 @@ pub enum Message {
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelAwaitFinishRequest {
|
SubkernelAwaitFinishRequest {
|
||||||
id: u32,
|
id: u32,
|
||||||
timeout: u64,
|
timeout: i64,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelAwaitFinishReply {
|
SubkernelAwaitFinishReply {
|
||||||
|
@ -95,14 +96,15 @@ pub enum Message {
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgSend {
|
SubkernelMsgSend {
|
||||||
id: u32,
|
id: u32,
|
||||||
|
destination: Option<u8>,
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgSent,
|
SubkernelMsgSent,
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgRecvRequest {
|
SubkernelMsgRecvRequest {
|
||||||
id: u32,
|
id: i32,
|
||||||
timeout: u64,
|
timeout: i64,
|
||||||
tags: Vec<u8>,
|
tags: Vec<u8>,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
|
|
@ -5,12 +5,16 @@ use cslice::CSlice;
|
||||||
use super::{Message, SubkernelStatus, KERNEL_CHANNEL_0TO1, KERNEL_CHANNEL_1TO0};
|
use super::{Message, SubkernelStatus, KERNEL_CHANNEL_0TO1, KERNEL_CHANNEL_1TO0};
|
||||||
use crate::{artiq_raise, rpc::send_args};
|
use crate::{artiq_raise, rpc::send_args};
|
||||||
|
|
||||||
pub extern "C" fn load_run(id: u32, run: bool) {
|
pub extern "C" fn load_run(id: u32, destination: u8, run: bool) {
|
||||||
unsafe {
|
unsafe {
|
||||||
KERNEL_CHANNEL_1TO0
|
KERNEL_CHANNEL_1TO0
|
||||||
.as_mut()
|
.as_mut()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.send(Message::SubkernelLoadRunRequest { id: id, run: run });
|
.send(Message::SubkernelLoadRunRequest {
|
||||||
|
id: id,
|
||||||
|
destination: destination,
|
||||||
|
run: run,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
match unsafe { KERNEL_CHANNEL_0TO1.as_mut().unwrap() }.recv() {
|
match unsafe { KERNEL_CHANNEL_0TO1.as_mut().unwrap() }.recv() {
|
||||||
Message::SubkernelLoadRunReply { succeeded: true } => (),
|
Message::SubkernelLoadRunReply { succeeded: true } => (),
|
||||||
|
@ -21,7 +25,7 @@ pub extern "C" fn load_run(id: u32, run: bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub extern "C" fn await_finish(id: u32, timeout: u64) {
|
pub extern "C" fn await_finish(id: u32, timeout: i64) {
|
||||||
unsafe {
|
unsafe {
|
||||||
KERNEL_CHANNEL_1TO0
|
KERNEL_CHANNEL_1TO0
|
||||||
.as_mut()
|
.as_mut()
|
||||||
|
@ -51,7 +55,14 @@ pub extern "C" fn await_finish(id: u32, timeout: u64) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub extern "C" fn send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *const *const ()) {
|
pub extern "C" fn send_message(
|
||||||
|
id: u32,
|
||||||
|
is_return: bool,
|
||||||
|
destination: u8,
|
||||||
|
count: u8,
|
||||||
|
tag: &CSlice<u8>,
|
||||||
|
data: *const *const (),
|
||||||
|
) {
|
||||||
let mut buffer = Vec::<u8>::new();
|
let mut buffer = Vec::<u8>::new();
|
||||||
send_args(&mut buffer, 0, tag.as_ref(), data, false).expect("RPC encoding failed");
|
send_args(&mut buffer, 0, tag.as_ref(), data, false).expect("RPC encoding failed");
|
||||||
// overwrite service tag, include how many tags are in the message
|
// overwrite service tag, include how many tags are in the message
|
||||||
|
@ -59,6 +70,7 @@ pub extern "C" fn send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *cons
|
||||||
unsafe {
|
unsafe {
|
||||||
KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::SubkernelMsgSend {
|
KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::SubkernelMsgSend {
|
||||||
id: id,
|
id: id,
|
||||||
|
destination: if is_return { None } else { Some(destination) },
|
||||||
data: buffer[3..].to_vec(),
|
data: buffer[3..].to_vec(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -68,7 +80,7 @@ pub extern "C" fn send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *cons
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub extern "C" fn await_message(id: u32, timeout: u64, tags: &CSlice<u8>, min: u8, max: u8) {
|
pub extern "C" fn await_message(id: i32, timeout: i64, tags: &CSlice<u8>, min: u8, max: u8) {
|
||||||
unsafe {
|
unsafe {
|
||||||
KERNEL_CHANNEL_1TO0
|
KERNEL_CHANNEL_1TO0
|
||||||
.as_mut()
|
.as_mut()
|
||||||
|
|
|
@ -41,3 +41,7 @@ libc = { path = "../libc" }
|
||||||
io = { path = "../libio", features = ["alloc"] }
|
io = { path = "../libio", features = ["alloc"] }
|
||||||
ksupport = { path = "../libksupport" }
|
ksupport = { path = "../libksupport" }
|
||||||
libboard_artiq = { path = "../libboard_artiq" }
|
libboard_artiq = { path = "../libboard_artiq" }
|
||||||
|
|
||||||
|
[dependencies.tar-no-std]
|
||||||
|
git = "https://git.m-labs.hk/M-Labs/tar-no-std"
|
||||||
|
rev = "2ab6dc5"
|
|
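For context, this dependency is what handle_flash_kernel (further down in this branch) uses to unpack multi-kernel archives. The sketch below only exercises the TarArchiveRef calls that appear in that code (new, entries, filename, data); the surrounding function is made up.

// Illustrative only: list the entries of an in-memory tar archive the way the
// new flash-kernel path does, without acting on them.
fn list_archive(buffer: &[u8]) -> Vec<(String, usize)> {
    let archive = tar_no_std::TarArchiveRef::new(buffer);
    let mut names = Vec::new();
    for entry in archive.entries() {
        names.push((entry.filename().as_str().to_string(), entry.data().len()));
    }
    names
}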
@ -3,17 +3,22 @@ use core::{cell::RefCell, fmt, slice, str};
|
||||||
|
|
||||||
use core_io::Error as IoError;
|
use core_io::Error as IoError;
|
||||||
use cslice::CSlice;
|
use cslice::CSlice;
|
||||||
|
use dyld::elf;
|
||||||
use futures::{future::FutureExt, select_biased};
|
use futures::{future::FutureExt, select_biased};
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use io::Cursor;
|
use io::Cursor;
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use ksupport::rpc;
|
use ksupport::rpc;
|
||||||
use ksupport::{kernel, resolve_channel_name};
|
use ksupport::{kernel, resolve_channel_name};
|
||||||
|
#[cfg(has_drtio)]
|
||||||
|
use libasync::delay;
|
||||||
use libasync::{smoltcp::{Sockets, TcpStream},
|
use libasync::{smoltcp::{Sockets, TcpStream},
|
||||||
task};
|
task};
|
||||||
use libboard_artiq::drtio_routing;
|
use libboard_artiq::drtio_routing;
|
||||||
#[cfg(feature = "target_kasli_soc")]
|
#[cfg(feature = "target_kasli_soc")]
|
||||||
use libboard_zynq::error_led::ErrorLED;
|
use libboard_zynq::error_led::ErrorLED;
|
||||||
|
#[cfg(has_drtio)]
|
||||||
|
use libboard_zynq::time::Milliseconds;
|
||||||
use libboard_zynq::{self as zynq,
|
use libboard_zynq::{self as zynq,
|
||||||
smoltcp::{self,
|
smoltcp::{self,
|
||||||
iface::{EthernetInterfaceBuilder, NeighborCache},
|
iface::{EthernetInterfaceBuilder, NeighborCache},
|
||||||
|
@ -27,6 +32,8 @@ use libcortex_a9::{mutex::Mutex,
|
||||||
use log::{error, info, warn};
|
use log::{error, info, warn};
|
||||||
use num_derive::{FromPrimitive, ToPrimitive};
|
use num_derive::{FromPrimitive, ToPrimitive};
|
||||||
use num_traits::{FromPrimitive, ToPrimitive};
|
use num_traits::{FromPrimitive, ToPrimitive};
|
||||||
|
#[cfg(has_drtio)]
|
||||||
|
use tar_no_std::TarArchiveRef;
|
||||||
|
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use crate::pl;
|
use crate::pl;
|
||||||
|
@ -43,6 +50,8 @@ pub enum Error {
|
||||||
BufferExhausted,
|
BufferExhausted,
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelError(subkernel::Error),
|
SubkernelError(subkernel::Error),
|
||||||
|
#[cfg(has_drtio)]
|
||||||
|
DestinationDown,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = core::result::Result<T, Error>;
|
pub type Result<T> = core::result::Result<T, Error>;
|
||||||
|
@ -57,6 +66,8 @@ impl fmt::Display for Error {
|
||||||
Error::BufferExhausted => write!(f, "buffer exhausted"),
|
Error::BufferExhausted => write!(f, "buffer exhausted"),
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
Error::SubkernelError(error) => write!(f, "subkernel error: {:?}", error),
|
Error::SubkernelError(error) => write!(f, "subkernel error: {:?}", error),
|
||||||
|
#[cfg(has_drtio)]
|
||||||
|
Error::DestinationDown => write!(f, "subkernel destination down"),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -390,7 +401,11 @@ async fn handle_run_kernel(
|
||||||
control.borrow_mut().tx.async_send(reply).await;
|
control.borrow_mut().tx.async_send(reply).await;
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelLoadRunRequest { id, run } => {
|
kernel::Message::SubkernelLoadRunRequest {
|
||||||
|
id,
|
||||||
|
destination: _,
|
||||||
|
run,
|
||||||
|
} => {
|
||||||
let succeeded = match subkernel::load(aux_mutex, routing_table, timer, id, run).await {
|
let succeeded = match subkernel::load(aux_mutex, routing_table, timer, id, run).await {
|
||||||
Ok(()) => true,
|
Ok(()) => true,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -436,8 +451,9 @@ async fn handle_run_kernel(
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelMsgSend { id, data } => {
|
kernel::Message::SubkernelMsgSend { id, destination, data } => {
|
||||||
let res = subkernel::message_send(aux_mutex, routing_table, timer, id, data).await;
|
let res =
|
||||||
|
subkernel::message_send(aux_mutex, routing_table, timer, id, destination.unwrap(), data).await;
|
||||||
match res {
|
match res {
|
||||||
Ok(_) => (),
|
Ok(_) => (),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -452,12 +468,26 @@ async fn handle_run_kernel(
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelMsgRecvRequest { id, timeout, tags } => {
|
kernel::Message::SubkernelMsgRecvRequest { id, timeout, tags } => {
|
||||||
let message_received = subkernel::message_await(id, timeout, timer).await;
|
let message_received = subkernel::message_await(id as u32, timeout, timer).await;
|
||||||
let (status, count) = match message_received {
|
let (status, count) = match message_received {
|
||||||
Ok(ref message) => (kernel::SubkernelStatus::NoError, message.count),
|
Ok(ref message) => (kernel::SubkernelStatus::NoError, message.count),
|
||||||
Err(SubkernelError::Timeout) => (kernel::SubkernelStatus::Timeout, 0),
|
Err(SubkernelError::Timeout) => (kernel::SubkernelStatus::Timeout, 0),
|
||||||
Err(SubkernelError::IncorrectState) => (kernel::SubkernelStatus::IncorrectState, 0),
|
Err(SubkernelError::IncorrectState) => (kernel::SubkernelStatus::IncorrectState, 0),
|
||||||
Err(SubkernelError::CommLost) => (kernel::SubkernelStatus::CommLost, 0),
|
Err(SubkernelError::CommLost) => (kernel::SubkernelStatus::CommLost, 0),
|
||||||
|
Err(SubkernelError::SubkernelException) => {
|
||||||
|
error!("Exception in subkernel");
|
||||||
|
// just retrieve the exception
|
||||||
|
let status = subkernel::await_finish(aux_mutex, routing_table, timer, id as u32, timeout)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
match stream {
|
||||||
|
None => (),
|
||||||
|
Some(stream) => {
|
||||||
|
write_chunk(stream, &status.exception.unwrap()).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
(kernel::SubkernelStatus::OtherError, 0)
|
||||||
|
}
|
||||||
Err(_) => (kernel::SubkernelStatus::OtherError, 0),
|
Err(_) => (kernel::SubkernelStatus::OtherError, 0),
|
||||||
};
|
};
|
||||||
control
|
control
|
||||||
|
@@ -524,6 +554,56 @@ async fn handle_run_kernel(
     Ok(())
 }
 
+async fn handle_flash_kernel(
+    buffer: &Vec<u8>,
+    control: &Rc<RefCell<kernel::Control>>,
+    _up_destinations: &Rc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
+    _aux_mutex: &Rc<Mutex<bool>>,
+    _routing_table: &drtio_routing::RoutingTable,
+    _timer: GlobalTimer,
+) -> Result<()> {
+    if buffer[0] == elf::ELFMAG0 && buffer[1] == elf::ELFMAG1 && buffer[2] == elf::ELFMAG2 && buffer[3] == elf::ELFMAG3
+    {
+        // assume ELF file, proceed as before
+        load_kernel(buffer, control, None).await
+    } else {
+        #[cfg(has_drtio)]
+        {
+            let archive = TarArchiveRef::new(buffer.as_ref());
+            let entries = archive.entries();
+            let mut main_lib: Vec<u8> = Vec::new();
+            for entry in entries {
+                if entry.filename().as_str() == "main.elf" {
+                    main_lib = entry.data().to_vec();
+                } else {
+                    // subkernel filename must be in format:
+                    // "<subkernel id> <destination>.elf"
+                    let filename = entry.filename();
+                    let mut iter = filename.as_str().split_whitespace();
+                    let sid: u32 = iter.next().unwrap().parse().unwrap();
+                    let dest: u8 = iter.next().unwrap().strip_suffix(".elf").unwrap().parse().unwrap();
+                    let up = _up_destinations.borrow()[dest as usize];
+                    if up {
+                        let subkernel_lib = entry.data().to_vec();
+                        subkernel::add_subkernel(sid, dest, subkernel_lib).await;
+                        match subkernel::upload(_aux_mutex, _routing_table, _timer, sid).await {
+                            Ok(_) => (),
+                            Err(_) => return Err(Error::UnexpectedPattern),
+                        }
+                    } else {
+                        return Err(Error::DestinationDown);
+                    }
+                }
+            }
+            load_kernel(&main_lib, control, None).await
+        }
+        #[cfg(not(has_drtio))]
+        {
+            panic!("multi-kernel libraries are not supported in standalone systems");
+        }
+    }
+}
+
 async fn load_kernel(
     buffer: &Vec<u8>,
     control: &Rc<RefCell<kernel::Control>>,
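The tar entries other than "main.elf" carry the subkernel ID and destination in the filename itself, as the comment in the hunk above notes. A minimal standalone sketch of that naming convention (the helper name and the Option-based error handling here are illustrative, not the firmware's actual code):

```rust
// Illustrative sketch of the "<subkernel id> <destination>.elf" naming
// convention used for subkernel entries in the flashed tar archive.
// `parse_subkernel_filename` is a hypothetical helper, not firmware code.
fn parse_subkernel_filename(filename: &str) -> Option<(u32, u8)> {
    let mut iter = filename.split_whitespace();
    let sid: u32 = iter.next()?.parse().ok()?;
    let dest: u8 = iter.next()?.strip_suffix(".elf")?.parse().ok()?;
    Some((sid, dest))
}

fn main() {
    // "3 2.elf" -> subkernel id 3, to be uploaded to destination 2
    assert_eq!(parse_subkernel_filename("3 2.elf"), Some((3, 2)));
    // "main.elf" does not match the convention and is handled separately
    assert_eq!(parse_subkernel_filename("main.elf"), None);
    println!("filename convention parsed as expected");
}
```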
@@ -638,6 +718,32 @@ async fn handle_connection(
     }
 }
 
+async fn flash_and_run_idle_kernel(
+    buffer: &Vec<u8>,
+    control: &Rc<RefCell<kernel::Control>>,
+    up_destinations: &Rc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
+    aux_mutex: &Rc<Mutex<bool>>,
+    routing_table: &drtio_routing::RoutingTable,
+    timer: GlobalTimer,
+) {
+    info!("Loading idle kernel");
+    let res = handle_flash_kernel(buffer, control, up_destinations, aux_mutex, routing_table, timer)
+        .await;
+    match res {
+        #[cfg(has_drtio)]
+        Err(Error::DestinationDown) => {
+            let mut countdown = timer.countdown();
+            delay(&mut countdown, Milliseconds(500)).await;
+        }
+        Err(_) => warn!("error loading idle kernel"),
+        _ => (),
+    }
+    info!("Running idle kernel");
+    let _ = handle_run_kernel(None, control, up_destinations, aux_mutex, routing_table, timer)
+        .await.map_err(|_| warn!("error running idle kernel"));
+    info!("Idle kernel terminated");
+}
+
 pub fn main(timer: GlobalTimer, cfg: Config) {
     let net_addresses = net_settings::get_addresses(&cfg);
     info!("network addresses: {}", net_addresses);
@@ -679,7 +785,6 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
 
     Sockets::init(32);
 
-    // before, mutex was on io, but now that io isn't used...?
     let aux_mutex: Rc<Mutex<bool>> = Rc::new(Mutex::new(false));
     #[cfg(has_drtio)]
     let drtio_routing_table = Rc::new(RefCell::new(drtio_routing::config_routing_table(
@@ -702,9 +807,16 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
     let idle_kernel = Rc::new(cfg.read("idle_kernel").ok());
     if let Ok(buffer) = cfg.read("startup_kernel") {
         info!("Loading startup kernel...");
-        if let Ok(()) = task::block_on(load_kernel(&buffer, &control, None)) {
-            info!("Starting startup kernel...");
         let routing_table = drtio_routing_table.borrow();
+        if let Ok(()) = task::block_on(handle_flash_kernel(
+            &buffer,
+            &control,
+            &up_destinations,
+            &aux_mutex,
+            &routing_table,
+            timer,
+        )) {
+            info!("Starting startup kernel...");
             let _ = task::block_on(handle_run_kernel(
                 None,
                 &control,
@@ -722,8 +834,30 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
     mgmt::start(cfg);
 
     task::spawn(async move {
-        let connection = Rc::new(Semaphore::new(1, 1));
+        let connection = Rc::new(Semaphore::new(0, 1));
         let terminate = Rc::new(Semaphore::new(0, 1));
+        {
+            let control = control.clone();
+            let idle_kernel = idle_kernel.clone();
+            let connection = connection.clone();
+            let terminate = terminate.clone();
+            let up_destinations = up_destinations.clone();
+            let aux_mutex = aux_mutex.clone();
+            let routing_table = drtio_routing_table.clone();
+            task::spawn(async move {
+                let routing_table = routing_table.borrow();
+                select_biased! {
+                    _ = (async {
+                        if let Some(buffer) = &*idle_kernel {
+                            flash_and_run_idle_kernel(&buffer, &control, &up_destinations, &aux_mutex, &routing_table, timer).await;
+                        }
+                    }).fuse() => (),
+                    _ = terminate.async_wait().fuse() => ()
+                }
+                connection.signal();
+            });
+        }
+
         loop {
             let mut stream = TcpStream::accept(1381, 0x10_000, 0x10_000).await.unwrap();
 
@@ -751,13 +885,7 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
                         .await
                         .map_err(|e| warn!("connection terminated: {}", e));
                     if let Some(buffer) = &*idle_kernel {
-                        info!("Loading idle kernel");
-                        let _ = load_kernel(&buffer, &control, None)
-                            .await.map_err(|_| warn!("error loading idle kernel"));
-                        info!("Running idle kernel");
-                        let _ = handle_run_kernel(None, &control, &up_destinations, &aux_mutex, &routing_table, timer)
-                            .await.map_err(|_| warn!("error running idle kernel"));
-                        info!("Idle kernel terminated");
+                        flash_and_run_idle_kernel(&buffer, &control, &up_destinations, &aux_mutex, &routing_table, timer).await;
                     }
                 }).fuse() => (),
                 _ = terminate.async_wait().fuse() => ()
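The startup-time idle kernel above is raced against the terminate semaphore with select_biased!, and only when that task finishes is the connection slot signalled. A small standalone sketch of the same pattern using the futures crate (0.3) with a blocking executor purely for illustration; the stand-in futures and names below are not the firmware's own APIs:

```rust
// Sketch of the select_biased! pattern: run the idle-kernel future until
// either it finishes or a terminate signal fires, then release the
// connection slot. Requires the `futures` crate; everything here is a
// simplified stand-in for the libasync-based firmware code.
use futures::{executor::block_on, future::FutureExt, pin_mut, select_biased};

async fn run_idle_kernel() {
    // stand-in for flash_and_run_idle_kernel(...)
    println!("idle kernel running");
}

async fn wait_terminate() {
    // stand-in for terminate.async_wait(); never resolves in this sketch
    futures::future::pending::<()>().await;
}

fn main() {
    block_on(async {
        let idle = run_idle_kernel().fuse();
        let term = wait_terminate().fuse();
        pin_mut!(idle, term);
        select_biased! {
            _ = idle => println!("idle kernel terminated"),
            _ = term => println!("terminate requested"),
        }
        // here the firmware would call connection.signal()
    });
}
```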
@@ -8,14 +8,14 @@
 #[macro_use]
 extern crate alloc;
 
-#[cfg(feature = "target_kasli_soc")]
+#[cfg(all(feature = "target_kasli_soc", has_drtio))]
 use core::cell::RefCell;
 
 use ksupport;
 use libasync::task;
 #[cfg(has_drtio_eem)]
 use libboard_artiq::drtio_eem;
-#[cfg(feature = "target_kasli_soc")]
+#[cfg(all(feature = "target_kasli_soc", has_drtio))]
 use libboard_artiq::io_expander;
 use libboard_artiq::{identifier_read, logger, pl};
 use libboard_zynq::{gic, mpcore, timer::GlobalTimer};
@@ -38,7 +38,7 @@ mod rtio_mgt;
 #[cfg(has_drtio)]
 mod subkernel;
 
-#[cfg(feature = "target_kasli_soc")]
+#[cfg(all(feature = "target_kasli_soc", has_drtio))]
 async fn io_expanders_service(
     i2c_bus: RefCell<&mut libboard_zynq::i2c::I2c>,
     io_expander0: RefCell<io_expander::IoExpander>,
@@ -93,15 +93,11 @@ pub fn main_core0() {
     info!("gateware ident: {}", identifier_read(&mut [0; 64]));
 
     ksupport::i2c::init();
-    #[cfg(feature = "target_kasli_soc")]
-    let i2c_bus = unsafe { (ksupport::i2c::I2C_BUS).as_mut().unwrap() };
-
-    #[cfg(feature = "target_kasli_soc")]
-    let (mut io_expander0, mut io_expander1);
-    #[cfg(feature = "target_kasli_soc")]
+    #[cfg(all(feature = "target_kasli_soc", has_drtio))]
     {
-        io_expander0 = io_expander::IoExpander::new(i2c_bus, 0).unwrap();
-        io_expander1 = io_expander::IoExpander::new(i2c_bus, 1).unwrap();
+        let i2c_bus = unsafe { (ksupport::i2c::I2C_BUS).as_mut().unwrap() };
+        let mut io_expander0 = io_expander::IoExpander::new(i2c_bus, 0).unwrap();
+        let mut io_expander1 = io_expander::IoExpander::new(i2c_bus, 1).unwrap();
         io_expander0
             .init(i2c_bus)
             .expect("I2C I/O expander #0 initialization failed");
@@ -115,6 +111,11 @@ pub fn main_core0() {
         io_expander1.set(1, 1, false);
         io_expander0.service(i2c_bus).unwrap();
         io_expander1.service(i2c_bus).unwrap();
+        task::spawn(io_expanders_service(
+            RefCell::new(i2c_bus),
+            RefCell::new(io_expander0),
+            RefCell::new(io_expander1),
+        ));
     }
 
     let cfg = match Config::new() {
@@ -135,11 +136,5 @@ pub fn main_core0() {
 
     task::spawn(ksupport::report_async_rtio_errors());
 
-    #[cfg(feature = "target_kasli_soc")]
-    task::spawn(io_expanders_service(
-        RefCell::new(i2c_bus),
-        RefCell::new(io_expander0),
-        RefCell::new(io_expander1),
-    ));
     comms::main(timer, cfg);
 }
@@ -142,9 +142,9 @@ pub mod remote_dma {
         }
     }
 
-    pub async fn playback_done(&mut self, destination: u8, error: u8, channel: u32, timestamp: u64) {
+    pub async fn playback_done(&mut self, source: u8, error: u8, channel: u32, timestamp: u64) {
         let mut traces_locked = self.traces.async_lock().await;
-        let mut trace = traces_locked.get_mut(&destination).unwrap();
+        let mut trace = traces_locked.get_mut(&source).unwrap();
         trace.state = RemoteState::PlaybackEnded {
             error: error,
             channel: channel,
@@ -13,8 +13,10 @@ pub mod drtio {
     use ksupport::{resolve_channel_name, ASYNC_ERROR_BUSY, ASYNC_ERROR_COLLISION, ASYNC_ERROR_SEQUENCE_ERROR,
                    SEEN_ASYNC_ERRORS};
     use libasync::{delay, task};
-    use libboard_artiq::{drtioaux::Error, drtioaux_async, drtioaux_async::Packet,
-                         drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE};
+    use libboard_artiq::{drtioaux::Error,
+                         drtioaux_async,
+                         drtioaux_async::Packet,
+                         drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}};
     use libboard_zynq::time::Milliseconds;
     use log::{error, info, warn};
 
@@ -41,39 +43,102 @@ pub mod drtio {
         unsafe { (csr::DRTIO[linkno].rx_up_read)() == 1 }
     }
 
-    async fn process_async_packets(aux_mutex: &Mutex<bool>, linkno: u8, packet: Packet) -> Option<Packet> {
-        // returns None if an async packet has been consumed
+    async fn link_has_async_ready(linkno: u8) -> bool {
+        let linkno = linkno as usize;
+        let async_ready;
+        unsafe {
+            async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1;
+            (csr::DRTIO[linkno].async_messages_ready_write)(1);
+        }
+        async_ready
+    }
+
+    async fn process_async_packets(
+        aux_mutex: &Mutex<bool>,
+        linkno: u8,
+        routing_table: &drtio_routing::RoutingTable,
+        timer: GlobalTimer,
+    ) {
+        if link_has_async_ready(linkno).await {
+            loop {
+                let reply = aux_transact(aux_mutex, linkno, &Packet::RoutingRetrievePackets, timer).await;
+                if let Ok(packet) = reply {
                     match packet {
                         Packet::DmaPlaybackStatus {
                             id,
-                            destination,
+                            source,
+                            destination: 0,
                             error,
                             channel,
                             timestamp,
                         } => {
-                            remote_dma::playback_done(id, destination, error, channel, timestamp).await;
-                            None
+                            remote_dma::playback_done(id, source, error, channel, timestamp).await;
                         }
-                        Packet::SubkernelFinished { id, with_exception } => {
-                            subkernel::subkernel_finished(id, with_exception).await;
-                            None
+                        Packet::SubkernelFinished {
+                            id,
+                            destination: 0,
+                            with_exception,
+                            exception_src,
+                        } => {
+                            subkernel::subkernel_finished(id, with_exception, exception_src).await;
                         }
                         Packet::SubkernelMessage {
                             id,
-                            destination: from,
-                            last,
+                            source,
+                            destination: 0,
+                            status,
                             length,
                             data,
                         } => {
-                            subkernel::message_handle_incoming(id, last, length as usize, &data).await;
+                            subkernel::message_handle_incoming(id, status, length as usize, &data).await;
                             // acknowledge receiving part of the message
                             let _lock = aux_mutex.async_lock().await;
-                            drtioaux_async::send(linkno, &Packet::SubkernelMessageAck { destination: from })
+                            drtioaux_async::send(linkno, &Packet::SubkernelMessageAck { destination: source })
                                 .await
                                 .unwrap();
-                            None
+                            let mut countdown = timer.countdown();
+                            // give the satellites some time to process
+                            delay(&mut countdown, Milliseconds(10)).await;
+                        }
+                        // routable packets
+                        Packet::DmaAddTraceRequest { destination, .. }
+                        | Packet::DmaAddTraceReply { destination, .. }
+                        | Packet::DmaRemoveTraceRequest { destination, .. }
+                        | Packet::DmaRemoveTraceReply { destination, .. }
+                        | Packet::DmaPlaybackRequest { destination, .. }
+                        | Packet::DmaPlaybackReply { destination, .. }
+                        | Packet::SubkernelLoadRunRequest { destination, .. }
+                        | Packet::SubkernelLoadRunReply { destination, .. }
+                        | Packet::SubkernelMessage { destination, .. }
+                        | Packet::SubkernelMessageAck { destination, .. }
+                        | Packet::DmaPlaybackStatus { destination, .. }
+                        | Packet::SubkernelFinished { destination, .. } => {
+                            let dest_link = routing_table.0[destination as usize][0] - 1;
+                            if dest_link == linkno {
+                                warn!(
+                                    "[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}",
+                                    linkno, packet
+                                );
+                            } else if destination == 0 {
+                                warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet)
+                            } else {
+                                drtioaux_async::send(dest_link, &packet).await.unwrap();
+                            }
+                        }
+
+                        Packet::RoutingNoPackets => break,
+
+                        other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other),
+                    }
+                } else {
+                    warn!(
+                        "[LINK#{}] Error handling async packets ({})",
+                        linkno,
+                        reply.unwrap_err()
+                    );
+                    return;
                 }
             }
-            other => Some(other),
         }
     }
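The re-routing branch above derives the outgoing link from the routing table: the first hop stored for a destination is 1-based, so the link index is hop - 1, and hop 0 means "local". A small standalone sketch of that arithmetic with a stand-in table (the real type is drtio_routing::RoutingTable; the struct below is only illustrative):

```rust
// Stand-in for drtio_routing::RoutingTable: each destination row lists
// hops, and the first hop is 1-based (0 would mean "this node").
// This just illustrates the `hop - 1 == linkno` arithmetic used above.
struct RoutingTable(pub [[u8; 8]; 4]);

fn first_hop_link(table: &RoutingTable, destination: u8) -> Option<u8> {
    let hop = table.0[destination as usize][0];
    if hop == 0 { None } else { Some(hop - 1) }
}

fn main() {
    let mut table = RoutingTable([[0; 8]; 4]);
    table.0[2][0] = 1; // destination 2 is reached over link 0
    assert_eq!(first_hop_link(&table, 2), Some(0));
    assert_eq!(first_hop_link(&table, 3), None); // local / unreachable
    println!("hop 1 maps to link 0, as in the re-routing check above");
}
```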
@@ -208,11 +273,7 @@ pub mod drtio {
     async fn process_unsolicited_aux(aux_mutex: &Rc<Mutex<bool>>, linkno: u8) {
         let _lock = aux_mutex.async_lock().await;
         match drtioaux_async::recv(linkno).await {
-            Ok(Some(packet)) => {
-                if let Some(packet) = process_async_packets(aux_mutex, linkno, packet).await {
-                    warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet);
-                }
-            }
+            Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet),
             Ok(None) => (),
             Err(_) => warn!("[LINK#{}] aux packet error", linkno),
         }
@@ -281,7 +342,6 @@ pub mod drtio {
                 let linkno = hop - 1;
                 if destination_up(up_destinations, destination).await {
                     if up_links[linkno as usize] {
-                        loop {
                         let reply = aux_transact(
                             aux_mutex,
                             linkno,
@@ -294,13 +354,7 @@ pub mod drtio {
                         match reply {
                             Ok(Packet::DestinationDownReply) => {
                                 destination_set_up(routing_table, up_destinations, destination, false).await;
-                                remote_dma::destination_changed(
-                                    aux_mutex,
-                                    routing_table,
-                                    timer,
-                                    destination,
-                                    false,
-                                )
+                                remote_dma::destination_changed(aux_mutex, routing_table, timer, destination, false)
                                     .await;
                                 subkernel::destination_changed(aux_mutex, routing_table, timer, destination, false)
                                     .await;
@@ -333,16 +387,9 @@ pub mod drtio {
                                 );
                                 unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY };
                             }
-                            Ok(packet) => match process_async_packets(aux_mutex, linkno, packet).await {
-                                Some(packet) => {
-                                    error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet)
-                                }
-                                None => continue,
-                            },
+                            Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
                             Err(e) => error!("[DEST#{}] communication failed ({})", destination, e),
                         }
-                        break;
-                        }
                     } else {
                         destination_set_up(routing_table, up_destinations, destination, false).await;
                         remote_dma::destination_changed(aux_mutex, routing_table, timer, destination, false).await;
@@ -391,6 +438,7 @@ pub mod drtio {
             if up_links[linkno as usize] {
                 /* link was previously up */
                 if link_rx_up(linkno).await {
+                    process_async_packets(aux_mutex, linkno, routing_table, timer).await;
                     process_unsolicited_aux(aux_mutex, linkno).await;
                     process_local_errors(linkno).await;
                 } else {
@@ -463,7 +511,7 @@ pub mod drtio {
         reply_handler_f: HandlerF,
     ) -> Result<(), &'static str>
     where
-        PacketF: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], bool, usize) -> Packet,
+        PacketF: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Packet,
         HandlerF: Fn(&Packet) -> Result<(), &'static str>,
     {
         let mut i = 0;
@@ -474,10 +522,12 @@ pub mod drtio {
             } else {
                 data.len() - i
             } as usize;
+            let first = i == 0;
             let last = i + len == data.len();
             slice[..len].clone_from_slice(&data[i..i + len]);
             i += len;
-            let packet = packet_f(&slice, last, len);
+            let status = PayloadStatus::from_status(first, last);
+            let packet = packet_f(&slice, status, len);
             let reply = aux_transact(aux_mutex, linkno, &packet, timer).await?;
             reply_handler_f(&reply)?;
         }
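The chunking loop above replaces the single `last` flag with a PayloadStatus derived from both the first and last positions of a slice. A standalone sketch of that idea; the local enum below only mocks the concept, the real definition lives in libboard_artiq::drtioaux_proto and may differ:

```rust
// Local mock of the PayloadStatus idea: a chunk can be the first slice,
// the last slice, both (single-slice payload), or a middle slice.
#[derive(Debug, PartialEq, Clone, Copy)]
enum PayloadStatus {
    Middle,
    First,
    Last,
    FirstAndLast,
}

impl PayloadStatus {
    fn from_status(first: bool, last: bool) -> Self {
        match (first, last) {
            (true, true) => PayloadStatus::FirstAndLast,
            (true, false) => PayloadStatus::First,
            (false, true) => PayloadStatus::Last,
            (false, false) => PayloadStatus::Middle,
        }
    }
}

fn main() {
    const MAX: usize = 4; // stand-in for MASTER_PAYLOAD_MAX_SIZE
    let data = [0u8; 10];
    let mut i = 0;
    while i < data.len() {
        let len = MAX.min(data.len() - i);
        let status = PayloadStatus::from_status(i == 0, i + len == data.len());
        println!("slice at {} len {} -> {:?}", i, len, status);
        i += len;
    }
    // prints First, Middle, Last for a 10-byte payload split 4+4+2
}
```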
@@ -498,16 +548,25 @@ pub mod drtio {
             aux_mutex,
             timer,
             trace,
-            |slice, last, len| Packet::DmaAddTraceRequest {
+            |slice, status, len| Packet::DmaAddTraceRequest {
                 id: id,
+                source: 0,
                 destination: destination,
-                last: last,
+                status: status,
                 length: len as u16,
                 trace: *slice,
             },
             |reply| match reply {
-                Packet::DmaAddTraceReply { succeeded: true } => Ok(()),
-                Packet::DmaAddTraceReply { succeeded: false } => Err("error adding trace on satellite"),
+                Packet::DmaAddTraceReply {
+                    destination: 0,
+                    succeeded: true,
+                    ..
+                } => Ok(()),
+                Packet::DmaAddTraceReply {
+                    destination: 0,
+                    succeeded: false,
+                    ..
+                } => Err("error adding trace on satellite"),
                 _ => Err("adding DMA trace failed, unexpected aux packet"),
             },
         )
@@ -527,14 +586,21 @@ pub mod drtio {
             linkno,
             &Packet::DmaRemoveTraceRequest {
                 id: id,
+                source: 0,
                 destination: destination,
             },
             timer,
         )
         .await;
         match reply {
-            Ok(Packet::DmaRemoveTraceReply { succeeded: true }) => Ok(()),
-            Ok(Packet::DmaRemoveTraceReply { succeeded: false }) => Err("satellite DMA erase error"),
+            Ok(Packet::DmaRemoveTraceReply {
+                destination: 0,
+                succeeded: true,
+            }) => Ok(()),
+            Ok(Packet::DmaRemoveTraceReply {
+                destination: 0,
+                succeeded: false,
+            }) => Err("satellite DMA erase error"),
             Ok(_) => Err("adding trace failed, unexpected aux packet"),
             Err(_) => Err("erasing trace failed, aux error"),
         }
@@ -554,6 +620,7 @@ pub mod drtio {
             linkno,
             &Packet::DmaPlaybackRequest {
                 id: id,
+                source: 0,
                 destination: destination,
                 timestamp: timestamp,
             },
@@ -561,8 +628,14 @@ pub mod drtio {
         )
         .await;
         match reply {
-            Ok(Packet::DmaPlaybackReply { succeeded: true }) => Ok(()),
-            Ok(Packet::DmaPlaybackReply { succeeded: false }) => Err("error on DMA playback request"),
+            Ok(Packet::DmaPlaybackReply {
+                destination: 0,
+                succeeded: true,
+            }) => Ok(()),
+            Ok(Packet::DmaPlaybackReply {
+                destination: 0,
+                succeeded: false,
+            }) => Err("error on DMA playback request"),
             Ok(_) => Err("received unexpected aux packet during DMA playback"),
             Err(_) => Err("aux error on DMA playback"),
         }
@@ -655,10 +728,10 @@ pub mod drtio {
             aux_mutex,
             timer,
             data,
-            |slice, last, len| Packet::SubkernelAddDataRequest {
+            |slice, status, len| Packet::SubkernelAddDataRequest {
                 id: id,
                 destination: destination,
-                last: last,
+                status: status,
                 length: len as u16,
                 data: *slice,
             },
@@ -685,6 +758,7 @@ pub mod drtio {
             linkno,
             &Packet::SubkernelLoadRunRequest {
                 id: id,
+                source: 0,
                 destination: destination,
                 run: run,
             },
@@ -692,8 +766,14 @@ pub mod drtio {
         )
         .await?;
         match reply {
-            Packet::SubkernelLoadRunReply { succeeded: true } => return Ok(()),
-            Packet::SubkernelLoadRunReply { succeeded: false } => return Err("error on subkernel run request"),
+            Packet::SubkernelLoadRunReply {
+                destination: 0,
+                succeeded: true,
+            } => return Ok(()),
+            Packet::SubkernelLoadRunReply {
+                destination: 0,
+                succeeded: false,
+            } => return Err("error on subkernel run request"),
             _ => return Err("received unexpected aux packet during subkernel run"),
         }
     }
@@ -742,10 +822,11 @@ pub mod drtio {
             aux_mutex,
             timer,
             message,
-            |slice, last, len| Packet::SubkernelMessage {
+            |slice, status, len| Packet::SubkernelMessage {
+                source: 0,
                 destination: destination,
                 id: id,
-                last: last,
+                status: status,
                 length: len as u16,
                 data: *slice,
             },
@@ -1,10 +1,11 @@
 use alloc::{collections::BTreeMap, rc::Rc, vec::Vec};
 
 use libasync::task;
-use libboard_artiq::{drtio_routing::RoutingTable, drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE};
+use libboard_artiq::{drtio_routing::RoutingTable,
+                     drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}};
 use libboard_zynq::{time::Milliseconds, timer::GlobalTimer};
 use libcortex_a9::mutex::Mutex;
-use log::error;
+use log::{error, warn};
 
 use crate::rtio_mgt::drtio;
 
@@ -12,7 +13,7 @@ use crate::rtio_mgt::drtio;
 pub enum FinishStatus {
     Ok,
     CommLost,
-    Exception,
+    Exception(u8), // exception source
 }
 
 #[derive(Debug, PartialEq, Clone, Copy)]
@@ -28,6 +29,7 @@ pub enum Error {
     Timeout,
     IncorrectState,
     SubkernelNotFound,
+    SubkernelException,
     CommLost,
     DrtioError(&'static str),
 }
@@ -119,17 +121,19 @@ pub async fn clear_subkernels() {
     CURRENT_MESSAGES.async_lock().await.clear();
 }
 
-pub async fn subkernel_finished(id: u32, with_exception: bool) {
+pub async fn subkernel_finished(id: u32, with_exception: bool, exception_src: u8) {
     // called upon receiving DRTIO SubkernelRunDone
     // may be None if session ends and is cleared
     if let Some(subkernel) = SUBKERNELS.async_lock().await.get_mut(&id) {
+        if subkernel.state == SubkernelState::Running {
             subkernel.state = SubkernelState::Finished {
                 status: match with_exception {
-                    true => FinishStatus::Exception,
+                    true => FinishStatus::Exception(exception_src),
                    false => FinishStatus::Ok,
                 },
             }
         }
+    }
 }
 
 pub async fn destination_changed(
@@ -165,26 +169,35 @@ pub async fn await_finish(
     routing_table: &RoutingTable,
     timer: GlobalTimer,
     id: u32,
-    timeout: u64,
+    timeout: i64,
 ) -> Result<SubkernelFinished, Error> {
     match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
         SubkernelState::Running | SubkernelState::Finished { .. } => (),
         _ => return Err(Error::IncorrectState),
     }
-    let max_time = timer.get_time() + Milliseconds(timeout);
+    if timeout > 0 {
+        let max_time = timer.get_time() + Milliseconds(timeout as u64);
         while timer.get_time() < max_time {
-            {
             match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
                 SubkernelState::Finished { .. } => break,
                 _ => (),
             };
-            }
             task::r#yield().await;
         }
         if timer.get_time() >= max_time {
             error!("Remote subkernel finish await timed out");
             return Err(Error::Timeout);
         }
+    } else {
+        // no timeout, wait forever
+        loop {
+            match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
+                SubkernelState::Finished { .. } => break,
+                _ => (),
+            };
+            task::r#yield().await;
+        }
+    }
     if let Some(subkernel) = SUBKERNELS.async_lock().await.get_mut(&id) {
         match subkernel.state {
             SubkernelState::Finished { status } => {
@@ -192,11 +205,8 @@ pub async fn await_finish(
                 Ok(SubkernelFinished {
                     id: id,
                     status: status,
-                    exception: if status == FinishStatus::Exception {
-                        Some(
-                            drtio::subkernel_retrieve_exception(aux_mutex, routing_table, timer, subkernel.destination)
-                                .await?,
-                        )
+                    exception: if let FinishStatus::Exception(dest) = status {
+                        Some(drtio::subkernel_retrieve_exception(aux_mutex, routing_table, timer, dest).await?)
                     } else {
                         None
                     },
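The timeout parameter becomes a signed i64 above so that a non-positive value can express "no deadline" (the "wait forever" branch in await_finish). A minimal standalone sketch of that convention; the closures and names are stand-ins, not firmware APIs:

```rust
// Sketch of the signed-timeout convention introduced above:
// timeout > 0  -> wait at most that many milliseconds,
// timeout <= 0 -> wait with no deadline (as in the "wait forever" branch).
fn await_with_timeout(
    timeout_ms: i64,
    mut now_ms: impl FnMut() -> u64,
    mut poll_done: impl FnMut() -> bool,
) -> Result<(), &'static str> {
    if timeout_ms > 0 {
        let deadline = now_ms() + timeout_ms as u64;
        while now_ms() < deadline {
            if poll_done() {
                return Ok(());
            }
        }
        Err("timeout")
    } else {
        loop {
            if poll_done() {
                return Ok(());
            }
        }
    }
}

fn main() {
    use std::cell::Cell;
    let ticks = Cell::new(0u64); // fake millisecond clock
    let done = await_with_timeout(
        5,
        || { ticks.set(ticks.get() + 1); ticks.get() },
        || ticks.get() >= 3,
    );
    assert!(done.is_ok());
    println!("finished before the 5 ms deadline");
}
```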
@@ -220,13 +230,28 @@ static MESSAGE_QUEUE: Mutex<Vec<Message>> = Mutex::new(Vec::new());
 // currently under construction message(s) (can be from multiple sources)
 static CURRENT_MESSAGES: Mutex<BTreeMap<u32, Message>> = Mutex::new(BTreeMap::new());
 
-pub async fn message_handle_incoming(id: u32, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
+pub async fn message_handle_incoming(
+    id: u32,
+    status: PayloadStatus,
+    length: usize,
+    data: &[u8; MASTER_PAYLOAD_MAX_SIZE],
+) {
     // called when receiving a message from satellite
-    if SUBKERNELS.async_lock().await.get(&id).is_none() {
-        // do not add messages for non-existing or deleted subkernels
+    {
+        let subkernel_lock = SUBKERNELS.async_lock().await;
+        let subkernel = subkernel_lock.get(&id);
+        if subkernel.is_some() && subkernel.unwrap().state != SubkernelState::Running {
+            // do not add messages for non-running or deleted subkernels
+            warn!("received a message for a non-running subkernel #{}", id);
             return;
         }
+    }
     let mut current_messages = CURRENT_MESSAGES.async_lock().await;
 
+    if status.is_first() {
+        current_messages.remove(&id);
+    }
+
     match current_messages.get_mut(&id) {
         Some(message) => message.data.extend(&data[..length]),
         None => {
@@ -240,7 +265,7 @@ pub async fn message_handle_incoming(id: u32, last: bool, length: usize, data: &
             );
         }
     };
-    if last {
+    if status.is_last() {
         // when done, remove from working queue
         MESSAGE_QUEUE
             .async_lock()
@@ -249,7 +274,9 @@ pub async fn message_handle_incoming(id: u32, last: bool, length: usize, data: &
     }
 }
 
-pub async fn message_await(id: u32, timeout: u64, timer: GlobalTimer) -> Result<Message, Error> {
+pub async fn message_await(id: u32, timeout: i64, timer: GlobalTimer) -> Result<Message, Error> {
+    let is_subkernel = SUBKERNELS.async_lock().await.get(&id).is_some();
+    if is_subkernel {
         match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
             SubkernelState::Finished {
                 status: FinishStatus::CommLost,
@@ -257,8 +284,9 @@ pub async fn message_await(id: u32, timeout: u64, timer: GlobalTimer) -> Result<
             SubkernelState::Running | SubkernelState::Finished { .. } => (),
             _ => return Err(Error::IncorrectState),
         }
-    let max_time = timer.get_time() + Milliseconds(timeout);
-    while timer.get_time() < max_time {
+    }
+    let max_time = timer.get_time() + Milliseconds(timeout as u64);
+    while timeout < 0 || (timeout > 0 && timer.get_time() < max_time) {
         {
             let mut message_queue = MESSAGE_QUEUE.async_lock().await;
             for i in 0..message_queue.len() {
@@ -269,6 +297,17 @@ pub async fn message_await(id: u32, timeout: u64, timer: GlobalTimer) -> Result<
                 }
             }
         }
+        if is_subkernel {
+            match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
+                SubkernelState::Finished {
+                    status: FinishStatus::CommLost,
+                } => return Err(Error::CommLost),
+                SubkernelState::Finished {
+                    status: FinishStatus::Exception(_),
+                } => return Err(Error::SubkernelException),
+                _ => (),
+            }
+        }
         task::r#yield().await;
     }
     Err(Error::Timeout)
@@ -279,9 +318,8 @@ pub async fn message_send<'a>(
     routing_table: &RoutingTable,
     timer: GlobalTimer,
     id: u32,
+    destination: u8,
     message: Vec<u8>,
 ) -> Result<(), Error> {
-    let destination = SUBKERNELS.async_lock().await.get(&id).unwrap().destination;
-    // rpc data prepared by the kernel core already
     Ok(drtio::subkernel_send_message(aux_mutex, routing_table, timer, id, destination, &message).await?)
 }

@@ -1,7 +1,13 @@
-use alloc::{collections::btree_map::BTreeMap, vec::Vec};
+use alloc::{collections::btree_map::BTreeMap, string::String, vec::Vec};
+use core::mem;
 
-use libboard_artiq::pl::csr;
+use ksupport::kernel::DmaRecorder;
+use libboard_artiq::{drtio_routing::RoutingTable,
+                     drtioaux_proto::{Packet, PayloadStatus, MASTER_PAYLOAD_MAX_SIZE},
+                     pl::csr};
 use libcortex_a9::cache::dcci_slice;
+use routing::{Router, Sliceable};
+use subkernel::Manager as KernelManager;
 
 const ALIGNMENT: usize = 64;
 
@@ -12,16 +18,20 @@ enum ManagerState {
 }
 
 pub struct RtioStatus {
+    pub source: u8,
     pub id: u32,
     pub error: u8,
     pub channel: u32,
     pub timestamp: u64,
 }
 
+#[derive(Debug)]
 pub enum Error {
     IdNotFound,
     PlaybackInProgress,
     EntryNotComplete,
+    MasterDmaFound,
+    UploadFail,
 }
 
 #[derive(Debug)]
@@ -29,13 +39,228 @@ struct Entry {
     trace: Vec<u8>,
     padding_len: usize,
     complete: bool,
+    duration: i64, // relevant for local DMA
+}
+
+impl Entry {
+    pub fn from_vec(data: Vec<u8>, duration: i64) -> Entry {
+        let mut entry = Entry {
+            trace: data,
+            padding_len: 0,
+            complete: true,
+            duration: duration,
+        };
+        entry.realign();
+        entry
+    }
+
+    pub fn id(&self) -> u32 {
+        self.trace[self.padding_len..].as_ptr() as u32
+    }
+
+    pub fn realign(&mut self) {
+        self.trace.push(0);
+        let data_len = self.trace.len();
+
+        self.trace.reserve(ALIGNMENT - 1);
+        let padding = ALIGNMENT - self.trace.as_ptr() as usize % ALIGNMENT;
+        let padding = if padding == ALIGNMENT { 0 } else { padding };
+        for _ in 0..padding {
+            // Vec guarantees that this will not reallocate
+            self.trace.push(0)
+        }
+        for i in 1..data_len + 1 {
+            self.trace[data_len + padding - i] = self.trace[data_len - i]
+        }
+        self.complete = true;
+        self.padding_len = padding;
+
+        dcci_slice(&self.trace);
+    }
+}
+
+#[derive(Debug)]
+enum RemoteTraceState {
+    Unsent,
+    Sending(usize),
+    Ready,
+    Running(usize),
+}
+
+#[derive(Debug)]
+struct RemoteTraces {
+    remote_traces: BTreeMap<u8, Sliceable>,
+    state: RemoteTraceState,
+}
+
+impl RemoteTraces {
+    pub fn new(traces: BTreeMap<u8, Sliceable>) -> RemoteTraces {
+        RemoteTraces {
+            remote_traces: traces,
+            state: RemoteTraceState::Unsent,
+        }
+    }
+
+    // on subkernel request
+    pub fn upload_traces(
+        &mut self,
+        id: u32,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) -> usize {
+        let len = self.remote_traces.len();
+        if len > 0 {
+            self.state = RemoteTraceState::Sending(self.remote_traces.len());
+            for (dest, trace) in self.remote_traces.iter_mut() {
+                // queue up the first packet for all destinations, rest will be sent after first ACK
+                let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+                let meta = trace.get_slice_master(&mut data_slice);
+                router.route(
+                    Packet::DmaAddTraceRequest {
+                        source: self_destination,
+                        destination: *dest,
+                        id: id,
+                        status: meta.status,
+                        length: meta.len,
+                        trace: data_slice,
+                    },
+                    routing_table,
+                    rank,
+                    self_destination,
+                );
+            }
+        }
+        len
+    }
+
+    // on incoming Packet::DmaAddTraceReply
+    pub fn ack_upload(
+        &mut self,
+        kernel_manager: &mut KernelManager,
+        source: u8,
+        id: u32,
+        succeeded: bool,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) {
+        if let RemoteTraceState::Sending(count) = self.state {
+            if let Some(trace) = self.remote_traces.get_mut(&source) {
+                if trace.at_end() {
+                    if count - 1 == 0 {
+                        self.state = RemoteTraceState::Ready;
+                        if let Some((id, timestamp)) = kernel_manager.ddma_remote_uploaded(succeeded) {
+                            self.playback(id, timestamp, router, rank, self_destination, routing_table);
+                        }
+                    } else {
+                        self.state = RemoteTraceState::Sending(count - 1);
+                    }
+                } else {
+                    // send next slice
+                    let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+                    let meta = trace.get_slice_master(&mut data_slice);
+                    router.route(
+                        Packet::DmaAddTraceRequest {
+                            source: self_destination,
+                            destination: meta.destination,
+                            id: id,
+                            status: meta.status,
+                            length: meta.len,
+                            trace: data_slice,
+                        },
+                        routing_table,
+                        rank,
+                        self_destination,
+                    );
+                }
+            }
+        }
+    }
+
+    // on subkernel request
+    pub fn playback(
+        &mut self,
+        id: u32,
+        timestamp: u64,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) {
+        // route all the playback requests
+        // remote traces (local trace runs on core1 unlike mainline firmware)
+        self.state = RemoteTraceState::Running(self.remote_traces.len());
+        for (dest, _) in self.remote_traces.iter() {
+            router.route(
+                Packet::DmaPlaybackRequest {
+                    source: self_destination,
+                    destination: *dest,
+                    id: id,
+                    timestamp: timestamp,
+                },
+                routing_table,
+                rank,
+                self_destination,
+            );
+            // response will be ignored (succeeded = false handled by the main thread)
+        }
+    }
+
+    // on incoming Packet::DmaPlaybackDone
+    pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, error: u8, channel: u32, timestamp: u64) {
+        if let RemoteTraceState::Running(count) = self.state {
+            if error != 0 || count - 1 == 0 {
+                // notify the kernel about a DDMA error or finish
+                kernel_manager.ddma_finished(error, channel, timestamp);
+                self.state = RemoteTraceState::Ready;
+                // further messages will be ignored (if there was an error)
+            } else {
+                // no error and not the last one awaited
+                self.state = RemoteTraceState::Running(count - 1);
+            }
+        }
+    }
+
+    pub fn erase(
+        &mut self,
+        id: u32,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) {
+        for (dest, _) in self.remote_traces.iter() {
+            router.route(
+                Packet::DmaRemoveTraceRequest {
+                    source: self_destination,
+                    destination: *dest,
+                    id: id,
+                },
+                routing_table,
+                rank,
+                self_destination,
+            );
+            // response will be ignored as this object will stop existing too
+        }
+    }
+
+    pub fn has_remote_traces(&self) -> bool {
+        self.remote_traces.len() > 0
+    }
 }
 
 #[derive(Debug)]
 pub struct Manager {
-    entries: BTreeMap<u32, Entry>,
+    entries: BTreeMap<(u8, u32), Entry>,
     state: ManagerState,
-    currentid: u32,
+    current_id: u32,
+    current_source: u8,
+
+    remote_entries: BTreeMap<u32, RemoteTraces>,
+    name_map: BTreeMap<String, u32>,
 }
 
 impl Manager {
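Entry::realign() above pads the trace so its payload starts on a 64-byte boundary, and id() then reports the address of that aligned payload. A worked sketch of the padding arithmetic, using plain numbers instead of real pointers:

```rust
// Worked sketch of the 64-byte alignment math used by Entry::realign():
// padding = ALIGNMENT - (base % ALIGNMENT), folded to 0 when the buffer
// is already aligned. Addresses here are illustrative numbers only.
const ALIGNMENT: usize = 64;

fn padding_for(base_addr: usize) -> usize {
    let padding = ALIGNMENT - base_addr % ALIGNMENT;
    if padding == ALIGNMENT { 0 } else { padding }
}

fn main() {
    assert_eq!(padding_for(0x1000), 0);  // already 64-byte aligned
    assert_eq!(padding_for(0x1008), 56); // 0x1008 + 56 = 0x1040
    assert_eq!(padding_for(0x103F), 1);  // one byte short of 0x1040
    // The trace id is then the address of the first byte after the padding,
    // i.e. the aligned start of the actual DMA data.
    println!("padding examples check out");
}
```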
@@ -45,79 +270,238 @@ impl Manager {
         unsafe { while csr::rtio_dma::enable_read() != 0 {} }
         Manager {
             entries: BTreeMap::new(),
-            currentid: 0,
+            current_id: 0,
+            current_source: 0,
             state: ManagerState::Idle,
+            remote_entries: BTreeMap::new(),
+            name_map: BTreeMap::new(),
         }
     }
 
-    pub fn add(&mut self, id: u32, last: bool, trace: &[u8], trace_len: usize) -> Result<(), Error> {
-        let entry = match self.entries.get_mut(&id) {
+    pub fn add(
+        &mut self,
+        source: u8,
+        id: u32,
+        status: PayloadStatus,
+        trace: &[u8],
+        trace_len: usize,
+    ) -> Result<(), Error> {
+        let entry = match self.entries.get_mut(&(source, id)) {
             Some(entry) => {
-                if entry.complete {
+                if entry.complete || status.is_first() {
                     // replace entry
-                    self.entries.remove(&id);
+                    self.entries.remove(&(source, id));
                     self.entries.insert(
-                        id,
+                        (source, id),
                         Entry {
                             trace: Vec::new(),
                             padding_len: 0,
                             complete: false,
+                            duration: 0,
                         },
                     );
-                    self.entries.get_mut(&id).unwrap()
+                    self.entries.get_mut(&(source, id)).unwrap()
                 } else {
                     entry
                 }
             }
             None => {
                 self.entries.insert(
-                    id,
+                    (source, id),
                     Entry {
                         trace: Vec::new(),
                         padding_len: 0,
                         complete: false,
+                        duration: 0,
                     },
                 );
-                self.entries.get_mut(&id).unwrap()
+                self.entries.get_mut(&(source, id)).unwrap()
             }
         };
         entry.trace.extend(&trace[0..trace_len]);
 
-        if last {
-            entry.trace.push(0);
-            let data_len = entry.trace.len();
-
-            // Realign.
-            entry.trace.reserve(ALIGNMENT - 1);
-            let padding = ALIGNMENT - entry.trace.as_ptr() as usize % ALIGNMENT;
-            let padding = if padding == ALIGNMENT { 0 } else { padding };
-            for _ in 0..padding {
-                // Vec guarantees that this will not reallocate
-                entry.trace.push(0)
-            }
-            for i in 1..data_len + 1 {
-                entry.trace[data_len + padding - i] = entry.trace[data_len - i]
-            }
-            entry.complete = true;
-            entry.padding_len = padding;
-            dcci_slice(&entry.trace);
+        if status.is_last() {
+            entry.realign();
         }
         Ok(())
     }
 
-    pub fn erase(&mut self, id: u32) -> Result<(), Error> {
-        match self.entries.remove(&id) {
+    // api for DRTIO
+    pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> {
+        match self.entries.remove(&(source, id)) {
             Some(_) => Ok(()),
             None => Err(Error::IdNotFound),
         }
     }
 
-    pub fn playback(&mut self, id: u32, timestamp: u64) -> Result<(), Error> {
+    // API for subkernel
+    pub fn erase_name(
+        &mut self,
+        name: &str,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) {
+        if let Some(id) = self.name_map.get(name) {
+            if let Some(traces) = self.remote_entries.get_mut(&id) {
+                traces.erase(*id, router, rank, self_destination, routing_table);
+                self.remote_entries.remove(&id);
+            }
+            self.entries.remove(&(self_destination, *id));
+            self.name_map.remove(name);
+        }
+    }
+
+    pub fn remote_finished(
+        &mut self,
+        kernel_manager: &mut KernelManager,
+        id: u32,
+        error: u8,
+        channel: u32,
+        timestamp: u64,
+    ) {
+        if let Some(entry) = self.remote_entries.get_mut(&id) {
+            entry.remote_finished(kernel_manager, error, channel, timestamp);
+        }
+    }
+
+    pub fn ack_upload(
+        &mut self,
+        kernel_manager: &mut KernelManager,
+        source: u8,
+        id: u32,
+        succeeded: bool,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) {
+        if let Some(entry) = self.remote_entries.get_mut(&id) {
+            entry.ack_upload(
+                kernel_manager,
+                source,
+                id,
+                succeeded,
+                router,
+                rank,
+                self_destination,
+                routing_table,
+            );
+        }
+    }
+
+    // API for subkernel
+    pub fn upload_traces(
+        &mut self,
+        id: u32,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) -> Result<usize, Error> {
+        let remote_traces = self.remote_entries.get_mut(&id);
+        let mut len = 0;
+        if let Some(traces) = remote_traces {
+            len = traces.upload_traces(id, router, rank, self_destination, routing_table);
+        }
+        Ok(len)
+    }
+
+    // API for subkernel
+    pub fn playback_remote(
+        &mut self,
+        id: u32,
+        timestamp: u64,
+        router: &mut Router,
+        rank: u8,
+        self_destination: u8,
+        routing_table: &RoutingTable,
+    ) -> Result<(), Error> {
+        if let Some(traces) = self.remote_entries.get_mut(&id) {
+            traces.playback(id, timestamp, router, rank, self_destination, routing_table);
+            Ok(())
+        } else {
+            Err(Error::IdNotFound)
+        }
+    }
+
+    // API for subkernel
+    pub fn cleanup(&mut self, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
+        // after subkernel ends, remove all self-generated traces
+        for (_, id) in self.name_map.iter_mut() {
+            if let Some(traces) = self.remote_entries.get_mut(&id) {
+                traces.erase(*id, router, rank, self_destination, routing_table);
+                self.remote_entries.remove(&id);
+            }
+            self.entries.remove(&(self_destination, *id));
+        }
+        self.name_map.clear();
+    }
+
+    // API for subkernel
+    pub fn retrieve(&self, self_destination: u8, name: &String) -> Option<(i32, i64, bool)> {
+        let id = self.name_map.get(name)?;
+        let duration = self.entries.get(&(self_destination, *id))?.duration;
+        let uses_ddma = self.has_remote_traces(*id);
+        Some((*id as i32, duration, uses_ddma))
+    }
+
+    pub fn has_remote_traces(&self, id: u32) -> bool {
+        match self.remote_entries.get(&id) {
+            Some(traces) => traces.has_remote_traces(),
+            _ => false,
+        }
+    }
+
+    pub fn put_record(&mut self, mut recorder: DmaRecorder, self_destination: u8) -> Result<u32, Error> {
+        let mut remote_traces: BTreeMap<u8, Sliceable> = BTreeMap::new();
+
+        let mut local_trace: Vec<u8> = Vec::new();
+        // analyze each entry and put in proper buckets, as the kernel core
+        // sends whole chunks, to limit comms/kernel CPU communication,
+        // and as only comms core has access to varios DMA buffers.
+        let mut ptr = 0;
+        recorder.buffer.push(0);
+        while recorder.buffer[ptr] != 0 {
+            // ptr + 3 = tgt >> 24 (destination)
+            let len = recorder.buffer[ptr] as usize;
+            let destination = recorder.buffer[ptr + 3];
+            if destination == 0 {
+                return Err(Error::MasterDmaFound);
+            } else if destination == self_destination {
+                local_trace.extend(&recorder.buffer[ptr..ptr + len]);
+            } else {
+                if let Some(remote_trace) = remote_traces.get_mut(&destination) {
+                    remote_trace.extend(&recorder.buffer[ptr..ptr + len]);
+                } else {
+                    remote_traces.insert(
+                        destination,
+                        Sliceable::new(destination, recorder.buffer[ptr..ptr + len].to_vec()),
+                    );
+                }
+            }
+            // and jump to the next event
+            ptr += len;
+        }
+        let local_entry = Entry::from_vec(local_trace, recorder.duration);
+
+        let id = local_entry.id();
+        self.entries.insert((self_destination, id), local_entry);
+        self.remote_entries.insert(id, RemoteTraces::new(remote_traces));
+        let mut name = String::new();
+        mem::swap(&mut recorder.name, &mut name);
+        self.name_map.insert(name, id);
+
+        Ok(id)
+    }
+
+    pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> {
         if self.state != ManagerState::Idle {
             return Err(Error::PlaybackInProgress);
         }
 
-        let entry = match self.entries.get(&id) {
+        let entry = match self.entries.get(&(source, id)) {
             Some(entry) => entry,
             None => {
                 return Err(Error::IdNotFound);
@ -130,7 +514,8 @@ impl Manager {
|
||||||
assert!(ptr as u32 % 64 == 0);
|
assert!(ptr as u32 % 64 == 0);
|
||||||
|
|
||||||
self.state = ManagerState::Playback;
|
self.state = ManagerState::Playback;
|
||||||
self.currentid = id;
|
self.current_id = id;
|
||||||
|
self.current_source = source;
|
||||||
|
|
||||||
unsafe {
|
unsafe {
|
||||||
csr::rtio_dma::base_address_write(ptr as u32);
|
csr::rtio_dma::base_address_write(ptr as u32);
|
||||||
|
@ -162,7 +547,8 @@ impl Manager {
|
||||||
csr::rtio_dma::error_write(1);
|
csr::rtio_dma::error_write(1);
|
||||||
}
|
}
|
||||||
return Some(RtioStatus {
|
return Some(RtioStatus {
|
||||||
id: self.currentid,
|
source: self.current_source,
|
||||||
|
id: self.current_id,
|
||||||
error: error,
|
error: error,
|
||||||
channel: channel,
|
channel: channel,
|
||||||
timestamp: timestamp,
|
timestamp: timestamp,
|
||||||
|
|
|
@@ -39,11 +39,13 @@ use libboard_zynq::{i2c::I2c, print, println, time::Milliseconds, timer::GlobalT
 use libcortex_a9::{l2c::enable_l2_cache, regs::MPIDR};
 use libregister::RegisterR;
 use libsupport_zynq::ram;
+use routing::Router;
 use subkernel::Manager as KernelManager;

 mod analyzer;
 mod dma;
 mod repeater;
+mod routing;
 mod subkernel;

 fn drtiosat_reset(reset: bool) {
@@ -72,6 +74,12 @@ fn drtiosat_tsc_loaded() -> bool {
     }
 }

+fn drtiosat_async_ready() {
+    unsafe {
+        csr::drtiosat::async_messages_ready_write(1);
+    }
+}
+
 #[cfg(has_drtio_routing)]
 macro_rules! forward {
     ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr, $timer:expr) => {{
@@ -79,7 +87,11 @@ macro_rules! forward {
         if hop != 0 {
             let repno = (hop - 1) as usize;
             if repno < $repeaters.len() {
+                if $packet.expects_response() {
                     return $repeaters[repno].aux_forward($packet, $timer);
+                } else {
+                    return $repeaters[repno].aux_send($packet);
+                }
             } else {
                 return Err(drtioaux::Error::RoutingError);
             }
@@ -95,13 +107,15 @@ macro_rules! forward {
 fn process_aux_packet(
     _repeaters: &mut [repeater::Repeater],
     _routing_table: &mut drtio_routing::RoutingTable,
-    _rank: &mut u8,
+    rank: &mut u8,
+    self_destination: &mut u8,
     packet: drtioaux::Packet,
     timer: &mut GlobalTimer,
     i2c: &mut I2c,
     dma_manager: &mut DmaManager,
     analyzer: &mut Analyzer,
     kernel_manager: &mut KernelManager,
+    router: &mut Router,
 ) -> Result<(), drtioaux::Error> {
     // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels,
     // and u16 otherwise; hence the `as _` conversion.
@@ -122,54 +136,12 @@ fn process_aux_packet(

         drtioaux::Packet::DestinationStatusRequest { destination } => {
             #[cfg(has_drtio_routing)]
-            let hop = _routing_table.0[destination as usize][*_rank as usize];
+            let hop = _routing_table.0[destination as usize][*rank as usize];
             #[cfg(not(has_drtio_routing))]
             let hop = 0;

             if hop == 0 {
-                if let Some(status) = dma_manager.check_state() {
-                    info!(
-                        "playback done, error: {}, channel: {}, timestamp: {}",
-                        status.error, status.channel, status.timestamp
-                    );
-                    drtioaux::send(
-                        0,
-                        &drtioaux::Packet::DmaPlaybackStatus {
-                            destination: destination,
-                            id: status.id,
-                            error: status.error,
-                            channel: status.channel,
-                            timestamp: status.timestamp,
-                        },
-                    )?;
-                } else if let Some(subkernel_finished) = kernel_manager.get_last_finished() {
-                    info!(
-                        "subkernel {} finished, with exception: {}",
-                        subkernel_finished.id, subkernel_finished.with_exception
-                    );
-                    drtioaux::send(
-                        0,
-                        &drtioaux::Packet::SubkernelFinished {
-                            id: subkernel_finished.id,
-                            with_exception: subkernel_finished.with_exception,
-                        },
-                    )?;
-                } else if kernel_manager.message_is_ready() {
-                    let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
-                    match kernel_manager.message_get_slice(&mut data_slice) {
-                        Some(meta) => drtioaux::send(
-                            0,
-                            &drtioaux::Packet::SubkernelMessage {
-                                destination: destination,
-                                id: kernel_manager.get_current_id().unwrap(),
-                                last: meta.last,
-                                length: meta.len as u16,
-                                data: data_slice,
-                            },
-                        )?,
-                        None => warn!("subkernel message is ready but no message is present"),
-                    }
-                } else {
+                *self_destination = destination;
                 let errors;
                 unsafe {
                     errors = csr::drtiosat::rtio_error_read();
@@ -199,7 +171,6 @@ fn process_aux_packet(
                 drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?;
-                }
             }
         }

         #[cfg(has_drtio_routing)]
         {
|
@ -242,11 +213,11 @@ fn process_aux_packet(
|
||||||
drtioaux::send(0, &drtioaux::Packet::RoutingAck)
|
drtioaux::send(0, &drtioaux::Packet::RoutingAck)
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio_routing)]
|
#[cfg(has_drtio_routing)]
|
||||||
drtioaux::Packet::RoutingSetRank { rank } => {
|
drtioaux::Packet::RoutingSetRank { rank: new_rank } => {
|
||||||
*_rank = rank;
|
*rank = new_rank;
|
||||||
drtio_routing::interconnect_enable_all(_routing_table, rank);
|
drtio_routing::interconnect_enable_all(_routing_table, new_rank);
|
||||||
|
|
||||||
let rep_rank = rank + 1;
|
let rep_rank = new_rank + 1;
|
||||||
for rep in _repeaters.iter() {
|
for rep in _repeaters.iter() {
|
||||||
if let Err(e) = rep.set_rank(rep_rank, timer) {
|
if let Err(e) = rep.set_rank(rep_rank, timer) {
|
||||||
error!("failed to set rank ({:?})", e);
|
error!("failed to set rank ({:?})", e);
|
||||||
|
@ -267,12 +238,20 @@ fn process_aux_packet(
|
||||||
#[cfg(not(has_drtio_routing))]
|
#[cfg(not(has_drtio_routing))]
|
||||||
drtioaux::Packet::RoutingSetRank { rank: _ } => drtioaux::send(0, &drtioaux::Packet::RoutingAck),
|
drtioaux::Packet::RoutingSetRank { rank: _ } => drtioaux::send(0, &drtioaux::Packet::RoutingAck),
|
||||||
|
|
||||||
|
drtioaux::Packet::RoutingRetrievePackets => {
|
||||||
|
let packet = router
|
||||||
|
.get_upstream_packet()
|
||||||
|
.or(Some(drtioaux::Packet::RoutingNoPackets))
|
||||||
|
.unwrap();
|
||||||
|
drtioaux::send(0, &packet)
|
||||||
|
}
|
||||||
|
|
||||||
drtioaux::Packet::MonitorRequest {
|
drtioaux::Packet::MonitorRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
channel,
|
channel,
|
||||||
probe,
|
probe,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let value;
|
let value;
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
|
@ -294,7 +273,7 @@ fn process_aux_packet(
|
||||||
overrd,
|
overrd,
|
||||||
value,
|
value,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
csr::rtio_moninj::inj_chan_sel_write(channel as _);
|
csr::rtio_moninj::inj_chan_sel_write(channel as _);
|
||||||
|
@ -308,7 +287,7 @@ fn process_aux_packet(
|
||||||
channel,
|
channel,
|
||||||
overrd,
|
overrd,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let value;
|
let value;
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
|
@ -327,7 +306,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.start().is_ok();
|
let succeeded = i2c.start().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -335,7 +314,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.restart().is_ok();
|
let succeeded = i2c.restart().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -343,7 +322,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.stop().is_ok();
|
let succeeded = i2c.stop().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -352,7 +331,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
data,
|
data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
match i2c.write(data) {
|
match i2c.write(data) {
|
||||||
Ok(ack) => drtioaux::send(
|
Ok(ack) => drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -375,7 +354,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
ack,
|
ack,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
match i2c.read(ack) {
|
match i2c.read(ack) {
|
||||||
Ok(data) => drtioaux::send(
|
Ok(data) => drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -399,7 +378,7 @@ fn process_aux_packet(
|
||||||
address,
|
address,
|
||||||
mask,
|
mask,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let ch = match mask {
|
let ch = match mask {
|
||||||
//decode from mainline, PCA9548-centric API
|
//decode from mainline, PCA9548-centric API
|
||||||
0x00 => None,
|
0x00 => None,
|
||||||
|
@ -425,7 +404,7 @@ fn process_aux_packet(
|
||||||
div: _div,
|
div: _div,
|
||||||
cs: _cs,
|
cs: _cs,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
//let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
|
//let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
||||||
|
@ -435,7 +414,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
data: _data,
|
data: _data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
//let succeeded = spi::write(busno, data).is_ok();
|
//let succeeded = spi::write(busno, data).is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
||||||
|
@ -444,7 +423,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
// match spi::read(busno) {
|
// match spi::read(busno) {
|
||||||
// Ok(data) => drtioaux::send(0,
|
// Ok(data) => drtioaux::send(0,
|
||||||
|
@ -464,7 +443,7 @@ fn process_aux_packet(
|
||||||
drtioaux::Packet::AnalyzerHeaderRequest {
|
drtioaux::Packet::AnalyzerHeaderRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let header = analyzer.get_header();
|
let header = analyzer.get_header();
|
||||||
drtioaux::send(
|
drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -478,7 +457,7 @@ fn process_aux_packet(
|
||||||
drtioaux::Packet::AnalyzerDataRequest {
|
drtioaux::Packet::AnalyzerDataRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
||||||
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||||
let meta = analyzer.get_data(&mut data_slice);
|
let meta = analyzer.get_data(&mut data_slice);
|
||||||
drtioaux::send(
|
drtioaux::send(
|
||||||
|
@@ -492,55 +471,135 @@ fn process_aux_packet(
         }

         drtioaux::Packet::DmaAddTraceRequest {
-            destination: _destination,
+            source,
+            destination,
             id,
-            last,
+            status,
             length,
             trace,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
-            let succeeded = dma_manager.add(id, last, &trace, length as usize).is_ok();
-            drtioaux::send(0, &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded })
+            forward!(_routing_table, destination, *rank, _repeaters, &packet, timer);
+            *self_destination = destination;
+            let succeeded = dma_manager.add(source, id, status, &trace, length as usize).is_ok();
+            router.send(
+                drtioaux::Packet::DmaAddTraceReply {
+                    source: *self_destination,
+                    destination: source,
+                    id: id,
+                    succeeded: succeeded,
+                },
+                _routing_table,
+                *rank,
+                *self_destination,
+            )
+        }
+        drtioaux::Packet::DmaAddTraceReply {
+            source,
+            destination: _destination,
+            id,
+            succeeded,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            dma_manager.ack_upload(
+                kernel_manager,
+                source,
+                id,
+                succeeded,
+                router,
+                *rank,
+                *self_destination,
+                _routing_table,
+            );
+            Ok(())
         }
         drtioaux::Packet::DmaRemoveTraceRequest {
+            source,
             destination: _destination,
             id,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
-            let succeeded = dma_manager.erase(id).is_ok();
-            drtioaux::send(0, &drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded })
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            let succeeded = dma_manager.erase(source, id).is_ok();
+            router.send(
+                drtioaux::Packet::DmaRemoveTraceReply {
+                    destination: source,
+                    succeeded: succeeded,
+                },
+                _routing_table,
+                *rank,
+                *self_destination,
+            )
+        }
+        drtioaux::Packet::DmaRemoveTraceReply {
+            destination: _destination,
+            succeeded: _,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            Ok(())
         }
         drtioaux::Packet::DmaPlaybackRequest {
+            source,
             destination: _destination,
             id,
             timestamp,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
             let succeeded = if !kernel_manager.running() {
-                dma_manager.playback(id, timestamp).is_ok()
+                dma_manager.playback(source, id, timestamp).is_ok()
             } else {
                 false
             };
-            drtioaux::send(0, &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded })
+            router.send(
+                drtioaux::Packet::DmaPlaybackReply {
+                    destination: source,
+                    succeeded: succeeded,
+                },
+                _routing_table,
+                *rank,
+                *self_destination,
+            )
+        }
+        drtioaux::Packet::DmaPlaybackReply {
+            destination: _destination,
+            succeeded,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            if !succeeded {
+                kernel_manager.ddma_nack();
+            }
+            Ok(())
+        }
+        drtioaux::Packet::DmaPlaybackStatus {
+            source: _,
+            destination: _destination,
+            id,
+            error,
+            channel,
+            timestamp,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            dma_manager.remote_finished(kernel_manager, id, error, channel, timestamp);
+            Ok(())
         }

         drtioaux::Packet::SubkernelAddDataRequest {
-            destination: _destination,
+            destination,
             id,
-            last,
+            status,
             length,
             data,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
-            let succeeded = kernel_manager.add(id, last, &data, length as usize).is_ok();
+            forward!(_routing_table, destination, *rank, _repeaters, &packet, timer);
+            *self_destination = destination;
+            let succeeded = kernel_manager.add(id, status, &data, length as usize).is_ok();
             drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
         }
         drtioaux::Packet::SubkernelLoadRunRequest {
+            source,
             destination: _destination,
             id,
             run,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
             let mut succeeded = kernel_manager.load(id).is_ok();
             // allow preloading a kernel with delayed run
             if run {
@@ -548,59 +607,91 @@ fn process_aux_packet(
                     // cannot run kernel while DDMA is running
                     succeeded = false;
                 } else {
-                    succeeded |= kernel_manager.run(id).is_ok();
+                    succeeded |= kernel_manager.run(source, id).is_ok();
                 }
             }
-            drtioaux::send(0, &drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded })
+            router.send(
+                drtioaux::Packet::SubkernelLoadRunReply {
+                    destination: source,
+                    succeeded: succeeded,
+                },
+                _routing_table,
+                *rank,
+                *self_destination,
+            )
+        }
+        drtioaux::Packet::SubkernelLoadRunReply {
+            destination: _destination,
+            succeeded,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            // received if local subkernel started another, remote subkernel
+            kernel_manager.subkernel_load_run_reply(succeeded);
+            Ok(())
+        }
+        drtioaux::Packet::SubkernelFinished {
+            destination: _destination,
+            id,
+            with_exception,
+            exception_src,
+        } => {
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            kernel_manager.remote_subkernel_finished(id, with_exception, exception_src);
+            Ok(())
         }
         drtioaux::Packet::SubkernelExceptionRequest {
             destination: _destination,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
             let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
             let meta = kernel_manager.exception_get_slice(&mut data_slice);
             drtioaux::send(
                 0,
                 &drtioaux::Packet::SubkernelException {
-                    last: meta.last,
+                    last: meta.status.is_last(),
                     length: meta.len,
                     data: data_slice,
                 },
             )
         }
         drtioaux::Packet::SubkernelMessage {
-            destination,
-            id: _id,
-            last,
+            source,
+            destination: _destination,
+            id,
+            status,
             length,
             data,
         } => {
-            forward!(_routing_table, destination, *_rank, _repeaters, &packet, timer);
-            kernel_manager.message_handle_incoming(last, length as usize, &data);
-            drtioaux::send(
-                0,
-                &drtioaux::Packet::SubkernelMessageAck {
-                    destination: destination,
-                },
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
+            kernel_manager.message_handle_incoming(status, id, length as usize, &data);
+            router.send(
+                drtioaux::Packet::SubkernelMessageAck { destination: source },
+                _routing_table,
+                *rank,
+                *self_destination,
             )
         }
         drtioaux::Packet::SubkernelMessageAck {
             destination: _destination,
         } => {
-            forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
+            forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
             if kernel_manager.message_ack_slice() {
                 let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 if let Some(meta) = kernel_manager.message_get_slice(&mut data_slice) {
-                    drtioaux::send(
-                        0,
-                        &drtioaux::Packet::SubkernelMessage {
-                            destination: *_rank,
+                    // route and not send immediately as ACKs are not a beginning of a transaction
+                    router.route(
+                        drtioaux::Packet::SubkernelMessage {
+                            source: *self_destination,
+                            destination: meta.destination,
                             id: kernel_manager.get_current_id().unwrap(),
-                            last: meta.last,
+                            status: meta.status,
                             length: meta.len as u16,
                             data: data_slice,
                         },
-                    )?;
+                        _routing_table,
+                        *rank,
+                        *self_destination,
+                    );
                 } else {
                     error!("Error receiving message slice");
                 }
@@ -608,8 +699,8 @@ fn process_aux_packet(
             Ok(())
         }

-        _ => {
-            warn!("received unexpected aux packet");
+        p => {
+            warn!("received unexpected aux packet: {:?}", p);
             Ok(())
         }
     }
@@ -619,32 +710,35 @@ fn process_aux_packets(
     repeaters: &mut [repeater::Repeater],
     routing_table: &mut drtio_routing::RoutingTable,
     rank: &mut u8,
+    self_destination: &mut u8,
     timer: &mut GlobalTimer,
     i2c: &mut I2c,
     dma_manager: &mut DmaManager,
     analyzer: &mut Analyzer,
     kernel_manager: &mut KernelManager,
+    router: &mut Router,
 ) {
     let result = drtioaux::recv(0).and_then(|packet| {
-        if let Some(packet) = packet {
+        if let Some(packet) = packet.or_else(|| router.get_local_packet()) {
             process_aux_packet(
                 repeaters,
                 routing_table,
                 rank,
+                self_destination,
                 packet,
                 timer,
                 i2c,
                 dma_manager,
                 analyzer,
                 kernel_manager,
+                router,
             )
         } else {
             Ok(())
         }
     });
-    match result {
-        Ok(()) => (),
-        Err(e) => warn!("aux packet error ({:?})", e),
+    if let Err(e) = result {
+        warn!("aux packet error ({:?})", e);
     }
 }

@@ -800,17 +894,20 @@ pub extern "C" fn main_core0() -> i32 {
     }
     let mut routing_table = drtio_routing::RoutingTable::default_empty();
     let mut rank = 1;
+    let mut destination = 1;

     let mut hardware_tick_ts = 0;

     let mut control = ksupport::kernel::Control::start();

     loop {
+        let mut router = Router::new();
+
         while !drtiosat_link_rx_up() {
             drtiosat_process_errors();
             #[allow(unused_mut)]
             for mut rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank, &mut timer);
+                rep.service(&routing_table, rank, destination, &mut router, &mut timer);
             }
             #[cfg(feature = "target_kasli_soc")]
             {
@@ -849,15 +946,17 @@ pub extern "C" fn main_core0() -> i32 {
                 &mut repeaters,
                 &mut routing_table,
                 &mut rank,
+                &mut destination,
                 &mut timer,
                 &mut i2c,
                 &mut dma_manager,
                 &mut analyzer,
                 &mut kernel_manager,
+                &mut router,
             );
             #[allow(unused_mut)]
             for mut rep in repeaters.iter_mut() {
-                rep.service(&routing_table, rank, &mut timer);
+                rep.service(&routing_table, rank, destination, &mut router, &mut timer);
             }
             #[cfg(feature = "target_kasli_soc")]
             {
@@ -880,7 +979,45 @@ pub extern "C" fn main_core0() -> i32 {
                     error!("aux packet error: {:?}", e);
                 }
             }
-            kernel_manager.process_kern_requests(rank, timer);
+            if let Some(status) = dma_manager.check_state() {
+                info!(
+                    "playback done, error: {}, channel: {}, timestamp: {}",
+                    status.error, status.channel, status.timestamp
+                );
+                router.route(
+                    drtioaux::Packet::DmaPlaybackStatus {
+                        source: destination,
+                        destination: status.source,
+                        id: status.id,
+                        error: status.error,
+                        channel: status.channel,
+                        timestamp: status.timestamp,
+                    },
+                    &routing_table,
+                    rank,
+                    destination,
+                );
+            }
+
+            kernel_manager.process_kern_requests(
+                &mut router,
+                &routing_table,
+                rank,
+                destination,
+                &mut dma_manager,
+                &timer,
+            );
+
+            #[cfg(has_drtio_routing)]
+            if let Some((repno, packet)) = router.get_downstream_packet() {
+                if let Err(e) = repeaters[repno].aux_send(&packet) {
+                    warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e)
+                }
+            }
+
+            if router.any_upstream_waiting() {
+                drtiosat_async_ready();
+            }
         }

         drtiosat_reset_phy(true);
@@ -6,6 +6,7 @@ use libboard_artiq::{drtio_routing, drtioaux};
 #[cfg(has_drtio_routing)]
 use libboard_zynq::time::Milliseconds;
 use libboard_zynq::timer::GlobalTimer;
+use routing::Router;

 #[cfg(has_drtio_routing)]
 fn rep_link_rx_up(repno: u8) -> bool {
@@ -53,7 +54,14 @@ impl Repeater {
         self.state == RepeaterState::Up
     }

-    pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, timer: &mut GlobalTimer) {
+    pub fn service(
+        &mut self,
+        routing_table: &drtio_routing::RoutingTable,
+        rank: u8,
+        destination: u8,
+        router: &mut Router,
+        timer: &mut GlobalTimer,
+    ) {
         self.process_local_errors();

         match self.state {
@@ -116,6 +124,11 @@ impl Repeater {
                     info!("[REP#{}] link is down", self.repno);
                     self.state = RepeaterState::Down;
                 }
+                if self.async_messages_ready() {
+                    if let Err(e) = self.handle_async(routing_table, rank, destination, router, timer) {
+                        warn!("[REP#{}] Error handling async messages ({:?})", self.repno, e);
+                    }
+                }
             }
             RepeaterState::Failed => {
                 if !rep_link_rx_up(self.repno) {
@@ -173,6 +186,34 @@ impl Repeater {
         }
     }

+    fn async_messages_ready(&self) -> bool {
+        let async_rdy;
+        unsafe {
+            async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)();
+            (csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0);
+        }
+        async_rdy == 1
+    }
+
+    fn handle_async(
+        &self,
+        routing_table: &drtio_routing::RoutingTable,
+        rank: u8,
+        self_destination: u8,
+        router: &mut Router,
+        timer: &mut GlobalTimer,
+    ) -> Result<(), drtioaux::Error> {
+        loop {
+            drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap();
+            let reply = self.recv_aux_timeout(200, timer)?;
+            match reply {
+                drtioaux::Packet::RoutingNoPackets => break,
+                packet => router.route(packet, routing_table, rank, self_destination),
+            }
+        }
+        Ok(())
+    }
+
     fn recv_aux_timeout(&self, timeout: u32, timer: &mut GlobalTimer) -> Result<drtioaux::Packet, drtioaux::Error> {
         let max_time = timer.get_time() + Milliseconds(timeout.into());
         loop {
@@ -191,15 +232,19 @@ impl Repeater {
     }

     pub fn aux_forward(&self, request: &drtioaux::Packet, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
-        if self.state != RepeaterState::Up {
-            return Err(drtioaux::Error::LinkDown);
-        }
-        drtioaux::send(self.auxno, request).unwrap();
+        self.aux_send(request)?;
         let reply = self.recv_aux_timeout(200, timer)?;
         drtioaux::send(0, &reply).unwrap();
         Ok(())
     }

+    pub fn aux_send(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> {
+        if self.state != RepeaterState::Up {
+            return Err(drtioaux::Error::LinkDown);
+        }
+        drtioaux::send(self.auxno, request)
+    }
+
     pub fn sync_tsc(&self, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
         if self.state != RepeaterState::Up {
             return Ok(());
@@ -302,7 +347,15 @@ impl Repeater {
         Repeater::default()
     }

-    pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _timer: &mut GlobalTimer) {}
+    pub fn service(
+        &self,
+        _routing_table: &drtio_routing::RoutingTable,
+        _rank: u8,
+        _destination: u8,
+        _router: &mut Router,
+        _timer: &mut GlobalTimer,
+    ) {
+    }

     pub fn sync_tsc(&self, _timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
         Ok(())
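For illustration only, here is a self-contained toy model of the pull protocol that `async_messages_ready`/`handle_async` implement above: the downstream satellite raises a ready flag, and the upstream repeater then polls it with RoutingRetrievePackets until it receives RoutingNoPackets. The enum and queues below stand in for `drtioaux::Packet` and the aux link; they are not the firmware API.

use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum ToyPacket {
    RoutingRetrievePackets,
    RoutingNoPackets,
    Payload(u32),
}

// Downstream side: reply to one aux request, as the satellite's
// RoutingRetrievePackets handler does with its upstream queue.
fn answer(request: ToyPacket, upstream_queue: &mut VecDeque<ToyPacket>) -> ToyPacket {
    match request {
        ToyPacket::RoutingRetrievePackets => upstream_queue
            .pop_front()
            .unwrap_or(ToyPacket::RoutingNoPackets),
        other => other,
    }
}

// Upstream side: drain the downstream queue until RoutingNoPackets, like handle_async.
fn handle_async(downstream_pending: &mut VecDeque<ToyPacket>) -> Vec<ToyPacket> {
    let mut routed = Vec::new();
    loop {
        // in the firmware this is a drtioaux send/recv pair over the aux channel
        match answer(ToyPacket::RoutingRetrievePackets, downstream_pending) {
            ToyPacket::RoutingNoPackets => break,
            packet => routed.push(packet), // the real code hands the packet to Router::route
        }
    }
    routed
}

fn main() {
    let mut pending = VecDeque::from(vec![ToyPacket::Payload(1), ToyPacket::Payload(2)]);
    let routed = handle_async(&mut pending);
    assert_eq!(routed, vec![ToyPacket::Payload(1), ToyPacket::Payload(2)]);
    assert!(pending.is_empty());
    println!("routed {} async packet(s)", routed.len());
}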
@@ -0,0 +1,190 @@
+use alloc::{collections::vec_deque::VecDeque, vec::Vec};
+use core::cmp::min;
+
+#[cfg(has_drtio_routing)]
+use libboard_artiq::pl::csr;
+use libboard_artiq::{drtio_routing, drtioaux,
+                     drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE}};
+
+pub struct SliceMeta {
+    pub destination: u8,
+    pub len: u16,
+    pub status: PayloadStatus,
+}
+
+/* represents data that has to be sent to Master */
+#[derive(Debug)]
+pub struct Sliceable {
+    it: usize,
+    data: Vec<u8>,
+    destination: u8,
+}
+
+macro_rules! get_slice_fn {
+    ($name:tt, $size:expr) => {
+        pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
+            let first = self.it == 0;
+            let len = min($size, self.data.len() - self.it);
+            let last = self.it + len == self.data.len();
+            let status = PayloadStatus::from_status(first, last);
+
+            data_slice[..len].clone_from_slice(&self.data[self.it..self.it + len]);
+            self.it += len;
+
+            SliceMeta {
+                destination: self.destination,
+                len: len as u16,
+                status: status,
+            }
+        }
+    };
+}
+
+impl Sliceable {
+    pub fn new(destination: u8, data: Vec<u8>) -> Sliceable {
+        Sliceable {
+            it: 0,
+            data: data,
+            destination: destination,
+        }
+    }
+
+    pub fn at_end(&self) -> bool {
+        self.it == self.data.len()
+    }
+
+    pub fn extend(&mut self, data: &[u8]) {
+        self.data.extend(data);
+    }
+
+    get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
+    get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
+}
+
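For illustration only, the following sketch reproduces the slicing logic that `get_slice_fn!` generates above: a buffered payload is cut into fixed-size slices and each slice is tagged with first/last information, which is what `PayloadStatus::from_status(first, last)` encodes. A plain `(first, last)` tuple stands in for PayloadStatus here, and the function name is invented.

/// Cut `data` into chunks of at most `max` bytes, tagging each chunk with
/// (first, last) flags, the same information PayloadStatus carries.
fn slices(data: &[u8], max: usize) -> Vec<(Vec<u8>, bool, bool)> {
    let mut out = Vec::new();
    let mut it = 0;
    loop {
        let first = it == 0;
        let len = max.min(data.len() - it);
        let last = it + len == data.len();
        out.push((data[it..it + len].to_vec(), first, last));
        it += len;
        if last {
            break;
        }
    }
    out
}

fn main() {
    let payload: Vec<u8> = (0..10).collect();
    let parts = slices(&payload, 4);
    // 10 bytes in 4-byte slices -> 4 + 4 + 2
    assert_eq!(parts.len(), 3);
    assert!(parts[0].1 && !parts[0].2); // first, not last
    assert!(!parts[2].1 && parts[2].2); // last, not first
    assert_eq!(parts[2].0, vec![8, 9]);
    println!("{} slices", parts.len());
}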
+// Packets from downstream (further satellites) are received and routed appropriately.
+// They are passed downstream (within the subtree) as soon as possible, or sent upstream,
+// and the upstream is notified about pending packets.
+// For rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest;
+// for higher ranks, after getting a notification, the satellite will transact with downstream to get the pending packets.
+
+// The forward! macro is not deprecated, as routable packets are only those that can originate
+// from both master and satellite, e.g. DDMA and Subkernel.
+
+pub struct Router {
+    upstream_queue: VecDeque<drtioaux::Packet>,
+    local_queue: VecDeque<drtioaux::Packet>,
+    #[cfg(has_drtio_routing)]
+    downstream_queue: VecDeque<(usize, drtioaux::Packet)>,
+    upstream_notified: bool,
+}
+
+impl Router {
+    pub fn new() -> Router {
+        Router {
+            upstream_queue: VecDeque::new(),
+            local_queue: VecDeque::new(),
+            #[cfg(has_drtio_routing)]
+            downstream_queue: VecDeque::new(),
+            upstream_notified: false,
+        }
+    }
+
+    // Called by local sources (DDMA, kernel) and by repeaters on receiving async data;
+    // messages are always buffered for both upstream and downstream
+    pub fn route(
+        &mut self,
+        packet: drtioaux::Packet,
+        _routing_table: &drtio_routing::RoutingTable,
+        _rank: u8,
+        self_destination: u8,
+    ) {
+        let destination = packet.routable_destination();
+        #[cfg(has_drtio_routing)]
+        {
+            if let Some(destination) = destination {
+                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
+                if destination == self_destination {
+                    self.local_queue.push_back(packet);
+                } else if hop > 0 && hop < csr::DRTIOREP.len() {
+                    let repno = (hop - 1) as usize;
+                    self.downstream_queue.push_back((repno, packet));
+                } else {
+                    self.upstream_queue.push_back(packet);
+                }
+            } else {
+                error!("Received an unroutable packet: {:?}", packet);
+            }
+        }
+        #[cfg(not(has_drtio_routing))]
+        {
+            if destination == Some(self_destination) {
+                self.local_queue.push_back(packet);
+            } else {
+                self.upstream_queue.push_back(packet);
+            }
+        }
+    }
+
+    // Sends a packet to a required destination, routing if necessary
+    pub fn send(
+        &mut self,
+        packet: drtioaux::Packet,
+        _routing_table: &drtio_routing::RoutingTable,
+        _rank: u8,
+        _destination: u8,
+    ) -> Result<(), drtioaux::Error> {
+        #[cfg(has_drtio_routing)]
+        {
+            let destination = packet.routable_destination();
+            if let Some(destination) = destination {
+                let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
+                if destination == 0 {
+                    // response is needed immediately if master required it
+                    drtioaux::send(0, &packet)?;
+                } else if !(hop > 0 && hop < csr::DRTIOREP.len()) {
+                    // higher rank can wait
+                    self.upstream_queue.push_back(packet);
+                } else {
+                    let repno = (hop - 1) as usize;
+                    // transaction will occur at closest possible opportunity
+                    self.downstream_queue.push_back((repno, packet));
+                }
+                Ok(())
+            } else {
+                // packet not supported in routing, fallback - sent directly
+                drtioaux::send(0, &packet)
+            }
+        }
+        #[cfg(not(has_drtio_routing))]
+        {
+            drtioaux::send(0, &packet)
+        }
+    }
+
+    pub fn any_upstream_waiting(&mut self) -> bool {
+        let empty = self.upstream_queue.is_empty();
+        if !empty && !self.upstream_notified {
+            self.upstream_notified = true; // so upstream will not get spammed with notifications
+            true
+        } else {
+            false
+        }
+    }
+
+    pub fn get_upstream_packet(&mut self) -> Option<drtioaux::Packet> {
+        let packet = self.upstream_queue.pop_front();
+        if packet.is_none() {
+            self.upstream_notified = false;
+        }
+        packet
+    }
+
+    #[cfg(has_drtio_routing)]
+    pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> {
+        self.downstream_queue.pop_front()
+    }
+
+    pub fn get_local_packet(&mut self) -> Option<drtioaux::Packet> {
+        self.local_queue.pop_front()
+    }
+}
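For illustration only, here is a self-contained model of the queueing decision in `Router::route` above: a packet goes to the local queue when it is addressed to this satellite, to a downstream repeater queue when the routing table points below, and to the upstream queue otherwise. A plain hop array replaces `drtio_routing::RoutingTable` and plain `u8` destinations replace `drtioaux::Packet`; the hop-to-repeater convention is simplified and everything named here is invented for the sketch.

use std::collections::VecDeque;

struct ToyRouter {
    upstream: VecDeque<u8>,
    downstream: VecDeque<(usize, u8)>, // (repeater index, destination)
    local: VecDeque<u8>,
}

impl ToyRouter {
    fn new() -> ToyRouter {
        ToyRouter {
            upstream: VecDeque::new(),
            downstream: VecDeque::new(),
            local: VecDeque::new(),
        }
    }

    /// hops[dest] plays the role of routing_table.0[dest][rank]:
    /// 0 means upstream, 1..=n_repeaters selects a downstream repeater.
    fn route(&mut self, destination: u8, hops: &[u8], self_destination: u8, n_repeaters: usize) {
        let hop = hops[destination as usize] as usize;
        if destination == self_destination {
            self.local.push_back(destination);
        } else if hop > 0 && hop <= n_repeaters {
            self.downstream.push_back((hop - 1, destination));
        } else {
            self.upstream.push_back(destination);
        }
    }
}

fn main() {
    // destinations 0..=4; this satellite is destination 2 and has one repeater (hop 1 -> repeater 0)
    let hops = [0u8, 0, 0, 1, 0];
    let mut router = ToyRouter::new();
    router.route(2, &hops, 2, 1); // local
    router.route(3, &hops, 2, 1); // downstream, repeater 0
    router.route(0, &hops, 2, 1); // upstream, towards the master
    assert_eq!(router.local.len(), 1);
    assert_eq!(router.downstream.front(), Some(&(0usize, 3u8)));
    assert_eq!(router.upstream.front(), Some(&0u8));
    println!(
        "queues: {} local, {} downstream, {} upstream",
        router.local.len(),
        router.downstream.len(),
        router.upstream.len()
    );
}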
@@ -1,26 +1,52 @@
-use alloc::{collections::{BTreeMap, VecDeque},
+use alloc::{collections::BTreeMap,
             format,
             string::{String, ToString},
             vec::Vec};
-use core::{cmp::min, option::NoneError, slice, str};
+use core::{option::NoneError, slice, str};

 use core_io::{Error as IoError, Write};
 use cslice::AsCSlice;
+use dma::{Error as DmaError, Manager as DmaManager};
 use io::{Cursor, ProtoWrite};
 use ksupport::{eh_artiq, kernel, rpc};
-use libboard_artiq::{drtioaux_proto::{MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE},
+use libboard_artiq::{drtio_routing::RoutingTable,
+                     drtioaux,
+                     drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE},
                      pl::csr};
 use libboard_zynq::{time::Milliseconds, timer::GlobalTimer};
 use libcortex_a9::sync_channel::Receiver;
 use log::warn;
+use routing::{Router, SliceMeta, Sliceable};

 #[derive(Debug, Clone, PartialEq)]
 enum KernelState {
     Absent,
     Loaded,
     Running,
-    MsgAwait(Milliseconds, Vec<u8>),
+    MsgAwait {
+        max_time: Option<Milliseconds>,
+        id: u32,
+        tags: Vec<u8>,
+    },
     MsgSending,
+    SubkernelAwaitLoad,
+    SubkernelAwaitFinish {
+        max_time: Option<Milliseconds>,
+        id: u32,
+    },
+    DmaUploading,
+    DmaPendingPlayback {
+        id: u32,
+        timestamp: u64,
+    },
+    DmaPendingAwait {
+        id: u32,
+        timestamp: u64,
+        max_time: Milliseconds,
+    },
+    DmaAwait {
+        max_time: Milliseconds,
+    },
 }

 #[derive(Debug)]
@@ -31,7 +57,9 @@ pub enum Error {
     NoMessage,
     AwaitingMessage,
     SubkernelIoError,
+    DrtioError,
     KernelException(Sliceable),
+    DmaError(DmaError),
 }

 impl From<NoneError> for Error {
@@ -46,33 +74,38 @@ impl From<IoError> for Error {
     }
 }

+impl From<DmaError> for Error {
+    fn from(value: DmaError) -> Error {
+        Error::DmaError(value)
+    }
+}
+
 impl From<()> for Error {
     fn from(_: ()) -> Error {
         Error::NoMessage
     }
 }

+impl From<drtioaux::Error> for Error {
+    fn from(_value: drtioaux::Error) -> Error {
+        Error::DrtioError
+    }
+}
+
 macro_rules! unexpected {
     ($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
 }

-/* represents data that has to be sent to Master */
-#[derive(Debug)]
-pub struct Sliceable {
-    it: usize,
-    data: Vec<u8>,
-}
-
 /* represents interkernel messages */
 struct Message {
     count: u8,
+    id: u32,
     data: Vec<u8>,
 }

 #[derive(PartialEq)]
 enum OutMessageState {
     NoMessage,
-    MessageReady,
     MessageBeingSent,
     MessageSent,
     MessageAcknowledged,
@@ -82,7 +115,7 @@ enum OutMessageState {
 struct MessageManager {
     out_message: Option<Sliceable>,
     out_state: OutMessageState,
-    in_queue: VecDeque<Message>,
+    in_queue: Vec<Message>,
     in_buffer: Option<Message>,
 }

@@ -92,6 +125,8 @@ struct Session {
     kernel_state: KernelState,
     last_exception: Option<Sliceable>,
     messages: MessageManager,
+    source: u8, // which destination requested running the kernel
+    subkernels_finished: Vec<u32>,
 }

 impl Session {
@@ -101,13 +136,15 @@ impl Session {
             kernel_state: KernelState::Absent,
             last_exception: None,
             messages: MessageManager::new(),
+            source: 0,
+            subkernels_finished: Vec::new(),
         }
     }

     fn running(&self) -> bool {
         match self.kernel_state {
             KernelState::Absent | KernelState::Loaded => false,
-            KernelState::Running | KernelState::MsgAwait { .. } | KernelState::MsgSending => true,
+            _ => true,
         }
     }
 }
@@ -129,40 +166,8 @@ pub struct Manager<'a> {
 pub struct SubkernelFinished {
     pub id: u32,
     pub with_exception: bool,
-}
-
-pub struct SliceMeta {
-    pub len: u16,
-    pub last: bool,
-}
-
-macro_rules! get_slice_fn {
-    ($name:tt, $size:expr) => {
-        pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
-            if self.data.len() == 0 {
-                return SliceMeta { len: 0, last: true };
-            }
-            let len = min($size, self.data.len() - self.it);
-            let last = self.it + len == self.data.len();
-
-            data_slice[..len].clone_from_slice(&self.data[self.it..self.it + len]);
-            self.it += len;
-
-            SliceMeta {
-                len: len as u16,
-                last: last,
-            }
-        }
-    };
-}
-
-impl Sliceable {
-    pub fn new(data: Vec<u8>) -> Sliceable {
-        Sliceable { it: 0, data: data }
-    }
-
-    get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
-    get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
+    pub exception_source: u8,
+    pub source: u8,
 }

 impl MessageManager {
@@ -170,36 +175,35 @@ impl MessageManager {
         MessageManager {
             out_message: None,
             out_state: OutMessageState::NoMessage,
-            in_queue: VecDeque::new(),
+            in_queue: Vec::new(),
             in_buffer: None,
         }
     }

-    pub fn handle_incoming(&mut self, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
+    pub fn handle_incoming(
+        &mut self,
+        status: PayloadStatus,
+        id: u32,
+        length: usize,
+        data: &[u8; MASTER_PAYLOAD_MAX_SIZE],
+    ) {
         // called when receiving a message from master
+        if status.is_first() {
+            self.in_buffer = None;
+        }
         match self.in_buffer.as_mut() {
             Some(message) => message.data.extend(&data[..length]),
             None => {
                 self.in_buffer = Some(Message {
                     count: data[0],
+                    id: id,
                     data: data[1..length].to_vec(),
                 });
             }
         };
-        if last {
+        if status.is_last() {
             // when done, remove from working queue
-            self.in_queue.push_back(self.in_buffer.take().unwrap());
+            self.in_queue.push(self.in_buffer.take().unwrap());
-        }
-    }
-
-    pub fn is_outgoing_ready(&mut self) -> bool {
-        // called by main loop, to see if there's anything to send, will send it afterwards
-        match self.out_state {
-            OutMessageState::MessageReady => {
-                self.out_state = OutMessageState::MessageBeingSent;
-                true
-            }
-            _ => false,
         }
     }
@@ -218,7 +222,7 @@ impl MessageManager {
             return None;
         }
         let meta = self.out_message.as_mut()?.get_slice_master(data_slice);
-        if meta.last {
+        if meta.status.is_last() {
             // clear the message slot
             self.out_message = None;
             // notify kernel with a flag that message is sent
@@ -242,15 +246,44 @@ impl MessageManager {
         }
     }

-    pub fn accept_outgoing(&mut self, message: Vec<u8>) -> Result<(), Error> {
-        // service tag skipped in kernel
-        self.out_message = Some(Sliceable::new(message));
-        self.out_state = OutMessageState::MessageReady;
+    pub fn accept_outgoing(
+        &mut self,
+        id: u32,
+        self_destination: u8,
+        destination: u8,
+        message: Vec<u8>,
+        routing_table: &RoutingTable,
+        rank: u8,
+        router: &mut Router,
+    ) -> Result<(), Error> {
+        self.out_message = Some(Sliceable::new(destination, message));
+
+        let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+        self.out_state = OutMessageState::MessageBeingSent;
+        let meta = self.get_outgoing_slice(&mut data_slice).unwrap();
+        router.route(
+            drtioaux::Packet::SubkernelMessage {
+                source: self_destination,
+                destination: destination,
+                id: id,
+                status: meta.status,
+                length: meta.len as u16,
+                data: data_slice,
+            },
+            routing_table,
+            rank,
+            self_destination,
+        );
         Ok(())
     }

-    pub fn get_incoming(&mut self) -> Option<Message> {
-        self.in_queue.pop_front()
+    pub fn get_incoming(&mut self, id: u32) -> Option<Message> {
+        for i in 0..self.in_queue.len() {
+            if self.in_queue[i].id == id {
+                return Some(self.in_queue.remove(i));
+            }
+        }
+        None
     }
 }

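For illustration only, the following sketch models the message reassembly that the id-keyed queue above enables: a first slice resets the working buffer, intermediate slices are appended, a last slice moves the finished message into the queue, and the kernel later fetches it by id rather than in FIFO order. Plain tuples replace `Message` and `PayloadStatus`, and the real code additionally keeps a count byte at the start of the payload; all names here are invented for the sketch.

#[derive(Default)]
struct ToyMessages {
    in_buffer: Option<(u32, Vec<u8>)>,
    in_queue: Vec<(u32, Vec<u8>)>,
}

impl ToyMessages {
    fn handle_incoming(&mut self, first: bool, last: bool, id: u32, data: &[u8]) {
        if first {
            self.in_buffer = None; // a new transfer discards any half-finished one
        }
        match self.in_buffer.as_mut() {
            Some((_, buf)) => buf.extend_from_slice(data),
            None => self.in_buffer = Some((id, data.to_vec())),
        }
        if last {
            self.in_queue.push(self.in_buffer.take().unwrap());
        }
    }

    fn get_incoming(&mut self, id: u32) -> Option<Vec<u8>> {
        let pos = self.in_queue.iter().position(|(msg_id, _)| *msg_id == id)?;
        Some(self.in_queue.remove(pos).1)
    }
}

fn main() {
    let mut messages = ToyMessages::default();
    messages.handle_incoming(true, false, 7, b"hello ");
    messages.handle_incoming(false, true, 7, b"world");
    assert_eq!(messages.get_incoming(9), None); // unknown id
    assert_eq!(messages.get_incoming(7), Some(b"hello world".to_vec()));
    println!("reassembly ok");
}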
@@ -265,10 +298,10 @@ impl<'a> Manager<'_> {
         }
     }

-    pub fn add(&mut self, id: u32, last: bool, data: &[u8], data_len: usize) -> Result<(), Error> {
+    pub fn add(&mut self, id: u32, status: PayloadStatus, data: &[u8], data_len: usize) -> Result<(), Error> {
         let kernel = match self.kernels.get_mut(&id) {
             Some(kernel) => {
-                if kernel.complete {
+                if kernel.complete || status.is_first() {
                     // replace entry
                     self.kernels.remove(&id);
                     self.kernels.insert(
@@ -296,7 +329,7 @@ impl<'a> Manager<'_> {
         };
         kernel.library.extend(&data[0..data_len]);

-        kernel.complete = last;
+        kernel.complete = status.is_last();
         Ok(())
     }

@@ -311,12 +344,12 @@ impl<'a> Manager<'_> {
         }
     }

-    pub fn run(&mut self, id: u32) -> Result<(), Error> {
-        info!("starting subkernel #{}", id);
+    pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> {
         if self.session.kernel_state != KernelState::Loaded || self.session.id != id {
             self.load(id)?;
         }
         self.session.kernel_state = KernelState::Running;
+        self.session.source = source;
         unsafe {
             csr::cri_con::selected_write(2);
         }
@@ -325,11 +358,17 @@ impl<'a> Manager<'_> {
         Ok(())
     }

-    pub fn message_handle_incoming(&mut self, last: bool, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
+    pub fn message_handle_incoming(
+        &mut self,
+        status: PayloadStatus,
+        id: u32,
+        length: usize,
+        slice: &[u8; MASTER_PAYLOAD_MAX_SIZE],
+    ) {
         if !self.running() {
             return;
         }
-        self.session.messages.handle_incoming(last, length, slice);
+        self.session.messages.handle_incoming(status, id, length, slice);
     }

     pub fn message_get_slice(&mut self, slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option<SliceMeta> {
@@ -347,10 +386,6 @@ impl<'a> Manager<'_> {
         self.session.messages.ack_slice()
     }

-    pub fn message_is_ready(&mut self) -> bool {
-        self.session.messages.is_outgoing_ready()
-    }
-
     pub fn load(&mut self, id: u32) -> Result<(), Error> {
         if self.session.id == id && self.session.kernel_state == KernelState::Loaded {
             return Ok(());
@@ -378,14 +413,14 @@ impl<'a> Manager<'_> {
     pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
         match self.session.last_exception.as_mut() {
Some(exception) => exception.get_slice_sat(data_slice),
|
Some(exception) => exception.get_slice_sat(data_slice),
|
||||||
None => SliceMeta { len: 0, last: true },
|
None => SliceMeta {
|
||||||
|
destination: 0,
|
||||||
|
len: 0,
|
||||||
|
status: PayloadStatus::FirstAndLast,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_last_finished(&mut self) -> Option<SubkernelFinished> {
|
|
||||||
self.last_finished.take()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn kernel_stop(&mut self) {
|
fn kernel_stop(&mut self) {
|
||||||
self.session.kernel_state = KernelState::Absent;
|
self.session.kernel_state = KernelState::Absent;
|
||||||
unsafe {
|
unsafe {
|
||||||
|
@ -415,13 +450,92 @@ impl<'a> Manager<'_> {
|
||||||
&[],
|
&[],
|
||||||
0,
|
0,
|
||||||
) {
|
) {
|
||||||
Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())),
|
Ok(_) => self.session.last_exception = Some(Sliceable::new(0, writer.into_inner())),
|
||||||
Err(_) => error!("Error writing exception data"),
|
Err(_) => error!("Error writing exception data"),
|
||||||
}
|
}
|
||||||
self.kernel_stop();
|
self.kernel_stop();
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn process_kern_requests(&mut self, rank: u8, timer: GlobalTimer) {
|
pub fn ddma_finished(&mut self, error: u8, channel: u32, timestamp: u64) {
|
||||||
|
if let KernelState::DmaAwait { .. } = self.session.kernel_state {
|
||||||
|
self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
|
||||||
|
timeout: false,
|
||||||
|
error: error,
|
||||||
|
channel: channel,
|
||||||
|
timestamp: timestamp,
|
||||||
|
});
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ddma_nack(&mut self) {
|
||||||
|
// for simplicity treat it as a timeout...
|
||||||
|
if let KernelState::DmaAwait { .. } = self.session.kernel_state {
|
||||||
|
self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
|
||||||
|
timeout: true,
|
||||||
|
error: 0,
|
||||||
|
channel: 0,
|
||||||
|
timestamp: 0,
|
||||||
|
});
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn ddma_remote_uploaded(&mut self, succeeded: bool) -> Option<(u32, u64)> {
|
||||||
|
// returns a tuple of id, timestamp in case a playback needs to be started immediately
|
||||||
|
if !succeeded {
|
||||||
|
self.kernel_stop();
|
||||||
|
self.runtime_exception(Error::DmaError(DmaError::UploadFail));
|
||||||
|
}
|
||||||
|
let res = match self.session.kernel_state {
|
||||||
|
KernelState::DmaPendingPlayback { id, timestamp } => {
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
Some((id, timestamp))
|
||||||
|
}
|
||||||
|
KernelState::DmaPendingAwait {
|
||||||
|
id,
|
||||||
|
timestamp,
|
||||||
|
max_time,
|
||||||
|
} => {
|
||||||
|
self.session.kernel_state = KernelState::DmaAwait { max_time: max_time };
|
||||||
|
Some((id, timestamp))
|
||||||
|
}
|
||||||
|
KernelState::DmaUploading => {
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
None
|
||||||
|
}
|
||||||
|
_ => None,
|
||||||
|
};
|
||||||
|
res
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn process_kern_requests(
|
||||||
|
&mut self,
|
||||||
|
router: &mut Router,
|
||||||
|
routing_table: &RoutingTable,
|
||||||
|
rank: u8,
|
||||||
|
destination: u8,
|
||||||
|
dma_manager: &mut DmaManager,
|
||||||
|
timer: &GlobalTimer,
|
||||||
|
) {
|
||||||
|
if let Some(subkernel_finished) = self.last_finished.take() {
|
||||||
|
info!(
|
||||||
|
"subkernel {} finished, with exception: {}",
|
||||||
|
subkernel_finished.id, subkernel_finished.with_exception
|
||||||
|
);
|
||||||
|
router.route(
|
||||||
|
drtioaux::Packet::SubkernelFinished {
|
||||||
|
destination: subkernel_finished.source,
|
||||||
|
id: subkernel_finished.id,
|
||||||
|
with_exception: subkernel_finished.with_exception,
|
||||||
|
exception_src: subkernel_finished.exception_source,
|
||||||
|
},
|
||||||
|
&routing_table,
|
||||||
|
rank,
|
||||||
|
destination,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
if !self.running() {
|
if !self.running() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -434,6 +548,8 @@ impl<'a> Manager<'_> {
|
||||||
self.last_finished = Some(SubkernelFinished {
|
self.last_finished = Some(SubkernelFinished {
|
||||||
id: self.session.id,
|
id: self.session.id,
|
||||||
with_exception: true,
|
with_exception: true,
|
||||||
|
exception_source: destination,
|
||||||
|
source: self.session.source,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -442,15 +558,19 @@ impl<'a> Manager<'_> {
|
||||||
self.last_finished = Some(SubkernelFinished {
|
self.last_finished = Some(SubkernelFinished {
|
||||||
id: self.session.id,
|
id: self.session.id,
|
||||||
with_exception: true,
|
with_exception: true,
|
||||||
|
exception_source: destination,
|
||||||
|
source: self.session.source,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
match self.process_kern_message(rank, timer) {
|
match self.process_kern_message(router, routing_table, rank, destination, dma_manager, timer) {
|
||||||
Ok(true) => {
|
Ok(true) => {
|
||||||
self.last_finished = Some(SubkernelFinished {
|
self.last_finished = Some(SubkernelFinished {
|
||||||
id: self.session.id,
|
id: self.session.id,
|
||||||
with_exception: false,
|
with_exception: false,
|
||||||
|
exception_source: 0,
|
||||||
|
source: self.session.source,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
Ok(false) | Err(Error::NoMessage) => (),
|
Ok(false) | Err(Error::NoMessage) => (),
|
||||||
|
@ -459,6 +579,8 @@ impl<'a> Manager<'_> {
|
||||||
self.last_finished = Some(SubkernelFinished {
|
self.last_finished = Some(SubkernelFinished {
|
||||||
id: self.session.id,
|
id: self.session.id,
|
||||||
with_exception: true,
|
with_exception: true,
|
||||||
|
exception_source: destination,
|
||||||
|
source: self.session.source,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -467,16 +589,52 @@ impl<'a> Manager<'_> {
|
||||||
self.last_finished = Some(SubkernelFinished {
|
self.last_finished = Some(SubkernelFinished {
|
||||||
id: self.session.id,
|
id: self.session.id,
|
||||||
with_exception: true,
|
with_exception: true,
|
||||||
|
exception_source: destination,
|
||||||
|
source: self.session.source,
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn process_kern_message(&mut self, rank: u8, timer: GlobalTimer) -> Result<bool, Error> {
|
pub fn subkernel_load_run_reply(&mut self, succeeded: bool) {
|
||||||
|
if self.session.kernel_state == KernelState::SubkernelAwaitLoad {
|
||||||
|
self.control
|
||||||
|
.tx
|
||||||
|
.send(kernel::Message::SubkernelLoadRunReply { succeeded: succeeded });
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
} else {
|
||||||
|
warn!("received unsolicited SubkernelLoadRunReply");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) {
|
||||||
|
if with_exception {
|
||||||
|
self.kernel_stop();
|
||||||
|
self.last_finished = Some(SubkernelFinished {
|
||||||
|
source: self.session.source,
|
||||||
|
id: self.session.id,
|
||||||
|
with_exception: true,
|
||||||
|
exception_source: exception_source,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
self.session.subkernels_finished.push(id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn process_kern_message(
|
||||||
|
&mut self,
|
||||||
|
router: &mut Router,
|
||||||
|
routing_table: &RoutingTable,
|
||||||
|
rank: u8,
|
||||||
|
self_destination: u8,
|
||||||
|
dma_manager: &mut DmaManager,
|
||||||
|
timer: &GlobalTimer,
|
||||||
|
) -> Result<bool, Error> {
|
||||||
let reply = self.control.rx.try_recv()?;
|
let reply = self.control.rx.try_recv()?;
|
||||||
match reply {
|
match reply {
|
||||||
kernel::Message::KernelFinished(_async_errors) => {
|
kernel::Message::KernelFinished(_async_errors) => {
|
||||||
self.kernel_stop();
|
self.kernel_stop();
|
||||||
|
dma_manager.cleanup(router, rank, self_destination, routing_table);
|
||||||
return Ok(true);
|
return Ok(true);
|
||||||
}
|
}
|
||||||
kernel::Message::KernelException(exceptions, stack_pointers, backtrace, async_errors) => {
|
kernel::Message::KernelException(exceptions, stack_pointers, backtrace, async_errors) => {
|
||||||
|
@ -493,7 +651,7 @@ impl<'a> Manager<'_> {
|
||||||
Err(_) => error!("Error writing exception data"),
|
Err(_) => error!("Error writing exception data"),
|
||||||
}
|
}
|
||||||
self.kernel_stop();
|
self.kernel_stop();
|
||||||
return Err(Error::KernelException(Sliceable::new(writer.into_inner())));
|
return Err(Error::KernelException(Sliceable::new(0, writer.into_inner())));
|
||||||
}
|
}
|
||||||
kernel::Message::CachePutRequest(key, value) => {
|
kernel::Message::CachePutRequest(key, value) => {
|
||||||
self.cache.insert(key, value);
|
self.cache.insert(key, value);
|
||||||
|
@ -503,18 +661,117 @@ impl<'a> Manager<'_> {
|
||||||
let value = self.cache.get(&key).unwrap_or(&DEFAULT).clone();
|
let value = self.cache.get(&key).unwrap_or(&DEFAULT).clone();
|
||||||
self.control.tx.send(kernel::Message::CacheGetReply(value));
|
self.control.tx.send(kernel::Message::CacheGetReply(value));
|
||||||
}
|
}
|
||||||
kernel::Message::SubkernelMsgSend { id: _, data } => {
|
|
||||||
self.session.messages.accept_outgoing(data)?;
|
kernel::Message::DmaPutRequest(recorder) => {
|
||||||
|
// ddma is always used on satellites
|
||||||
|
if let Ok(id) = dma_manager.put_record(recorder, self_destination) {
|
||||||
|
dma_manager.upload_traces(id, router, rank, self_destination, routing_table)?;
|
||||||
|
self.session.kernel_state = KernelState::DmaUploading;
|
||||||
|
} else {
|
||||||
|
unexpected!("DMAError: found an unsupported call to RTIO devices on master")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kernel::Message::DmaEraseRequest(name) => {
|
||||||
|
dma_manager.erase_name(&name, router, rank, self_destination, routing_table);
|
||||||
|
}
|
||||||
|
kernel::Message::DmaGetRequest(name) => {
|
||||||
|
let dma_meta = dma_manager.retrieve(self_destination, &name);
|
||||||
|
self.control.tx.send(kernel::Message::DmaGetReply(dma_meta));
|
||||||
|
}
|
||||||
|
kernel::Message::DmaStartRemoteRequest { id, timestamp } => {
|
||||||
|
if self.session.kernel_state != KernelState::DmaUploading {
|
||||||
|
dma_manager.playback_remote(
|
||||||
|
id as u32,
|
||||||
|
timestamp as u64,
|
||||||
|
router,
|
||||||
|
rank,
|
||||||
|
self_destination,
|
||||||
|
routing_table,
|
||||||
|
)?;
|
||||||
|
} else {
|
||||||
|
self.session.kernel_state = KernelState::DmaPendingPlayback {
|
||||||
|
id: id as u32,
|
||||||
|
timestamp: timestamp as u64,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kernel::Message::DmaAwaitRemoteRequest(_id) => {
|
||||||
|
let max_time = timer.get_time() + Milliseconds(10000);
|
||||||
|
self.session.kernel_state = match self.session.kernel_state {
|
||||||
|
// if we are still waiting for the traces to be uploaded, extend the state by timeout
|
||||||
|
KernelState::DmaPendingPlayback { id, timestamp } => KernelState::DmaPendingAwait {
|
||||||
|
id: id,
|
||||||
|
timestamp: timestamp,
|
||||||
|
max_time: max_time,
|
||||||
|
},
|
||||||
|
_ => KernelState::DmaAwait { max_time: max_time },
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
kernel::Message::SubkernelMsgSend {
|
||||||
|
id: _id,
|
||||||
|
destination: msg_dest,
|
||||||
|
data,
|
||||||
|
} => {
|
||||||
|
let msg_dest = msg_dest.or(Some(self.session.source)).unwrap();
|
||||||
|
self.session.messages.accept_outgoing(
|
||||||
|
self.session.id,
|
||||||
|
self_destination,
|
||||||
|
msg_dest,
|
||||||
|
data,
|
||||||
|
routing_table,
|
||||||
|
rank,
|
||||||
|
router,
|
||||||
|
)?;
|
||||||
self.session.kernel_state = KernelState::MsgSending;
|
self.session.kernel_state = KernelState::MsgSending;
|
||||||
}
|
}
|
||||||
kernel::Message::SubkernelMsgRecvRequest { id: _, timeout, tags } => {
|
kernel::Message::SubkernelMsgRecvRequest { id, timeout, tags } => {
|
||||||
let max_time = timer.get_time() + Milliseconds(timeout);
|
let id = if id == -1 { self.session.id } else { id as u32 };
|
||||||
self.session.kernel_state = KernelState::MsgAwait(max_time, tags);
|
let max_time = if timeout > 0 {
|
||||||
|
Some(timer.get_time() + Milliseconds(timeout as u64))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
self.session.kernel_state = KernelState::MsgAwait {
|
||||||
|
max_time: max_time,
|
||||||
|
id: id,
|
||||||
|
tags: tags,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
kernel::Message::SubkernelLoadRunRequest {
|
||||||
|
id,
|
||||||
|
destination: sk_destination,
|
||||||
|
run,
|
||||||
|
} => {
|
||||||
|
self.session.kernel_state = KernelState::SubkernelAwaitLoad;
|
||||||
|
router.route(
|
||||||
|
drtioaux::Packet::SubkernelLoadRunRequest {
|
||||||
|
source: self_destination,
|
||||||
|
destination: sk_destination,
|
||||||
|
id: id,
|
||||||
|
run: run,
|
||||||
|
},
|
||||||
|
routing_table,
|
||||||
|
rank,
|
||||||
|
self_destination,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
kernel::Message::SubkernelAwaitFinishRequest { id, timeout } => {
|
||||||
|
let max_time = if timeout > 0 {
|
||||||
|
Some(timer.get_time() + Milliseconds(timeout as u64))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
self.session.kernel_state = KernelState::SubkernelAwaitFinish {
|
||||||
|
max_time: max_time,
|
||||||
|
id: id,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
kernel::Message::UpDestinationsRequest(destination) => {
|
kernel::Message::UpDestinationsRequest(destination) => {
|
||||||
self.control
|
self.control.tx.send(kernel::Message::UpDestinationsReply(
|
||||||
.tx
|
destination == (self_destination as i32),
|
||||||
.send(kernel::Message::UpDestinationsReply(destination == (rank as i32)));
|
));
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
unexpected!("unexpected message from core1 while kernel was running: {:?}", reply);
|
unexpected!("unexpected message from core1 while kernel was running: {:?}", reply);
|
||||||
|
@ -523,10 +780,11 @@ impl<'a> Manager<'_> {
|
||||||
Ok(false)
|
Ok(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn process_external_messages(&mut self, timer: GlobalTimer) -> Result<(), Error> {
|
fn process_external_messages(&mut self, timer: &GlobalTimer) -> Result<(), Error> {
|
||||||
match &self.session.kernel_state {
|
match &self.session.kernel_state {
|
||||||
KernelState::MsgAwait(timeout, tags) => {
|
KernelState::MsgAwait { max_time, id, tags } => {
|
||||||
if timer.get_time() > *timeout {
|
if let Some(max_time) = *max_time {
|
||||||
|
if timer.get_time() > max_time {
|
||||||
self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
|
self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
|
||||||
status: kernel::SubkernelStatus::Timeout,
|
status: kernel::SubkernelStatus::Timeout,
|
||||||
count: 0,
|
count: 0,
|
||||||
|
@ -534,7 +792,8 @@ impl<'a> Manager<'_> {
|
||||||
self.session.kernel_state = KernelState::Running;
|
self.session.kernel_state = KernelState::Running;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
if let Some(message) = self.session.messages.get_incoming() {
|
}
|
||||||
|
if let Some(message) = self.session.messages.get_incoming(*id) {
|
||||||
self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
|
self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
|
||||||
status: kernel::SubkernelStatus::NoError,
|
status: kernel::SubkernelStatus::NoError,
|
||||||
count: message.count,
|
count: message.count,
|
||||||
|
@ -555,11 +814,47 @@ impl<'a> Manager<'_> {
|
||||||
Err(Error::AwaitingMessage)
|
Err(Error::AwaitingMessage)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
KernelState::SubkernelAwaitFinish { max_time, id } => {
|
||||||
|
if let Some(max_time) = *max_time {
|
||||||
|
if timer.get_time() > max_time {
|
||||||
|
self.control.tx.send(kernel::Message::SubkernelAwaitFinishReply {
|
||||||
|
status: kernel::SubkernelStatus::Timeout,
|
||||||
|
});
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let mut i = 0;
|
||||||
|
for status in &self.session.subkernels_finished {
|
||||||
|
if *status == *id {
|
||||||
|
self.control.tx.send(kernel::Message::SubkernelAwaitFinishReply {
|
||||||
|
status: kernel::SubkernelStatus::NoError,
|
||||||
|
});
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
self.session.subkernels_finished.swap_remove(i);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
i += 1;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
KernelState::DmaAwait { max_time } | KernelState::DmaPendingAwait { max_time, .. } => {
|
||||||
|
if timer.get_time() > *max_time {
|
||||||
|
self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
|
||||||
|
timeout: true,
|
||||||
|
error: 0,
|
||||||
|
channel: 0,
|
||||||
|
timestamp: 0,
|
||||||
|
});
|
||||||
|
self.session.kernel_state = KernelState::Running;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
_ => Ok(()),
|
_ => Ok(()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn pass_message_to_kernel(&mut self, message: &Message, tags: Vec<u8>, timer: GlobalTimer) -> Result<(), Error> {
|
fn pass_message_to_kernel(&mut self, message: &Message, tags: Vec<u8>, timer: &GlobalTimer) -> Result<(), Error> {
|
||||||
let mut reader = Cursor::new(&message.data);
|
let mut reader = Cursor::new(&message.data);
|
||||||
let mut current_tags: &[u8] = &tags;
|
let mut current_tags: &[u8] = &tags;
|
||||||
let mut i = message.count;
|
let mut i = message.count;
|
||||||
|
@ -582,7 +877,7 @@ impl<'a> Manager<'_> {
|
||||||
let mut writer = Cursor::new(buf);
|
let mut writer = Cursor::new(buf);
|
||||||
match write_exception(&mut writer, exceptions, stack_pointers, backtrace, async_errors) {
|
match write_exception(&mut writer, exceptions, stack_pointers, backtrace, async_errors) {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
exception = Some(Sliceable::new(writer.into_inner()));
|
exception = Some(Sliceable::new(0, writer.into_inner()));
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
unexpected = Some("Error writing exception data".to_string());
|
unexpected = Some("Error writing exception data".to_string());
|
||||||
|
@ -676,7 +971,7 @@ where
|
||||||
|
|
||||||
fn recv_w_timeout(
|
fn recv_w_timeout(
|
||||||
rx: &mut Receiver<'_, kernel::Message>,
|
rx: &mut Receiver<'_, kernel::Message>,
|
||||||
timer: GlobalTimer,
|
timer: &GlobalTimer,
|
||||||
timeout: u64,
|
timeout: u64,
|
||||||
) -> Result<kernel::Message, Error> {
|
) -> Result<kernel::Message, Error> {
|
||||||
let max_time = timer.get_time() + Milliseconds(timeout);
|
let max_time = timer.get_time() + Milliseconds(timeout);
|
||||||
|
|