forked from M-Labs/artiq-zynq

Compare commits: idle_kernel_rework ... master
No commits in common. "idle_kernel_rework" and "master" have entirely different histories.

flake.lock  68
@@ -11,11 +11,11 @@
         "src-pythonparser": "src-pythonparser"
       },
       "locked": {
-        "lastModified": 1706785107,
-        "narHash": "sha256-Uj72tqigiOCdewSSBBMg6zUpVKhwjAo1HeLJgvyZ3oc=",
+        "lastModified": 1697537883,
+        "narHash": "sha256-GfadmYHFkczltX+rPf08YpAHjYa/31ZmmVD578BcFow=",
         "ref": "refs/heads/master",
-        "rev": "3aaa7e04f26a495e8847e47424bfc16d76d82bf8",
-        "revCount": 8672,
+        "rev": "b168f0bb4be1697ff100475c20ee304dcc31fcc2",
+        "revCount": 8573,
         "type": "git",
         "url": "https://github.com/m-labs/artiq.git"
       },
@@ -37,11 +37,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1701573753,
-        "narHash": "sha256-vhEtXjb9AM6/HnsgfVmhJQeqQ9JqysUm7iWNzTIbexs=",
+        "lastModified": 1693473687,
+        "narHash": "sha256-BdLddCWbvoEyakcGwhph9b5dIU1iA0hCQV7KYgU8nos=",
         "owner": "m-labs",
         "repo": "artiq-comtools",
-        "rev": "199bdabf4de49cb7ada8a4ac7133008e0f8434b7",
+        "rev": "f522ef3dbc65961f17b2d3d41e927409d970fd79",
         "type": "github"
       },
       "original": {
@@ -55,11 +55,11 @@
         "systems": "systems"
       },
       "locked": {
-        "lastModified": 1694529238,
-        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+        "lastModified": 1692799911,
+        "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+        "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44",
         "type": "github"
       },
       "original": {
@@ -71,11 +71,11 @@
     "mozilla-overlay": {
       "flake": false,
       "locked": {
-        "lastModified": 1704373101,
-        "narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=",
+        "lastModified": 1695805681,
+        "narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=",
         "owner": "mozilla",
         "repo": "nixpkgs-mozilla",
-        "rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2",
+        "rev": "6eabade97bc28d707a8b9d82ad13ef143836736e",
         "type": "github"
       },
       "original": {
@@ -87,11 +87,11 @@
     "mozilla-overlay_2": {
      "flake": false,
       "locked": {
-        "lastModified": 1704373101,
-        "narHash": "sha256-+gi59LRWRQmwROrmE1E2b3mtocwueCQqZ60CwLG+gbg=",
+        "lastModified": 1695805681,
+        "narHash": "sha256-1ElPLD8eFfnuIk0G52HGGpRtQZ4QPCjChRlEOfkZ5ro=",
         "owner": "mozilla",
         "repo": "nixpkgs-mozilla",
-        "rev": "9b11a87c0cc54e308fa83aac5b4ee1816d5418a2",
+        "rev": "6eabade97bc28d707a8b9d82ad13ef143836736e",
         "type": "github"
       },
       "original": {
@@ -118,16 +118,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1706515015,
-        "narHash": "sha256-eFfY5A7wlYy3jD/75lx6IJRueg4noE+jowl0a8lIlVo=",
+        "lastModified": 1697226376,
+        "narHash": "sha256-cumLLb1QOUtWieUnLGqo+ylNt3+fU8Lcv5Zl+tYbRUE=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "f4a8d6d5324c327dcc2d863eb7f3cc06ad630df4",
+        "rev": "898cb2064b6e98b8c5499f37e81adbdf2925f7c5",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-23.11",
+        "ref": "nixos-23.05",
         "repo": "nixpkgs",
         "type": "github"
       }
@@ -147,11 +147,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1701572254,
-        "narHash": "sha256-ixq8dlpyOytDr+d/OmW8v1Ioy9V2G2ibOlNj8GFDSq4=",
+        "lastModified": 1697528004,
+        "narHash": "sha256-FFa2MbhAJEjwY58uOs0swvgymfjubHyWba6Q0X6CbB0=",
         "owner": "m-labs",
         "repo": "sipyco",
-        "rev": "cceac0df537887135f99aa6b1bdd82853f16b4d6",
+        "rev": "c0a7ed350ccfb85474217057fc47b3f258ca8d99",
         "type": "github"
       },
       "original": {
@@ -163,11 +163,11 @@
     "src-migen": {
       "flake": false,
       "locked": {
-        "lastModified": 1702942348,
-        "narHash": "sha256-gKIfHZxsv+jcgDFRW9mPqmwqbZXuRvXefkZcSFjOGHw=",
+        "lastModified": 1697013661,
+        "narHash": "sha256-qNCqgWyE4vTDmyjE2XMJqW1djuBxT25A36AzQfZqluU=",
         "owner": "m-labs",
         "repo": "migen",
-        "rev": "50934ad10a87ade47219b796535978b9bdf24023",
+        "rev": "aadc19df93b7aa9ca761aaebbb98a11e0cf2d7c7",
         "type": "github"
       },
       "original": {
@@ -179,11 +179,11 @@
     "src-misoc": {
       "flake": false,
       "locked": {
-        "lastModified": 1699352904,
-        "narHash": "sha256-SglyTmXOPv8jJOjwAjJrj/WhAkItQfUbvKfUqrynwRg=",
+        "lastModified": 1693709836,
+        "narHash": "sha256-YiCk05RYLzZu1CYkQ2r7XtjwVEqkUGTQn388uOls9tI=",
         "ref": "refs/heads/master",
-        "rev": "a53859f2167c31ab5225b6c09f30cf05527b94f4",
-        "revCount": 2452,
+        "rev": "58dc4ee60d165ce9145cf3d904241fc154b6407f",
+        "revCount": 2448,
         "submodules": true,
         "type": "git",
         "url": "https://github.com/m-labs/misoc.git"
@@ -234,11 +234,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1702982463,
-        "narHash": "sha256-jKR3drE2rsTaYGEgIdv5kUo2LOb1JyIb4tJhVuCXTTc=",
+        "lastModified": 1697795161,
+        "narHash": "sha256-p89w9eoFJ2VFTDZ5Mrv5vsH0E1Ko9z1C6Ett281hCHg=",
         "ref": "refs/heads/master",
-        "rev": "4168eb63a7e846863331ae4e656cfd82a867cca8",
-        "revCount": 636,
+        "rev": "be672ab662d8134ee11412a651864824f6483d4a",
+        "revCount": 630,
         "type": "git",
         "url": "https://git.m-labs.hk/m-labs/zynq-rs"
       },

@@ -78,7 +78,6 @@

   # migen/misoc version checks are broken with pyproject for some reason
   postPatch = ''
-    sed -i "1,4d" pyproject.toml
     substituteInPlace pyproject.toml \
       --replace '"migen@git+https://github.com/m-labs/migen",' ""
     substituteInPlace pyproject.toml \
@@ -123,9 +122,6 @@
   src = ./src;
   cargoLock = {
     lockFile = src/Cargo.lock;
-    outputHashes = {
-      "tar-no-std-0.1.8" = "sha256-xm17108v4smXOqxdLvHl9CxTCJslmeogjm4Y87IXFuM=";
-    };
   };

   nativeBuildInputs = [
@@ -153,7 +149,6 @@

   doCheck = false;
   dontFixup = true;
-  auditable = false;
   };
   gateware = pkgs.runCommand "${target}-${variant}-gateware"
     {

@@ -2,12 +2,6 @@
 # It is not intended for manual editing.
 version = 3

-[[package]]
-name = "arrayvec"
-version = "0.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
-
 [[package]]
 name = "async-recursion"
 version = "0.3.2"
@@ -480,7 +474,6 @@ dependencies = [
  "log_buffer",
  "num-derive",
  "num-traits",
- "tar-no-std",
  "unwind",
  "vcell",
  "void",
@@ -545,16 +538,6 @@ dependencies = [
  "unicode-ident",
 ]

-[[package]]
-name = "tar-no-std"
-version = "0.1.8"
-source = "git+https://git.m-labs.hk/M-Labs/tar-no-std?rev=2ab6dc5#2ab6dc58e5249c59c4eb03eaf3a119bcdd678d32"
-dependencies = [
- "arrayvec",
- "bitflags",
- "log",
-]
-
 [[package]]
 name = "unicode-ident"
 version = "1.0.5"

@@ -237,16 +237,12 @@ class GenericMaster(SoCCore):
         gtx0 = self.gt_drtio.gtxs[0]
         self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)

-        ext_async_rst = Signal()
-
         self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
         self.submodules.sys_crg = zynq_clocking.SYSCRG(
             self.platform,
             self.ps7,
             txout_buf,
-            clk_sw=self.gt_drtio.stable_clkin.storage,
-            clk_sw_status=gtx0.tx_init.done,
-            ext_async_rst=ext_async_rst)
+            clk_sw=gtx0.tx_init.done)
         self.csr_devices.append("sys_crg")
         self.crg = self.ps7 # HACK for eem_7series to find the clock
         self.crg.cd_sys = self.sys_crg.cd_sys
@@ -254,9 +250,6 @@ class GenericMaster(SoCCore):
             self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
         fix_serdes_timing_path(platform)

-        self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
-        self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
-
         self.config["HAS_SI5324"] = None
         self.config["SI5324_SOFT_RESET"] = None

@@ -426,16 +419,12 @@ class GenericSatellite(SoCCore):
         gtx0 = self.gt_drtio.gtxs[0]
         self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)

-        ext_async_rst = Signal()
-
         self.submodules.bootstrap = GTPBootstrapClock(self.platform, clk_freq)
         self.submodules.sys_crg = zynq_clocking.SYSCRG(
             self.platform,
             self.ps7,
             txout_buf,
-            clk_sw=self.gt_drtio.stable_clkin.storage,
-            clk_sw_status=gtx0.tx_init.done,
-            ext_async_rst=ext_async_rst)
+            clk_sw=gtx0.tx_init.done)
         platform.add_false_path_constraints(
             self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
         self.csr_devices.append("sys_crg")
@@ -444,9 +433,6 @@ class GenericSatellite(SoCCore):

         fix_serdes_timing_path(platform)

-        self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
-        self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
-
         self.rtio_channels = []
         has_grabber = any(peripheral["type"] == "grabber" for peripheral in description["peripherals"])
         if has_grabber:

@@ -226,7 +226,6 @@ class _MasterBase(SoCCore):
         self.csr_devices.append("gt_drtio")

         self.submodules.rtio_tsc = rtio.TSC(glbl_fine_ts_width=3)
-        ext_async_rst = Signal()
         txout_buf = Signal()
         gtx0 = self.gt_drtio.gtxs[0]
         self.specials += Instance("BUFG", i_I=gtx0.txoutclk, o_O=txout_buf)
@@ -235,17 +234,12 @@ class _MasterBase(SoCCore):
             self.platform,
             self.ps7,
             txout_buf,
-            clk_sw=self.gt_drtio.stable_clkin.storage,
-            clk_sw_status=gtx0.tx_init.done,
-            ext_async_rst=ext_async_rst,
+            clk_sw=gtx0.tx_init.done,
             freq=clk_freq)
         platform.add_false_path_constraints(
             self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
         self.csr_devices.append("sys_crg")

-        self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
-        self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
-
         drtio_csr_group = []
         drtioaux_csr_group = []
         drtioaux_memory_group = []
@@ -367,7 +361,6 @@ class _SatelliteBase(SoCCore):
             clk_freq=clk_freq)
         self.csr_devices.append("gt_drtio")

-        ext_async_rst = Signal()
         txout_buf = Signal()
         txout_buf.attr.add("keep")
         gtx0 = self.gt_drtio.gtxs[0]
@@ -380,17 +373,12 @@ class _SatelliteBase(SoCCore):
             self.platform,
             self.ps7,
             txout_buf,
-            clk_sw=self.gt_drtio.stable_clkin.storage,
-            clk_sw_status=gtx0.tx_init.done,
-            ext_async_rst=ext_async_rst,
+            clk_sw=gtx0.tx_init.done,
             freq=clk_freq)
         platform.add_false_path_constraints(
             self.bootstrap.cd_bootstrap.clk, self.sys_crg.cd_sys.clk)
         self.csr_devices.append("sys_crg")

-        self.comb += ext_async_rst.eq(self.sys_crg.clk_sw_fsm.o_clk_sw & ~gtx0.tx_init.done)
-        self.specials += MultiReg(self.sys_crg.clk_sw_fsm.o_clk_sw & self.sys_crg.mmcm_locked, self.gt_drtio.clk_path_ready, odomain="bootstrap")
-
         drtioaux_csr_group = []
         drtioaux_memory_group = []
         drtiorep_csr_group = []

@@ -65,7 +65,7 @@ class ClockSwitchFSM(Module):


 class SYSCRG(Module, AutoCSR):
-    def __init__(self, platform, ps7, main_clk, clk_sw=None, clk_sw_status=None, freq=125e6, ext_async_rst=None, ):
+    def __init__(self, platform, ps7, main_clk, clk_sw=None, freq=125e6):
         # assumes bootstrap clock is same freq as main and sys output
         self.clock_domains.cd_sys = ClockDomain()
         self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
@@ -88,7 +88,7 @@ class SYSCRG(Module, AutoCSR):
         else:
             self.comb += self.clk_sw_fsm.i_clk_sw.eq(clk_sw)

-        self.mmcm_locked = Signal()
+        mmcm_locked = Signal()
         mmcm_sys = Signal()
         mmcm_sys4x = Signal()
         mmcm_sys5x = Signal()
@@ -96,7 +96,7 @@ class SYSCRG(Module, AutoCSR):
         mmcm_fb_clk = Signal()
         self.specials += [
             Instance("MMCME2_ADV",
-                p_STARTUP_WAIT="FALSE", o_LOCKED=self.mmcm_locked,
+                p_STARTUP_WAIT="FALSE", o_LOCKED=mmcm_locked,
                 p_BANDWIDTH="HIGH",
                 p_REF_JITTER1=0.001,
                 p_CLKIN1_PERIOD=period, i_CLKIN1=main_clk,
@@ -125,17 +125,8 @@ class SYSCRG(Module, AutoCSR):
             Instance("BUFG", i_I=mmcm_sys, o_O=self.cd_sys.clk),
             Instance("BUFG", i_I=mmcm_sys4x, o_O=self.cd_sys4x.clk),
             Instance("BUFG", i_I=mmcm_clk208, o_O=self.cd_clk200.clk),
-        ]
-        if ext_async_rst is not None:
-            self.specials += [
-                AsyncResetSynchronizer(self.cd_sys, ~self.mmcm_locked | ext_async_rst),
-                AsyncResetSynchronizer(self.cd_clk200, ~self.mmcm_locked | ext_async_rst),
-            ]
-        else:
-            self.specials += [
-                AsyncResetSynchronizer(self.cd_sys, ~self.mmcm_locked),
-                AsyncResetSynchronizer(self.cd_clk200, ~self.mmcm_locked),
+            AsyncResetSynchronizer(self.cd_sys, ~mmcm_locked),
+            AsyncResetSynchronizer(self.cd_clk200, ~mmcm_locked),
         ]

         reset_counter = Signal(4, reset=15)
@@ -148,7 +139,4 @@ class SYSCRG(Module, AutoCSR):
         )
         self.specials += Instance("IDELAYCTRL", i_REFCLK=ClockSignal("clk200"), i_RST=ic_reset)

-        if clk_sw_status is None:
-            self.comb += self.current_clock.status.eq(self.clk_sw_fsm.o_clk_sw)
-        else:
-            self.comb += self.current_clock.status.eq(clk_sw_status)
+        self.comb += self.current_clock.status.eq(self.clk_sw_fsm.o_clk_sw)

@@ -5,7 +5,7 @@ use io::proto::{ProtoRead, ProtoWrite};
 // used by satellite -> master analyzer, subkernel exceptions
 pub const SAT_PAYLOAD_MAX_SIZE: usize = /*max size*/512 - /*CRC*/4 - /*packet ID*/1 - /*last*/1 - /*length*/2;
 // used by DDMA, subkernel program data (need to provide extra ID and destination)
-pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*source*/1 - /*destination*/1 - /*ID*/4;
+pub const MASTER_PAYLOAD_MAX_SIZE: usize = SAT_PAYLOAD_MAX_SIZE - /*destination*/1 - /*ID*/4;

 #[derive(Debug)]
 pub enum Error {
@@ -19,46 +19,6 @@ impl From<IoError> for Error {
     }
 }

-#[derive(PartialEq, Clone, Copy, Debug)]
-#[repr(u8)]
-pub enum PayloadStatus {
-    Middle = 0,
-    First = 1,
-    Last = 2,
-    FirstAndLast = 3,
-}
-
-impl From<u8> for PayloadStatus {
-    fn from(value: u8) -> PayloadStatus {
-        match value {
-            0 => PayloadStatus::Middle,
-            1 => PayloadStatus::First,
-            2 => PayloadStatus::Last,
-            3 => PayloadStatus::FirstAndLast,
-            _ => unreachable!(),
-        }
-    }
-}
-
-impl PayloadStatus {
-    pub fn is_first(self) -> bool {
-        self == PayloadStatus::First || self == PayloadStatus::FirstAndLast
-    }
-
-    pub fn is_last(self) -> bool {
-        self == PayloadStatus::Last || self == PayloadStatus::FirstAndLast
-    }
-
-    pub fn from_status(first: bool, last: bool) -> PayloadStatus {
-        match (first, last) {
-            (true, true) => PayloadStatus::FirstAndLast,
-            (true, false) => PayloadStatus::First,
-            (false, true) => PayloadStatus::Last,
-            (false, false) => PayloadStatus::Middle,
-        }
-    }
-}
-
 #[derive(PartialEq, Debug)]
 pub enum Packet {
     EchoRequest,
@@ -89,8 +49,6 @@ pub enum Packet {
     RoutingSetRank {
         rank: u8,
     },
-    RoutingRetrievePackets,
-    RoutingNoPackets,
     RoutingAck,

     MonitorRequest {
@@ -199,40 +157,31 @@ pub enum Packet {
     },

     DmaAddTraceRequest {
-        source: u8,
         destination: u8,
         id: u32,
-        status: PayloadStatus,
+        last: bool,
         length: u16,
         trace: [u8; MASTER_PAYLOAD_MAX_SIZE],
     },
     DmaAddTraceReply {
-        source: u8,
-        destination: u8,
-        id: u32,
         succeeded: bool,
     },
     DmaRemoveTraceRequest {
-        source: u8,
         destination: u8,
         id: u32,
     },
     DmaRemoveTraceReply {
-        destination: u8,
         succeeded: bool,
     },
     DmaPlaybackRequest {
-        source: u8,
         destination: u8,
         id: u32,
         timestamp: u64,
     },
     DmaPlaybackReply {
-        destination: u8,
         succeeded: bool,
     },
     DmaPlaybackStatus {
-        source: u8,
         destination: u8,
         id: u32,
         error: u8,
@@ -243,7 +192,7 @@ pub enum Packet {
     SubkernelAddDataRequest {
         destination: u8,
         id: u32,
-        status: PayloadStatus,
+        last: bool,
         length: u16,
         data: [u8; MASTER_PAYLOAD_MAX_SIZE],
     },
@@ -251,20 +200,22 @@ pub enum Packet {
         succeeded: bool,
     },
     SubkernelLoadRunRequest {
-        source: u8,
         destination: u8,
         id: u32,
         run: bool,
     },
     SubkernelLoadRunReply {
+        succeeded: bool,
+    },
+    SubkernelStopRequest {
         destination: u8,
+    },
+    SubkernelStopReply {
         succeeded: bool,
     },
     SubkernelFinished {
-        destination: u8,
         id: u32,
         with_exception: bool,
-        exception_src: u8,
     },
     SubkernelExceptionRequest {
         destination: u8,
@@ -275,10 +226,9 @@ pub enum Packet {
         data: [u8; SAT_PAYLOAD_MAX_SIZE],
     },
     SubkernelMessage {
-        source: u8,
         destination: u8,
         id: u32,
-        status: PayloadStatus,
+        last: bool,
         length: u16,
         data: [u8; MASTER_PAYLOAD_MAX_SIZE],
     },
@@ -325,8 +275,6 @@ impl Packet {
                 rank: reader.read_u8()?,
             },
             0x32 => Packet::RoutingAck,
-            0x33 => Packet::RoutingRetrievePackets,
-            0x34 => Packet::RoutingNoPackets,

             0x40 => Packet::MonitorRequest {
                 destination: reader.read_u8()?,
@@ -441,49 +389,39 @@ impl Packet {
             }

             0xb0 => {
-                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
-                let status = reader.read_u8()?;
+                let last = reader.read_bool()?;
                 let length = reader.read_u16()?;
                 let mut trace: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut trace[0..length as usize])?;
                 Packet::DmaAddTraceRequest {
-                    source: source,
                     destination: destination,
                     id: id,
-                    status: PayloadStatus::from(status),
+                    last: last,
                     length: length as u16,
                     trace: trace,
                 }
             }
             0xb1 => Packet::DmaAddTraceReply {
-                source: reader.read_u8()?,
-                destination: reader.read_u8()?,
-                id: reader.read_u32()?,
                 succeeded: reader.read_bool()?,
             },
             0xb2 => Packet::DmaRemoveTraceRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
             },
             0xb3 => Packet::DmaRemoveTraceReply {
-                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?,
             },
             0xb4 => Packet::DmaPlaybackRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 timestamp: reader.read_u64()?,
             },
             0xb5 => Packet::DmaPlaybackReply {
-                destination: reader.read_u8()?,
                 succeeded: reader.read_bool()?,
             },
             0xb6 => Packet::DmaPlaybackStatus {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 error: reader.read_u8()?,
@@ -494,14 +432,14 @@ impl Packet {
             0xc0 => {
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
-                let status = PayloadStatus::from(reader.read_u8()?);
+                let last = reader.read_bool()?;
                 let length = reader.read_u16()?;
                 let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut data[0..length as usize])?;
                 Packet::SubkernelAddDataRequest {
                     destination: destination,
                     id: id,
-                    status: status,
+                    last: last,
                     length: length as u16,
                     data: data,
                 }
@@ -510,20 +448,22 @@ impl Packet {
                 succeeded: reader.read_bool()?,
             },
             0xc4 => Packet::SubkernelLoadRunRequest {
-                source: reader.read_u8()?,
                 destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 run: reader.read_bool()?,
             },
             0xc5 => Packet::SubkernelLoadRunReply {
+                succeeded: reader.read_bool()?,
+            },
+            0xc6 => Packet::SubkernelStopRequest {
                 destination: reader.read_u8()?,
+            },
+            0xc7 => Packet::SubkernelStopReply {
                 succeeded: reader.read_bool()?,
             },
             0xc8 => Packet::SubkernelFinished {
-                destination: reader.read_u8()?,
                 id: reader.read_u32()?,
                 with_exception: reader.read_bool()?,
-                exception_src: reader.read_u8()?,
             },
             0xc9 => Packet::SubkernelExceptionRequest {
                 destination: reader.read_u8()?,
@@ -540,18 +480,16 @@ impl Packet {
                 }
             }
             0xcb => {
-                let source = reader.read_u8()?;
                 let destination = reader.read_u8()?;
                 let id = reader.read_u32()?;
-                let status = reader.read_u8()?;
+                let last = reader.read_bool()?;
                 let length = reader.read_u16()?;
                 let mut data: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
                 reader.read_exact(&mut data[0..length as usize])?;
                 Packet::SubkernelMessage {
-                    source: source,
                     destination: destination,
                     id: id,
-                    status: PayloadStatus::from(status),
+                    last: last,
                     length: length as u16,
                     data: data,
                 }
@@ -602,8 +540,6 @@ impl Packet {
                 writer.write_u8(rank)?;
             }
             Packet::RoutingAck => writer.write_u8(0x32)?,
-            Packet::RoutingRetrievePackets => writer.write_u8(0x33)?,
-            Packet::RoutingNoPackets => writer.write_u8(0x34)?,

             Packet::MonitorRequest {
                 destination,
@@ -775,69 +711,49 @@ impl Packet {
             }

             Packet::DmaAddTraceRequest {
-                source,
                 destination,
                 id,
-                status,
+                last,
                 trace,
                 length,
             } => {
                 writer.write_u8(0xb0)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
-                writer.write_u8(status as u8)?;
+                writer.write_bool(last)?;
                 // trace may be broken down to fit within drtio aux memory limit
                 // will be reconstructed by satellite
                 writer.write_u16(length)?;
                 writer.write_all(&trace[0..length as usize])?;
             }
-            Packet::DmaAddTraceReply {
-                source,
-                destination,
-                id,
-                succeeded,
-            } => {
+            Packet::DmaAddTraceReply { succeeded } => {
                 writer.write_u8(0xb1)?;
-                writer.write_u8(source)?;
-                writer.write_u8(destination)?;
-                writer.write_u32(id)?;
                 writer.write_bool(succeeded)?;
             }
-            Packet::DmaRemoveTraceRequest {
-                source,
-                destination,
-                id,
-            } => {
+            Packet::DmaRemoveTraceRequest { destination, id } => {
                 writer.write_u8(0xb2)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
             }
-            Packet::DmaRemoveTraceReply { destination, succeeded } => {
+            Packet::DmaRemoveTraceReply { succeeded } => {
                 writer.write_u8(0xb3)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             }
             Packet::DmaPlaybackRequest {
-                source,
                 destination,
                 id,
                 timestamp,
             } => {
                 writer.write_u8(0xb4)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u64(timestamp)?;
             }
-            Packet::DmaPlaybackReply { destination, succeeded } => {
+            Packet::DmaPlaybackReply { succeeded } => {
                 writer.write_u8(0xb5)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             }
             Packet::DmaPlaybackStatus {
-                source,
                 destination,
                 id,
                 error,
@@ -845,7 +761,6 @@ impl Packet {
                 timestamp,
             } => {
                 writer.write_u8(0xb6)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_u8(error)?;
@@ -856,14 +771,14 @@ impl Packet {
             Packet::SubkernelAddDataRequest {
                 destination,
                 id,
-                status,
+                last,
                 data,
                 length,
             } => {
                 writer.write_u8(0xc0)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
-                writer.write_u8(status as u8)?;
+                writer.write_bool(last)?;
                 writer.write_u16(length)?;
                 writer.write_all(&data[0..length as usize])?;
             }
@@ -871,34 +786,28 @@ impl Packet {
                 writer.write_u8(0xc1)?;
                 writer.write_bool(succeeded)?;
             }
-            Packet::SubkernelLoadRunRequest {
-                source,
-                destination,
-                id,
-                run,
-            } => {
+            Packet::SubkernelLoadRunRequest { destination, id, run } => {
                 writer.write_u8(0xc4)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
                 writer.write_bool(run)?;
             }
-            Packet::SubkernelLoadRunReply { destination, succeeded } => {
+            Packet::SubkernelLoadRunReply { succeeded } => {
                 writer.write_u8(0xc5)?;
-                writer.write_u8(destination)?;
                 writer.write_bool(succeeded)?;
             }
-            Packet::SubkernelFinished {
-                destination,
-                id,
-                with_exception,
-                exception_src,
-            } => {
-                writer.write_u8(0xc8)?;
+            Packet::SubkernelStopRequest { destination } => {
+                writer.write_u8(0xc6)?;
                 writer.write_u8(destination)?;
+            }
+            Packet::SubkernelStopReply { succeeded } => {
+                writer.write_u8(0xc7)?;
+                writer.write_bool(succeeded)?;
+            }
+            Packet::SubkernelFinished { id, with_exception } => {
+                writer.write_u8(0xc8)?;
                 writer.write_u32(id)?;
                 writer.write_bool(with_exception)?;
-                writer.write_u8(exception_src)?;
             }
             Packet::SubkernelExceptionRequest { destination } => {
                 writer.write_u8(0xc9)?;
@@ -911,18 +820,16 @@ impl Packet {
                 writer.write_all(&data[0..length as usize])?;
             }
             Packet::SubkernelMessage {
-                source,
                 destination,
                 id,
-                status,
+                last,
                 data,
                 length,
             } => {
                 writer.write_u8(0xcb)?;
-                writer.write_u8(source)?;
                 writer.write_u8(destination)?;
                 writer.write_u32(id)?;
-                writer.write_u8(status as u8)?;
+                writer.write_bool(last)?;
                 writer.write_u16(length)?;
                 writer.write_all(&data[0..length as usize])?;
             }
@@ -933,39 +840,4 @@ impl Packet {
         }
         Ok(())
     }
-
-    pub fn routable_destination(&self) -> Option<u8> {
-        // only for packets that could be re-routed, not only forwarded
-        match self {
-            Packet::DmaAddTraceRequest { destination, .. } => Some(*destination),
-            Packet::DmaAddTraceReply { destination, .. } => Some(*destination),
-            Packet::DmaRemoveTraceRequest { destination, .. } => Some(*destination),
-            Packet::DmaRemoveTraceReply { destination, .. } => Some(*destination),
-            Packet::DmaPlaybackRequest { destination, .. } => Some(*destination),
-            Packet::DmaPlaybackReply { destination, .. } => Some(*destination),
-            Packet::SubkernelLoadRunRequest { destination, .. } => Some(*destination),
-            Packet::SubkernelLoadRunReply { destination, .. } => Some(*destination),
-            Packet::SubkernelMessage { destination, .. } => Some(*destination),
-            Packet::SubkernelMessageAck { destination } => Some(*destination),
-            Packet::DmaPlaybackStatus { destination, .. } => Some(*destination),
-            Packet::SubkernelFinished { destination, .. } => Some(*destination),
-            _ => None,
-        }
-    }
-
-    pub fn expects_response(&self) -> bool {
-        // returns true if the routable packet should elicit a response
-        // e.g. reply, ACK packets end a conversation,
-        // and firmware should not wait for response
-        match self {
-            Packet::DmaAddTraceReply { .. }
-            | Packet::DmaRemoveTraceReply { .. }
-            | Packet::DmaPlaybackReply { .. }
-            | Packet::SubkernelLoadRunReply { .. }
-            | Packet::SubkernelMessageAck { .. }
-            | Packet::DmaPlaybackStatus { .. }
-            | Packet::SubkernelFinished { .. } => false,
-            _ => true,
-        }
-    }
 }

@@ -19,7 +19,7 @@ pub mod drtioaux;
 #[cfg(has_drtio)]
 pub mod drtioaux_async;
 pub mod drtioaux_proto;
-#[cfg(all(feature = "target_kasli_soc", has_drtio))]
+#[cfg(feature = "target_kasli_soc")]
 pub mod io_expander;
 pub mod logger;
 #[cfg(has_drtio)]

@@ -170,7 +170,6 @@ pub extern "C" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
         csr::rtio_dma::base_address_write(ptr as u32);
         csr::rtio_dma::time_offset_write(timestamp as u64);

-        let old_cri_master = csr::cri_con::selected_read();
         csr::cri_con::selected_write(1);
         csr::rtio_dma::enable_write(1);
         #[cfg(has_drtio)]
@@ -184,7 +183,7 @@ pub extern "C" fn dma_playback(timestamp: i64, ptr: i32, _uses_ddma: bool) {
         });
         }
         while csr::rtio_dma::enable_read() != 0 {}
-        csr::cri_con::selected_write(old_cri_master);
+        csr::cri_con::selected_write(0);

         let error = csr::rtio_dma::error_read();
         if error != 0 {
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelLoadRunRequest {
|
SubkernelLoadRunRequest {
|
||||||
id: u32,
|
id: u32,
|
||||||
destination: u8,
|
|
||||||
run: bool,
|
run: bool,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
@ -87,7 +86,7 @@ pub enum Message {
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelAwaitFinishRequest {
|
SubkernelAwaitFinishRequest {
|
||||||
id: u32,
|
id: u32,
|
||||||
timeout: i64,
|
timeout: u64,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelAwaitFinishReply {
|
SubkernelAwaitFinishReply {
|
||||||
|
@ -96,15 +95,14 @@ pub enum Message {
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgSend {
|
SubkernelMsgSend {
|
||||||
id: u32,
|
id: u32,
|
||||||
destination: Option<u8>,
|
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgSent,
|
SubkernelMsgSent,
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelMsgRecvRequest {
|
SubkernelMsgRecvRequest {
|
||||||
id: i32,
|
id: u32,
|
||||||
timeout: i64,
|
timeout: u64,
|
||||||
tags: Vec<u8>,
|
tags: Vec<u8>,
|
||||||
},
|
},
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
|
|
|

@@ -5,16 +5,12 @@ use cslice::CSlice;
 use super::{Message, SubkernelStatus, KERNEL_CHANNEL_0TO1, KERNEL_CHANNEL_1TO0};
 use crate::{artiq_raise, rpc::send_args};

-pub extern "C" fn load_run(id: u32, destination: u8, run: bool) {
+pub extern "C" fn load_run(id: u32, run: bool) {
     unsafe {
         KERNEL_CHANNEL_1TO0
             .as_mut()
             .unwrap()
-            .send(Message::SubkernelLoadRunRequest {
-                id: id,
-                destination: destination,
-                run: run,
-            });
+            .send(Message::SubkernelLoadRunRequest { id: id, run: run });
     }
     match unsafe { KERNEL_CHANNEL_0TO1.as_mut().unwrap() }.recv() {
         Message::SubkernelLoadRunReply { succeeded: true } => (),
@@ -25,7 +21,7 @@ pub extern "C" fn load_run(id: u32, destination: u8, run: bool) {
     }
 }

-pub extern "C" fn await_finish(id: u32, timeout: i64) {
+pub extern "C" fn await_finish(id: u32, timeout: u64) {
     unsafe {
         KERNEL_CHANNEL_1TO0
             .as_mut()
@@ -55,14 +51,7 @@ pub extern "C" fn await_finish(id: u32, timeout: i64) {
     }
 }

-pub extern "C" fn send_message(
-    id: u32,
-    is_return: bool,
-    destination: u8,
-    count: u8,
-    tag: &CSlice<u8>,
-    data: *const *const (),
-) {
+pub extern "C" fn send_message(id: u32, count: u8, tag: &CSlice<u8>, data: *const *const ()) {
     let mut buffer = Vec::<u8>::new();
     send_args(&mut buffer, 0, tag.as_ref(), data, false).expect("RPC encoding failed");
     // overwrite service tag, include how many tags are in the message
@@ -70,7 +59,6 @@ pub extern "C" fn send_message(
     unsafe {
         KERNEL_CHANNEL_1TO0.as_mut().unwrap().send(Message::SubkernelMsgSend {
             id: id,
-            destination: if is_return { None } else { Some(destination) },
             data: buffer[3..].to_vec(),
         });
     }
@@ -80,7 +68,7 @@
     }
 }

-pub extern "C" fn await_message(id: i32, timeout: i64, tags: &CSlice<u8>, min: u8, max: u8) {
+pub extern "C" fn await_message(id: u32, timeout: u64, tags: &CSlice<u8>, min: u8, max: u8) {
     unsafe {
         KERNEL_CHANNEL_1TO0
             .as_mut()

@@ -41,7 +41,3 @@ libc = { path = "../libc" }
 io = { path = "../libio", features = ["alloc"] }
 ksupport = { path = "../libksupport" }
 libboard_artiq = { path = "../libboard_artiq" }
-
-[dependencies.tar-no-std]
-git = "https://git.m-labs.hk/M-Labs/tar-no-std"
-rev = "2ab6dc5"
@ -3,22 +3,17 @@ use core::{cell::RefCell, fmt, slice, str};
|
||||||
|
|
||||||
use core_io::Error as IoError;
|
use core_io::Error as IoError;
|
||||||
use cslice::CSlice;
|
use cslice::CSlice;
|
||||||
use dyld::elf;
|
|
||||||
use futures::{future::FutureExt, select_biased};
|
use futures::{future::FutureExt, select_biased};
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use io::Cursor;
|
use io::Cursor;
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use ksupport::rpc;
|
use ksupport::rpc;
|
||||||
use ksupport::{kernel, resolve_channel_name};
|
use ksupport::{kernel, resolve_channel_name};
|
||||||
#[cfg(has_drtio)]
|
|
||||||
use libasync::delay;
|
|
||||||
use libasync::{smoltcp::{Sockets, TcpStream},
|
use libasync::{smoltcp::{Sockets, TcpStream},
|
||||||
task};
|
task};
|
||||||
use libboard_artiq::drtio_routing;
|
use libboard_artiq::drtio_routing;
|
||||||
#[cfg(feature = "target_kasli_soc")]
|
#[cfg(feature = "target_kasli_soc")]
|
||||||
use libboard_zynq::error_led::ErrorLED;
|
use libboard_zynq::error_led::ErrorLED;
|
||||||
#[cfg(has_drtio)]
|
|
||||||
use libboard_zynq::time::Milliseconds;
|
|
||||||
use libboard_zynq::{self as zynq,
|
use libboard_zynq::{self as zynq,
|
||||||
smoltcp::{self,
|
smoltcp::{self,
|
||||||
iface::{EthernetInterfaceBuilder, NeighborCache},
|
iface::{EthernetInterfaceBuilder, NeighborCache},
|
||||||
|
@ -32,8 +27,6 @@ use libcortex_a9::{mutex::Mutex,
|
||||||
use log::{error, info, warn};
|
use log::{error, info, warn};
|
||||||
use num_derive::{FromPrimitive, ToPrimitive};
|
use num_derive::{FromPrimitive, ToPrimitive};
|
||||||
use num_traits::{FromPrimitive, ToPrimitive};
|
use num_traits::{FromPrimitive, ToPrimitive};
|
||||||
#[cfg(has_drtio)]
|
|
||||||
use tar_no_std::TarArchiveRef;
|
|
||||||
|
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
use crate::pl;
|
use crate::pl;
|
||||||
|
@ -50,8 +43,6 @@ pub enum Error {
|
||||||
BufferExhausted,
|
BufferExhausted,
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
SubkernelError(subkernel::Error),
|
SubkernelError(subkernel::Error),
|
||||||
#[cfg(has_drtio)]
|
|
||||||
DestinationDown,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = core::result::Result<T, Error>;
|
pub type Result<T> = core::result::Result<T, Error>;
|
||||||
|
@ -66,8 +57,6 @@ impl fmt::Display for Error {
|
||||||
Error::BufferExhausted => write!(f, "buffer exhausted"),
|
Error::BufferExhausted => write!(f, "buffer exhausted"),
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
Error::SubkernelError(error) => write!(f, "subkernel error: {:?}", error),
|
Error::SubkernelError(error) => write!(f, "subkernel error: {:?}", error),
|
||||||
#[cfg(has_drtio)]
|
|
||||||
Error::DestinationDown => write!(f, "subkernel destination down"),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -401,11 +390,7 @@ async fn handle_run_kernel(
|
||||||
control.borrow_mut().tx.async_send(reply).await;
|
control.borrow_mut().tx.async_send(reply).await;
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelLoadRunRequest {
|
kernel::Message::SubkernelLoadRunRequest { id, run } => {
|
||||||
id,
|
|
||||||
destination: _,
|
|
||||||
run,
|
|
||||||
} => {
|
|
||||||
let succeeded = match subkernel::load(aux_mutex, routing_table, timer, id, run).await {
|
let succeeded = match subkernel::load(aux_mutex, routing_table, timer, id, run).await {
|
||||||
Ok(()) => true,
|
Ok(()) => true,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -451,9 +436,8 @@ async fn handle_run_kernel(
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelMsgSend { id, destination, data } => {
|
kernel::Message::SubkernelMsgSend { id, data } => {
|
||||||
let res =
|
let res = subkernel::message_send(aux_mutex, routing_table, timer, id, data).await;
|
||||||
subkernel::message_send(aux_mutex, routing_table, timer, id, destination.unwrap(), data).await;
|
|
||||||
match res {
|
match res {
|
||||||
Ok(_) => (),
|
Ok(_) => (),
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
|
@ -468,26 +452,12 @@ async fn handle_run_kernel(
|
||||||
}
|
}
|
||||||
#[cfg(has_drtio)]
|
#[cfg(has_drtio)]
|
||||||
kernel::Message::SubkernelMsgRecvRequest { id, timeout, tags } => {
-     let message_received = subkernel::message_await(id as u32, timeout, timer).await;
+     let message_received = subkernel::message_await(id, timeout, timer).await;
      let (status, count) = match message_received {
          Ok(ref message) => (kernel::SubkernelStatus::NoError, message.count),
          Err(SubkernelError::Timeout) => (kernel::SubkernelStatus::Timeout, 0),
          Err(SubkernelError::IncorrectState) => (kernel::SubkernelStatus::IncorrectState, 0),
          Err(SubkernelError::CommLost) => (kernel::SubkernelStatus::CommLost, 0),
-         Err(SubkernelError::SubkernelException) => {
-             error!("Exception in subkernel");
-             // just retrieve the exception
-             let status = subkernel::await_finish(aux_mutex, routing_table, timer, id as u32, timeout)
-                 .await
-                 .unwrap();
-             match stream {
-                 None => (),
-                 Some(stream) => {
-                     write_chunk(stream, &status.exception.unwrap()).await?;
-                 }
-             }
-             (kernel::SubkernelStatus::OtherError, 0)
-         }
          Err(_) => (kernel::SubkernelStatus::OtherError, 0),
      };
      control

@@ -554,56 +524,6 @@ async fn handle_run_kernel(
     Ok(())
 }
-
- async fn handle_flash_kernel(
-     buffer: &Vec<u8>,
-     control: &Rc<RefCell<kernel::Control>>,
-     _up_destinations: &Rc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
-     _aux_mutex: &Rc<Mutex<bool>>,
-     _routing_table: &drtio_routing::RoutingTable,
-     _timer: GlobalTimer,
- ) -> Result<()> {
-     if buffer[0] == elf::ELFMAG0 && buffer[1] == elf::ELFMAG1 && buffer[2] == elf::ELFMAG2 && buffer[3] == elf::ELFMAG3
-     {
-         // assume ELF file, proceed as before
-         load_kernel(buffer, control, None).await
-     } else {
-         #[cfg(has_drtio)]
-         {
-             let archive = TarArchiveRef::new(buffer.as_ref());
-             let entries = archive.entries();
-             let mut main_lib: Vec<u8> = Vec::new();
-             for entry in entries {
-                 if entry.filename().as_str() == "main.elf" {
-                     main_lib = entry.data().to_vec();
-                 } else {
-                     // subkernel filename must be in format:
-                     // "<subkernel id> <destination>.elf"
-                     let filename = entry.filename();
-                     let mut iter = filename.as_str().split_whitespace();
-                     let sid: u32 = iter.next().unwrap().parse().unwrap();
-                     let dest: u8 = iter.next().unwrap().strip_suffix(".elf").unwrap().parse().unwrap();
-                     let up = _up_destinations.borrow()[dest as usize];
-                     if up {
-                         let subkernel_lib = entry.data().to_vec();
-                         subkernel::add_subkernel(sid, dest, subkernel_lib).await;
-                         match subkernel::upload(_aux_mutex, _routing_table, _timer, sid).await {
-                             Ok(_) => (),
-                             Err(_) => return Err(Error::UnexpectedPattern),
-                         }
-                     } else {
-                         return Err(Error::DestinationDown);
-                     }
-                 }
-             }
-             load_kernel(&main_lib, control, None).await
-         }
-         #[cfg(not(has_drtio))]
-         {
-             panic!("multi-kernel libraries are not supported in standalone systems");
-         }
-     }
- }
-
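The removed `handle_flash_kernel` above distinguishes a plain ELF payload from a multi-kernel tar archive by the four ELF magic bytes, and expects each subkernel entry to be named `"<subkernel id> <destination>.elf"`. Below is a minimal, self-contained sketch of those two checks; the constants and helper names are illustrative only, not the firmware's own helpers.

```rust
// Illustrative sketch: mirrors the ELF-magic check and the subkernel filename
// parsing done by the removed handle_flash_kernel, outside the firmware environment.
const ELF_MAGIC: [u8; 4] = [0x7f, b'E', b'L', b'F'];

/// True if the buffer starts with the ELF magic, i.e. it is a single kernel image.
fn is_elf(buffer: &[u8]) -> bool {
    buffer.len() >= 4 && buffer[..4] == ELF_MAGIC
}

/// Parse a subkernel entry name of the form "<subkernel id> <destination>.elf".
fn parse_subkernel_name(name: &str) -> Option<(u32, u8)> {
    let mut iter = name.split_whitespace();
    let sid: u32 = iter.next()?.parse().ok()?;
    let dest: u8 = iter.next()?.strip_suffix(".elf")?.parse().ok()?;
    Some((sid, dest))
}

fn main() {
    assert!(is_elf(&[0x7f, b'E', b'L', b'F', 2, 1]));
    assert!(!is_elf(b"main.elf"));
    assert_eq!(parse_subkernel_name("7 3.elf"), Some((7, 3)));
    assert_eq!(parse_subkernel_name("main.elf"), None);
}
```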
 async fn load_kernel(
     buffer: &Vec<u8>,
     control: &Rc<RefCell<kernel::Control>>,

@@ -718,32 +638,6 @@ async fn handle_connection(
     }
 }
-
- async fn flash_and_run_idle_kernel(
-     buffer: &Vec<u8>,
-     control: &Rc<RefCell<kernel::Control>>,
-     up_destinations: &Rc<RefCell<[bool; drtio_routing::DEST_COUNT]>>,
-     aux_mutex: &Rc<Mutex<bool>>,
-     routing_table: &drtio_routing::RoutingTable,
-     timer: GlobalTimer,
- ) {
-     info!("Loading idle kernel");
-     let res = handle_flash_kernel(buffer, control, up_destinations, aux_mutex, routing_table, timer)
-         .await;
-     match res {
-         #[cfg(has_drtio)]
-         Err(Error::DestinationDown) => {
-             let mut countdown = timer.countdown();
-             delay(&mut countdown, Milliseconds(500)).await;
-         }
-         Err(_) => warn!("error loading idle kernel"),
-         _ => (),
-     }
-     info!("Running idle kernel");
-     let _ = handle_run_kernel(None, control, up_destinations, aux_mutex, routing_table, timer)
-         .await.map_err(|_| warn!("error running idle kernel"));
-     info!("Idle kernel terminated");
- }

 pub fn main(timer: GlobalTimer, cfg: Config) {
     let net_addresses = net_settings::get_addresses(&cfg);
     info!("network addresses: {}", net_addresses);

@@ -785,6 +679,7 @@ pub fn main(timer: GlobalTimer, cfg: Config) {

     Sockets::init(32);

+    // before, mutex was on io, but now that io isn't used...?
     let aux_mutex: Rc<Mutex<bool>> = Rc::new(Mutex::new(false));
     #[cfg(has_drtio)]
     let drtio_routing_table = Rc::new(RefCell::new(drtio_routing::config_routing_table(

@@ -807,16 +702,9 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
     let idle_kernel = Rc::new(cfg.read("idle_kernel").ok());
     if let Ok(buffer) = cfg.read("startup_kernel") {
         info!("Loading startup kernel...");
-         let routing_table = drtio_routing_table.borrow();
-         if let Ok(()) = task::block_on(handle_flash_kernel(
-             &buffer,
-             &control,
-             &up_destinations,
-             &aux_mutex,
-             &routing_table,
-             timer,
-         )) {
+         if let Ok(()) = task::block_on(load_kernel(&buffer, &control, None)) {
             info!("Starting startup kernel...");
+             let routing_table = drtio_routing_table.borrow();
             let _ = task::block_on(handle_run_kernel(
                 None,
                 &control,

@@ -834,30 +722,8 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
     mgmt::start(cfg);

     task::spawn(async move {
-         let connection = Rc::new(Semaphore::new(0, 1));
+         let connection = Rc::new(Semaphore::new(1, 1));
         let terminate = Rc::new(Semaphore::new(0, 1));
-         {
-             let control = control.clone();
-             let idle_kernel = idle_kernel.clone();
-             let connection = connection.clone();
-             let terminate = terminate.clone();
-             let up_destinations = up_destinations.clone();
-             let aux_mutex = aux_mutex.clone();
-             let routing_table = drtio_routing_table.clone();
-             task::spawn(async move {
-                 let routing_table = routing_table.borrow();
-                 select_biased! {
-                     _ = (async {
-                         if let Some(buffer) = &*idle_kernel {
-                             flash_and_run_idle_kernel(&buffer, &control, &up_destinations, &aux_mutex, &routing_table, timer).await;
-                         }
-                     }).fuse() => (),
-                     _ = terminate.async_wait().fuse() => ()
-                 }
-                 connection.signal();
-             });
-         }
-
         loop {
             let mut stream = TcpStream::accept(1381, 0x10_000, 0x10_000).await.unwrap();

@@ -885,7 +751,13 @@ pub fn main(timer: GlobalTimer, cfg: Config) {
                     .await
                     .map_err(|e| warn!("connection terminated: {}", e));
                 if let Some(buffer) = &*idle_kernel {
-                     flash_and_run_idle_kernel(&buffer, &control, &up_destinations, &aux_mutex, &routing_table, timer).await;
+                     info!("Loading idle kernel");
+                     let _ = load_kernel(&buffer, &control, None)
+                         .await.map_err(|_| warn!("error loading idle kernel"));
+                     info!("Running idle kernel");
+                     let _ = handle_run_kernel(None, &control, &up_destinations, &aux_mutex, &routing_table, timer)
+                         .await.map_err(|_| warn!("error running idle kernel"));
+                     info!("Idle kernel terminated");
                 }
             }).fuse() => (),
             _ = terminate.async_wait().fuse() => ()

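On master the idle kernel is loaded and run inline after each connection, raced against the `terminate` semaphore with `select_biased!`, whereas the branch spawned a dedicated task for it. A stripped-down, runnable sketch of that race pattern (assumes the `futures` crate; the two async functions stand in for the real kernel-run future and `terminate.async_wait()`):

```rust
// Sketch of the select_biased!/fuse() race used around the idle kernel.
use futures::{executor::block_on, future::FutureExt, select_biased};

async fn run_idle_kernel() -> &'static str {
    "idle kernel terminated"
}

async fn wait_for_terminate() -> &'static str {
    "terminate requested"
}

fn main() {
    let outcome = block_on(async {
        select_biased! {
            // biased: the idle-kernel branch is polled first
            msg = run_idle_kernel().fuse() => msg,
            msg = wait_for_terminate().fuse() => msg,
        }
    });
    println!("{}", outcome);
}
```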
@@ -8,14 +8,14 @@
 #[macro_use]
 extern crate alloc;

- #[cfg(all(feature = "target_kasli_soc", has_drtio))]
+ #[cfg(feature = "target_kasli_soc")]
 use core::cell::RefCell;

 use ksupport;
 use libasync::task;
 #[cfg(has_drtio_eem)]
 use libboard_artiq::drtio_eem;
- #[cfg(all(feature = "target_kasli_soc", has_drtio))]
+ #[cfg(feature = "target_kasli_soc")]
 use libboard_artiq::io_expander;
 use libboard_artiq::{identifier_read, logger, pl};
 use libboard_zynq::{gic, mpcore, timer::GlobalTimer};

@@ -38,7 +38,7 @@ mod rtio_mgt;
 #[cfg(has_drtio)]
 mod subkernel;

- #[cfg(all(feature = "target_kasli_soc", has_drtio))]
+ #[cfg(feature = "target_kasli_soc")]
 async fn io_expanders_service(
     i2c_bus: RefCell<&mut libboard_zynq::i2c::I2c>,
     io_expander0: RefCell<io_expander::IoExpander>,

@@ -93,11 +93,15 @@ pub fn main_core0() {
     info!("gateware ident: {}", identifier_read(&mut [0; 64]));

     ksupport::i2c::init();
-     #[cfg(all(feature = "target_kasli_soc", has_drtio))]
-     {
+     #[cfg(feature = "target_kasli_soc")]
     let i2c_bus = unsafe { (ksupport::i2c::I2C_BUS).as_mut().unwrap() };
-     let mut io_expander0 = io_expander::IoExpander::new(i2c_bus, 0).unwrap();
-     let mut io_expander1 = io_expander::IoExpander::new(i2c_bus, 1).unwrap();
+     #[cfg(feature = "target_kasli_soc")]
+     let (mut io_expander0, mut io_expander1);
+     #[cfg(feature = "target_kasli_soc")]
+     {
+         io_expander0 = io_expander::IoExpander::new(i2c_bus, 0).unwrap();
+         io_expander1 = io_expander::IoExpander::new(i2c_bus, 1).unwrap();
         io_expander0
             .init(i2c_bus)
             .expect("I2C I/O expander #0 initialization failed");

@@ -111,11 +115,6 @@ pub fn main_core0() {
         io_expander1.set(1, 1, false);
         io_expander0.service(i2c_bus).unwrap();
         io_expander1.service(i2c_bus).unwrap();
-         task::spawn(io_expanders_service(
-             RefCell::new(i2c_bus),
-             RefCell::new(io_expander0),
-             RefCell::new(io_expander1),
-         ));
     }

     let cfg = match Config::new() {

@@ -136,5 +135,11 @@ pub fn main_core0() {

     task::spawn(ksupport::report_async_rtio_errors());

+     #[cfg(feature = "target_kasli_soc")]
+     task::spawn(io_expanders_service(
+         RefCell::new(i2c_bus),
+         RefCell::new(io_expander0),
+         RefCell::new(io_expander1),
+     ));
     comms::main(timer, cfg);
 }

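In the hunk above, master declares the I/O expander bindings first and initializes them inside a cfg-gated block so they can still be passed to `io_expanders_service` later in `main_core0`. A runnable toy version of that declare-then-initialize pattern, with a plain `if` standing in for the `#[cfg(...)]` gate so it compiles anywhere:

```rust
// Toy illustration of deferred initialization as used for io_expander0/io_expander1;
// the runtime flag replaces the compile-time feature gate.
fn main() {
    let (io_expander0, io_expander1): (u8, u8);
    let target_kasli_soc = true; // stands in for #[cfg(feature = "target_kasli_soc")]
    if target_kasli_soc {
        io_expander0 = 0;
        io_expander1 = 1;
    } else {
        io_expander0 = u8::MAX;
        io_expander1 = u8::MAX;
    }
    // later use, mirroring task::spawn(io_expanders_service(...))
    println!("servicing expanders {} and {}", io_expander0, io_expander1);
}
```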
@@ -142,9 +142,9 @@ pub mod remote_dma {
         }
     }

-     pub async fn playback_done(&mut self, source: u8, error: u8, channel: u32, timestamp: u64) {
+     pub async fn playback_done(&mut self, destination: u8, error: u8, channel: u32, timestamp: u64) {
         let mut traces_locked = self.traces.async_lock().await;
-         let mut trace = traces_locked.get_mut(&source).unwrap();
+         let mut trace = traces_locked.get_mut(&destination).unwrap();
         trace.state = RemoteState::PlaybackEnded {
             error: error,
             channel: channel,

@@ -13,10 +13,8 @@ pub mod drtio {
    use ksupport::{resolve_channel_name, ASYNC_ERROR_BUSY, ASYNC_ERROR_COLLISION, ASYNC_ERROR_SEQUENCE_ERROR,
                   SEEN_ASYNC_ERRORS};
    use libasync::{delay, task};
-   use libboard_artiq::{drtioaux::Error,
-                        drtioaux_async,
-                        drtioaux_async::Packet,
-                        drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}};
+   use libboard_artiq::{drtioaux::Error, drtioaux_async, drtioaux_async::Packet,
+                        drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE};
    use libboard_zynq::time::Milliseconds;
    use log::{error, info, warn};

@@ -43,102 +41,39 @@ pub mod drtio {
    unsafe { (csr::DRTIO[linkno].rx_up_read)() == 1 }
}

- async fn link_has_async_ready(linkno: u8) -> bool {
-     let linkno = linkno as usize;
-     let async_ready;
-     unsafe {
-         async_ready = (csr::DRTIO[linkno].async_messages_ready_read)() == 1;
-         (csr::DRTIO[linkno].async_messages_ready_write)(1);
-     }
-     async_ready
- }
-
- async fn process_async_packets(
-     aux_mutex: &Mutex<bool>,
-     linkno: u8,
-     routing_table: &drtio_routing::RoutingTable,
-     timer: GlobalTimer,
- ) {
-     if link_has_async_ready(linkno).await {
-         loop {
-             let reply = aux_transact(aux_mutex, linkno, &Packet::RoutingRetrievePackets, timer).await;
-             if let Ok(packet) = reply {
+ async fn process_async_packets(aux_mutex: &Mutex<bool>, linkno: u8, packet: Packet) -> Option<Packet> {
+     // returns None if an async packet has been consumed
    match packet {
        Packet::DmaPlaybackStatus {
            id,
-             source,
-             destination: 0,
+             destination,
            error,
            channel,
            timestamp,
        } => {
-             remote_dma::playback_done(id, source, error, channel, timestamp).await;
+             remote_dma::playback_done(id, destination, error, channel, timestamp).await;
+             None
        }
-         Packet::SubkernelFinished {
-             id,
-             destination: 0,
-             with_exception,
-             exception_src,
-         } => {
-             subkernel::subkernel_finished(id, with_exception, exception_src).await;
+         Packet::SubkernelFinished { id, with_exception } => {
+             subkernel::subkernel_finished(id, with_exception).await;
+             None
        }
        Packet::SubkernelMessage {
            id,
-             source,
-             destination: 0,
-             status,
+             destination: from,
+             last,
            length,
            data,
        } => {
-             subkernel::message_handle_incoming(id, status, length as usize, &data).await;
+             subkernel::message_handle_incoming(id, last, length as usize, &data).await;
            // acknowledge receiving part of the message
            let _lock = aux_mutex.async_lock().await;
-             drtioaux_async::send(linkno, &Packet::SubkernelMessageAck { destination: source })
+             drtioaux_async::send(linkno, &Packet::SubkernelMessageAck { destination: from })
                .await
                .unwrap();
-             let mut countdown = timer.countdown();
-             // give the satellites some time to process
-             delay(&mut countdown, Milliseconds(10)).await;
-         }
-         // routable packets
-         Packet::DmaAddTraceRequest { destination, .. }
-         | Packet::DmaAddTraceReply { destination, .. }
-         | Packet::DmaRemoveTraceRequest { destination, .. }
-         | Packet::DmaRemoveTraceReply { destination, .. }
-         | Packet::DmaPlaybackRequest { destination, .. }
-         | Packet::DmaPlaybackReply { destination, .. }
-         | Packet::SubkernelLoadRunRequest { destination, .. }
-         | Packet::SubkernelLoadRunReply { destination, .. }
-         | Packet::SubkernelMessage { destination, .. }
-         | Packet::SubkernelMessageAck { destination, .. }
-         | Packet::DmaPlaybackStatus { destination, .. }
-         | Packet::SubkernelFinished { destination, .. } => {
-             let dest_link = routing_table.0[destination as usize][0] - 1;
-             if dest_link == linkno {
-                 warn!(
-                     "[LINK#{}] Re-routed packet would return to the same link, dropping: {:?}",
-                     linkno, packet
-                 );
-             } else if destination == 0 {
-                 warn!("[LINK#{}] Received invalid routable packet: {:?}", linkno, packet)
-             } else {
-                 drtioaux_async::send(dest_link, &packet).await.unwrap();
-             }
-         }
-
-         Packet::RoutingNoPackets => break,
-
-         other => warn!("[LINK#{}] Received an unroutable packet: {:?}", linkno, other),
-             } else {
-                 warn!(
-                     "[LINK#{}] Error handling async packets ({})",
-                     linkno,
-                     reply.unwrap_err()
-                 );
-                 return;
-             }
+             None
        }
+         other => Some(other),
    }
}

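The removed branch-side `process_async_packets` polls the link's async-ready CSR flag, acknowledges it, and then drains queued packets with `RoutingRetrievePackets` until the satellite answers `RoutingNoPackets`. A stripped-down, runnable model of that drain loop follows; the enum and queue are stand-ins, not the real drtioaux types.

```rust
// Stand-in types modelling "ask for queued async packets until a
// RoutingNoPackets sentinel comes back", as in the removed drain loop above.
#[derive(Debug)]
enum Packet {
    DmaPlaybackStatus { id: u32 },
    SubkernelFinished { id: u32 },
    RoutingNoPackets,
}

// Models aux_transact(..., &Packet::RoutingRetrievePackets, ...) against a satellite queue.
fn routing_retrieve_packets(queue: &mut Vec<Packet>) -> Packet {
    queue.pop().unwrap_or(Packet::RoutingNoPackets)
}

fn main() {
    let mut satellite_queue = vec![
        Packet::SubkernelFinished { id: 2 },
        Packet::DmaPlaybackStatus { id: 1 },
    ];
    loop {
        match routing_retrieve_packets(&mut satellite_queue) {
            Packet::RoutingNoPackets => break,
            other => println!("handling async packet {:?}", other),
        }
    }
}
```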
@@ -273,7 +208,11 @@ pub mod drtio {
    async fn process_unsolicited_aux(aux_mutex: &Rc<Mutex<bool>>, linkno: u8) {
        let _lock = aux_mutex.async_lock().await;
        match drtioaux_async::recv(linkno).await {
-             Ok(Some(packet)) => warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet),
+             Ok(Some(packet)) => {
+                 if let Some(packet) = process_async_packets(aux_mutex, linkno, packet).await {
+                     warn!("[LINK#{}] unsolicited aux packet: {:?}", linkno, packet);
+                 }
+             }
            Ok(None) => (),
            Err(_) => warn!("[LINK#{}] aux packet error", linkno),
        }

@@ -342,6 +281,7 @@ pub mod drtio {
        let linkno = hop - 1;
        if destination_up(up_destinations, destination).await {
            if up_links[linkno as usize] {
+                 loop {
                let reply = aux_transact(
                    aux_mutex,
                    linkno,

@@ -354,7 +294,13 @@ pub mod drtio {
                match reply {
                    Ok(Packet::DestinationDownReply) => {
                        destination_set_up(routing_table, up_destinations, destination, false).await;
-                         remote_dma::destination_changed(aux_mutex, routing_table, timer, destination, false)
+                         remote_dma::destination_changed(
+                             aux_mutex,
+                             routing_table,
+                             timer,
+                             destination,
+                             false,
+                         )
                            .await;
                        subkernel::destination_changed(aux_mutex, routing_table, timer, destination, false)
                            .await;

@@ -387,9 +333,16 @@ pub mod drtio {
                        );
                        unsafe { SEEN_ASYNC_ERRORS |= ASYNC_ERROR_BUSY };
                    }
-                     Ok(packet) => error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet),
+                     Ok(packet) => match process_async_packets(aux_mutex, linkno, packet).await {
+                         Some(packet) => {
+                             error!("[DEST#{}] received unexpected aux packet: {:?}", destination, packet)
+                         }
+                         None => continue,
+                     },
                    Err(e) => error!("[DEST#{}] communication failed ({})", destination, e),
                }
+                 break;
+                 }
            } else {
                destination_set_up(routing_table, up_destinations, destination, false).await;
                remote_dma::destination_changed(aux_mutex, routing_table, timer, destination, false).await;

@@ -438,7 +391,6 @@ pub mod drtio {
            if up_links[linkno as usize] {
                /* link was previously up */
                if link_rx_up(linkno).await {
-                     process_async_packets(aux_mutex, linkno, routing_table, timer).await;
                    process_unsolicited_aux(aux_mutex, linkno).await;
                    process_local_errors(linkno).await;
                } else {

@@ -511,7 +463,7 @@ pub mod drtio {
        reply_handler_f: HandlerF,
    ) -> Result<(), &'static str>
    where
-         PacketF: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], PayloadStatus, usize) -> Packet,
+         PacketF: Fn(&[u8; MASTER_PAYLOAD_MAX_SIZE], bool, usize) -> Packet,
        HandlerF: Fn(&Packet) -> Result<(), &'static str>,
    {
        let mut i = 0;

@@ -522,12 +474,10 @@ pub mod drtio {
            } else {
                data.len() - i
            } as usize;
-             let first = i == 0;
            let last = i + len == data.len();
            slice[..len].clone_from_slice(&data[i..i + len]);
            i += len;
-             let status = PayloadStatus::from_status(first, last);
-             let packet = packet_f(&slice, status, len);
+             let packet = packet_f(&slice, last, len);
            let reply = aux_transact(aux_mutex, linkno, &packet, timer).await?;
            reply_handler_f(&reply)?;
        }
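Both sides of `transact_sliced` above chop a payload into `MASTER_PAYLOAD_MAX_SIZE` slices; the branch additionally derives a `PayloadStatus` from first/last flags, while master only forwards `last`. A small runnable sketch of the chunking arithmetic (the 4-byte limit is illustrative, not the real constant):

```rust
// Illustrative chunking loop; PAYLOAD_MAX stands in for MASTER_PAYLOAD_MAX_SIZE.
const PAYLOAD_MAX: usize = 4;

fn main() {
    let data: Vec<u8> = (0u8..10).collect();
    let mut i = 0;
    while i < data.len() {
        let len = if i + PAYLOAD_MAX < data.len() {
            PAYLOAD_MAX
        } else {
            data.len() - i
        };
        let first = i == 0; // used by the branch's PayloadStatus::from_status(first, last)
        let last = i + len == data.len();
        println!("slice {:?} (first: {}, last: {})", &data[i..i + len], first, last);
        i += len;
    }
}
```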
@@ -548,25 +498,16 @@ pub mod drtio {
            aux_mutex,
            timer,
            trace,
-             |slice, status, len| Packet::DmaAddTraceRequest {
+             |slice, last, len| Packet::DmaAddTraceRequest {
                id: id,
-                 source: 0,
                destination: destination,
-                 status: status,
+                 last: last,
                length: len as u16,
                trace: *slice,
            },
            |reply| match reply {
-                 Packet::DmaAddTraceReply {
-                     destination: 0,
-                     succeeded: true,
-                     ..
-                 } => Ok(()),
-                 Packet::DmaAddTraceReply {
-                     destination: 0,
-                     succeeded: false,
-                     ..
-                 } => Err("error adding trace on satellite"),
+                 Packet::DmaAddTraceReply { succeeded: true } => Ok(()),
+                 Packet::DmaAddTraceReply { succeeded: false } => Err("error adding trace on satellite"),
                _ => Err("adding DMA trace failed, unexpected aux packet"),
            },
        )

@@ -586,21 +527,14 @@ pub mod drtio {
            linkno,
            &Packet::DmaRemoveTraceRequest {
                id: id,
-                 source: 0,
                destination: destination,
            },
            timer,
        )
        .await;
        match reply {
-             Ok(Packet::DmaRemoveTraceReply {
-                 destination: 0,
-                 succeeded: true,
-             }) => Ok(()),
-             Ok(Packet::DmaRemoveTraceReply {
-                 destination: 0,
-                 succeeded: false,
-             }) => Err("satellite DMA erase error"),
+             Ok(Packet::DmaRemoveTraceReply { succeeded: true }) => Ok(()),
+             Ok(Packet::DmaRemoveTraceReply { succeeded: false }) => Err("satellite DMA erase error"),
            Ok(_) => Err("adding trace failed, unexpected aux packet"),
            Err(_) => Err("erasing trace failed, aux error"),
        }

@@ -620,7 +554,6 @@ pub mod drtio {
            linkno,
            &Packet::DmaPlaybackRequest {
                id: id,
-                 source: 0,
                destination: destination,
                timestamp: timestamp,
            },

@@ -628,14 +561,8 @@ pub mod drtio {
        )
        .await;
        match reply {
-             Ok(Packet::DmaPlaybackReply {
-                 destination: 0,
-                 succeeded: true,
-             }) => Ok(()),
-             Ok(Packet::DmaPlaybackReply {
-                 destination: 0,
-                 succeeded: false,
-             }) => Err("error on DMA playback request"),
+             Ok(Packet::DmaPlaybackReply { succeeded: true }) => Ok(()),
+             Ok(Packet::DmaPlaybackReply { succeeded: false }) => Err("error on DMA playback request"),
            Ok(_) => Err("received unexpected aux packet during DMA playback"),
            Err(_) => Err("aux error on DMA playback"),
        }

@@ -728,10 +655,10 @@ pub mod drtio {
            aux_mutex,
            timer,
            data,
-             |slice, status, len| Packet::SubkernelAddDataRequest {
+             |slice, last, len| Packet::SubkernelAddDataRequest {
                id: id,
                destination: destination,
-                 status: status,
+                 last: last,
                length: len as u16,
                data: *slice,
            },

@@ -758,7 +685,6 @@ pub mod drtio {
            linkno,
            &Packet::SubkernelLoadRunRequest {
                id: id,
-                 source: 0,
                destination: destination,
                run: run,
            },

@@ -766,14 +692,8 @@ pub mod drtio {
        )
        .await?;
        match reply {
-             Packet::SubkernelLoadRunReply {
-                 destination: 0,
-                 succeeded: true,
-             } => return Ok(()),
-             Packet::SubkernelLoadRunReply {
-                 destination: 0,
-                 succeeded: false,
-             } => return Err("error on subkernel run request"),
+             Packet::SubkernelLoadRunReply { succeeded: true } => return Ok(()),
+             Packet::SubkernelLoadRunReply { succeeded: false } => return Err("error on subkernel run request"),
            _ => return Err("received unexpected aux packet during subkernel run"),
        }
    }

@@ -822,11 +742,10 @@ pub mod drtio {
            aux_mutex,
            timer,
            message,
-             |slice, status, len| Packet::SubkernelMessage {
-                 source: 0,
+             |slice, last, len| Packet::SubkernelMessage {
                destination: destination,
                id: id,
-                 status: status,
+                 last: last,
                length: len as u16,
                data: *slice,
            },

@@ -1,11 +1,10 @@
use alloc::{collections::BTreeMap, rc::Rc, vec::Vec};

use libasync::task;
- use libboard_artiq::{drtio_routing::RoutingTable,
-                      drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE}};
+ use libboard_artiq::{drtio_routing::RoutingTable, drtioaux_proto::MASTER_PAYLOAD_MAX_SIZE};
use libboard_zynq::{time::Milliseconds, timer::GlobalTimer};
use libcortex_a9::mutex::Mutex;
- use log::{error, warn};
+ use log::error;

use crate::rtio_mgt::drtio;

@@ -13,7 +12,7 @@ use crate::rtio_mgt::drtio;
pub enum FinishStatus {
    Ok,
    CommLost,
-     Exception(u8), // exception source
+     Exception,
}

#[derive(Debug, PartialEq, Clone, Copy)]

@@ -29,7 +28,6 @@ pub enum Error {
    Timeout,
    IncorrectState,
    SubkernelNotFound,
-     SubkernelException,
    CommLost,
    DrtioError(&'static str),
}

@@ -121,20 +119,18 @@ pub async fn clear_subkernels() {
    CURRENT_MESSAGES.async_lock().await.clear();
}

- pub async fn subkernel_finished(id: u32, with_exception: bool, exception_src: u8) {
+ pub async fn subkernel_finished(id: u32, with_exception: bool) {
    // called upon receiving DRTIO SubkernelRunDone
    // may be None if session ends and is cleared
    if let Some(subkernel) = SUBKERNELS.async_lock().await.get_mut(&id) {
-         if subkernel.state == SubkernelState::Running {
        subkernel.state = SubkernelState::Finished {
            status: match with_exception {
-                 true => FinishStatus::Exception(exception_src),
+                 true => FinishStatus::Exception,
                false => FinishStatus::Ok,
            },
        }
    }
}
-     }

pub async fn destination_changed(
    aux_mutex: &Rc<Mutex<bool>>,

@@ -169,35 +165,26 @@ pub async fn await_finish(
    routing_table: &RoutingTable,
    timer: GlobalTimer,
    id: u32,
-     timeout: i64,
+     timeout: u64,
) -> Result<SubkernelFinished, Error> {
    match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
        SubkernelState::Running | SubkernelState::Finished { .. } => (),
        _ => return Err(Error::IncorrectState),
    }
-     if timeout > 0 {
-         let max_time = timer.get_time() + Milliseconds(timeout as u64);
+     let max_time = timer.get_time() + Milliseconds(timeout);
    while timer.get_time() < max_time {
+         {
        match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
            SubkernelState::Finished { .. } => break,
            _ => (),
        };
+         }
        task::r#yield().await;
    }
    if timer.get_time() >= max_time {
        error!("Remote subkernel finish await timed out");
        return Err(Error::Timeout);
    }
-     } else {
-         // no timeout, wait forever
-         loop {
-             match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
-                 SubkernelState::Finished { .. } => break,
-                 _ => (),
-             };
-             task::r#yield().await;
-         }
-     }
    if let Some(subkernel) = SUBKERNELS.async_lock().await.get_mut(&id) {
        match subkernel.state {
            SubkernelState::Finished { status } => {

@@ -205,8 +192,11 @@ pub async fn await_finish(
                Ok(SubkernelFinished {
                    id: id,
                    status: status,
-                     exception: if let FinishStatus::Exception(dest) = status {
-                         Some(drtio::subkernel_retrieve_exception(aux_mutex, routing_table, timer, dest).await?)
+                     exception: if status == FinishStatus::Exception {
+                         Some(
+                             drtio::subkernel_retrieve_exception(aux_mutex, routing_table, timer, subkernel.destination)
+                                 .await?,
+                         )
                    } else {
                        None
                    },

@@ -230,28 +220,13 @@ static MESSAGE_QUEUE: Mutex<Vec<Message>> = Mutex::new(Vec::new());
// currently under construction message(s) (can be from multiple sources)
static CURRENT_MESSAGES: Mutex<BTreeMap<u32, Message>> = Mutex::new(BTreeMap::new());

- pub async fn message_handle_incoming(
-     id: u32,
-     status: PayloadStatus,
-     length: usize,
-     data: &[u8; MASTER_PAYLOAD_MAX_SIZE],
- ) {
+ pub async fn message_handle_incoming(id: u32, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
    // called when receiving a message from satellite
-     {
-         let subkernel_lock = SUBKERNELS.async_lock().await;
-         let subkernel = subkernel_lock.get(&id);
-         if subkernel.is_some() && subkernel.unwrap().state != SubkernelState::Running {
-             // do not add messages for non-running or deleted subkernels
-             warn!("received a message for a non-running subkernel #{}", id);
+     if SUBKERNELS.async_lock().await.get(&id).is_none() {
+         // do not add messages for non-existing or deleted subkernels
        return;
    }
-     }
    let mut current_messages = CURRENT_MESSAGES.async_lock().await;

-     if status.is_first() {
-         current_messages.remove(&id);
-     }
-
    match current_messages.get_mut(&id) {
        Some(message) => message.data.extend(&data[..length]),
        None => {

@@ -265,7 +240,7 @@ pub async fn message_handle_incoming(
            );
        }
    };
-     if status.is_last() {
+     if last {
        // when done, remove from working queue
        MESSAGE_QUEUE
            .async_lock()

@@ -274,9 +249,7 @@ pub async fn message_handle_incoming(
    }
}

- pub async fn message_await(id: u32, timeout: i64, timer: GlobalTimer) -> Result<Message, Error> {
-     let is_subkernel = SUBKERNELS.async_lock().await.get(&id).is_some();
-     if is_subkernel {
+ pub async fn message_await(id: u32, timeout: u64, timer: GlobalTimer) -> Result<Message, Error> {
    match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
        SubkernelState::Finished {
            status: FinishStatus::CommLost,

@@ -284,9 +257,8 @@ pub async fn message_await(id: u32, timeout: i64, timer: GlobalTimer) -> Result<
        SubkernelState::Running | SubkernelState::Finished { .. } => (),
        _ => return Err(Error::IncorrectState),
    }
-     }
-     let max_time = timer.get_time() + Milliseconds(timeout as u64);
-     while timeout < 0 || (timeout > 0 && timer.get_time() < max_time) {
+     let max_time = timer.get_time() + Milliseconds(timeout);
+     while timer.get_time() < max_time {
        {
            let mut message_queue = MESSAGE_QUEUE.async_lock().await;
            for i in 0..message_queue.len() {

@@ -297,17 +269,6 @@ pub async fn message_await(id: u32, timeout: i64, timer: GlobalTimer) -> Result<
            }
        }
    }
-         if is_subkernel {
-             match SUBKERNELS.async_lock().await.get(&id).unwrap().state {
-                 SubkernelState::Finished {
-                     status: FinishStatus::CommLost,
-                 } => return Err(Error::CommLost),
-                 SubkernelState::Finished {
-                     status: FinishStatus::Exception(_),
-                 } => return Err(Error::SubkernelException),
-                 _ => (),
-             }
-         }
        task::r#yield().await;
    }
    Err(Error::Timeout)

@@ -318,8 +279,9 @@ pub async fn message_send<'a>(
    routing_table: &RoutingTable,
    timer: GlobalTimer,
    id: u32,
-     destination: u8,
    message: Vec<u8>,
) -> Result<(), Error> {
+     let destination = SUBKERNELS.async_lock().await.get(&id).unwrap().destination;
+     // rpc data prepared by the kernel core already
    Ok(drtio::subkernel_send_message(aux_mutex, routing_table, timer, id, destination, &message).await?)
}

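A recurring difference in this file is the timeout type: the branch uses `timeout: i64`, where a negative value means "wait forever" and zero means "do not wait at all", while master uses `u64` and always arms a deadline. A tiny runnable sketch of the branch-side convention (helper name and millisecond units are illustrative):

```rust
// Mirrors the branch's loop condition:
// while timeout < 0 || (timeout > 0 && timer.get_time() < max_time)
fn should_keep_waiting(now_ms: u64, max_time_ms: u64, timeout_ms: i64) -> bool {
    timeout_ms < 0 || (timeout_ms > 0 && now_ms < max_time_ms)
}

fn main() {
    // negative timeout: keep waiting regardless of the clock
    assert!(should_keep_waiting(10_000, 2_000, -1));
    // 1 s timeout armed at t = 1 s, so max_time = 2 s
    assert!(should_keep_waiting(1_900, 2_000, 1_000));
    assert!(!should_keep_waiting(2_000, 2_000, 1_000));
    // zero timeout: do not wait at all
    assert!(!should_keep_waiting(1_000, 1_000, 0));
}
```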
@ -1,13 +1,7 @@
|
||||||
use alloc::{collections::btree_map::BTreeMap, string::String, vec::Vec};
|
use alloc::{collections::btree_map::BTreeMap, vec::Vec};
|
||||||
use core::mem;
|
|
||||||
|
|
||||||
use ksupport::kernel::DmaRecorder;
|
use libboard_artiq::pl::csr;
|
||||||
use libboard_artiq::{drtio_routing::RoutingTable,
|
|
||||||
drtioaux_proto::{Packet, PayloadStatus, MASTER_PAYLOAD_MAX_SIZE},
|
|
||||||
pl::csr};
|
|
||||||
use libcortex_a9::cache::dcci_slice;
|
use libcortex_a9::cache::dcci_slice;
|
||||||
use routing::{Router, Sliceable};
|
|
||||||
use subkernel::Manager as KernelManager;
|
|
||||||
|
|
||||||
const ALIGNMENT: usize = 64;
|
const ALIGNMENT: usize = 64;
|
||||||
|
|
||||||
|
@ -18,20 +12,16 @@ enum ManagerState {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct RtioStatus {
|
pub struct RtioStatus {
|
||||||
pub source: u8,
|
|
||||||
pub id: u32,
|
pub id: u32,
|
||||||
pub error: u8,
|
pub error: u8,
|
||||||
pub channel: u32,
|
pub channel: u32,
|
||||||
pub timestamp: u64,
|
pub timestamp: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
IdNotFound,
|
IdNotFound,
|
||||||
PlaybackInProgress,
|
PlaybackInProgress,
|
||||||
EntryNotComplete,
|
EntryNotComplete,
|
||||||
MasterDmaFound,
|
|
||||||
UploadFail,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -39,228 +29,13 @@ struct Entry {
|
||||||
trace: Vec<u8>,
|
trace: Vec<u8>,
|
||||||
padding_len: usize,
|
padding_len: usize,
|
||||||
complete: bool,
|
complete: bool,
|
||||||
duration: i64, // relevant for local DMA
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Entry {
|
|
||||||
pub fn from_vec(data: Vec<u8>, duration: i64) -> Entry {
|
|
||||||
let mut entry = Entry {
|
|
||||||
trace: data,
|
|
||||||
padding_len: 0,
|
|
||||||
complete: true,
|
|
||||||
duration: duration,
|
|
||||||
};
|
|
||||||
entry.realign();
|
|
||||||
entry
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn id(&self) -> u32 {
|
|
||||||
self.trace[self.padding_len..].as_ptr() as u32
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn realign(&mut self) {
|
|
||||||
self.trace.push(0);
|
|
||||||
let data_len = self.trace.len();
|
|
||||||
|
|
||||||
self.trace.reserve(ALIGNMENT - 1);
|
|
||||||
let padding = ALIGNMENT - self.trace.as_ptr() as usize % ALIGNMENT;
|
|
||||||
let padding = if padding == ALIGNMENT { 0 } else { padding };
|
|
||||||
for _ in 0..padding {
|
|
||||||
// Vec guarantees that this will not reallocate
|
|
||||||
self.trace.push(0)
|
|
||||||
}
|
|
||||||
for i in 1..data_len + 1 {
|
|
||||||
self.trace[data_len + padding - i] = self.trace[data_len - i]
|
|
||||||
}
|
|
||||||
self.complete = true;
|
|
||||||
self.padding_len = padding;
|
|
||||||
|
|
||||||
dcci_slice(&self.trace);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
enum RemoteTraceState {
|
|
||||||
Unsent,
|
|
||||||
Sending(usize),
|
|
||||||
Ready,
|
|
||||||
Running(usize),
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct RemoteTraces {
|
|
||||||
remote_traces: BTreeMap<u8, Sliceable>,
|
|
||||||
state: RemoteTraceState,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RemoteTraces {
|
|
||||||
pub fn new(traces: BTreeMap<u8, Sliceable>) -> RemoteTraces {
|
|
||||||
RemoteTraces {
|
|
||||||
remote_traces: traces,
|
|
||||||
state: RemoteTraceState::Unsent,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// on subkernel request
|
|
||||||
pub fn upload_traces(
|
|
||||||
&mut self,
|
|
||||||
id: u32,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) -> usize {
|
|
||||||
let len = self.remote_traces.len();
|
|
||||||
if len > 0 {
|
|
||||||
self.state = RemoteTraceState::Sending(self.remote_traces.len());
|
|
||||||
for (dest, trace) in self.remote_traces.iter_mut() {
|
|
||||||
// queue up the first packet for all destinations, rest will be sent after first ACK
|
|
||||||
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
|
||||||
let meta = trace.get_slice_master(&mut data_slice);
|
|
||||||
router.route(
|
|
||||||
Packet::DmaAddTraceRequest {
|
|
||||||
source: self_destination,
|
|
||||||
destination: *dest,
|
|
||||||
id: id,
|
|
||||||
status: meta.status,
|
|
||||||
length: meta.len,
|
|
||||||
trace: data_slice,
|
|
||||||
},
|
|
||||||
routing_table,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
len
|
|
||||||
}
|
|
||||||
|
|
||||||
// on incoming Packet::DmaAddTraceReply
|
|
||||||
pub fn ack_upload(
|
|
||||||
&mut self,
|
|
||||||
kernel_manager: &mut KernelManager,
|
|
||||||
source: u8,
|
|
||||||
id: u32,
|
|
||||||
succeeded: bool,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) {
|
|
||||||
if let RemoteTraceState::Sending(count) = self.state {
|
|
||||||
if let Some(trace) = self.remote_traces.get_mut(&source) {
|
|
||||||
if trace.at_end() {
|
|
||||||
if count - 1 == 0 {
|
|
||||||
self.state = RemoteTraceState::Ready;
|
|
||||||
if let Some((id, timestamp)) = kernel_manager.ddma_remote_uploaded(succeeded) {
|
|
||||||
self.playback(id, timestamp, router, rank, self_destination, routing_table);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
self.state = RemoteTraceState::Sending(count - 1);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// send next slice
|
|
||||||
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
|
||||||
let meta = trace.get_slice_master(&mut data_slice);
|
|
||||||
router.route(
|
|
||||||
Packet::DmaAddTraceRequest {
|
|
||||||
source: self_destination,
|
|
||||||
destination: meta.destination,
|
|
||||||
id: id,
|
|
||||||
status: meta.status,
|
|
||||||
length: meta.len,
|
|
||||||
trace: data_slice,
|
|
||||||
},
|
|
||||||
routing_table,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// on subkernel request
|
|
||||||
pub fn playback(
|
|
||||||
&mut self,
|
|
||||||
id: u32,
|
|
||||||
timestamp: u64,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) {
|
|
||||||
// route all the playback requests
|
|
||||||
// remote traces (local trace runs on core1 unlike mainline firmware)
|
|
||||||
self.state = RemoteTraceState::Running(self.remote_traces.len());
|
|
||||||
for (dest, _) in self.remote_traces.iter() {
|
|
||||||
router.route(
|
|
||||||
Packet::DmaPlaybackRequest {
|
|
||||||
source: self_destination,
|
|
||||||
destination: *dest,
|
|
||||||
id: id,
|
|
||||||
timestamp: timestamp,
|
|
||||||
},
|
|
||||||
routing_table,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
);
|
|
||||||
// response will be ignored (succeeded = false handled by the main thread)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// on incoming Packet::DmaPlaybackDone
|
|
||||||
pub fn remote_finished(&mut self, kernel_manager: &mut KernelManager, error: u8, channel: u32, timestamp: u64) {
|
|
||||||
if let RemoteTraceState::Running(count) = self.state {
|
|
||||||
if error != 0 || count - 1 == 0 {
|
|
||||||
// notify the kernel about a DDMA error or finish
|
|
||||||
kernel_manager.ddma_finished(error, channel, timestamp);
|
|
||||||
self.state = RemoteTraceState::Ready;
|
|
||||||
// further messages will be ignored (if there was an error)
|
|
||||||
} else {
|
|
||||||
// no error and not the last one awaited
|
|
||||||
self.state = RemoteTraceState::Running(count - 1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn erase(
|
|
||||||
&mut self,
|
|
||||||
id: u32,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) {
|
|
||||||
for (dest, _) in self.remote_traces.iter() {
|
|
||||||
router.route(
|
|
||||||
Packet::DmaRemoveTraceRequest {
|
|
||||||
source: self_destination,
|
|
||||||
destination: *dest,
|
|
||||||
id: id,
|
|
||||||
},
|
|
||||||
routing_table,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
);
|
|
||||||
// response will be ignored as this object will stop existing too
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn has_remote_traces(&self) -> bool {
|
|
||||||
self.remote_traces.len() > 0
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct Manager {
|
pub struct Manager {
|
||||||
entries: BTreeMap<(u8, u32), Entry>,
|
entries: BTreeMap<u32, Entry>,
|
||||||
state: ManagerState,
|
state: ManagerState,
|
||||||
current_id: u32,
|
currentid: u32,
|
||||||
current_source: u8,
|
|
||||||
|
|
||||||
remote_entries: BTreeMap<u32, RemoteTraces>,
|
|
||||||
name_map: BTreeMap<String, u32>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Manager {
|
impl Manager {
|
||||||
|
@ -270,238 +45,79 @@ impl Manager {
|
||||||
unsafe { while csr::rtio_dma::enable_read() != 0 {} }
|
unsafe { while csr::rtio_dma::enable_read() != 0 {} }
|
||||||
Manager {
|
Manager {
|
||||||
entries: BTreeMap::new(),
|
entries: BTreeMap::new(),
|
||||||
current_id: 0,
|
currentid: 0,
|
||||||
current_source: 0,
|
|
||||||
state: ManagerState::Idle,
|
state: ManagerState::Idle,
|
||||||
remote_entries: BTreeMap::new(),
|
|
||||||
name_map: BTreeMap::new(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add(
|
pub fn add(&mut self, id: u32, last: bool, trace: &[u8], trace_len: usize) -> Result<(), Error> {
|
||||||
&mut self,
|
let entry = match self.entries.get_mut(&id) {
|
||||||
source: u8,
|
|
||||||
id: u32,
|
|
||||||
status: PayloadStatus,
|
|
||||||
trace: &[u8],
|
|
||||||
trace_len: usize,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let entry = match self.entries.get_mut(&(source, id)) {
|
|
||||||
Some(entry) => {
|
Some(entry) => {
|
||||||
if entry.complete || status.is_first() {
|
if entry.complete {
|
||||||
// replace entry
|
// replace entry
|
||||||
self.entries.remove(&(source, id));
|
self.entries.remove(&id);
|
||||||
self.entries.insert(
|
self.entries.insert(
|
||||||
(source, id),
|
id,
|
||||||
Entry {
|
Entry {
|
||||||
trace: Vec::new(),
|
trace: Vec::new(),
|
||||||
padding_len: 0,
|
padding_len: 0,
|
||||||
complete: false,
|
complete: false,
|
||||||
duration: 0,
|
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
self.entries.get_mut(&(source, id)).unwrap()
|
self.entries.get_mut(&id).unwrap()
|
||||||
} else {
|
} else {
|
||||||
entry
|
entry
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
self.entries.insert(
|
self.entries.insert(
|
||||||
(source, id),
|
id,
|
||||||
Entry {
|
Entry {
|
||||||
trace: Vec::new(),
|
trace: Vec::new(),
|
||||||
padding_len: 0,
|
padding_len: 0,
|
||||||
complete: false,
|
complete: false,
|
||||||
duration: 0,
|
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
self.entries.get_mut(&(source, id)).unwrap()
|
self.entries.get_mut(&id).unwrap()
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
entry.trace.extend(&trace[0..trace_len]);
|
entry.trace.extend(&trace[0..trace_len]);
|
||||||
|
|
||||||
if status.is_last() {
|
if last {
|
||||||
entry.realign();
|
entry.trace.push(0);
|
||||||
|
let data_len = entry.trace.len();
|
||||||
|
|
||||||
|
// Realign.
|
||||||
|
entry.trace.reserve(ALIGNMENT - 1);
|
||||||
|
let padding = ALIGNMENT - entry.trace.as_ptr() as usize % ALIGNMENT;
|
||||||
|
let padding = if padding == ALIGNMENT { 0 } else { padding };
|
||||||
|
for _ in 0..padding {
|
||||||
|
// Vec guarantees that this will not reallocate
|
||||||
|
entry.trace.push(0)
|
||||||
|
}
|
||||||
|
for i in 1..data_len + 1 {
|
||||||
|
entry.trace[data_len + padding - i] = entry.trace[data_len - i]
|
||||||
|
}
|
||||||
|
entry.complete = true;
|
||||||
|
entry.padding_len = padding;
|
||||||
|
dcci_slice(&entry.trace);
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
// api for DRTIO
|
pub fn erase(&mut self, id: u32) -> Result<(), Error> {
|
||||||
pub fn erase(&mut self, source: u8, id: u32) -> Result<(), Error> {
|
match self.entries.remove(&id) {
|
||||||
match self.entries.remove(&(source, id)) {
|
|
||||||
Some(_) => Ok(()),
|
Some(_) => Ok(()),
|
||||||
None => Err(Error::IdNotFound),
|
None => Err(Error::IdNotFound),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// API for subkernel
|
pub fn playback(&mut self, id: u32, timestamp: u64) -> Result<(), Error> {
|
||||||
pub fn erase_name(
|
|
||||||
&mut self,
|
|
||||||
name: &str,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) {
|
|
||||||
if let Some(id) = self.name_map.get(name) {
|
|
||||||
if let Some(traces) = self.remote_entries.get_mut(&id) {
|
|
||||||
traces.erase(*id, router, rank, self_destination, routing_table);
|
|
||||||
self.remote_entries.remove(&id);
|
|
||||||
}
|
|
||||||
self.entries.remove(&(self_destination, *id));
|
|
||||||
self.name_map.remove(name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn remote_finished(
|
|
||||||
&mut self,
|
|
||||||
kernel_manager: &mut KernelManager,
|
|
||||||
id: u32,
|
|
||||||
error: u8,
|
|
||||||
channel: u32,
|
|
||||||
timestamp: u64,
|
|
||||||
) {
|
|
||||||
if let Some(entry) = self.remote_entries.get_mut(&id) {
|
|
||||||
entry.remote_finished(kernel_manager, error, channel, timestamp);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn ack_upload(
|
|
||||||
&mut self,
|
|
||||||
kernel_manager: &mut KernelManager,
|
|
||||||
source: u8,
|
|
||||||
id: u32,
|
|
||||||
succeeded: bool,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) {
|
|
||||||
if let Some(entry) = self.remote_entries.get_mut(&id) {
|
|
||||||
entry.ack_upload(
|
|
||||||
kernel_manager,
|
|
||||||
source,
|
|
||||||
id,
|
|
||||||
succeeded,
|
|
||||||
router,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
routing_table,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
|
||||||
pub fn upload_traces(
|
|
||||||
&mut self,
|
|
||||||
id: u32,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) -> Result<usize, Error> {
|
|
||||||
let remote_traces = self.remote_entries.get_mut(&id);
|
|
||||||
let mut len = 0;
|
|
||||||
if let Some(traces) = remote_traces {
|
|
||||||
len = traces.upload_traces(id, router, rank, self_destination, routing_table);
|
|
||||||
}
|
|
||||||
Ok(len)
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
|
||||||
pub fn playback_remote(
|
|
||||||
&mut self,
|
|
||||||
id: u32,
|
|
||||||
timestamp: u64,
|
|
||||||
router: &mut Router,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
if let Some(traces) = self.remote_entries.get_mut(&id) {
|
|
||||||
traces.playback(id, timestamp, router, rank, self_destination, routing_table);
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(Error::IdNotFound)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
|
||||||
pub fn cleanup(&mut self, router: &mut Router, rank: u8, self_destination: u8, routing_table: &RoutingTable) {
|
|
||||||
// after subkernel ends, remove all self-generated traces
|
|
||||||
for (_, id) in self.name_map.iter_mut() {
|
|
||||||
if let Some(traces) = self.remote_entries.get_mut(&id) {
|
|
||||||
traces.erase(*id, router, rank, self_destination, routing_table);
|
|
||||||
self.remote_entries.remove(&id);
|
|
||||||
}
|
|
||||||
self.entries.remove(&(self_destination, *id));
|
|
||||||
}
|
|
||||||
self.name_map.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
// API for subkernel
|
|
||||||
pub fn retrieve(&self, self_destination: u8, name: &String) -> Option<(i32, i64, bool)> {
|
|
||||||
let id = self.name_map.get(name)?;
|
|
||||||
let duration = self.entries.get(&(self_destination, *id))?.duration;
|
|
||||||
let uses_ddma = self.has_remote_traces(*id);
|
|
||||||
Some((*id as i32, duration, uses_ddma))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn has_remote_traces(&self, id: u32) -> bool {
|
|
||||||
match self.remote_entries.get(&id) {
|
|
||||||
Some(traces) => traces.has_remote_traces(),
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn put_record(&mut self, mut recorder: DmaRecorder, self_destination: u8) -> Result<u32, Error> {
|
|
||||||
let mut remote_traces: BTreeMap<u8, Sliceable> = BTreeMap::new();
|
|
||||||
|
|
||||||
let mut local_trace: Vec<u8> = Vec::new();
|
|
||||||
// analyze each entry and put in proper buckets, as the kernel core
|
|
||||||
// sends whole chunks, to limit comms/kernel CPU communication,
|
|
||||||
// and as only comms core has access to varios DMA buffers.
|
|
||||||
let mut ptr = 0;
|
|
||||||
recorder.buffer.push(0);
|
|
||||||
while recorder.buffer[ptr] != 0 {
|
|
||||||
// ptr + 3 = tgt >> 24 (destination)
|
|
||||||
let len = recorder.buffer[ptr] as usize;
|
|
||||||
let destination = recorder.buffer[ptr + 3];
|
|
||||||
if destination == 0 {
|
|
||||||
return Err(Error::MasterDmaFound);
|
|
||||||
} else if destination == self_destination {
|
|
||||||
local_trace.extend(&recorder.buffer[ptr..ptr + len]);
|
|
||||||
} else {
|
|
||||||
if let Some(remote_trace) = remote_traces.get_mut(&destination) {
|
|
||||||
remote_trace.extend(&recorder.buffer[ptr..ptr + len]);
|
|
||||||
} else {
|
|
||||||
remote_traces.insert(
|
|
||||||
destination,
|
|
||||||
Sliceable::new(destination, recorder.buffer[ptr..ptr + len].to_vec()),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// and jump to the next event
|
|
||||||
ptr += len;
|
|
||||||
}
|
|
||||||
let local_entry = Entry::from_vec(local_trace, recorder.duration);
|
|
||||||
|
|
||||||
let id = local_entry.id();
|
|
||||||
self.entries.insert((self_destination, id), local_entry);
|
|
||||||
self.remote_entries.insert(id, RemoteTraces::new(remote_traces));
|
|
||||||
let mut name = String::new();
|
|
||||||
mem::swap(&mut recorder.name, &mut name);
|
|
||||||
self.name_map.insert(name, id);
|
|
||||||
|
|
||||||
Ok(id)
|
|
||||||
}
|
|
||||||
|
|
||||||
-    pub fn playback(&mut self, source: u8, id: u32, timestamp: u64) -> Result<(), Error> {
         if self.state != ManagerState::Idle {
             return Err(Error::PlaybackInProgress);
         }

-        let entry = match self.entries.get(&(source, id)) {
+        let entry = match self.entries.get(&id) {
             Some(entry) => entry,
             None => {
                 return Err(Error::IdNotFound);
@@ -514,8 +130,7 @@ impl Manager {
         assert!(ptr as u32 % 64 == 0);

         self.state = ManagerState::Playback;
         self.current_id = id;
-        self.current_source = source;

         unsafe {
             csr::rtio_dma::base_address_write(ptr as u32);
@@ -547,8 +162,7 @@ impl Manager {
             csr::rtio_dma::error_write(1);
         }
         return Some(RtioStatus {
-            source: self.current_source,
             id: self.current_id,
             error: error,
             channel: channel,
             timestamp: timestamp,
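The playback() hunk above guards against overlapping playbacks and, on the idle_kernel_rework side, also remembers which satellite requested the playback. A minimal sketch of that guard, assuming simplified stand-in types rather than the firmware's own:

#[derive(PartialEq)]
enum ManagerState { Idle, Playback }

struct DmaPlayback { state: ManagerState, current_id: u32, current_source: u8 }

impl DmaPlayback {
    fn playback(&mut self, source: u8, id: u32) -> Result<(), &'static str> {
        if self.state != ManagerState::Idle {
            return Err("playback in progress");
        }
        self.state = ManagerState::Playback;
        self.current_id = id;
        self.current_source = source; // extra bookkeeping kept on the idle_kernel_rework side
        Ok(())
    }
}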
@@ -39,13 +39,11 @@ use libboard_zynq::{i2c::I2c, print, println, time::Milliseconds, timer::GlobalT
 use libcortex_a9::{l2c::enable_l2_cache, regs::MPIDR};
 use libregister::RegisterR;
 use libsupport_zynq::ram;
-use routing::Router;
 use subkernel::Manager as KernelManager;

 mod analyzer;
 mod dma;
 mod repeater;
-mod routing;
 mod subkernel;

 fn drtiosat_reset(reset: bool) {
@@ -74,12 +72,6 @@ fn drtiosat_tsc_loaded() -> bool {
     }
 }

-fn drtiosat_async_ready() {
-    unsafe {
-        csr::drtiosat::async_messages_ready_write(1);
-    }
-}
-
 #[cfg(has_drtio_routing)]
 macro_rules! forward {
     ($routing_table:expr, $destination:expr, $rank:expr, $repeaters:expr, $packet:expr, $timer:expr) => {{
@@ -87,11 +79,7 @@ macro_rules! forward {
             if hop != 0 {
                 let repno = (hop - 1) as usize;
                 if repno < $repeaters.len() {
-                    if $packet.expects_response() {
                         return $repeaters[repno].aux_forward($packet, $timer);
-                    } else {
-                        return $repeaters[repno].aux_send($packet);
-                    }
                 } else {
                     return Err(drtioaux::Error::RoutingError);
                 }
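The forward! macro above decides between handling a packet locally and forwarding it, based on the hop value looked up in the routing table. A small illustration of that decision; the table layout [destination][rank] -> hop matches the code above, the rest of the types are placeholders:

struct RoutingTable(Vec<Vec<u8>>); // [destination][rank] -> hop

fn forward_decision(
    table: &RoutingTable,
    destination: u8,
    rank: u8,
    n_repeaters: usize,
) -> Result<Option<usize>, &'static str> {
    let hop = table.0[destination as usize][rank as usize];
    if hop == 0 {
        Ok(None) // hop 0: this satellite is the destination, handle locally
    } else {
        let repno = (hop - 1) as usize;
        if repno < n_repeaters {
            Ok(Some(repno)) // forward through this repeater
        } else {
            Err("routing error: hop points past the last repeater")
        }
    }
}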
@@ -107,15 +95,13 @@ macro_rules! forward {
 fn process_aux_packet(
     _repeaters: &mut [repeater::Repeater],
     _routing_table: &mut drtio_routing::RoutingTable,
-    rank: &mut u8,
-    self_destination: &mut u8,
+    _rank: &mut u8,
     packet: drtioaux::Packet,
     timer: &mut GlobalTimer,
     i2c: &mut I2c,
     dma_manager: &mut DmaManager,
     analyzer: &mut Analyzer,
     kernel_manager: &mut KernelManager,
-    router: &mut Router,
 ) -> Result<(), drtioaux::Error> {
     // In the code below, *_chan_sel_write takes an u8 if there are fewer than 256 channels,
     // and u16 otherwise; hence the `as _` conversion.
@@ -136,12 +122,54 @@ fn process_aux_packet(

         drtioaux::Packet::DestinationStatusRequest { destination } => {
             #[cfg(has_drtio_routing)]
-            let hop = _routing_table.0[destination as usize][*rank as usize];
+            let hop = _routing_table.0[destination as usize][*_rank as usize];
             #[cfg(not(has_drtio_routing))]
             let hop = 0;

             if hop == 0 {
-                *self_destination = destination;
+                if let Some(status) = dma_manager.check_state() {
+                    info!(
+                        "playback done, error: {}, channel: {}, timestamp: {}",
+                        status.error, status.channel, status.timestamp
+                    );
+                    drtioaux::send(
+                        0,
+                        &drtioaux::Packet::DmaPlaybackStatus {
+                            destination: destination,
+                            id: status.id,
+                            error: status.error,
+                            channel: status.channel,
+                            timestamp: status.timestamp,
+                        },
+                    )?;
+                } else if let Some(subkernel_finished) = kernel_manager.get_last_finished() {
+                    info!(
+                        "subkernel {} finished, with exception: {}",
+                        subkernel_finished.id, subkernel_finished.with_exception
+                    );
+                    drtioaux::send(
+                        0,
+                        &drtioaux::Packet::SubkernelFinished {
+                            id: subkernel_finished.id,
+                            with_exception: subkernel_finished.with_exception,
+                        },
+                    )?;
+                } else if kernel_manager.message_is_ready() {
+                    let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
+                    match kernel_manager.message_get_slice(&mut data_slice) {
+                        Some(meta) => drtioaux::send(
+                            0,
+                            &drtioaux::Packet::SubkernelMessage {
+                                destination: destination,
+                                id: kernel_manager.get_current_id().unwrap(),
+                                last: meta.last,
+                                length: meta.len as u16,
+                                data: data_slice,
+                            },
+                        )?,
+                        None => warn!("subkernel message is ready but no message is present"),
+                    }
+                } else {
                 let errors;
                 unsafe {
                     errors = csr::drtiosat::rtio_error_read();
@@ -171,6 +199,7 @@ fn process_aux_packet(
                     drtioaux::send(0, &drtioaux::Packet::DestinationOkReply)?;
                 }
             }
+            }
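On the master side, a single DestinationStatusRequest gets exactly one reply, chosen in the order visible above: finished DDMA playback first, then a finished subkernel, then a pending subkernel message, and only otherwise the RTIO error flags or an OK reply. A hedged skeleton of that ordering (names are illustrative, not the firmware's):

enum StatusReply { DmaPlaybackStatus, SubkernelFinished, SubkernelMessage, ErrorsOrOk }

fn pick_reply(dma_done: bool, subkernel_done: bool, message_ready: bool) -> StatusReply {
    if dma_done {
        StatusReply::DmaPlaybackStatus // report finished distributed DMA playback first
    } else if subkernel_done {
        StatusReply::SubkernelFinished
    } else if message_ready {
        StatusReply::SubkernelMessage
    } else {
        StatusReply::ErrorsOrOk // fall back to the RTIO error flags / DestinationOkReply
    }
}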

         #[cfg(has_drtio_routing)]
         {
@@ -213,11 +242,11 @@ fn process_aux_packet(
             drtioaux::send(0, &drtioaux::Packet::RoutingAck)
         }
         #[cfg(has_drtio_routing)]
-        drtioaux::Packet::RoutingSetRank { rank: new_rank } => {
-            *rank = new_rank;
-            drtio_routing::interconnect_enable_all(_routing_table, new_rank);
+        drtioaux::Packet::RoutingSetRank { rank } => {
+            *_rank = rank;
+            drtio_routing::interconnect_enable_all(_routing_table, rank);

-            let rep_rank = new_rank + 1;
+            let rep_rank = rank + 1;
             for rep in _repeaters.iter() {
                 if let Err(e) = rep.set_rank(rep_rank, timer) {
                     error!("failed to set rank ({:?})", e);
@@ -238,20 +267,12 @@ fn process_aux_packet(
         #[cfg(not(has_drtio_routing))]
         drtioaux::Packet::RoutingSetRank { rank: _ } => drtioaux::send(0, &drtioaux::Packet::RoutingAck),

-        drtioaux::Packet::RoutingRetrievePackets => {
-            let packet = router
-                .get_upstream_packet()
-                .or(Some(drtioaux::Packet::RoutingNoPackets))
-                .unwrap();
-            drtioaux::send(0, &packet)
-        }
-
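The RoutingSetRank arm assigns this satellite's rank and then pushes rank + 1 to every downstream repeater, so each level of the DRTIO tree ends up one rank further from the master. A sketch of that propagation; Repeater::set_rank is the firmware's real method name, but the types and the printing here are simplified stand-ins:

struct Repeater { repno: usize }

impl Repeater {
    fn set_rank(&self, rank: u8) -> Result<(), ()> {
        // the firmware sends a RoutingSetRank aux packet downstream here
        println!("[REP#{}] set rank {}", self.repno, rank);
        Ok(())
    }
}

fn apply_rank(new_rank: u8, repeaters: &[Repeater]) {
    let rep_rank = new_rank + 1; // downstream satellites sit one rank further from the master
    for rep in repeaters {
        if let Err(e) = rep.set_rank(rep_rank) {
            eprintln!("failed to set rank on REP#{} ({:?})", rep.repno, e);
        }
    }
}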
drtioaux::Packet::MonitorRequest {
|
drtioaux::Packet::MonitorRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
channel,
|
channel,
|
||||||
probe,
|
probe,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let value;
|
let value;
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
|
@ -273,7 +294,7 @@ fn process_aux_packet(
|
||||||
overrd,
|
overrd,
|
||||||
value,
|
value,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
csr::rtio_moninj::inj_chan_sel_write(channel as _);
|
csr::rtio_moninj::inj_chan_sel_write(channel as _);
|
||||||
|
@ -287,7 +308,7 @@ fn process_aux_packet(
|
||||||
channel,
|
channel,
|
||||||
overrd,
|
overrd,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let value;
|
let value;
|
||||||
#[cfg(has_rtio_moninj)]
|
#[cfg(has_rtio_moninj)]
|
||||||
unsafe {
|
unsafe {
|
||||||
|
@ -306,7 +327,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.start().is_ok();
|
let succeeded = i2c.start().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -314,7 +335,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.restart().is_ok();
|
let succeeded = i2c.restart().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -322,7 +343,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let succeeded = i2c.stop().is_ok();
|
let succeeded = i2c.stop().is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::I2cBasicReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
|
@ -331,7 +352,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
data,
|
data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
match i2c.write(data) {
|
match i2c.write(data) {
|
||||||
Ok(ack) => drtioaux::send(
|
Ok(ack) => drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -354,7 +375,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
ack,
|
ack,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
match i2c.read(ack) {
|
match i2c.read(ack) {
|
||||||
Ok(data) => drtioaux::send(
|
Ok(data) => drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -378,7 +399,7 @@ fn process_aux_packet(
|
||||||
address,
|
address,
|
||||||
mask,
|
mask,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let ch = match mask {
|
let ch = match mask {
|
||||||
//decode from mainline, PCA9548-centric API
|
//decode from mainline, PCA9548-centric API
|
||||||
0x00 => None,
|
0x00 => None,
|
||||||
|
@ -404,7 +425,7 @@ fn process_aux_packet(
|
||||||
div: _div,
|
div: _div,
|
||||||
cs: _cs,
|
cs: _cs,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
//let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
|
//let succeeded = spi::set_config(busno, flags, length, div, cs).is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
||||||
|
@ -414,7 +435,7 @@ fn process_aux_packet(
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
data: _data,
|
data: _data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
//let succeeded = spi::write(busno, data).is_ok();
|
//let succeeded = spi::write(busno, data).is_ok();
|
||||||
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
drtioaux::send(0, &drtioaux::Packet::SpiBasicReply { succeeded: false })
|
||||||
|
@ -423,7 +444,7 @@ fn process_aux_packet(
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
busno: _busno,
|
busno: _busno,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
// todo: reimplement when/if SPI is available
|
// todo: reimplement when/if SPI is available
|
||||||
// match spi::read(busno) {
|
// match spi::read(busno) {
|
||||||
// Ok(data) => drtioaux::send(0,
|
// Ok(data) => drtioaux::send(0,
|
||||||
|
@ -443,7 +464,7 @@ fn process_aux_packet(
|
||||||
drtioaux::Packet::AnalyzerHeaderRequest {
|
drtioaux::Packet::AnalyzerHeaderRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let header = analyzer.get_header();
|
let header = analyzer.get_header();
|
||||||
drtioaux::send(
|
drtioaux::send(
|
||||||
0,
|
0,
|
||||||
|
@ -457,7 +478,7 @@ fn process_aux_packet(
|
||||||
drtioaux::Packet::AnalyzerDataRequest {
|
drtioaux::Packet::AnalyzerDataRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||||
let meta = analyzer.get_data(&mut data_slice);
|
let meta = analyzer.get_data(&mut data_slice);
|
||||||
drtioaux::send(
|
drtioaux::send(
|
||||||
|
@ -471,135 +492,55 @@ fn process_aux_packet(
|
||||||
}
|
}
|
||||||
|
|
||||||
drtioaux::Packet::DmaAddTraceRequest {
|
drtioaux::Packet::DmaAddTraceRequest {
|
||||||
source,
|
destination: _destination,
|
||||||
destination,
|
|
||||||
id,
|
id,
|
||||||
status,
|
last,
|
||||||
length,
|
length,
|
||||||
trace,
|
trace,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
*self_destination = destination;
|
let succeeded = dma_manager.add(id, last, &trace, length as usize).is_ok();
|
||||||
let succeeded = dma_manager.add(source, id, status, &trace, length as usize).is_ok();
|
drtioaux::send(0, &drtioaux::Packet::DmaAddTraceReply { succeeded: succeeded })
|
||||||
router.send(
|
|
||||||
drtioaux::Packet::DmaAddTraceReply {
|
|
||||||
source: *self_destination,
|
|
||||||
destination: source,
|
|
||||||
id: id,
|
|
||||||
succeeded: succeeded,
|
|
||||||
},
|
|
||||||
_routing_table,
|
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
drtioaux::Packet::DmaAddTraceReply {
|
|
||||||
source,
|
|
||||||
destination: _destination,
|
|
||||||
id,
|
|
||||||
succeeded,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
dma_manager.ack_upload(
|
|
||||||
kernel_manager,
|
|
||||||
source,
|
|
||||||
id,
|
|
||||||
succeeded,
|
|
||||||
router,
|
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
_routing_table,
|
|
||||||
);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
drtioaux::Packet::DmaRemoveTraceRequest {
|
drtioaux::Packet::DmaRemoveTraceRequest {
|
||||||
source,
|
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
id,
|
id,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let succeeded = dma_manager.erase(source, id).is_ok();
|
let succeeded = dma_manager.erase(id).is_ok();
|
||||||
router.send(
|
drtioaux::send(0, &drtioaux::Packet::DmaRemoveTraceReply { succeeded: succeeded })
|
||||||
drtioaux::Packet::DmaRemoveTraceReply {
|
|
||||||
destination: source,
|
|
||||||
succeeded: succeeded,
|
|
||||||
},
|
|
||||||
_routing_table,
|
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
drtioaux::Packet::DmaRemoveTraceReply {
|
|
||||||
destination: _destination,
|
|
||||||
succeeded: _,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
drtioaux::Packet::DmaPlaybackRequest {
|
drtioaux::Packet::DmaPlaybackRequest {
|
||||||
source,
|
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
id,
|
id,
|
||||||
timestamp,
|
timestamp,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let succeeded = if !kernel_manager.running() {
|
let succeeded = if !kernel_manager.running() {
|
||||||
dma_manager.playback(source, id, timestamp).is_ok()
|
dma_manager.playback(id, timestamp).is_ok()
|
||||||
} else {
|
} else {
|
||||||
false
|
false
|
||||||
};
|
};
|
||||||
router.send(
|
drtioaux::send(0, &drtioaux::Packet::DmaPlaybackReply { succeeded: succeeded })
|
||||||
drtioaux::Packet::DmaPlaybackReply {
|
|
||||||
destination: source,
|
|
||||||
succeeded: succeeded,
|
|
||||||
},
|
|
||||||
_routing_table,
|
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
drtioaux::Packet::DmaPlaybackReply {
|
|
||||||
destination: _destination,
|
|
||||||
succeeded,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
if !succeeded {
|
|
||||||
kernel_manager.ddma_nack();
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
drtioaux::Packet::DmaPlaybackStatus {
|
|
||||||
source: _,
|
|
||||||
destination: _destination,
|
|
||||||
id,
|
|
||||||
error,
|
|
||||||
channel,
|
|
||||||
timestamp,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
dma_manager.remote_finished(kernel_manager, id, error, channel, timestamp);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
drtioaux::Packet::SubkernelAddDataRequest {
|
drtioaux::Packet::SubkernelAddDataRequest {
|
||||||
destination,
|
destination: _destination,
|
||||||
id,
|
id,
|
||||||
status,
|
last,
|
||||||
length,
|
length,
|
||||||
data,
|
data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
*self_destination = destination;
|
let succeeded = kernel_manager.add(id, last, &data, length as usize).is_ok();
|
||||||
let succeeded = kernel_manager.add(id, status, &data, length as usize).is_ok();
|
|
||||||
drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
|
drtioaux::send(0, &drtioaux::Packet::SubkernelAddDataReply { succeeded: succeeded })
|
||||||
}
|
}
|
||||||
drtioaux::Packet::SubkernelLoadRunRequest {
|
drtioaux::Packet::SubkernelLoadRunRequest {
|
||||||
source,
|
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
id,
|
id,
|
||||||
run,
|
run,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let mut succeeded = kernel_manager.load(id).is_ok();
|
let mut succeeded = kernel_manager.load(id).is_ok();
|
||||||
// allow preloading a kernel with delayed run
|
// allow preloading a kernel with delayed run
|
||||||
if run {
|
if run {
|
||||||
|
@ -607,91 +548,59 @@ fn process_aux_packet(
|
||||||
// cannot run kernel while DDMA is running
|
// cannot run kernel while DDMA is running
|
||||||
succeeded = false;
|
succeeded = false;
|
||||||
} else {
|
} else {
|
||||||
succeeded |= kernel_manager.run(source, id).is_ok();
|
succeeded |= kernel_manager.run(id).is_ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
router.send(
|
drtioaux::send(0, &drtioaux::Packet::SubkernelLoadRunReply { succeeded: succeeded })
|
||||||
drtioaux::Packet::SubkernelLoadRunReply {
|
|
||||||
destination: source,
|
|
||||||
succeeded: succeeded,
|
|
||||||
},
|
|
||||||
_routing_table,
|
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
drtioaux::Packet::SubkernelLoadRunReply {
|
|
||||||
destination: _destination,
|
|
||||||
succeeded,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
// received if local subkernel started another, remote subkernel
|
|
||||||
kernel_manager.subkernel_load_run_reply(succeeded);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
drtioaux::Packet::SubkernelFinished {
|
|
||||||
destination: _destination,
|
|
||||||
id,
|
|
||||||
with_exception,
|
|
||||||
exception_src,
|
|
||||||
} => {
|
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
|
||||||
kernel_manager.remote_subkernel_finished(id, with_exception, exception_src);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
drtioaux::Packet::SubkernelExceptionRequest {
|
drtioaux::Packet::SubkernelExceptionRequest {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
let mut data_slice: [u8; SAT_PAYLOAD_MAX_SIZE] = [0; SAT_PAYLOAD_MAX_SIZE];
|
||||||
let meta = kernel_manager.exception_get_slice(&mut data_slice);
|
let meta = kernel_manager.exception_get_slice(&mut data_slice);
|
||||||
drtioaux::send(
|
drtioaux::send(
|
||||||
0,
|
0,
|
||||||
&drtioaux::Packet::SubkernelException {
|
&drtioaux::Packet::SubkernelException {
|
||||||
last: meta.status.is_last(),
|
last: meta.last,
|
||||||
length: meta.len,
|
length: meta.len,
|
||||||
data: data_slice,
|
data: data_slice,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
drtioaux::Packet::SubkernelMessage {
|
drtioaux::Packet::SubkernelMessage {
|
||||||
source,
|
destination,
|
||||||
destination: _destination,
|
id: _id,
|
||||||
id,
|
last,
|
||||||
status,
|
|
||||||
length,
|
length,
|
||||||
data,
|
data,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, destination, *_rank, _repeaters, &packet, timer);
|
||||||
kernel_manager.message_handle_incoming(status, id, length as usize, &data);
|
kernel_manager.message_handle_incoming(last, length as usize, &data);
|
||||||
router.send(
|
drtioaux::send(
|
||||||
drtioaux::Packet::SubkernelMessageAck { destination: source },
|
0,
|
||||||
_routing_table,
|
&drtioaux::Packet::SubkernelMessageAck {
|
||||||
*rank,
|
destination: destination,
|
||||||
*self_destination,
|
},
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
drtioaux::Packet::SubkernelMessageAck {
|
drtioaux::Packet::SubkernelMessageAck {
|
||||||
destination: _destination,
|
destination: _destination,
|
||||||
} => {
|
} => {
|
||||||
forward!(_routing_table, _destination, *rank, _repeaters, &packet, timer);
|
forward!(_routing_table, _destination, *_rank, _repeaters, &packet, timer);
|
||||||
if kernel_manager.message_ack_slice() {
|
if kernel_manager.message_ack_slice() {
|
||||||
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
||||||
if let Some(meta) = kernel_manager.message_get_slice(&mut data_slice) {
|
if let Some(meta) = kernel_manager.message_get_slice(&mut data_slice) {
|
||||||
// route and not send immediately as ACKs are not a beginning of a transaction
|
drtioaux::send(
|
||||||
router.route(
|
0,
|
||||||
drtioaux::Packet::SubkernelMessage {
|
&drtioaux::Packet::SubkernelMessage {
|
||||||
source: *self_destination,
|
destination: *_rank,
|
||||||
destination: meta.destination,
|
|
||||||
id: kernel_manager.get_current_id().unwrap(),
|
id: kernel_manager.get_current_id().unwrap(),
|
||||||
status: meta.status,
|
last: meta.last,
|
||||||
length: meta.len as u16,
|
length: meta.len as u16,
|
||||||
data: data_slice,
|
data: data_slice,
|
||||||
},
|
},
|
||||||
_routing_table,
|
)?;
|
||||||
*rank,
|
|
||||||
*self_destination,
|
|
||||||
);
|
|
||||||
} else {
|
} else {
|
||||||
error!("Error receiving message slice");
|
error!("Error receiving message slice");
|
||||||
}
|
}
|
||||||
|
@ -699,8 +608,8 @@ fn process_aux_packet(
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||

-        p => {
-            warn!("received unexpected aux packet: {:?}", p);
+        _ => {
+            warn!("received unexpected aux packet");
             Ok(())
         }
     }
@@ -710,35 +619,32 @@ fn process_aux_packets(
     repeaters: &mut [repeater::Repeater],
     routing_table: &mut drtio_routing::RoutingTable,
     rank: &mut u8,
-    self_destination: &mut u8,
     timer: &mut GlobalTimer,
     i2c: &mut I2c,
     dma_manager: &mut DmaManager,
     analyzer: &mut Analyzer,
     kernel_manager: &mut KernelManager,
-    router: &mut Router,
 ) {
     let result = drtioaux::recv(0).and_then(|packet| {
-        if let Some(packet) = packet.or_else(|| router.get_local_packet()) {
+        if let Some(packet) = packet {
             process_aux_packet(
                 repeaters,
                 routing_table,
                 rank,
-                self_destination,
                 packet,
                 timer,
                 i2c,
                 dma_manager,
                 analyzer,
                 kernel_manager,
-                router,
             )
         } else {
             Ok(())
         }
     });
-    if let Err(e) = result {
-        warn!("aux packet error ({:?})", e);
+    match result {
+        Ok(()) => (),
+        Err(e) => warn!("aux packet error ({:?})", e),
     }
 }
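On the idle_kernel_rework side, process_aux_packets falls back to a locally routed packet when nothing arrived over the aux link (packet.or_else(|| router.get_local_packet())). A tiny sketch of that fallback, with u32 and a VecDeque standing in for drtioaux::Packet and the Router's local queue:

use std::collections::VecDeque;

fn next_packet(received: Option<u32>, local_queue: &mut VecDeque<u32>) -> Option<u32> {
    // prefer a packet freshly received on the aux link,
    // otherwise pop one that was routed to this satellite locally
    received.or_else(|| local_queue.pop_front())
}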
|
|
||||||
|
@ -894,20 +800,17 @@ pub extern "C" fn main_core0() -> i32 {
|
||||||
}
|
}
|
||||||
let mut routing_table = drtio_routing::RoutingTable::default_empty();
|
let mut routing_table = drtio_routing::RoutingTable::default_empty();
|
||||||
let mut rank = 1;
|
let mut rank = 1;
|
||||||
let mut destination = 1;
|
|
||||||
|
|
||||||
let mut hardware_tick_ts = 0;
|
let mut hardware_tick_ts = 0;
|
||||||
|
|
||||||
let mut control = ksupport::kernel::Control::start();
|
let mut control = ksupport::kernel::Control::start();
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let mut router = Router::new();
|
|
||||||
|
|
||||||
while !drtiosat_link_rx_up() {
|
while !drtiosat_link_rx_up() {
|
||||||
drtiosat_process_errors();
|
drtiosat_process_errors();
|
||||||
#[allow(unused_mut)]
|
#[allow(unused_mut)]
|
||||||
for mut rep in repeaters.iter_mut() {
|
for mut rep in repeaters.iter_mut() {
|
||||||
rep.service(&routing_table, rank, destination, &mut router, &mut timer);
|
rep.service(&routing_table, rank, &mut timer);
|
||||||
}
|
}
|
||||||
#[cfg(feature = "target_kasli_soc")]
|
#[cfg(feature = "target_kasli_soc")]
|
||||||
{
|
{
|
||||||
|
@ -946,17 +849,15 @@ pub extern "C" fn main_core0() -> i32 {
|
||||||
&mut repeaters,
|
&mut repeaters,
|
||||||
&mut routing_table,
|
&mut routing_table,
|
||||||
&mut rank,
|
&mut rank,
|
||||||
&mut destination,
|
|
||||||
&mut timer,
|
&mut timer,
|
||||||
&mut i2c,
|
&mut i2c,
|
||||||
&mut dma_manager,
|
&mut dma_manager,
|
||||||
&mut analyzer,
|
&mut analyzer,
|
||||||
&mut kernel_manager,
|
&mut kernel_manager,
|
||||||
&mut router,
|
|
||||||
);
|
);
|
||||||
#[allow(unused_mut)]
|
#[allow(unused_mut)]
|
||||||
for mut rep in repeaters.iter_mut() {
|
for mut rep in repeaters.iter_mut() {
|
||||||
rep.service(&routing_table, rank, destination, &mut router, &mut timer);
|
rep.service(&routing_table, rank, &mut timer);
|
||||||
}
|
}
|
||||||
#[cfg(feature = "target_kasli_soc")]
|
#[cfg(feature = "target_kasli_soc")]
|
||||||
{
|
{
|
||||||
|
@ -979,45 +880,7 @@ pub extern "C" fn main_core0() -> i32 {
|
||||||
error!("aux packet error: {:?}", e);
|
error!("aux packet error: {:?}", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some(status) = dma_manager.check_state() {
|
kernel_manager.process_kern_requests(rank, timer);
|
||||||
info!(
|
|
||||||
"playback done, error: {}, channel: {}, timestamp: {}",
|
|
||||||
status.error, status.channel, status.timestamp
|
|
||||||
);
|
|
||||||
router.route(
|
|
||||||
drtioaux::Packet::DmaPlaybackStatus {
|
|
||||||
source: destination,
|
|
||||||
destination: status.source,
|
|
||||||
id: status.id,
|
|
||||||
error: status.error,
|
|
||||||
channel: status.channel,
|
|
||||||
timestamp: status.timestamp,
|
|
||||||
},
|
|
||||||
&routing_table,
|
|
||||||
rank,
|
|
||||||
destination,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
kernel_manager.process_kern_requests(
|
|
||||||
&mut router,
|
|
||||||
&routing_table,
|
|
||||||
rank,
|
|
||||||
destination,
|
|
||||||
&mut dma_manager,
|
|
||||||
&timer,
|
|
||||||
);
|
|
||||||
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
if let Some((repno, packet)) = router.get_downstream_packet() {
|
|
||||||
if let Err(e) = repeaters[repno].aux_send(&packet) {
|
|
||||||
warn!("[REP#{}] Error when sending packet to satellite ({:?})", repno, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if router.any_upstream_waiting() {
|
|
||||||
drtiosat_async_ready();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
drtiosat_reset_phy(true);
|
drtiosat_reset_phy(true);
|
||||||
|
|
|
@ -6,7 +6,6 @@ use libboard_artiq::{drtio_routing, drtioaux};
|
||||||
#[cfg(has_drtio_routing)]
|
#[cfg(has_drtio_routing)]
|
||||||
use libboard_zynq::time::Milliseconds;
|
use libboard_zynq::time::Milliseconds;
|
||||||
use libboard_zynq::timer::GlobalTimer;
|
use libboard_zynq::timer::GlobalTimer;
|
||||||
use routing::Router;
|
|
||||||
|
|
||||||
#[cfg(has_drtio_routing)]
|
#[cfg(has_drtio_routing)]
|
||||||
fn rep_link_rx_up(repno: u8) -> bool {
|
fn rep_link_rx_up(repno: u8) -> bool {
|
||||||
|
@ -54,14 +53,7 @@ impl Repeater {
|
||||||
self.state == RepeaterState::Up
|
self.state == RepeaterState::Up
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn service(
|
pub fn service(&mut self, routing_table: &drtio_routing::RoutingTable, rank: u8, timer: &mut GlobalTimer) {
|
||||||
&mut self,
|
|
||||||
routing_table: &drtio_routing::RoutingTable,
|
|
||||||
rank: u8,
|
|
||||||
destination: u8,
|
|
||||||
router: &mut Router,
|
|
||||||
timer: &mut GlobalTimer,
|
|
||||||
) {
|
|
||||||
self.process_local_errors();
|
self.process_local_errors();
|
||||||
|
|
||||||
match self.state {
|
match self.state {
|
||||||
|
@ -124,11 +116,6 @@ impl Repeater {
|
||||||
info!("[REP#{}] link is down", self.repno);
|
info!("[REP#{}] link is down", self.repno);
|
||||||
self.state = RepeaterState::Down;
|
self.state = RepeaterState::Down;
|
||||||
}
|
}
|
||||||
if self.async_messages_ready() {
|
|
||||||
if let Err(e) = self.handle_async(routing_table, rank, destination, router, timer) {
|
|
||||||
warn!("[REP#{}] Error handling async messages ({:?})", self.repno, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
RepeaterState::Failed => {
|
RepeaterState::Failed => {
|
||||||
if !rep_link_rx_up(self.repno) {
|
if !rep_link_rx_up(self.repno) {
|
||||||
|
@ -186,34 +173,6 @@ impl Repeater {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn async_messages_ready(&self) -> bool {
|
|
||||||
let async_rdy;
|
|
||||||
unsafe {
|
|
||||||
async_rdy = (csr::DRTIOREP[self.repno as usize].async_messages_ready_read)();
|
|
||||||
(csr::DRTIOREP[self.repno as usize].async_messages_ready_write)(0);
|
|
||||||
}
|
|
||||||
async_rdy == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_async(
|
|
||||||
&self,
|
|
||||||
routing_table: &drtio_routing::RoutingTable,
|
|
||||||
rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
router: &mut Router,
|
|
||||||
timer: &mut GlobalTimer,
|
|
||||||
) -> Result<(), drtioaux::Error> {
|
|
||||||
loop {
|
|
||||||
drtioaux::send(self.auxno, &drtioaux::Packet::RoutingRetrievePackets).unwrap();
|
|
||||||
let reply = self.recv_aux_timeout(200, timer)?;
|
|
||||||
match reply {
|
|
||||||
drtioaux::Packet::RoutingNoPackets => break,
|
|
||||||
packet => router.route(packet, routing_table, rank, self_destination),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn recv_aux_timeout(&self, timeout: u32, timer: &mut GlobalTimer) -> Result<drtioaux::Packet, drtioaux::Error> {
|
fn recv_aux_timeout(&self, timeout: u32, timer: &mut GlobalTimer) -> Result<drtioaux::Packet, drtioaux::Error> {
|
||||||
let max_time = timer.get_time() + Milliseconds(timeout.into());
|
let max_time = timer.get_time() + Milliseconds(timeout.into());
|
||||||
loop {
|
loop {
|
||||||
|
@ -232,17 +191,13 @@ impl Repeater {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn aux_forward(&self, request: &drtioaux::Packet, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
pub fn aux_forward(&self, request: &drtioaux::Packet, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
||||||
self.aux_send(request)?;
|
|
||||||
let reply = self.recv_aux_timeout(200, timer)?;
|
|
||||||
drtioaux::send(0, &reply).unwrap();
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn aux_send(&self, request: &drtioaux::Packet) -> Result<(), drtioaux::Error> {
|
|
||||||
if self.state != RepeaterState::Up {
|
if self.state != RepeaterState::Up {
|
||||||
return Err(drtioaux::Error::LinkDown);
|
return Err(drtioaux::Error::LinkDown);
|
||||||
}
|
}
|
||||||
drtioaux::send(self.auxno, request)
|
drtioaux::send(self.auxno, request).unwrap();
|
||||||
|
let reply = self.recv_aux_timeout(200, timer)?;
|
||||||
|
drtioaux::send(0, &reply).unwrap();
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn sync_tsc(&self, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
pub fn sync_tsc(&self, timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
||||||
|
@ -347,15 +302,7 @@ impl Repeater {
|
||||||
Repeater::default()
|
Repeater::default()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn service(
|
pub fn service(&self, _routing_table: &drtio_routing::RoutingTable, _rank: u8, _timer: &mut GlobalTimer) {}
|
||||||
&self,
|
|
||||||
_routing_table: &drtio_routing::RoutingTable,
|
|
||||||
_rank: u8,
|
|
||||||
_destination: u8,
|
|
||||||
_router: &mut Router,
|
|
||||||
_timer: &mut GlobalTimer,
|
|
||||||
) {
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sync_tsc(&self, _timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
pub fn sync_tsc(&self, _timer: &mut GlobalTimer) -> Result<(), drtioaux::Error> {
|
||||||
Ok(())
|
Ok(())
|
||||||
|
|
|
@ -1,190 +0,0 @@
|
||||||
use alloc::{collections::vec_deque::VecDeque, vec::Vec};
|
|
||||||
use core::cmp::min;
|
|
||||||
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
use libboard_artiq::pl::csr;
|
|
||||||
use libboard_artiq::{drtio_routing, drtioaux,
|
|
||||||
drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE}};
|
|
||||||
|
|
||||||
pub struct SliceMeta {
|
|
||||||
pub destination: u8,
|
|
||||||
pub len: u16,
|
|
||||||
pub status: PayloadStatus,
|
|
||||||
}
|
|
||||||
|
|
||||||
/* represents data that has to be sent to Master */
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Sliceable {
|
|
||||||
it: usize,
|
|
||||||
data: Vec<u8>,
|
|
||||||
destination: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! get_slice_fn {
|
|
||||||
($name:tt, $size:expr) => {
|
|
||||||
pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
|
|
||||||
let first = self.it == 0;
|
|
||||||
let len = min($size, self.data.len() - self.it);
|
|
||||||
let last = self.it + len == self.data.len();
|
|
||||||
let status = PayloadStatus::from_status(first, last);
|
|
||||||
|
|
||||||
data_slice[..len].clone_from_slice(&self.data[self.it..self.it + len]);
|
|
||||||
self.it += len;
|
|
||||||
|
|
||||||
SliceMeta {
|
|
||||||
destination: self.destination,
|
|
||||||
len: len as u16,
|
|
||||||
status: status,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Sliceable {
|
|
||||||
pub fn new(destination: u8, data: Vec<u8>) -> Sliceable {
|
|
||||||
Sliceable {
|
|
||||||
it: 0,
|
|
||||||
data: data,
|
|
||||||
destination: destination,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn at_end(&self) -> bool {
|
|
||||||
self.it == self.data.len()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn extend(&mut self, data: &[u8]) {
|
|
||||||
self.data.extend(data);
|
|
||||||
}
|
|
||||||
|
|
||||||
get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
|
|
||||||
get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Packets from downstream (further satellites) are received and routed appropriately.
|
|
||||||
// they're passed as soon as possible downstream (within the subtree), or sent upstream,
|
|
||||||
// which is notified about pending packets.
|
|
||||||
// for rank 1 (connected to master) satellites, these packets are passed as an answer to DestinationStatusRequest;
|
|
||||||
// for higher ranks, after getting a notification, it will transact with downstream to get the pending packets.
|
|
||||||
|
|
||||||
// forward! macro is not deprecated, as routable packets are only these that can originate
|
|
||||||
// from both master and satellite, e.g. DDMA and Subkernel.
|
|
||||||
|
|
||||||
pub struct Router {
|
|
||||||
upstream_queue: VecDeque<drtioaux::Packet>,
|
|
||||||
local_queue: VecDeque<drtioaux::Packet>,
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
downstream_queue: VecDeque<(usize, drtioaux::Packet)>,
|
|
||||||
upstream_notified: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Router {
|
|
||||||
pub fn new() -> Router {
|
|
||||||
Router {
|
|
||||||
upstream_queue: VecDeque::new(),
|
|
||||||
local_queue: VecDeque::new(),
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
downstream_queue: VecDeque::new(),
|
|
||||||
upstream_notified: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Called by local sources (DDMA, kernel) and by repeaters on receiving async data;
|
|
||||||
// messages are always buffered for both upstream and downstream
|
|
||||||
pub fn route(
|
|
||||||
&mut self,
|
|
||||||
packet: drtioaux::Packet,
|
|
||||||
_routing_table: &drtio_routing::RoutingTable,
|
|
||||||
_rank: u8,
|
|
||||||
self_destination: u8,
|
|
||||||
) {
|
|
||||||
let destination = packet.routable_destination();
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
{
|
|
||||||
if let Some(destination) = destination {
|
|
||||||
let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
|
|
||||||
if destination == self_destination {
|
|
||||||
self.local_queue.push_back(packet);
|
|
||||||
} else if hop > 0 && hop < csr::DRTIOREP.len() {
|
|
||||||
let repno = (hop - 1) as usize;
|
|
||||||
self.downstream_queue.push_back((repno, packet));
|
|
||||||
} else {
|
|
||||||
self.upstream_queue.push_back(packet);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
error!("Received an unroutable packet: {:?}", packet);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[cfg(not(has_drtio_routing))]
|
|
||||||
{
|
|
||||||
if destination == Some(self_destination) {
|
|
||||||
self.local_queue.push_back(packet);
|
|
||||||
} else {
|
|
||||||
self.upstream_queue.push_back(packet);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sends a packet to a required destination, routing if necessary
|
|
||||||
pub fn send(
|
|
||||||
&mut self,
|
|
||||||
packet: drtioaux::Packet,
|
|
||||||
_routing_table: &drtio_routing::RoutingTable,
|
|
||||||
_rank: u8,
|
|
||||||
_destination: u8,
|
|
||||||
) -> Result<(), drtioaux::Error> {
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
{
|
|
||||||
let destination = packet.routable_destination();
|
|
||||||
if let Some(destination) = destination {
|
|
||||||
let hop = _routing_table.0[destination as usize][_rank as usize] as usize;
|
|
||||||
if destination == 0 {
|
|
||||||
// response is needed immediately if master required it
|
|
||||||
drtioaux::send(0, &packet)?;
|
|
||||||
} else if !(hop > 0 && hop < csr::DRTIOREP.len()) {
|
|
||||||
// higher rank can wait
|
|
||||||
self.upstream_queue.push_back(packet);
|
|
||||||
} else {
|
|
||||||
let repno = (hop - 1) as usize;
|
|
||||||
// transaction will occur at closest possible opportunity
|
|
||||||
self.downstream_queue.push_back((repno, packet));
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
// packet not supported in routing, fallback - sent directly
|
|
||||||
drtioaux::send(0, &packet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[cfg(not(has_drtio_routing))]
|
|
||||||
{
|
|
||||||
drtioaux::send(0, &packet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn any_upstream_waiting(&mut self) -> bool {
|
|
||||||
let empty = self.upstream_queue.is_empty();
|
|
||||||
if !empty && !self.upstream_notified {
|
|
||||||
self.upstream_notified = true; // so upstream will not get spammed with notifications
|
|
||||||
true
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_upstream_packet(&mut self) -> Option<drtioaux::Packet> {
|
|
||||||
let packet = self.upstream_queue.pop_front();
|
|
||||||
if packet.is_none() {
|
|
||||||
self.upstream_notified = false;
|
|
||||||
}
|
|
||||||
packet
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(has_drtio_routing)]
|
|
||||||
pub fn get_downstream_packet(&mut self) -> Option<(usize, drtioaux::Packet)> {
|
|
||||||
self.downstream_queue.pop_front()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_local_packet(&mut self) -> Option<drtioaux::Packet> {
|
|
||||||
self.local_queue.pop_front()
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,52 +1,26 @@
|
||||||
use alloc::{collections::BTreeMap,
|
use alloc::{collections::{BTreeMap, VecDeque},
|
||||||
format,
|
format,
|
||||||
string::{String, ToString},
|
string::{String, ToString},
|
||||||
vec::Vec};
|
vec::Vec};
|
||||||
use core::{option::NoneError, slice, str};
|
use core::{cmp::min, option::NoneError, slice, str};
|
||||||
|
|
||||||
use core_io::{Error as IoError, Write};
|
use core_io::{Error as IoError, Write};
|
||||||
use cslice::AsCSlice;
|
use cslice::AsCSlice;
|
||||||
use dma::{Error as DmaError, Manager as DmaManager};
|
|
||||||
use io::{Cursor, ProtoWrite};
|
use io::{Cursor, ProtoWrite};
|
||||||
use ksupport::{eh_artiq, kernel, rpc};
|
use ksupport::{eh_artiq, kernel, rpc};
|
||||||
use libboard_artiq::{drtio_routing::RoutingTable,
|
use libboard_artiq::{drtioaux_proto::{MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE},
|
||||||
drtioaux,
|
|
||||||
drtioaux_proto::{PayloadStatus, MASTER_PAYLOAD_MAX_SIZE, SAT_PAYLOAD_MAX_SIZE},
|
|
||||||
pl::csr};
|
pl::csr};
|
||||||
use libboard_zynq::{time::Milliseconds, timer::GlobalTimer};
|
use libboard_zynq::{time::Milliseconds, timer::GlobalTimer};
|
||||||
use libcortex_a9::sync_channel::Receiver;
|
use libcortex_a9::sync_channel::Receiver;
|
||||||
use log::warn;
|
use log::warn;
|
||||||
use routing::{Router, SliceMeta, Sliceable};
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq)]
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
enum KernelState {
|
enum KernelState {
|
||||||
Absent,
|
Absent,
|
||||||
Loaded,
|
Loaded,
|
||||||
Running,
|
Running,
|
||||||
MsgAwait {
|
MsgAwait(Milliseconds, Vec<u8>),
|
||||||
max_time: Option<Milliseconds>,
|
|
||||||
id: u32,
|
|
||||||
tags: Vec<u8>,
|
|
||||||
},
|
|
||||||
MsgSending,
|
MsgSending,
|
||||||
SubkernelAwaitLoad,
|
|
||||||
SubkernelAwaitFinish {
|
|
||||||
max_time: Option<Milliseconds>,
|
|
||||||
id: u32,
|
|
||||||
},
|
|
||||||
DmaUploading,
|
|
||||||
DmaPendingPlayback {
|
|
||||||
id: u32,
|
|
||||||
timestamp: u64,
|
|
||||||
},
|
|
||||||
DmaPendingAwait {
|
|
||||||
id: u32,
|
|
||||||
timestamp: u64,
|
|
||||||
max_time: Milliseconds,
|
|
||||||
},
|
|
||||||
DmaAwait {
|
|
||||||
max_time: Milliseconds,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
@ -57,9 +31,7 @@ pub enum Error {
|
||||||
NoMessage,
|
NoMessage,
|
||||||
AwaitingMessage,
|
AwaitingMessage,
|
||||||
SubkernelIoError,
|
SubkernelIoError,
|
||||||
DrtioError,
|
|
||||||
KernelException(Sliceable),
|
KernelException(Sliceable),
|
||||||
DmaError(DmaError),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<NoneError> for Error {
|
impl From<NoneError> for Error {
|
||||||
|
@ -74,38 +46,33 @@ impl From<IoError> for Error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<DmaError> for Error {
|
|
||||||
fn from(value: DmaError) -> Error {
|
|
||||||
Error::DmaError(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<()> for Error {
|
impl From<()> for Error {
|
||||||
fn from(_: ()) -> Error {
|
fn from(_: ()) -> Error {
|
||||||
Error::NoMessage
|
Error::NoMessage
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<drtioaux::Error> for Error {
|
|
||||||
fn from(_value: drtioaux::Error) -> Error {
|
|
||||||
Error::DrtioError
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! unexpected {
|
macro_rules! unexpected {
|
||||||
($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
|
($($arg:tt)*) => (return Err(Error::Unexpected(format!($($arg)*))));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* represents data that has to be sent to Master */
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Sliceable {
|
||||||
|
it: usize,
|
||||||
|
data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
/* represents interkernel messages */
|
/* represents interkernel messages */
|
||||||
struct Message {
|
struct Message {
|
||||||
count: u8,
|
count: u8,
|
||||||
id: u32,
|
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq)]
|
#[derive(PartialEq)]
|
||||||
enum OutMessageState {
|
enum OutMessageState {
|
||||||
NoMessage,
|
NoMessage,
|
||||||
|
MessageReady,
|
||||||
MessageBeingSent,
|
MessageBeingSent,
|
||||||
MessageSent,
|
MessageSent,
|
||||||
MessageAcknowledged,
|
MessageAcknowledged,
|
||||||
|
@ -115,7 +82,7 @@ enum OutMessageState {
|
||||||
struct MessageManager {
|
struct MessageManager {
|
||||||
out_message: Option<Sliceable>,
|
out_message: Option<Sliceable>,
|
||||||
out_state: OutMessageState,
|
out_state: OutMessageState,
|
||||||
in_queue: Vec<Message>,
|
in_queue: VecDeque<Message>,
|
||||||
in_buffer: Option<Message>,
|
in_buffer: Option<Message>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -125,8 +92,6 @@ struct Session {
|
||||||
kernel_state: KernelState,
|
kernel_state: KernelState,
|
||||||
last_exception: Option<Sliceable>,
|
last_exception: Option<Sliceable>,
|
||||||
messages: MessageManager,
|
messages: MessageManager,
|
||||||
source: u8, // which destination requested running the kernel
|
|
||||||
subkernels_finished: Vec<u32>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Session {
|
impl Session {
|
||||||
|
@ -136,15 +101,13 @@ impl Session {
|
||||||
kernel_state: KernelState::Absent,
|
kernel_state: KernelState::Absent,
|
||||||
last_exception: None,
|
last_exception: None,
|
||||||
messages: MessageManager::new(),
|
messages: MessageManager::new(),
|
||||||
source: 0,
|
|
||||||
subkernels_finished: Vec::new(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn running(&self) -> bool {
|
fn running(&self) -> bool {
|
||||||
match self.kernel_state {
|
match self.kernel_state {
|
||||||
KernelState::Absent | KernelState::Loaded => false,
|
KernelState::Absent | KernelState::Loaded => false,
|
||||||
_ => true,
|
KernelState::Running | KernelState::MsgAwait { .. } | KernelState::MsgSending => true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -166,8 +129,40 @@ pub struct Manager<'a> {
|
||||||
pub struct SubkernelFinished {
|
pub struct SubkernelFinished {
|
||||||
pub id: u32,
|
pub id: u32,
|
||||||
pub with_exception: bool,
|
pub with_exception: bool,
|
||||||
pub exception_source: u8,
|
}
|
||||||
pub source: u8,
|
|
||||||
|
pub struct SliceMeta {
|
||||||
|
pub len: u16,
|
||||||
|
pub last: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! get_slice_fn {
|
||||||
|
($name:tt, $size:expr) => {
|
||||||
|
pub fn $name(&mut self, data_slice: &mut [u8; $size]) -> SliceMeta {
|
||||||
|
if self.data.len() == 0 {
|
||||||
|
return SliceMeta { len: 0, last: true };
|
||||||
|
}
|
||||||
|
let len = min($size, self.data.len() - self.it);
|
||||||
|
let last = self.it + len == self.data.len();
|
||||||
|
|
||||||
|
data_slice[..len].clone_from_slice(&self.data[self.it..self.it + len]);
|
||||||
|
self.it += len;
|
||||||
|
|
||||||
|
SliceMeta {
|
||||||
|
len: len as u16,
|
||||||
|
last: last,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Sliceable {
|
||||||
|
pub fn new(data: Vec<u8>) -> Sliceable {
|
||||||
|
Sliceable { it: 0, data: data }
|
||||||
|
}
|
||||||
|
|
||||||
|
get_slice_fn!(get_slice_sat, SAT_PAYLOAD_MAX_SIZE);
|
||||||
|
get_slice_fn!(get_slice_master, MASTER_PAYLOAD_MAX_SIZE);
|
||||||
}
|
}
|
||||||
|
|
||||||
impl MessageManager {
|
impl MessageManager {
|
||||||
|
@ -175,35 +170,36 @@ impl MessageManager {
|
||||||
MessageManager {
|
MessageManager {
|
||||||
out_message: None,
|
out_message: None,
|
||||||
out_state: OutMessageState::NoMessage,
|
out_state: OutMessageState::NoMessage,
|
||||||
in_queue: Vec::new(),
|
in_queue: VecDeque::new(),
|
||||||
in_buffer: None,
|
in_buffer: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn handle_incoming(
|
pub fn handle_incoming(&mut self, last: bool, length: usize, data: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
|
||||||
&mut self,
|
|
||||||
status: PayloadStatus,
|
|
||||||
id: u32,
|
|
||||||
length: usize,
|
|
||||||
data: &[u8; MASTER_PAYLOAD_MAX_SIZE],
|
|
||||||
) {
|
|
||||||
// called when receiving a message from master
|
// called when receiving a message from master
|
||||||
if status.is_first() {
|
|
||||||
self.in_buffer = None;
|
|
||||||
}
|
|
||||||
match self.in_buffer.as_mut() {
|
match self.in_buffer.as_mut() {
|
||||||
Some(message) => message.data.extend(&data[..length]),
|
Some(message) => message.data.extend(&data[..length]),
|
||||||
None => {
|
None => {
|
||||||
self.in_buffer = Some(Message {
|
self.in_buffer = Some(Message {
|
||||||
count: data[0],
|
count: data[0],
|
||||||
id: id,
|
|
||||||
data: data[1..length].to_vec(),
|
data: data[1..length].to_vec(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if status.is_last() {
|
if last {
|
||||||
// when done, remove from working queue
|
// when done, remove from working queue
|
||||||
self.in_queue.push(self.in_buffer.take().unwrap());
|
self.in_queue.push_back(self.in_buffer.take().unwrap());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn is_outgoing_ready(&mut self) -> bool {
|
||||||
|
// called by main loop, to see if there's anything to send, will send it afterwards
|
||||||
|
match self.out_state {
|
||||||
|
OutMessageState::MessageReady => {
|
||||||
|
self.out_state = OutMessageState::MessageBeingSent;
|
||||||
|
true
|
||||||
|
}
|
||||||
|
_ => false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -222,7 +218,7 @@ impl MessageManager {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
let meta = self.out_message.as_mut()?.get_slice_master(data_slice);
|
let meta = self.out_message.as_mut()?.get_slice_master(data_slice);
|
||||||
if meta.status.is_last() {
|
if meta.last {
|
||||||
// clear the message slot
|
// clear the message slot
|
||||||
self.out_message = None;
|
self.out_message = None;
|
||||||
// notify kernel with a flag that message is sent
|
// notify kernel with a flag that message is sent
|
||||||
|
@ -246,44 +242,15 @@ impl MessageManager {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn accept_outgoing(
|
pub fn accept_outgoing(&mut self, message: Vec<u8>) -> Result<(), Error> {
|
||||||
&mut self,
|
// service tag skipped in kernel
|
||||||
id: u32,
|
self.out_message = Some(Sliceable::new(message));
|
||||||
self_destination: u8,
|
self.out_state = OutMessageState::MessageReady;
|
||||||
destination: u8,
|
|
||||||
message: Vec<u8>,
|
|
||||||
routing_table: &RoutingTable,
|
|
||||||
rank: u8,
|
|
||||||
router: &mut Router,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
self.out_message = Some(Sliceable::new(destination, message));
|
|
||||||
|
|
||||||
let mut data_slice: [u8; MASTER_PAYLOAD_MAX_SIZE] = [0; MASTER_PAYLOAD_MAX_SIZE];
|
|
||||||
self.out_state = OutMessageState::MessageBeingSent;
|
|
||||||
let meta = self.get_outgoing_slice(&mut data_slice).unwrap();
|
|
||||||
router.route(
|
|
||||||
drtioaux::Packet::SubkernelMessage {
|
|
||||||
source: self_destination,
|
|
||||||
destination: destination,
|
|
||||||
id: id,
|
|
||||||
status: meta.status,
|
|
||||||
length: meta.len as u16,
|
|
||||||
data: data_slice,
|
|
||||||
},
|
|
||||||
routing_table,
|
|
||||||
rank,
|
|
||||||
self_destination,
|
|
||||||
);
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_incoming(&mut self, id: u32) -> Option<Message> {
|
pub fn get_incoming(&mut self) -> Option<Message> {
|
||||||
for i in 0..self.in_queue.len() {
|
self.in_queue.pop_front()
|
||||||
if self.in_queue[i].id == id {
|
|
||||||
return Some(self.in_queue.remove(i));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -298,10 +265,10 @@ impl<'a> Manager<'_> {
         }
     }
 
-    pub fn add(&mut self, id: u32, status: PayloadStatus, data: &[u8], data_len: usize) -> Result<(), Error> {
+    pub fn add(&mut self, id: u32, last: bool, data: &[u8], data_len: usize) -> Result<(), Error> {
         let kernel = match self.kernels.get_mut(&id) {
             Some(kernel) => {
-                if kernel.complete || status.is_first() {
+                if kernel.complete {
                     // replace entry
                     self.kernels.remove(&id);
                     self.kernels.insert(
@@ -329,7 +296,7 @@ impl<'a> Manager<'_> {
         };
         kernel.library.extend(&data[0..data_len]);
 
-        kernel.complete = status.is_last();
+        kernel.complete = last;
         Ok(())
     }
 
@@ -344,12 +311,12 @@ impl<'a> Manager<'_> {
         }
     }
 
-    pub fn run(&mut self, source: u8, id: u32) -> Result<(), Error> {
+    pub fn run(&mut self, id: u32) -> Result<(), Error> {
+        info!("starting subkernel #{}", id);
         if self.session.kernel_state != KernelState::Loaded || self.session.id != id {
             self.load(id)?;
         }
         self.session.kernel_state = KernelState::Running;
-        self.session.source = source;
         unsafe {
             csr::cri_con::selected_write(2);
         }
@@ -358,17 +325,11 @@ impl<'a> Manager<'_> {
         Ok(())
     }
 
-    pub fn message_handle_incoming(
-        &mut self,
-        status: PayloadStatus,
-        id: u32,
-        length: usize,
-        slice: &[u8; MASTER_PAYLOAD_MAX_SIZE],
-    ) {
+    pub fn message_handle_incoming(&mut self, last: bool, length: usize, slice: &[u8; MASTER_PAYLOAD_MAX_SIZE]) {
         if !self.running() {
             return;
         }
-        self.session.messages.handle_incoming(status, id, length, slice);
+        self.session.messages.handle_incoming(last, length, slice);
     }
 
     pub fn message_get_slice(&mut self, slice: &mut [u8; MASTER_PAYLOAD_MAX_SIZE]) -> Option<SliceMeta> {
@@ -386,6 +347,10 @@ impl<'a> Manager<'_> {
         self.session.messages.ack_slice()
     }
 
+    pub fn message_is_ready(&mut self) -> bool {
+        self.session.messages.is_outgoing_ready()
+    }
+
     pub fn load(&mut self, id: u32) -> Result<(), Error> {
         if self.session.id == id && self.session.kernel_state == KernelState::Loaded {
             return Ok(());
@@ -413,14 +378,14 @@ impl<'a> Manager<'_> {
     pub fn exception_get_slice(&mut self, data_slice: &mut [u8; SAT_PAYLOAD_MAX_SIZE]) -> SliceMeta {
         match self.session.last_exception.as_mut() {
             Some(exception) => exception.get_slice_sat(data_slice),
-            None => SliceMeta {
-                destination: 0,
-                len: 0,
-                status: PayloadStatus::FirstAndLast,
-            },
+            None => SliceMeta { len: 0, last: true },
         }
     }
 
+    pub fn get_last_finished(&mut self) -> Option<SubkernelFinished> {
+        self.last_finished.take()
+    }
+
     fn kernel_stop(&mut self) {
         self.session.kernel_state = KernelState::Absent;
         unsafe {
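On the master side, `get_last_finished` lets the caller drain the completion record with `Option::take`, so each finished subkernel is reported exactly once (the idle_kernel_rework side instead routes a `SubkernelFinished` packet itself, as the next hunk shows). A reduced sketch of that take-and-report pattern, with `SubkernelFinished` cut down to the two fields visible on the master side:

```rust
// Reduced stand-in for the firmware's SubkernelFinished record.
struct SubkernelFinished {
    id: u32,
    with_exception: bool,
}

struct Manager {
    last_finished: Option<SubkernelFinished>,
}

impl Manager {
    fn get_last_finished(&mut self) -> Option<SubkernelFinished> {
        // take() leaves None behind, so a second poll reports nothing
        self.last_finished.take()
    }
}

fn main() {
    let mut mgr = Manager {
        last_finished: Some(SubkernelFinished { id: 1, with_exception: false }),
    };
    if let Some(finished) = mgr.get_last_finished() {
        println!("subkernel {} finished, with exception: {}", finished.id, finished.with_exception);
    }
    assert!(mgr.get_last_finished().is_none());
}
```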
@@ -450,92 +415,13 @@ impl<'a> Manager<'_> {
             &[],
             0,
         ) {
-            Ok(_) => self.session.last_exception = Some(Sliceable::new(0, writer.into_inner())),
+            Ok(_) => self.session.last_exception = Some(Sliceable::new(writer.into_inner())),
             Err(_) => error!("Error writing exception data"),
         }
         self.kernel_stop();
     }
 
-    pub fn ddma_finished(&mut self, error: u8, channel: u32, timestamp: u64) {
-        if let KernelState::DmaAwait { .. } = self.session.kernel_state {
-            self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
-                timeout: false,
-                error: error,
-                channel: channel,
-                timestamp: timestamp,
-            });
-            self.session.kernel_state = KernelState::Running;
-        }
-    }
-
-    pub fn ddma_nack(&mut self) {
-        // for simplicity treat it as a timeout...
-        if let KernelState::DmaAwait { .. } = self.session.kernel_state {
-            self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
-                timeout: true,
-                error: 0,
-                channel: 0,
-                timestamp: 0,
-            });
-            self.session.kernel_state = KernelState::Running;
-        }
-    }
-
-    pub fn ddma_remote_uploaded(&mut self, succeeded: bool) -> Option<(u32, u64)> {
-        // returns a tuple of id, timestamp in case a playback needs to be started immediately
-        if !succeeded {
-            self.kernel_stop();
-            self.runtime_exception(Error::DmaError(DmaError::UploadFail));
-        }
-        let res = match self.session.kernel_state {
-            KernelState::DmaPendingPlayback { id, timestamp } => {
-                self.session.kernel_state = KernelState::Running;
-                Some((id, timestamp))
-            }
-            KernelState::DmaPendingAwait {
-                id,
-                timestamp,
-                max_time,
-            } => {
-                self.session.kernel_state = KernelState::DmaAwait { max_time: max_time };
-                Some((id, timestamp))
-            }
-            KernelState::DmaUploading => {
-                self.session.kernel_state = KernelState::Running;
-                None
-            }
-            _ => None,
-        };
-        res
-    }
-
-    pub fn process_kern_requests(
-        &mut self,
-        router: &mut Router,
-        routing_table: &RoutingTable,
-        rank: u8,
-        destination: u8,
-        dma_manager: &mut DmaManager,
-        timer: &GlobalTimer,
-    ) {
-        if let Some(subkernel_finished) = self.last_finished.take() {
-            info!(
-                "subkernel {} finished, with exception: {}",
-                subkernel_finished.id, subkernel_finished.with_exception
-            );
-            router.route(
-                drtioaux::Packet::SubkernelFinished {
-                    destination: subkernel_finished.source,
-                    id: subkernel_finished.id,
-                    with_exception: subkernel_finished.with_exception,
-                    exception_src: subkernel_finished.exception_source,
-                },
-                &routing_table,
-                rank,
-                destination,
-            );
-        }
-
+    pub fn process_kern_requests(&mut self, rank: u8, timer: GlobalTimer) {
         if !self.running() {
             return;
         }
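The deleted `ddma_*` handlers above implement a small state machine around distributed DMA: playback or await requests that arrive while traces are still uploading are parked in `DmaPendingPlayback` / `DmaPendingAwait`, and `ddma_remote_uploaded` later releases them. The sketch below mirrors only that transition logic; the state enum is simplified and timestamps are plain integers rather than the timer types used in the firmware.

```rust
// Simplified model of the KernelState transitions handled by ddma_remote_uploaded.
#[derive(Debug, Clone, Copy, PartialEq)]
enum DmaState {
    Running,
    Uploading,
    // playback was requested while traces were still uploading
    PendingPlayback { id: u32, timestamp: u64 },
    // playback and an await were requested while still uploading
    PendingAwait { id: u32, timestamp: u64, max_time: u64 },
    Await { max_time: u64 },
}

// Once the remote confirms the upload, either start the deferred playback
// (returning its id and timestamp) or simply resume the kernel.
fn remote_uploaded(state: DmaState) -> (DmaState, Option<(u32, u64)>) {
    match state {
        DmaState::PendingPlayback { id, timestamp } => (DmaState::Running, Some((id, timestamp))),
        DmaState::PendingAwait { id, timestamp, max_time } => {
            (DmaState::Await { max_time }, Some((id, timestamp)))
        }
        DmaState::Uploading => (DmaState::Running, None),
        other => (other, None),
    }
}

fn main() {
    let state = DmaState::PendingAwait { id: 4, timestamp: 1000, max_time: 11_000 };
    let (next, playback) = remote_uploaded(state);
    assert_eq!(next, DmaState::Await { max_time: 11_000 });
    assert_eq!(playback, Some((4, 1000)));
    println!("next state: {:?}, start playback: {:?}", next, playback);
}
```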
@@ -548,8 +434,6 @@ impl<'a> Manager<'_> {
                 self.last_finished = Some(SubkernelFinished {
                     id: self.session.id,
                     with_exception: true,
-                    exception_source: destination,
-                    source: self.session.source,
                 });
             }
             Err(e) => {
@@ -558,19 +442,15 @@ impl<'a> Manager<'_> {
                 self.last_finished = Some(SubkernelFinished {
                     id: self.session.id,
                     with_exception: true,
-                    exception_source: destination,
-                    source: self.session.source,
                 });
             }
         }
 
-        match self.process_kern_message(router, routing_table, rank, destination, dma_manager, timer) {
+        match self.process_kern_message(rank, timer) {
            Ok(true) => {
                 self.last_finished = Some(SubkernelFinished {
                     id: self.session.id,
                     with_exception: false,
-                    exception_source: 0,
-                    source: self.session.source,
                 });
             }
             Ok(false) | Err(Error::NoMessage) => (),
@@ -579,8 +459,6 @@ impl<'a> Manager<'_> {
                 self.last_finished = Some(SubkernelFinished {
                     id: self.session.id,
                     with_exception: true,
-                    exception_source: destination,
-                    source: self.session.source,
                 });
             }
             Err(e) => {
@@ -589,52 +467,16 @@ impl<'a> Manager<'_> {
                 self.last_finished = Some(SubkernelFinished {
                     id: self.session.id,
                     with_exception: true,
-                    exception_source: destination,
-                    source: self.session.source,
                 });
             }
         }
     }
 
-    pub fn subkernel_load_run_reply(&mut self, succeeded: bool) {
-        if self.session.kernel_state == KernelState::SubkernelAwaitLoad {
-            self.control
-                .tx
-                .send(kernel::Message::SubkernelLoadRunReply { succeeded: succeeded });
-            self.session.kernel_state = KernelState::Running;
-        } else {
-            warn!("received unsolicited SubkernelLoadRunReply");
-        }
-    }
-
-    pub fn remote_subkernel_finished(&mut self, id: u32, with_exception: bool, exception_source: u8) {
-        if with_exception {
-            self.kernel_stop();
-            self.last_finished = Some(SubkernelFinished {
-                source: self.session.source,
-                id: self.session.id,
-                with_exception: true,
-                exception_source: exception_source,
-            })
-        } else {
-            self.session.subkernels_finished.push(id);
-        }
-    }
-
-    fn process_kern_message(
-        &mut self,
-        router: &mut Router,
-        routing_table: &RoutingTable,
-        rank: u8,
-        self_destination: u8,
-        dma_manager: &mut DmaManager,
-        timer: &GlobalTimer,
-    ) -> Result<bool, Error> {
+    fn process_kern_message(&mut self, rank: u8, timer: GlobalTimer) -> Result<bool, Error> {
         let reply = self.control.rx.try_recv()?;
         match reply {
             kernel::Message::KernelFinished(_async_errors) => {
                 self.kernel_stop();
-                dma_manager.cleanup(router, rank, self_destination, routing_table);
                 return Ok(true);
             }
             kernel::Message::KernelException(exceptions, stack_pointers, backtrace, async_errors) => {
@@ -651,7 +493,7 @@ impl<'a> Manager<'_> {
                     Err(_) => error!("Error writing exception data"),
                 }
                 self.kernel_stop();
-                return Err(Error::KernelException(Sliceable::new(0, writer.into_inner())));
+                return Err(Error::KernelException(Sliceable::new(writer.into_inner())));
             }
             kernel::Message::CachePutRequest(key, value) => {
                 self.cache.insert(key, value);
@@ -661,117 +503,18 @@ impl<'a> Manager<'_> {
                 let value = self.cache.get(&key).unwrap_or(&DEFAULT).clone();
                 self.control.tx.send(kernel::Message::CacheGetReply(value));
             }
-            kernel::Message::DmaPutRequest(recorder) => {
-                // ddma is always used on satellites
-                if let Ok(id) = dma_manager.put_record(recorder, self_destination) {
-                    dma_manager.upload_traces(id, router, rank, self_destination, routing_table)?;
-                    self.session.kernel_state = KernelState::DmaUploading;
-                } else {
-                    unexpected!("DMAError: found an unsupported call to RTIO devices on master")
-                }
-            }
-            kernel::Message::DmaEraseRequest(name) => {
-                dma_manager.erase_name(&name, router, rank, self_destination, routing_table);
-            }
-            kernel::Message::DmaGetRequest(name) => {
-                let dma_meta = dma_manager.retrieve(self_destination, &name);
-                self.control.tx.send(kernel::Message::DmaGetReply(dma_meta));
-            }
-            kernel::Message::DmaStartRemoteRequest { id, timestamp } => {
-                if self.session.kernel_state != KernelState::DmaUploading {
-                    dma_manager.playback_remote(
-                        id as u32,
-                        timestamp as u64,
-                        router,
-                        rank,
-                        self_destination,
-                        routing_table,
-                    )?;
-                } else {
-                    self.session.kernel_state = KernelState::DmaPendingPlayback {
-                        id: id as u32,
-                        timestamp: timestamp as u64,
-                    };
-                }
-            }
-            kernel::Message::DmaAwaitRemoteRequest(_id) => {
-                let max_time = timer.get_time() + Milliseconds(10000);
-                self.session.kernel_state = match self.session.kernel_state {
-                    // if we are still waiting for the traces to be uploaded, extend the state by timeout
-                    KernelState::DmaPendingPlayback { id, timestamp } => KernelState::DmaPendingAwait {
-                        id: id,
-                        timestamp: timestamp,
-                        max_time: max_time,
-                    },
-                    _ => KernelState::DmaAwait { max_time: max_time },
-                };
-            }
-
-            kernel::Message::SubkernelMsgSend {
-                id: _id,
-                destination: msg_dest,
-                data,
-            } => {
-                let msg_dest = msg_dest.or(Some(self.session.source)).unwrap();
-                self.session.messages.accept_outgoing(
-                    self.session.id,
-                    self_destination,
-                    msg_dest,
-                    data,
-                    routing_table,
-                    rank,
-                    router,
-                )?;
+            kernel::Message::SubkernelMsgSend { id: _, data } => {
+                self.session.messages.accept_outgoing(data)?;
                 self.session.kernel_state = KernelState::MsgSending;
             }
-            kernel::Message::SubkernelMsgRecvRequest { id, timeout, tags } => {
-                let id = if id == -1 { self.session.id } else { id as u32 };
-                let max_time = if timeout > 0 {
-                    Some(timer.get_time() + Milliseconds(timeout as u64))
-                } else {
-                    None
-                };
-                self.session.kernel_state = KernelState::MsgAwait {
-                    max_time: max_time,
-                    id: id,
-                    tags: tags,
-                };
-            }
-            kernel::Message::SubkernelLoadRunRequest {
-                id,
-                destination: sk_destination,
-                run,
-            } => {
-                self.session.kernel_state = KernelState::SubkernelAwaitLoad;
-                router.route(
-                    drtioaux::Packet::SubkernelLoadRunRequest {
-                        source: self_destination,
-                        destination: sk_destination,
-                        id: id,
-                        run: run,
-                    },
-                    routing_table,
-                    rank,
-                    self_destination,
-                );
-            }
-
-            kernel::Message::SubkernelAwaitFinishRequest { id, timeout } => {
-                let max_time = if timeout > 0 {
-                    Some(timer.get_time() + Milliseconds(timeout as u64))
-                } else {
-                    None
-                };
-                self.session.kernel_state = KernelState::SubkernelAwaitFinish {
-                    max_time: max_time,
-                    id: id,
-                };
+            kernel::Message::SubkernelMsgRecvRequest { id: _, timeout, tags } => {
+                let max_time = timer.get_time() + Milliseconds(timeout);
+                self.session.kernel_state = KernelState::MsgAwait(max_time, tags);
             }
             kernel::Message::UpDestinationsRequest(destination) => {
-                self.control.tx.send(kernel::Message::UpDestinationsReply(
-                    destination == (self_destination as i32),
-                ));
+                self.control
+                    .tx
+                    .send(kernel::Message::UpDestinationsReply(destination == (rank as i32)));
             }
             _ => {
                 unexpected!("unexpected message from core1 while kernel was running: {:?}", reply);
@@ -780,11 +523,10 @@ impl<'a> Manager<'_> {
         Ok(false)
     }
 
-    fn process_external_messages(&mut self, timer: &GlobalTimer) -> Result<(), Error> {
+    fn process_external_messages(&mut self, timer: GlobalTimer) -> Result<(), Error> {
         match &self.session.kernel_state {
-            KernelState::MsgAwait { max_time, id, tags } => {
-                if let Some(max_time) = *max_time {
-                    if timer.get_time() > max_time {
+            KernelState::MsgAwait(timeout, tags) => {
+                if timer.get_time() > *timeout {
                     self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
                         status: kernel::SubkernelStatus::Timeout,
                         count: 0,
@@ -792,8 +534,7 @@ impl<'a> Manager<'_> {
                     self.session.kernel_state = KernelState::Running;
                     return Ok(());
                 }
-                }
-                if let Some(message) = self.session.messages.get_incoming(*id) {
+                if let Some(message) = self.session.messages.get_incoming() {
                     self.control.tx.send(kernel::Message::SubkernelMsgRecvReply {
                         status: kernel::SubkernelStatus::NoError,
                         count: message.count,
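The `MsgAwait` handling above also changes the timeout semantics: master always arms a deadline, while the idle_kernel_rework side stores an `Option<max_time>` and treats a non-positive timeout as "wait forever". A small sketch of that optional-deadline logic, with millisecond integers standing in for the `GlobalTimer` / `Milliseconds` types:

```rust
// Illustrative only: optional deadlines as used on the idle_kernel_rework side.
fn deadline_from_timeout(now_ms: u64, timeout_ms: i64) -> Option<u64> {
    // convention in the deleted code: timeout <= 0 means no deadline at all
    if timeout_ms > 0 {
        Some(now_ms + timeout_ms as u64)
    } else {
        None
    }
}

fn timed_out(now_ms: u64, deadline: Option<u64>) -> bool {
    match deadline {
        Some(max_time) => now_ms > max_time,
        None => false, // no deadline: keep waiting for the message
    }
}

fn main() {
    let start = 1_000;
    let finite = deadline_from_timeout(start, 500);
    let infinite = deadline_from_timeout(start, -1);

    assert!(!timed_out(start + 100, finite));
    assert!(timed_out(start + 501, finite));
    assert!(!timed_out(start + 1_000_000, infinite));
    println!("finite deadline: {:?}, infinite: {:?}", finite, infinite);
}
```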
@@ -814,47 +555,11 @@ impl<'a> Manager<'_> {
                    Err(Error::AwaitingMessage)
                }
            }
-            KernelState::SubkernelAwaitFinish { max_time, id } => {
-                if let Some(max_time) = *max_time {
-                    if timer.get_time() > max_time {
-                        self.control.tx.send(kernel::Message::SubkernelAwaitFinishReply {
-                            status: kernel::SubkernelStatus::Timeout,
-                        });
-                        self.session.kernel_state = KernelState::Running;
-                        return Ok(());
-                    }
-                }
-                let mut i = 0;
-                for status in &self.session.subkernels_finished {
-                    if *status == *id {
-                        self.control.tx.send(kernel::Message::SubkernelAwaitFinishReply {
-                            status: kernel::SubkernelStatus::NoError,
-                        });
-                        self.session.kernel_state = KernelState::Running;
-                        self.session.subkernels_finished.swap_remove(i);
-                        break;
-                    }
-                    i += 1;
-                }
-                Ok(())
-            }
-            KernelState::DmaAwait { max_time } | KernelState::DmaPendingAwait { max_time, .. } => {
-                if timer.get_time() > *max_time {
-                    self.control.tx.send(kernel::Message::DmaAwaitRemoteReply {
-                        timeout: true,
-                        error: 0,
-                        channel: 0,
-                        timestamp: 0,
-                    });
-                    self.session.kernel_state = KernelState::Running;
-                }
-                Ok(())
-            }
             _ => Ok(()),
         }
     }
 
-    fn pass_message_to_kernel(&mut self, message: &Message, tags: Vec<u8>, timer: &GlobalTimer) -> Result<(), Error> {
+    fn pass_message_to_kernel(&mut self, message: &Message, tags: Vec<u8>, timer: GlobalTimer) -> Result<(), Error> {
         let mut reader = Cursor::new(&message.data);
         let mut current_tags: &[u8] = &tags;
         let mut i = message.count;
@@ -877,7 +582,7 @@ impl<'a> Manager<'_> {
            let mut writer = Cursor::new(buf);
            match write_exception(&mut writer, exceptions, stack_pointers, backtrace, async_errors) {
                Ok(()) => {
-                    exception = Some(Sliceable::new(0, writer.into_inner()));
+                    exception = Some(Sliceable::new(writer.into_inner()));
                }
                Err(_) => {
                    unexpected = Some("Error writing exception data".to_string());
@@ -971,7 +676,7 @@ where
 
 fn recv_w_timeout(
     rx: &mut Receiver<'_, kernel::Message>,
-    timer: &GlobalTimer,
+    timer: GlobalTimer,
     timeout: u64,
 ) -> Result<kernel::Message, Error> {
     let max_time = timer.get_time() + Milliseconds(timeout);