forked from M-Labs/nac3
Compare commits
185 Commits
ndstrides- ... master
Author | SHA1 | Date |
---|---|---|
Sebastien Bourdeauducq | 5651e00688 | |
Sebastien Bourdeauducq | f6745b987f | |
mwojcik | e0dedc6580 | |
David Mak | 28f574282c | |
David Mak | 144f0922db | |
David Mak | c58ce9c3a9 | |
David Mak | f7e296da53 | |
David Mak | b58c99369e | |
David Mak | 1a535db558 | |
David Mak | 1ba2e287a6 | |
lyken | f95f979ad3 | |
lyken | 48e2148c0f | |
David Mak | 88e57f7120 | |
David Mak | d7633c42bc | |
David Mak | a4f53b6e6b | |
David Mak | 9d9ead211e | |
David Mak | 26a1b85206 | |
David Mak | 2822074b2d | |
David Mak | fe67ed076c | |
David Mak | 94e2414df0 | |
Sebastien Bourdeauducq | 2cee760404 | |
Sebastien Bourdeauducq | 230982dc84 | |
occheung | 2bd3f63991 | |
occheung | b53266e9e6 | |
occheung | 86eb22bbf3 | |
occheung | beaa38047d | |
occheung | 705dc4ff1c | |
occheung | 979209a526 | |
David Mak | c3927d0ef6 | |
David Mak | 202a902cd0 | |
David Mak | b6e2644391 | |
David Mak | 45cd01556b | |
David Mak | b6cd2a6993 | |
David Mak | a98f33e6d1 | |
David Mak | 5839badadd | |
David Mak | 56c845aac4 | |
David Mak | 65a12d9ab3 | |
David Mak | 9c6685fa8f | |
David Mak | 2bb788e4bb | |
David Mak | 42a2f243b5 | |
David Mak | 3ce2eddcdc | |
David Mak | 51bf126a32 | |
David Mak | 1a197c67f6 | |
David Mak | 581b2f7bb2 | |
David Mak | 746329ec5d | |
David Mak | e60e8e837f | |
David Mak | 9fdbe9695d | |
David Mak | 8065e73598 | |
David Mak | 192290889b | |
David Mak | 1407553a2f | |
David Mak | c7697606e1 | |
David Mak | 88d0ccbf69 | |
David Mak | a43b59539c | |
David Mak | fe06b2806f | |
David Mak | 7f6c9a25ac | |
Sébastien Bourdeauducq | 6c8382219f | |
Sebastien Bourdeauducq | 9274a7b96b | |
Sébastien Bourdeauducq | d1c0fe2900 | |
mwojcik | f2c047ba57 | |
David Mak | 5e2e77a500 | |
David Mak | f3cc4702b9 | |
David Mak | 3e92c491f5 | |
lyken | 7f629f1579 | |
lyken | 5640a793e2 | |
David Mak | abbaa506ad | |
David Mak | f3dc02d646 | |
David Mak | ea217eaea1 | |
Sébastien Bourdeauducq | 5a34551905 | |
Sebastien Bourdeauducq | 6098b1b853 | |
Sebastien Bourdeauducq | 668ccb1c95 | |
Sebastien Bourdeauducq | a3c624d69d | |
Sébastien Bourdeauducq | bd06155f34 | |
David Mak | 9c33c4209c | |
Sebastien Bourdeauducq | 122983f11c | |
David Mak | 71c3a65a31 | |
David Mak | 8c540d1033 | |
David Mak | 0cc60a3d33 | |
David Mak | a59c26aa99 | |
David Mak | 02d93b11d1 | |
lyken | 59cad5bfe1 | |
lyken | 4318f8de84 | |
David Mak | 15ac00708a | |
lyken | c8dfdcfdea | |
Sébastien Bourdeauducq | 600a5c8679 | |
lyken | 22c4d25802 | |
lyken | 308edb8237 | |
lyken | 9848795dcc | |
lyken | 58222feed4 | |
lyken | 518f21d174 | |
lyken | e8e49684bf | |
lyken | b2900b4883 | |
lyken | c6dade1394 | |
lyken | 7e3fcc0845 | |
lyken | d3b4c60d7f | |
abdul124 | 5b2b6db7ed | |
abdul124 | 15e62f467e | |
abdul124 | 2c88924ff7 | |
abdul124 | a744b139ba | |
David Mak | 2b2b2dbf8f | |
David Mak | d9f96dab33 | |
David Mak | c5ae0e7c36 | |
David Mak | b8dab6cf7c | |
David Mak | 4d80ba38b7 | |
David Mak | 33929bda24 | |
David Mak | a8e92212c0 | |
David Mak | 908271014a | |
David Mak | c407622f5c | |
David Mak | d7952d0629 | |
David Mak | ca1395aed6 | |
David Mak | 7799aa4987 | |
David Mak | 76016a26ad | |
lyken | 8532bf5206 | |
lyken | 2cf64d8608 | |
lyken | 706759adb2 | |
lyken | b90cf2300b | |
Sebastien Bourdeauducq | 0fc26df29e | |
David Mak | 0b074c2cf2 | |
Sébastien Bourdeauducq | a0f6961e0e | |
David Mak | b1c5c2e1d4 | |
David Mak | 69320a6cf1 | |
David Mak | 9e0601837a | |
lyken | 432c81a500 | |
David Mak | 6beff7a268 | |
David Mak | 6ca7aecd4a | |
David Mak | 8fd7216243 | |
David Mak | 4f5e417012 | |
David Mak | a0614bad83 | |
David Mak | 5539d144ed | |
David Mak | b3891b9a0d | |
David Mak | 6fb8939179 | |
lyken | 973dc5041a | |
David Mak | d0da688aa7 | |
David Mak | 12c4e1cf48 | |
David Mak | 9b988647ed | |
lyken | 35a7cecc12 | |
lyken | 7e3d87f841 | |
David Mak | ac0d83ef98 | |
David Mak | 3ff6db1a29 | |
David Mak | d7b806afb4 | |
David Mak | fac60c3974 | |
David Mak | f5fb504a15 | |
David Mak | faa3bb97ad | |
David Mak | 6a64c9d1de | |
David Mak | 3dc8498202 | |
David Mak | cbf79c5e9c | |
David Mak | b8aa17bf8c | |
David Mak | f5b998cd9c | |
David Mak | c36f85ecb9 | |
lyken | 3a8c385e01 | |
lyken | 221de4d06a | |
lyken | fb9fe8edf2 | |
lyken | 894083c6a3 | |
Sébastien Bourdeauducq | 669c6aca6b | |
abdul124 | 63d2b49b09 | |
abdul124 | bf709889c4 | |
abdul124 | 1c72698d02 | |
abdul124 | 54f883f0a5 | |
abdul124 | 4a6845dac6 | |
abdul124 | 00236f48bc | |
abdul124 | a3e6bb2292 | |
abdul124 | 17171065b1 | |
abdul124 | 540b35ec84 | |
abdul124 | 4bb00c52e3 | |
abdul124 | faf07527cb | |
abdul124 | d6a4d0a634 | |
abdul124 | 2242c5af43 | |
David Mak | 318a675ea6 | |
David Mak | 32e52ce198 | |
Sebastien Bourdeauducq | 665ca8e32d | |
Sebastien Bourdeauducq | 12c12b1d80 | |
lyken | 72972fa909 | |
lyken | 142cd48594 | |
lyken | 8adfe781c5 | |
lyken | 339b74161b | |
David Mak | 8c5ba37d09 | |
David Mak | 05a8948ff2 | |
David Mak | 6d171ec284 | |
David Mak | 0ba68f6657 | |
David Mak | 693b2a8863 | |
David Mak | 5faeede0e5 | |
David Mak | 266707df9d | |
David Mak | 3d3c258756 | |
David Mak | ed1182cb24 | |
David Mak | fd025c1137 | |
David Mak | f139db9af9 |

.clang-format:
@@ -1,3 +1,32 @@
BasedOnStyle: Google
BasedOnStyle: LLVM

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -1
AlignEscapedNewlines: Left
AlwaysBreakAfterReturnType: None
AlwaysBreakTemplateDeclarations: Yes
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Inline
BinPackParameters: false
BreakBeforeBinaryOperators: NonAssignment
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: AfterColon
BreakInheritanceList: AfterColon
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ContinuationIndentWidth: 4
DerivePointerAlignment: false
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 4
ReflowComments: false
MaxEmptyLinesToKeep: 1
PointerAlignment: Left
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: true
SpaceAfterTemplateKeyword: false
SpacesBeforeTrailingComments: 2
TabWidth: 4
UseTab: Never

.gitignore:
@@ -1,3 +1,4 @@
__pycache__
/target
/nac3standalone/demo/linalg/target
nix/windows/msys2

.pre-commit-config.yaml:
@@ -1,24 +1,24 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks

default_stages: [commit]
default_stages: [pre-commit]

repos:
  - repo: local
    hooks:
      - id: nac3-cargo-fmt
        name: nac3 cargo format
        entry: cargo
        entry: nix
        language: system
        types: [file, rust]
        pass_filenames: false
        description: Runs cargo fmt on the codebase.
        args: [fmt]
        args: [develop, -c, cargo, fmt, --all]
      - id: nac3-cargo-clippy
        name: nac3 cargo clippy
        entry: cargo
        entry: nix
        language: system
        types: [file, rust]
        pass_filenames: false
        description: Runs cargo clippy on the codebase.
        args: [clippy, --tests]
        args: [develop, -c, cargo, clippy, --tests]

File diff suppressed because it is too large
Cargo.toml (workspace):
@@ -4,6 +4,7 @@ members = [
    "nac3ast",
    "nac3parser",
    "nac3core",
    "nac3core/nac3core_derive",
    "nac3standalone",
    "nac3artiq",
    "runkernel",

flake.lock:
@@ -2,11 +2,11 @@
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1720418205,
        "narHash": "sha256-cPJoFPXU44GlhWg4pUk9oUPqurPlCFZ11ZQPk21GTPU=",
        "lastModified": 1731319897,
        "narHash": "sha256-PbABj4tnbWFMfBp6OcUK5iGy1QY+/Z96ZcLpooIbuEI=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "655a58a72a6601292512670343087c2d75d859c1",
        "rev": "dc460ec76cbff0e66e269457d7b728432263166c",
        "type": "github"
      },
      "original": {

flake.nix (44 lines changed):
@@ -6,6 +6,7 @@
  outputs = { self, nixpkgs }:
    let
      pkgs = import nixpkgs { system = "x86_64-linux"; };
      pkgs32 = import nixpkgs { system = "i686-linux"; };
    in rec {
      packages.x86_64-linux = rec {
        llvm-nac3 = pkgs.callPackage ./nix/llvm {};

@@ -13,9 +14,24 @@
          ''
            mkdir -p $out/bin
            ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
            ln -s ${pkgs.llvmPackages_14.clang}/bin/clang $out/bin/clang-irrt-test
            ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
          '';
        demo-linalg-stub = pkgs.rustPlatform.buildRustPackage {
          name = "demo-linalg-stub";
          src = ./nac3standalone/demo/linalg;
          cargoLock = {
            lockFile = ./nac3standalone/demo/linalg/Cargo.lock;
          };
          doCheck = false;
        };
        demo-linalg-stub32 = pkgs32.rustPlatform.buildRustPackage {
          name = "demo-linalg-stub32";
          src = ./nac3standalone/demo/linalg;
          cargoLock = {
            lockFile = ./nac3standalone/demo/linalg/Cargo.lock;
          };
          doCheck = false;
        };
        nac3artiq = pkgs.python3Packages.toPythonModule (
          pkgs.rustPlatform.buildRustPackage rec {
            name = "nac3artiq";

@@ -24,9 +40,8 @@
            cargoLock = {
              lockFile = ./Cargo.lock;
            };
            cargoTestFlags = [ "--features" "test" ];
            passthru.cargoLock = cargoLock;
            nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
            nativeBuildInputs = [ pkgs.python3 (pkgs.wrapClangMulti pkgs.llvmPackages_14.clang) llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
            buildInputs = [ pkgs.python3 llvm-nac3 ];
            checkInputs = [ (pkgs.python3.withPackages(ps: [ ps.numpy ps.scipy ])) ];
            checkPhase =

@@ -34,7 +49,9 @@
              echo "Checking nac3standalone demos..."
              pushd nac3standalone/demo
              patchShebangs .
              ./check_demos.sh
              export DEMO_LINALG_STUB=${demo-linalg-stub}/lib/liblinalg.a
              export DEMO_LINALG_STUB32=${demo-linalg-stub32}/lib/liblinalg.a
              ./check_demos.sh -i686
              popd
              echo "Running Cargo tests..."
              cargoCheckHook

@@ -90,18 +107,18 @@
          (pkgs.fetchFromGitHub {
            owner = "m-labs";
            repo = "sipyco";
            rev = "939f84f9b5eef7efbf7423c735d1834783b6140e";
            sha256 = "sha256-15Nun4EY35j+6SPZkjzZtyH/ncxLS60KuGJjFh5kSTc=";
            rev = "094a6cd63ffa980ef63698920170e50dc9ba77fd";
            sha256 = "sha256-PPnAyDedUQ7Og/Cby9x5OT9wMkNGTP8GS53V6N/dk4w=";
          })
          (pkgs.fetchFromGitHub {
            owner = "m-labs";
            repo = "artiq";
            rev = "923ca3377d42c815f979983134ec549dc39d3ca0";
            sha256 = "sha256-oJoEeNEeNFSUyh6jXG8Tzp6qHVikeHS0CzfE+mODPgw=";
            rev = "28c9de3e251daa89a8c9fd79d5ab64a3ec03bac6";
            sha256 = "sha256-vAvpbHc5B+1wtG8zqN7j9dQE1ON+i22v+uqA+tw6Gak=";
          })
        ];
        buildInputs = [
          (python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ps.jsonschema ps.lmdb nac3artiq-instrumented ]))
          (python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ps.jsonschema ps.lmdb ps.platformdirs nac3artiq-instrumented ]))
          pkgs.llvmPackages_14.llvm.out
        ];
        phases = [ "buildPhase" "installPhase" ];

@@ -151,7 +168,7 @@
        buildInputs = with pkgs; [
          # build dependencies
          packages.x86_64-linux.llvm-nac3
          llvmPackages_14.clang llvmPackages_14.llvm.out llvmPackages_14.lldb.out # for running nac3standalone demos
          (pkgs.wrapClangMulti llvmPackages_14.clang) llvmPackages_14.llvm.out # for running nac3standalone demos
          packages.x86_64-linux.llvm-tools-irrt
          cargo
          rustc

@@ -163,9 +180,12 @@
          clippy
          pre-commit
          rustfmt
          rust-analyzer
        ];
        RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
        shellHook =
          ''
            export DEMO_LINALG_STUB=${packages.x86_64-linux.demo-linalg-stub}/lib/liblinalg.a
            export DEMO_LINALG_STUB32=${packages.x86_64-linux.demo-linalg-stub32}/lib/liblinalg.a
          '';
      };
      devShells.x86_64-linux.msys2 = pkgs.mkShell {
        name = "nac3-dev-shell-msys2";

nac3artiq/Cargo.toml:
@@ -12,15 +12,10 @@ crate-type = ["cdylib"]
itertools = "0.13"
pyo3 = { version = "0.21", features = ["extension-module", "gil-refs"] }
parking_lot = "0.12"
tempfile = "3.10"
nac3parser = { path = "../nac3parser" }
tempfile = "3.13"
nac3core = { path = "../nac3core" }
nac3ld = { path = "../nac3ld" }

[dependencies.inkwell]
version = "0.4"
default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]

[features]
init-llvm-profile = []
no-escape-analysis = ["nac3core/no-escape-analysis"]

min_artiq.py:
@@ -112,10 +112,15 @@ def extern(function):
    register_function(function)
    return function

def rpc(function):
    """Decorates a function declaration defined by the core device runtime."""
    register_function(function)
    return function

def rpc(arg=None, flags={}):
    """Decorates a function or method to be executed on the host interpreter."""
    if arg is None:
        def inner_decorator(function):
            return rpc(function, flags)
        return inner_decorator
    register_function(arg)
    return arg

def kernel(function_or_method):
    """Decorates a function or method to be executed on the core device."""

@@ -201,7 +206,7 @@ class Core:
        embedding = EmbeddingMap()

        if allow_registration:
            compiler.analyze(registered_functions, registered_classes)
            compiler.analyze(registered_functions, registered_classes, set())
            allow_registration = False

        if hasattr(method, "__self__"):

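The reworked rpc decorator above accepts either a bare decoration or a call form carrying a flags set; elsewhere in this comparison the compiler marks an RPC as asynchronous when the string "async" appears among those flags. A minimal sketch of the two call forms follows; the decorated functions are illustrative and not part of the diff:

# Illustrative only: both forms register the function as a host-side RPC.
@rpc
def report(value: int):
    print(value)

@rpc(flags={"async"})   # flagged as an asynchronous RPC on the compiler side
def report_async(value: int):
    print(value)
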
New demo file:
@@ -0,0 +1,26 @@
from min_artiq import *
from numpy import ndarray, zeros as np_zeros


@nac3
class StrFail:
    core: KernelInvariant[Core]

    def __init__(self):
        self.core = Core()

    @kernel
    def hello(self, arg: str):
        pass

    @kernel
    def consume_ndarray(self, arg: ndarray[str, 1]):
        pass

    def run(self):
        self.hello("world")
        self.consume_ndarray(np_zeros([10], dtype=str))


if __name__ == "__main__":
    StrFail().run()
File diff suppressed because it is too large
@ -1,10 +1,4 @@
|
|||
#![deny(
|
||||
future_incompatible,
|
||||
let_underscore,
|
||||
nonstandard_style,
|
||||
rust_2024_compatibility,
|
||||
clippy::all
|
||||
)]
|
||||
#![deny(future_incompatible, let_underscore, nonstandard_style, clippy::all)]
|
||||
#![warn(clippy::pedantic)]
|
||||
#![allow(
|
||||
unsafe_op_in_unsafe_fn,
|
||||
|
@ -16,63 +10,65 @@
|
|||
clippy::wildcard_imports
|
||||
)]
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs;
|
||||
use std::io::Write;
|
||||
use std::process::Command;
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fs,
|
||||
io::Write,
|
||||
process::Command,
|
||||
rc::Rc,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use inkwell::{
|
||||
use itertools::Itertools;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use pyo3::{
|
||||
create_exception, exceptions,
|
||||
prelude::*,
|
||||
types::{PyBytes, PyDict, PyNone, PySet},
|
||||
};
|
||||
use tempfile::{self, TempDir};
|
||||
|
||||
use nac3core::{
|
||||
codegen::{
|
||||
concrete_type::ConcreteTypeStore, gen_func_impl, irrt::load_irrt, CodeGenLLVMOptions,
|
||||
CodeGenTargetMachineOptions, CodeGenTask, CodeGenerator, WithCall, WorkerRegistry,
|
||||
},
|
||||
inkwell::{
|
||||
context::Context,
|
||||
memory_buffer::MemoryBuffer,
|
||||
module::{Linkage, Module},
|
||||
module::{FlagBehavior, Linkage, Module},
|
||||
passes::PassBuilderOptions,
|
||||
support::is_multithreaded,
|
||||
targets::*,
|
||||
OptimizationLevel,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use nac3core::codegen::{gen_func_impl, CodeGenLLVMOptions, CodeGenTargetMachineOptions};
|
||||
use nac3core::toplevel::builtins::get_exn_constructor;
|
||||
use nac3core::typecheck::typedef::{TypeEnum, Unifier, VarMap};
|
||||
use nac3parser::{
|
||||
ast::{ExprKind, Stmt, StmtKind, StrRef},
|
||||
},
|
||||
nac3parser::{
|
||||
ast::{Constant, ExprKind, Located, Stmt, StmtKind, StrRef},
|
||||
parser::parse_program,
|
||||
};
|
||||
use pyo3::create_exception;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::{exceptions, types::PyBytes, types::PyDict, types::PySet};
|
||||
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
|
||||
use nac3core::{
|
||||
codegen::irrt::load_irrt,
|
||||
codegen::{concrete_type::ConcreteTypeStore, CodeGenTask, WithCall, WorkerRegistry},
|
||||
},
|
||||
symbol_resolver::SymbolResolver,
|
||||
toplevel::{
|
||||
composer::{ComposerConfig, TopLevelComposer},
|
||||
builtins::get_exn_constructor,
|
||||
composer::{BuiltinFuncCreator, BuiltinFuncSpec, ComposerConfig, TopLevelComposer},
|
||||
DefinitionId, GenCall, TopLevelDef,
|
||||
},
|
||||
typecheck::typedef::{FunSignature, FuncArg},
|
||||
typecheck::{type_inferencer::PrimitiveStore, typedef::Type},
|
||||
typecheck::{
|
||||
type_inferencer::PrimitiveStore,
|
||||
typedef::{into_var_map, FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap},
|
||||
},
|
||||
};
|
||||
|
||||
use nac3ld::Linker;
|
||||
|
||||
use tempfile::{self, TempDir};
|
||||
|
||||
use crate::codegen::attributes_writeback;
|
||||
use crate::{
|
||||
codegen::{rpc_codegen_callback, ArtiqCodeGenerator},
|
||||
symbol_resolver::{DeferredEvaluationStore, InnerResolver, PythonHelper, Resolver},
|
||||
use codegen::{
|
||||
attributes_writeback, gen_core_log, gen_rtio_log, rpc_codegen_callback, ArtiqCodeGenerator,
|
||||
};
|
||||
use symbol_resolver::{DeferredEvaluationStore, InnerResolver, PythonHelper, Resolver};
|
||||
use timeline::TimeFns;
|
||||
|
||||
mod codegen;
|
||||
mod symbol_resolver;
|
||||
mod timeline;
|
||||
|
||||
use timeline::TimeFns;
|
||||
|
||||
#[derive(PartialEq, Clone, Copy)]
|
||||
enum Isa {
|
||||
Host,
|
||||
|
@ -126,7 +122,7 @@ struct Nac3 {
|
|||
isa: Isa,
|
||||
time_fns: &'static (dyn TimeFns + Sync),
|
||||
primitive: PrimitiveStore,
|
||||
builtins: Vec<(StrRef, FunSignature, Arc<GenCall>)>,
|
||||
builtins: Vec<BuiltinFuncSpec>,
|
||||
pyid_to_def: Arc<RwLock<HashMap<u64, DefinitionId>>>,
|
||||
primitive_ids: PrimitivePythonId,
|
||||
working_directory: TempDir,
|
||||
|
@ -146,14 +142,32 @@ impl Nac3 {
|
|||
module: &PyObject,
|
||||
registered_class_ids: &HashSet<u64>,
|
||||
) -> PyResult<()> {
|
||||
let (module_name, source_file) = Python::with_gil(|py| -> PyResult<(String, String)> {
|
||||
let (module_name, source_file, source) =
|
||||
Python::with_gil(|py| -> PyResult<(String, String, String)> {
|
||||
let module: &PyAny = module.extract(py)?;
|
||||
Ok((module.getattr("__name__")?.extract()?, module.getattr("__file__")?.extract()?))
|
||||
let source_file = module.getattr("__file__");
|
||||
let (source_file, source) = if let Ok(source_file) = source_file {
|
||||
let source_file = source_file.extract()?;
|
||||
(
|
||||
source_file,
|
||||
fs::read_to_string(&source_file).map_err(|e| {
|
||||
exceptions::PyIOError::new_err(format!(
|
||||
"failed to read input file: {e}"
|
||||
))
|
||||
})?,
|
||||
)
|
||||
} else {
|
||||
// kernels submitted by content have no file
|
||||
// but still can provide source by StringLoader
|
||||
let get_src_fn = module
|
||||
.getattr("__loader__")?
|
||||
.extract::<PyObject>()?
|
||||
.getattr(py, "get_source")?;
|
||||
("<expcontent>", get_src_fn.call1(py, (PyNone::get(py),))?.extract(py)?)
|
||||
};
|
||||
Ok((module.getattr("__name__")?.extract()?, source_file.to_string(), source))
|
||||
})?;
|
||||
|
||||
let source = fs::read_to_string(&source_file).map_err(|e| {
|
||||
exceptions::PyIOError::new_err(format!("failed to read input file: {e}"))
|
||||
})?;
|
||||
let parser_result = parse_program(&source, source_file.into())
|
||||
.map_err(|e| exceptions::PySyntaxError::new_err(format!("parse error: {e}")))?;
|
||||
|
||||
|
@ -193,10 +207,8 @@ impl Nac3 {
|
|||
body.retain(|stmt| {
|
||||
if let StmtKind::FunctionDef { ref decorator_list, .. } = stmt.node {
|
||||
decorator_list.iter().any(|decorator| {
|
||||
if let ExprKind::Name { id, .. } = decorator.node {
|
||||
id.to_string() == "kernel"
|
||||
|| id.to_string() == "portable"
|
||||
|| id.to_string() == "rpc"
|
||||
if let Some(id) = decorator_id_string(decorator) {
|
||||
id == "kernel" || id == "portable" || id == "rpc"
|
||||
} else {
|
||||
false
|
||||
}
|
||||
|
@ -209,9 +221,8 @@ impl Nac3 {
|
|||
}
|
||||
StmtKind::FunctionDef { ref decorator_list, .. } => {
|
||||
decorator_list.iter().any(|decorator| {
|
||||
if let ExprKind::Name { id, .. } = decorator.node {
|
||||
let id = id.to_string();
|
||||
id == "extern" || id == "portable" || id == "kernel" || id == "rpc"
|
||||
if let Some(id) = decorator_id_string(decorator) {
|
||||
id == "extern" || id == "kernel" || id == "portable" || id == "rpc"
|
||||
} else {
|
||||
false
|
||||
}
|
||||
|
@ -264,7 +275,7 @@ impl Nac3 {
|
|||
arg_names.len(),
|
||||
));
|
||||
}
|
||||
for (i, FuncArg { ty, default_value, name }) in args.iter().enumerate() {
|
||||
for (i, FuncArg { ty, default_value, name, .. }) in args.iter().enumerate() {
|
||||
let in_name = match arg_names.get(i) {
|
||||
Some(n) => n,
|
||||
None if default_value.is_none() => {
|
||||
|
@ -300,6 +311,64 @@ impl Nac3 {
|
|||
None
|
||||
}
|
||||
|
||||
/// Returns a [`Vec`] of builtins that needs to be initialized during method compilation time.
|
||||
fn get_lateinit_builtins() -> Vec<Box<BuiltinFuncCreator>> {
|
||||
vec![
|
||||
Box::new(|primitives, unifier| {
|
||||
let arg_ty = unifier.get_fresh_var(Some("T".into()), None);
|
||||
|
||||
(
|
||||
"core_log".into(),
|
||||
FunSignature {
|
||||
args: vec![FuncArg {
|
||||
name: "arg".into(),
|
||||
ty: arg_ty.ty,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitives.none,
|
||||
vars: into_var_map([arg_ty]),
|
||||
},
|
||||
Arc::new(GenCall::new(Box::new(move |ctx, obj, fun, args, generator| {
|
||||
gen_core_log(ctx, &obj, fun, &args, generator)?;
|
||||
|
||||
Ok(None)
|
||||
}))),
|
||||
)
|
||||
}),
|
||||
Box::new(|primitives, unifier| {
|
||||
let arg_ty = unifier.get_fresh_var(Some("T".into()), None);
|
||||
|
||||
(
|
||||
"rtio_log".into(),
|
||||
FunSignature {
|
||||
args: vec![
|
||||
FuncArg {
|
||||
name: "channel".into(),
|
||||
ty: primitives.str,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
FuncArg {
|
||||
name: "arg".into(),
|
||||
ty: arg_ty.ty,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
],
|
||||
ret: primitives.none,
|
||||
vars: into_var_map([arg_ty]),
|
||||
},
|
||||
Arc::new(GenCall::new(Box::new(move |ctx, obj, fun, args, generator| {
|
||||
gen_rtio_log(ctx, &obj, fun, &args, generator)?;
|
||||
|
||||
Ok(None)
|
||||
}))),
|
||||
)
|
||||
}),
|
||||
]
|
||||
}
|
||||
|
||||
fn compile_method<T>(
|
||||
&self,
|
||||
obj: &PyAny,
|
||||
|
@ -312,6 +381,7 @@ impl Nac3 {
|
|||
let size_t = self.isa.get_size_type();
|
||||
let (mut composer, mut builtins_def, mut builtins_ty) = TopLevelComposer::new(
|
||||
self.builtins.clone(),
|
||||
Self::get_lateinit_builtins(),
|
||||
ComposerConfig { kernel_ann: Some("Kernel"), kernel_invariant_ann: "KernelInvariant" },
|
||||
size_t,
|
||||
);
|
||||
|
@ -388,7 +458,6 @@ impl Nac3 {
|
|||
pyid_to_type: pyid_to_type.clone(),
|
||||
primitive_ids: self.primitive_ids.clone(),
|
||||
global_value_ids: global_value_ids.clone(),
|
||||
class_names: Mutex::default(),
|
||||
name_to_pyid: name_to_pyid.clone(),
|
||||
module: module.clone(),
|
||||
id_to_pyval: RwLock::default(),
|
||||
|
@ -419,9 +488,25 @@ impl Nac3 {
|
|||
|
||||
match &stmt.node {
|
||||
StmtKind::FunctionDef { decorator_list, .. } => {
|
||||
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
|
||||
store_fun.call1(py, (def_id.0.into_py(py), module.getattr(py, name.to_string().as_str()).unwrap())).unwrap();
|
||||
rpc_ids.push((None, def_id));
|
||||
if decorator_list
|
||||
.iter()
|
||||
.any(|decorator| decorator_id_string(decorator) == Some("rpc".to_string()))
|
||||
{
|
||||
store_fun
|
||||
.call1(
|
||||
py,
|
||||
(
|
||||
def_id.0.into_py(py),
|
||||
module.getattr(py, name.to_string().as_str()).unwrap(),
|
||||
),
|
||||
)
|
||||
.unwrap();
|
||||
let is_async = decorator_list.iter().any(|decorator| {
|
||||
decorator_get_flags(decorator)
|
||||
.iter()
|
||||
.any(|constant| *constant == Constant::Str("async".into()))
|
||||
});
|
||||
rpc_ids.push((None, def_id, is_async));
|
||||
}
|
||||
}
|
||||
StmtKind::ClassDef { name, body, .. } => {
|
||||
|
@ -429,19 +514,26 @@ impl Nac3 {
|
|||
let class_obj = module.getattr(py, class_name.as_str()).unwrap();
|
||||
for stmt in body {
|
||||
if let StmtKind::FunctionDef { name, decorator_list, .. } = &stmt.node {
|
||||
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
|
||||
if decorator_list.iter().any(|decorator| {
|
||||
decorator_id_string(decorator) == Some("rpc".to_string())
|
||||
}) {
|
||||
let is_async = decorator_list.iter().any(|decorator| {
|
||||
decorator_get_flags(decorator)
|
||||
.iter()
|
||||
.any(|constant| *constant == Constant::Str("async".into()))
|
||||
});
|
||||
if name == &"__init__".into() {
|
||||
return Err(CompileError::new_err(format!(
|
||||
"compilation failed\n----------\nThe constructor of class {} should not be decorated with rpc decorator (at {})",
|
||||
class_name, stmt.location
|
||||
)));
|
||||
}
|
||||
rpc_ids.push((Some((class_obj.clone(), *name)), def_id));
|
||||
rpc_ids.push((Some((class_obj.clone(), *name)), def_id, is_async));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => ()
|
||||
_ => (),
|
||||
}
|
||||
|
||||
let id = *name_to_pyid.get(&name).unwrap();
|
||||
|
@ -480,7 +572,6 @@ impl Nac3 {
|
|||
pyid_to_type: pyid_to_type.clone(),
|
||||
primitive_ids: self.primitive_ids.clone(),
|
||||
global_value_ids: global_value_ids.clone(),
|
||||
class_names: Mutex::default(),
|
||||
id_to_pyval: RwLock::default(),
|
||||
id_to_primitive: RwLock::default(),
|
||||
field_to_val: RwLock::default(),
|
||||
|
@ -497,6 +588,10 @@ impl Nac3 {
|
|||
.register_top_level(synthesized.pop().unwrap(), Some(resolver.clone()), "", false)
|
||||
.unwrap();
|
||||
|
||||
// Process IRRT
|
||||
let context = Context::create();
|
||||
let irrt = load_irrt(&context, resolver.as_ref());
|
||||
|
||||
let fun_signature =
|
||||
FunSignature { args: vec![], ret: self.primitive.none, vars: VarMap::new() };
|
||||
let mut store = ConcreteTypeStore::new();
|
||||
|
@ -534,13 +629,12 @@ impl Nac3 {
|
|||
let top_level = Arc::new(composer.make_top_level_context());
|
||||
|
||||
{
|
||||
let rpc_codegen = rpc_codegen_callback();
|
||||
let defs = top_level.definitions.read();
|
||||
for (class_data, id) in &rpc_ids {
|
||||
for (class_data, id, is_async) in &rpc_ids {
|
||||
let mut def = defs[id.0].write();
|
||||
match &mut *def {
|
||||
TopLevelDef::Function { codegen_callback, .. } => {
|
||||
*codegen_callback = Some(rpc_codegen.clone());
|
||||
*codegen_callback = Some(rpc_codegen_callback(*is_async));
|
||||
}
|
||||
TopLevelDef::Class { methods, .. } => {
|
||||
let (class_def, method_name) = class_data.as_ref().unwrap();
|
||||
|
@ -551,7 +645,7 @@ impl Nac3 {
|
|||
if let TopLevelDef::Function { codegen_callback, .. } =
|
||||
&mut *defs[id.0].write()
|
||||
{
|
||||
*codegen_callback = Some(rpc_codegen.clone());
|
||||
*codegen_callback = Some(rpc_codegen_callback(*is_async));
|
||||
store_fun
|
||||
.call1(
|
||||
py,
|
||||
|
@ -566,6 +660,11 @@ impl Nac3 {
|
|||
}
|
||||
}
|
||||
}
|
||||
TopLevelDef::Variable { .. } => {
|
||||
return Err(CompileError::new_err(String::from(
|
||||
"Unsupported @rpc annotation on global variable",
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -586,33 +685,12 @@ impl Nac3 {
|
|||
let task = CodeGenTask {
|
||||
subst: Vec::default(),
|
||||
symbol_name: "__modinit__".to_string(),
|
||||
body: instance.body,
|
||||
signature,
|
||||
resolver: resolver.clone(),
|
||||
store,
|
||||
unifier_index: instance.unifier_id,
|
||||
calls: instance.calls,
|
||||
id: 0,
|
||||
};
|
||||
|
||||
let mut store = ConcreteTypeStore::new();
|
||||
let mut cache = HashMap::new();
|
||||
let signature = store.from_signature(
|
||||
&mut composer.unifier,
|
||||
&self.primitive,
|
||||
&fun_signature,
|
||||
&mut cache,
|
||||
);
|
||||
let signature = store.add_cty(signature);
|
||||
let attributes_writeback_task = CodeGenTask {
|
||||
subst: Vec::default(),
|
||||
symbol_name: "attributes_writeback".to_string(),
|
||||
body: Arc::new(Vec::default()),
|
||||
signature,
|
||||
resolver,
|
||||
store,
|
||||
unifier_index: instance.unifier_id,
|
||||
calls: Arc::new(HashMap::default()),
|
||||
calls: instance.calls,
|
||||
id: 0,
|
||||
};
|
||||
|
||||
|
@ -625,7 +703,9 @@ impl Nac3 {
|
|||
let buffer = buffer.as_slice().into();
|
||||
membuffer.lock().push(buffer);
|
||||
})));
|
||||
let size_t = if self.isa == Isa::Host { 64 } else { 32 };
|
||||
let size_t = context
|
||||
.ptr_sized_int_type(&self.get_llvm_target_machine().get_target_data(), None)
|
||||
.get_bit_width();
|
||||
let num_threads = if is_multithreaded() { 4 } else { 1 };
|
||||
let thread_names: Vec<String> = (0..num_threads).map(|_| "main".to_string()).collect();
|
||||
let threads: Vec<_> = thread_names
|
||||
|
@ -634,16 +714,27 @@ impl Nac3 {
|
|||
.collect();
|
||||
|
||||
let membuffer = membuffers.clone();
|
||||
let mut has_return = false;
|
||||
py.allow_threads(|| {
|
||||
let (registry, handles) =
|
||||
WorkerRegistry::create_workers(threads, top_level.clone(), &self.llvm_options, &f);
|
||||
registry.add_task(task);
|
||||
registry.wait_tasks_complete(handles);
|
||||
|
||||
let mut generator =
|
||||
ArtiqCodeGenerator::new("attributes_writeback".to_string(), size_t, self.time_fns);
|
||||
let context = inkwell::context::Context::create();
|
||||
let module = context.create_module("attributes_writeback");
|
||||
let mut generator = ArtiqCodeGenerator::new("main".to_string(), size_t, self.time_fns);
|
||||
let context = Context::create();
|
||||
let module = context.create_module("main");
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
module.add_basic_value_flag(
|
||||
"Debug Info Version",
|
||||
FlagBehavior::Warning,
|
||||
context.i32_type().const_int(3, false),
|
||||
);
|
||||
module.add_basic_value_flag(
|
||||
"Dwarf Version",
|
||||
FlagBehavior::Warning,
|
||||
context.i32_type().const_int(4, false),
|
||||
);
|
||||
let builder = context.create_builder();
|
||||
let (_, module, _) = gen_func_impl(
|
||||
&context,
|
||||
|
@ -651,9 +742,27 @@ impl Nac3 {
|
|||
®istry,
|
||||
builder,
|
||||
module,
|
||||
attributes_writeback_task,
|
||||
task,
|
||||
|generator, ctx| {
|
||||
attributes_writeback(ctx, generator, inner_resolver.as_ref(), &host_attributes)
|
||||
assert_eq!(instance.body.len(), 1, "toplevel module should have 1 statement");
|
||||
let StmtKind::Expr { value: ref expr, .. } = instance.body[0].node else {
|
||||
unreachable!("toplevel statement must be an expression")
|
||||
};
|
||||
let ExprKind::Call { .. } = expr.node else {
|
||||
unreachable!("toplevel expression must be a function call")
|
||||
};
|
||||
|
||||
let return_obj =
|
||||
generator.gen_expr(ctx, expr)?.map(|value| (expr.custom.unwrap(), value));
|
||||
has_return = return_obj.is_some();
|
||||
registry.wait_tasks_complete(handles);
|
||||
attributes_writeback(
|
||||
ctx,
|
||||
generator,
|
||||
inner_resolver.as_ref(),
|
||||
&host_attributes,
|
||||
return_obj,
|
||||
)
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
|
@ -662,37 +771,24 @@ impl Nac3 {
|
|||
membuffer.lock().push(buffer);
|
||||
});
|
||||
|
||||
let context = inkwell::context::Context::create();
|
||||
embedding_map.setattr("expects_return", has_return).unwrap();
|
||||
|
||||
// Link all modules into `main`.
|
||||
let buffers = membuffers.lock();
|
||||
let main = context
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(&buffers[0], "main"))
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(
|
||||
buffers.last().unwrap(),
|
||||
"main",
|
||||
))
|
||||
.unwrap();
|
||||
for buffer in buffers.iter().skip(1) {
|
||||
for buffer in buffers.iter().rev().skip(1) {
|
||||
let other = context
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(buffer, "main"))
|
||||
.unwrap();
|
||||
|
||||
main.link_in_module(other).map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
}
|
||||
let builder = context.create_builder();
|
||||
let modinit_return = main
|
||||
.get_function("__modinit__")
|
||||
.unwrap()
|
||||
.get_last_basic_block()
|
||||
.unwrap()
|
||||
.get_terminator()
|
||||
.unwrap();
|
||||
builder.position_before(&modinit_return);
|
||||
builder
|
||||
.build_call(
|
||||
main.get_function("attributes_writeback").unwrap(),
|
||||
&[],
|
||||
"attributes_writeback",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
main.link_in_module(load_irrt(&context))
|
||||
.map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
main.link_in_module(irrt).map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
|
||||
let mut function_iter = main.get_first_function();
|
||||
while let Some(func) = function_iter {
|
||||
|
@ -778,6 +874,41 @@ impl Nac3 {
|
|||
}
|
||||
}
|
||||
|
||||
/// Retrieves the Name.id from a decorator, supports decorators with arguments.
|
||||
fn decorator_id_string(decorator: &Located<ExprKind>) -> Option<String> {
|
||||
if let ExprKind::Name { id, .. } = decorator.node {
|
||||
// Bare decorator
|
||||
return Some(id.to_string());
|
||||
} else if let ExprKind::Call { func, .. } = &decorator.node {
|
||||
// Decorators that are calls (e.g. "@rpc()") have Call for the node,
|
||||
// need to extract the id from within.
|
||||
if let ExprKind::Name { id, .. } = func.node {
|
||||
return Some(id.to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Retrieves flags from a decorator, if any.
|
||||
fn decorator_get_flags(decorator: &Located<ExprKind>) -> Vec<Constant> {
|
||||
let mut flags = vec![];
|
||||
if let ExprKind::Call { keywords, .. } = &decorator.node {
|
||||
for keyword in keywords {
|
||||
if keyword.node.arg != Some("flags".into()) {
|
||||
continue;
|
||||
}
|
||||
if let ExprKind::Set { elts } = &keyword.node.value.node {
|
||||
for elt in elts {
|
||||
if let ExprKind::Constant { value, .. } = &elt.node {
|
||||
flags.push(value.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
flags
|
||||
}
|
||||
|
||||
fn link_with_lld(elf_filename: String, obj_filename: String) -> PyResult<()> {
|
||||
let linker_args = vec![
|
||||
"-shared".to_string(),
|
||||
|
@ -847,7 +978,7 @@ impl Nac3 {
|
|||
Isa::RiscV32IMA => &timeline::NOW_PINNING_TIME_FNS,
|
||||
Isa::CortexA9 | Isa::Host => &timeline::EXTERN_TIME_FNS,
|
||||
};
|
||||
let primitive: PrimitiveStore = TopLevelComposer::make_primitives(isa.get_size_type()).0;
|
||||
let (primitive, _) = TopLevelComposer::make_primitives(isa.get_size_type());
|
||||
let builtins = vec![
|
||||
(
|
||||
"now_mu".into(),
|
||||
|
@ -863,6 +994,7 @@ impl Nac3 {
|
|||
name: "t".into(),
|
||||
ty: primitive.int64,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitive.none,
|
||||
vars: VarMap::new(),
|
||||
|
@ -882,6 +1014,7 @@ impl Nac3 {
|
|||
name: "dt".into(),
|
||||
ty: primitive.int64,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitive.none,
|
||||
vars: VarMap::new(),
|
||||
|
@ -957,7 +1090,12 @@ impl Nac3 {
|
|||
})
|
||||
}
|
||||
|
||||
fn analyze(&mut self, functions: &PySet, classes: &PySet) -> PyResult<()> {
|
||||
fn analyze(
|
||||
&mut self,
|
||||
functions: &PySet,
|
||||
classes: &PySet,
|
||||
content_modules: &PySet,
|
||||
) -> PyResult<()> {
|
||||
let (modules, class_ids) =
|
||||
Python::with_gil(|py| -> PyResult<(HashMap<u64, PyObject>, HashSet<u64>)> {
|
||||
let mut modules: HashMap<u64, PyObject> = HashMap::new();
|
||||
|
@ -967,14 +1105,22 @@ impl Nac3 {
|
|||
let getmodule_fn = PyModule::import(py, "inspect")?.getattr("getmodule")?;
|
||||
|
||||
for function in functions {
|
||||
let module = getmodule_fn.call1((function,))?.extract()?;
|
||||
let module: PyObject = getmodule_fn.call1((function,))?.extract()?;
|
||||
if !module.is_none(py) {
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
}
|
||||
}
|
||||
for class in classes {
|
||||
let module = getmodule_fn.call1((class,))?.extract()?;
|
||||
let module: PyObject = getmodule_fn.call1((class,))?.extract()?;
|
||||
if !module.is_none(py) {
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
}
|
||||
class_ids.insert(id_fn.call1((class,))?.extract()?);
|
||||
}
|
||||
for module in content_modules {
|
||||
let module: PyObject = module.extract()?;
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module.into());
|
||||
}
|
||||
Ok((modules, class_ids))
|
||||
})?;
|
||||
|
||||
|
|
|
@ -1,14 +1,27 @@
|
|||
use inkwell::{
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering::Relaxed},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use itertools::Itertools;
|
||||
use parking_lot::RwLock;
|
||||
use pyo3::{
|
||||
types::{PyDict, PyTuple},
|
||||
PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
|
||||
use nac3core::{
|
||||
codegen::{types::NDArrayType, CodeGenContext, CodeGenerator},
|
||||
inkwell::{
|
||||
module::Linkage,
|
||||
types::{BasicType, BasicTypeEnum},
|
||||
values::BasicValueEnum,
|
||||
AddressSpace,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use nac3core::{
|
||||
codegen::{
|
||||
classes::{NDArrayType, ProxyType},
|
||||
CodeGenContext, CodeGenerator,
|
||||
},
|
||||
nac3parser::ast::{self, StrRef},
|
||||
symbol_resolver::{StaticValue, SymbolResolver, SymbolValue, ValueEnum},
|
||||
toplevel::{
|
||||
helper::PrimDef,
|
||||
|
@ -20,21 +33,8 @@ use nac3core::{
|
|||
typedef::{into_var_map, iter_type_vars, Type, TypeEnum, TypeVar, Unifier, VarMap},
|
||||
},
|
||||
};
|
||||
use nac3parser::ast::{self, StrRef};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use pyo3::{
|
||||
types::{PyDict, PyTuple},
|
||||
PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering::Relaxed},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::PrimitivePythonId;
|
||||
use super::PrimitivePythonId;
|
||||
|
||||
pub enum PrimitiveValue {
|
||||
I32(i32),
|
||||
|
@ -79,7 +79,6 @@ pub struct InnerResolver {
|
|||
pub id_to_primitive: RwLock<HashMap<u64, PrimitiveValue>>,
|
||||
pub field_to_val: RwLock<HashMap<ResolverField, Option<PyFieldHandle>>>,
|
||||
pub global_value_ids: Arc<RwLock<HashMap<u64, PyObject>>>,
|
||||
pub class_names: Mutex<HashMap<StrRef, Type>>,
|
||||
pub pyid_to_def: Arc<RwLock<HashMap<u64, DefinitionId>>>,
|
||||
pub pyid_to_type: Arc<RwLock<HashMap<u64, Type>>>,
|
||||
pub primitive_ids: PrimitivePythonId,
|
||||
|
@ -133,6 +132,8 @@ impl StaticValue for PythonValue {
|
|||
format!("{}_const", self.id).as_str(),
|
||||
);
|
||||
global.set_constant(true);
|
||||
// Set linkage of global to private to avoid name collisions
|
||||
global.set_linkage(Linkage::Private);
|
||||
global.set_initializer(&ctx.ctx.const_struct(
|
||||
&[ctx.ctx.i32_type().const_int(u64::from(id), false).into()],
|
||||
false,
|
||||
|
@ -163,7 +164,7 @@ impl StaticValue for PythonValue {
|
|||
PrimitiveValue::Bool(val) => {
|
||||
ctx.ctx.i8_type().const_int(u64::from(*val), false).into()
|
||||
}
|
||||
PrimitiveValue::Str(val) => ctx.ctx.const_string(val.as_bytes(), true).into(),
|
||||
PrimitiveValue::Str(val) => ctx.gen_string(generator, val).into(),
|
||||
});
|
||||
}
|
||||
if let Some(global) = ctx.module.get_global(&self.id.to_string()) {
|
||||
|
@ -351,7 +352,7 @@ impl InnerResolver {
|
|||
Ok(Ok((ndarray, false)))
|
||||
} else if ty_id == self.primitive_ids.tuple {
|
||||
// do not handle type var param and concrete check here
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: vec![] }), false)))
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: vec![], is_vararg_ctx: false }), false)))
|
||||
} else if ty_id == self.primitive_ids.option {
|
||||
Ok(Ok((primitives.option, false)))
|
||||
} else if ty_id == self.primitive_ids.none {
|
||||
|
@ -555,7 +556,10 @@ impl InnerResolver {
|
|||
Err(err) => return Ok(Err(err)),
|
||||
_ => return Ok(Err("tuple type needs at least 1 type parameters".to_string()))
|
||||
};
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: args }), true)))
|
||||
Ok(Ok((
|
||||
unifier.add_ty(TypeEnum::TTuple { ty: args, is_vararg_ctx: false }),
|
||||
true,
|
||||
)))
|
||||
}
|
||||
TypeEnum::TObj { params, obj_id, .. } => {
|
||||
let subst = {
|
||||
|
@ -797,7 +801,9 @@ impl InnerResolver {
|
|||
.map(|elem| self.get_obj_type(py, elem, unifier, defs, primitives))
|
||||
.collect();
|
||||
let types = types?;
|
||||
Ok(types.map(|types| unifier.add_ty(TypeEnum::TTuple { ty: types })))
|
||||
Ok(types.map(|types| {
|
||||
unifier.add_ty(TypeEnum::TTuple { ty: types, is_vararg_ctx: false })
|
||||
}))
|
||||
}
|
||||
// special handling for option type since its class member layout in python side
|
||||
// is special and cannot be mapped directly to a nac3 type as below
|
||||
|
@ -972,7 +978,7 @@ impl InnerResolver {
|
|||
} else if ty_id == self.primitive_ids.string || ty_id == self.primitive_ids.np_str_ {
|
||||
let val: String = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::Str(val.clone()));
|
||||
Ok(Some(ctx.ctx.const_string(val.as_bytes(), true).into()))
|
||||
Ok(Some(ctx.gen_string(generator, val).into()))
|
||||
} else if ty_id == self.primitive_ids.float || ty_id == self.primitive_ids.float64 {
|
||||
let val: f64 = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::F64(val));
|
||||
|
@ -1087,7 +1093,7 @@ impl InnerResolver {
|
|||
if self.global_value_ids.read().contains_key(&id) {
|
||||
let global = ctx.module.get_global(&id_str).unwrap_or_else(|| {
|
||||
ctx.module.add_global(
|
||||
ndarray_llvm_ty.as_underlying_type(),
|
||||
ndarray_llvm_ty.element_type().into_struct_type(),
|
||||
Some(AddressSpace::default()),
|
||||
&id_str,
|
||||
)
|
||||
|
@ -1181,7 +1187,7 @@ impl InnerResolver {
|
|||
data_global.set_initializer(&data);
|
||||
|
||||
// create a global for the ndarray object and initialize it
|
||||
let value = ndarray_llvm_ty.as_underlying_type().const_named_struct(&[
|
||||
let value = ndarray_llvm_ty.element_type().into_struct_type().const_named_struct(&[
|
||||
llvm_usize.const_int(ndarray_ndims, false).into(),
|
||||
shape_global
|
||||
.as_pointer_value()
|
||||
|
@ -1194,7 +1200,7 @@ impl InnerResolver {
|
|||
]);
|
||||
|
||||
let ndarray = ctx.module.add_global(
|
||||
ndarray_llvm_ty.as_underlying_type(),
|
||||
ndarray_llvm_ty.element_type().into_struct_type(),
|
||||
Some(AddressSpace::default()),
|
||||
&id_str,
|
||||
);
|
||||
|
@ -1203,7 +1209,9 @@ impl InnerResolver {
|
|||
Ok(Some(ndarray.as_pointer_value().into()))
|
||||
} else if ty_id == self.primitive_ids.tuple {
|
||||
let expected_ty_enum = ctx.unifier.get_ty_immutable(expected_ty);
|
||||
let TypeEnum::TTuple { ty } = expected_ty_enum.as_ref() else { unreachable!() };
|
||||
let TypeEnum::TTuple { ty, is_vararg_ctx: false } = expected_ty_enum.as_ref() else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
let tup_tys = ty.iter();
|
||||
let elements: &PyTuple = obj.downcast()?;
|
||||
|
@ -1459,6 +1467,7 @@ impl SymbolResolver for Resolver {
|
|||
&self,
|
||||
id: StrRef,
|
||||
_: &mut CodeGenContext<'ctx, '_>,
|
||||
_: &mut dyn CodeGenerator,
|
||||
) -> Option<ValueEnum<'ctx>> {
|
||||
let sym_value = {
|
||||
let id_to_val = self.0.id_to_pyval.read();
|
||||
|
|
|
@ -1,9 +1,12 @@
|
|||
use inkwell::{
|
||||
use itertools::Either;
|
||||
|
||||
use nac3core::{
|
||||
codegen::CodeGenContext,
|
||||
inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue},
|
||||
AddressSpace, AtomicOrdering,
|
||||
},
|
||||
};
|
||||
use itertools::Either;
|
||||
use nac3core::codegen::CodeGenContext;
|
||||
|
||||
/// Functions for manipulating the timeline.
|
||||
pub trait TimeFns {
|
||||
|
@ -31,7 +34,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
|||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
@ -80,7 +83,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
|||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
@ -109,7 +112,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
|||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
@ -207,7 +210,7 @@ impl TimeFns for NowPinningTimeFns {
|
|||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
@ -258,7 +261,7 @@ impl TimeFns for NowPinningTimeFns {
|
|||
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "time.lo").unwrap();
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
|
|
nac3ast/Cargo.toml:
@@ -10,7 +10,6 @@ constant-optimization = ["fold"]
fold = []

[dependencies]
lazy_static = "1.5"
parking_lot = "0.12"
string-interner = "0.17"
fxhash = "0.2"

@@ -5,14 +5,12 @@ pub use crate::location::Location;

use fxhash::FxBuildHasher;
use parking_lot::{Mutex, MutexGuard};
use std::{cell::RefCell, collections::HashMap, fmt};
use std::{cell::RefCell, collections::HashMap, fmt, sync::LazyLock};
use string_interner::{symbol::SymbolU32, DefaultBackend, StringInterner};

pub type Interner = StringInterner<DefaultBackend, FxBuildHasher>;
lazy_static! {
    static ref INTERNER: Mutex<Interner> =
        Mutex::new(StringInterner::with_hasher(FxBuildHasher::default()));
}
static INTERNER: LazyLock<Mutex<Interner>> =
    LazyLock::new(|| Mutex::new(StringInterner::with_hasher(FxBuildHasher::default())));

thread_local! {
    static LOCAL_INTERNER: RefCell<HashMap<String, StrRef>> = RefCell::default();

@@ -1,10 +1,4 @@
#![deny(
    future_incompatible,
    let_underscore,
    nonstandard_style,
    rust_2024_compatibility,
    clippy::all
)]
#![deny(future_incompatible, let_underscore, nonstandard_style, clippy::all)]
#![warn(clippy::pedantic)]
#![allow(
    clippy::missing_errors_doc,

@@ -14,9 +8,6 @@
    clippy::wildcard_imports
)]

#[macro_use]
extern crate lazy_static;

mod ast_gen;
mod constant;
#[cfg(feature = "fold")]

nac3core/Cargo.toml:
@@ -1,26 +1,29 @@
[features]
test = []

[package]
name = "nac3core"
version = "0.1.0"
authors = ["M-Labs"]
edition = "2021"

[features]
default = ["derive"]
derive = ["dep:nac3core_derive"]
no-escape-analysis = []

[dependencies]
itertools = "0.13"
crossbeam = "0.8"
indexmap = "2.2"
indexmap = "2.6"
parking_lot = "0.12"
rayon = "1.8"
rayon = "1.10"
nac3core_derive = { path = "nac3core_derive", optional = true }
nac3parser = { path = "../nac3parser" }
strum = "0.26.2"
strum_macros = "0.26.4"
strum = "0.26"
strum_macros = "0.26"

[dependencies.inkwell]
version = "0.4"
version = "0.5"
default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]
features = ["llvm14-0-prefer-dynamic", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]

[dev-dependencies]
test-case = "1.2.0"

@ -1,46 +1,32 @@
|
|||
use regex::Regex;
|
||||
use std::{
|
||||
env,
|
||||
fs::File,
|
||||
io::Write,
|
||||
path::{Path, PathBuf},
|
||||
path::Path,
|
||||
process::{Command, Stdio},
|
||||
};
|
||||
|
||||
const CMD_IRRT_CLANG: &str = "clang-irrt";
|
||||
const CMD_IRRT_CLANG_TEST: &str = "clang-irrt-test";
|
||||
const CMD_IRRT_LLVM_AS: &str = "llvm-as-irrt";
|
||||
use regex::Regex;
|
||||
|
||||
fn get_out_dir() -> PathBuf {
|
||||
PathBuf::from(env::var("OUT_DIR").unwrap())
|
||||
}
|
||||
fn main() {
|
||||
let out_dir = env::var("OUT_DIR").unwrap();
|
||||
let out_dir = Path::new(&out_dir);
|
||||
let irrt_dir = Path::new("irrt");
|
||||
|
||||
fn get_irrt_dir() -> &'static Path {
|
||||
Path::new("irrt")
|
||||
}
|
||||
|
||||
/// Compile `irrt.cpp` for use in `src/codegen`
|
||||
fn compile_irrt_cpp() {
|
||||
let out_dir = get_out_dir();
|
||||
let irrt_dir = get_irrt_dir();
|
||||
let irrt_cpp_path = irrt_dir.join("irrt.cpp");
|
||||
|
||||
/*
|
||||
* HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
|
||||
* Compiling for WASM32 and filtering the output with regex is the closest we can get.
|
||||
*/
|
||||
let irrt_cpp_path = irrt_dir.join("irrt.cpp");
|
||||
let flags: &[&str] = &[
|
||||
let mut flags: Vec<&str> = vec![
|
||||
"--target=wasm32",
|
||||
"-x",
|
||||
"c++",
|
||||
"-std=c++20",
|
||||
"-fno-discard-value-names",
|
||||
"-fno-exceptions",
|
||||
"-fno-rtti",
|
||||
match env::var("PROFILE").as_deref() {
|
||||
Ok("debug") => "-O0",
|
||||
Ok("release") => "-O3",
|
||||
flavor => panic!("Unknown or missing build flavor {flavor:?}"),
|
||||
},
|
||||
"-emit-llvm",
|
||||
"-S",
|
||||
"-Wall",
|
||||
|
@ -52,16 +38,26 @@ fn compile_irrt_cpp() {
|
|||
irrt_cpp_path.to_str().unwrap(),
|
||||
];
|
||||
|
||||
match env::var("PROFILE").as_deref() {
|
||||
Ok("debug") => {
|
||||
flags.push("-O0");
|
||||
flags.push("-DIRRT_DEBUG_ASSERT");
|
||||
}
|
||||
Ok("release") => {
|
||||
flags.push("-O3");
|
||||
}
|
||||
flavor => panic!("Unknown or missing build flavor {flavor:?}"),
|
||||
}
|
||||
|
||||
// Tell Cargo to rerun if any file under `irrt_dir` (recursive) changes
|
||||
println!("cargo:rerun-if-changed={}", irrt_dir.to_str().unwrap());
|
||||
|
||||
// Compile IRRT and capture the LLVM IR output
|
||||
let output = Command::new(CMD_IRRT_CLANG)
|
||||
let output = Command::new("clang-irrt")
|
||||
.args(flags)
|
||||
.output()
|
||||
.map(|o| {
|
||||
.inspect(|o| {
|
||||
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
|
||||
o
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
|
@ -102,9 +98,7 @@ fn compile_irrt_cpp() {
|
|||
file.write_all(filtered_output.as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
// Assemble the emitted and filtered IR to .bc
|
||||
// That .bc will be integrated into nac3core's codegen
|
||||
let mut llvm_as = Command::new(CMD_IRRT_LLVM_AS)
|
||||
let mut llvm_as = Command::new("llvm-as-irrt")
|
||||
.stdin(Stdio::piped())
|
||||
.arg("-o")
|
||||
.arg(out_dir.join("irrt.bc"))
|
||||
|
@ -113,48 +107,3 @@ fn compile_irrt_cpp() {
|
|||
llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
|
||||
assert!(llvm_as.wait().unwrap().success());
|
||||
}
|
||||
|
||||
/// Compile `irrt_test.cpp` for testing
|
||||
fn compile_irrt_test_cpp() {
|
||||
let out_dir = get_out_dir();
|
||||
let irrt_dir = get_irrt_dir();
|
||||
|
||||
let exe_path = out_dir.join("irrt_test.out"); // Output path of the compiled test executable
|
||||
let irrt_test_cpp_path = irrt_dir.join("irrt_test.cpp");
|
||||
let flags: &[&str] = &[
|
||||
irrt_test_cpp_path.to_str().unwrap(),
|
||||
"-x",
|
||||
"c++",
|
||||
"-I",
|
||||
irrt_dir.to_str().unwrap(),
|
||||
"-g",
|
||||
"-fno-discard-value-names",
|
||||
"-O0",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-Werror=return-type",
|
||||
"-lm", // for `tgamma()`, `lgamma()`
|
||||
"-o",
|
||||
exe_path.to_str().unwrap(),
|
||||
];
|
||||
|
||||
Command::new(CMD_IRRT_CLANG_TEST)
|
||||
.args(flags)
|
||||
.output()
|
||||
.map(|o| {
|
||||
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
|
||||
o
|
||||
})
|
||||
.unwrap();
|
||||
println!("cargo:rerun-if-changed={}", irrt_dir.to_str().unwrap());
|
||||
}
|
||||
|
||||
fn main() {
|
||||
compile_irrt_cpp();
|
||||
|
||||
// https://github.com/rust-lang/cargo/issues/2549
|
||||
// `cargo test -F test` to also build `irrt_test.cpp
|
||||
if cfg!(feature = "test") {
|
||||
compile_irrt_test_cpp();
|
||||
}
|
||||
}
|
||||
|
|
|
irrt/irrt.cpp:
@@ -1,10 +1,5 @@
#define IRRT_DEFINE_TYPEDEF_INTS
#include <irrt_everything.hpp>

/*
 * All IRRT implementations.
 *
 * We don't have pre-compiled objects, so we are writing all implementations in
 * headers and concatenate them with `#include` into one massive source file that
 * contains all the IRRT stuff.
 */
#include "irrt/exception.hpp"
#include "irrt/list.hpp"
#include "irrt/math.hpp"
#include "irrt/ndarray.hpp"
#include "irrt/slice.hpp"

Removed IRRT header:
@@ -1,39 +0,0 @@
#pragma once

#include <irrt/int_defs.hpp>

/*
This file defines all ARTIQ-specific structures
*/

/**
 * @brief ARTIQ's `cslice` object
 *
 * See https://docs.rs/cslice/0.3.0/src/cslice/lib.rs.html#33-37
 */
template <typename SizeT>
struct CSlice {
    const char *base;
    SizeT len;
};

/**
 * @brief Int type of ARTIQ's `Exception` IDs.
 */
typedef uint32_t ExceptionId;

/**
 * @brief ARTIQ's `Exception` object
 *
 * See https://github.com/m-labs/artiq/blob/b0d2705c385f64b6e6711c1726cd9178f40b598e/artiq/firmware/libeh/eh_artiq.rs#L1C1-L17C1
 */
template <typename SizeT>
struct Exception {
    ExceptionId id;
    CSlice<SizeT> file;
    uint32_t line;
    uint32_t column;
    CSlice<SizeT> function;
    CSlice<SizeT> message;
    uint32_t param;
};

@ -1,347 +0,0 @@
#pragma once

#include <irrt/int_defs.hpp>
#include <irrt/slice.hpp>
#include <irrt/utils.hpp>

// NDArray indices are always `uint32_t`.
using NDIndexInt = uint32_t;

namespace {
// adapted from GNU Scientific Library:
// https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
T __nac3_int_exp_impl(T base, T exp) {
    T res = 1;
    /* repeated squaring method */
    do {
        if (exp & 1) {
            res *= base; /* for n odd */
        }
        exp >>= 1;
        base *= base;
    } while (exp);
    return res;
}

template <typename SizeT>
SizeT __nac3_ndarray_calc_size_impl(const SizeT* list_data, SizeT list_len, SizeT begin_idx, SizeT end_idx) {
    __builtin_assume(end_idx <= list_len);

    SizeT num_elems = 1;
    for (SizeT i = begin_idx; i < end_idx; ++i) {
        SizeT val = list_data[i];
        __builtin_assume(val > 0);
        num_elems *= val;
    }
    return num_elems;
}

template <typename SizeT>
void __nac3_ndarray_calc_nd_indices_impl(SizeT index, const SizeT* dims, SizeT num_dims, NDIndexInt* idxs) {
    SizeT stride = 1;
    for (SizeT dim = 0; dim < num_dims; dim++) {
        SizeT i = num_dims - dim - 1;
        __builtin_assume(dims[i] > 0);
        idxs[i] = (index / stride) % dims[i];
        stride *= dims[i];
    }
}

template <typename SizeT>
SizeT __nac3_ndarray_flatten_index_impl(const SizeT* dims, SizeT num_dims, const NDIndexInt* indices, SizeT num_indices) {
    SizeT idx = 0;
    SizeT stride = 1;
    for (SizeT i = 0; i < num_dims; ++i) {
        SizeT ri = num_dims - i - 1;
        if (ri < num_indices) {
            idx += stride * indices[ri];
        }

        __builtin_assume(dims[i] > 0);
        stride *= dims[ri];
    }
    return idx;
}

template <typename SizeT>
void __nac3_ndarray_calc_broadcast_impl(const SizeT* lhs_dims, SizeT lhs_ndims,
                                        const SizeT* rhs_dims, SizeT rhs_ndims,
                                        SizeT* out_dims) {
    SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;

    for (SizeT i = 0; i < max_ndims; ++i) {
        const SizeT* lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
        const SizeT* rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;

        SizeT* out_dim = &out_dims[max_ndims - i - 1];

        if (lhs_dim_sz == nullptr) {
            *out_dim = *rhs_dim_sz;
        } else if (rhs_dim_sz == nullptr) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == 1) {
            *out_dim = *rhs_dim_sz;
        } else if (*rhs_dim_sz == 1) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == *rhs_dim_sz) {
            *out_dim = *lhs_dim_sz;
        } else {
            __builtin_unreachable();
        }
    }
}

template <typename SizeT>
void __nac3_ndarray_calc_broadcast_idx_impl(const SizeT* src_dims, SizeT src_ndims,
                                            const NDIndexInt* in_idx, NDIndexInt* out_idx) {
    for (SizeT i = 0; i < src_ndims; ++i) {
        SizeT src_i = src_ndims - i - 1;
        out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
    }
}
}  // namespace

extern "C" {
#define DEF_nac3_int_exp_(T) \
    T __nac3_int_exp_##T(T base, T exp) { \
        return __nac3_int_exp_impl(base, exp); \
    }

DEF_nac3_int_exp_(int32_t);
DEF_nac3_int_exp_(int64_t);
DEF_nac3_int_exp_(uint32_t);
DEF_nac3_int_exp_(uint64_t);

SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
    if (i < 0) {
        i = len + i;
    }
    if (i < 0) {
        return 0;
    } else if (i > len) {
        return len;
    }
    return i;
}

SliceIndex __nac3_range_slice_len(const SliceIndex start, const SliceIndex end, const SliceIndex step) {
    SliceIndex diff = end - start;
    if (diff > 0 && step > 0) {
        return ((diff - 1) / step) + 1;
    } else if (diff < 0 && step < 0) {
        return ((diff + 1) / step) + 1;
    } else {
        return 0;
    }
}

// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - All indices must *not* be out-of-bounds or negative,
// - The end index is *inclusive*,
// - The lengths of the src and dest slices should already
//   be checked: if dest.step == 1 then len(src) <= len(dest), else
//   len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
    SliceIndex dest_start, SliceIndex dest_end, SliceIndex dest_step,
    uint8_t* dest_arr, SliceIndex dest_arr_len, SliceIndex src_start,
    SliceIndex src_end, SliceIndex src_step, uint8_t* src_arr,
    SliceIndex src_arr_len, const SliceIndex size) {
    /* if dest_arr_len == 0, do nothing since we do not support
     * extending list
     */
    if (dest_arr_len == 0) return dest_arr_len;
    /* if both steps are 1, memmove directly, handle the dropping of
     * the list, and shrink size */
    if (src_step == dest_step && dest_step == 1) {
        const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
        const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
        if (src_len > 0) {
            __builtin_memmove(dest_arr + dest_start * size,
                              src_arr + src_start * size, src_len * size);
        }
        if (dest_len > 0) {
            /* dropping */
            __builtin_memmove(dest_arr + (dest_start + src_len) * size,
                              dest_arr + (dest_end + 1) * size,
                              (dest_arr_len - dest_end - 1) * size);
        }
        /* shrink size */
        return dest_arr_len - (dest_len - src_len);
    }
    /* if the two ranges overlap, we need alloca */
    uint8_t need_alloca =
        (dest_arr == src_arr) &&
        !(max(dest_start, dest_end) < min(src_start, src_end) ||
          max(src_start, src_end) < min(dest_start, dest_end));
    if (need_alloca) {
        uint8_t* tmp = reinterpret_cast<uint8_t*>(__builtin_alloca(src_arr_len * size));
        __builtin_memcpy(tmp, src_arr, src_arr_len * size);
        src_arr = tmp;
    }
    SliceIndex src_ind = src_start;
    SliceIndex dest_ind = dest_start;
    for (; (src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
         src_ind += src_step, dest_ind += dest_step) {
        /* for constant optimization */
        if (size == 1) {
            __builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
        } else if (size == 4) {
            __builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
        } else if (size == 8) {
            __builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
        } else {
            /* memcpy for var size, cannot overlap after previous alloca */
            __builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
        }
    }
    /* only when dest_step == 1 can we shrink the dest list. */
    /* size should be ensured prior to calling this function */
    if (dest_step == 1 && dest_end >= dest_start) {
        __builtin_memmove(dest_arr + dest_ind * size,
                          dest_arr + (dest_end + 1) * size,
                          (dest_arr_len - dest_end - 1) * size);
        return dest_arr_len - (dest_end - dest_ind) - 1;
    }
    return dest_arr_len;
}

int32_t __nac3_isinf(double x) { return __builtin_isinf(x); }

int32_t __nac3_isnan(double x) { return __builtin_isnan(x); }

double tgamma(double arg);

double __nac3_gamma(double z) {
    // Handling for denormals
    //     | x                 | Python gamma(x) | C tgamma(x) |
    // --- | ----------------- | --------------- | ----------- |
    // (1) | nan               | nan             | nan         |
    // (2) | -inf              | -inf            | inf         |
    // (3) | inf               | inf             | inf         |
    // (4) | 0.0               | inf             | inf         |
    // (5) | {-1.0, -2.0, ...} | inf             | nan         |

    // (1)-(3)
    if (__builtin_isinf(z) || __builtin_isnan(z)) {
        return z;
    }

    double v = tgamma(z);

    // (4)-(5)
    return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}

double lgamma(double arg);

double __nac3_gammaln(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: gammaln(-inf) -> -inf
    // - libm : lgamma(-inf) -> inf

    if (__builtin_isinf(x)) {
        return x;
    }

    return lgamma(x);
}

double j0(double x);

double __nac3_j0(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: j0(inf) -> nan
    // - libm : j0(inf) -> 0.0

    if (__builtin_isinf(x)) {
        return __builtin_nan("");
    }

    return j0(x);
}

uint32_t __nac3_ndarray_calc_size(const uint32_t* list_data, uint32_t list_len, uint32_t begin_idx, uint32_t end_idx) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

uint64_t __nac3_ndarray_calc_size64(const uint64_t* list_data, uint64_t list_len, uint64_t begin_idx, uint64_t end_idx) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

void __nac3_ndarray_calc_nd_indices(uint32_t index, const uint32_t* dims, uint32_t num_dims, NDIndexInt* idxs) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

void __nac3_ndarray_calc_nd_indices64(uint64_t index, const uint64_t* dims, uint64_t num_dims, NDIndexInt* idxs) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

uint32_t __nac3_ndarray_flatten_index(const uint32_t* dims, uint32_t num_dims, const NDIndexInt* indices, uint32_t num_indices) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

uint64_t __nac3_ndarray_flatten_index64(const uint64_t* dims, uint64_t num_dims, const NDIndexInt* indices, uint64_t num_indices) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

void __nac3_ndarray_calc_broadcast(const uint32_t* lhs_dims, uint32_t lhs_ndims, const uint32_t* rhs_dims, uint32_t rhs_ndims, uint32_t* out_dims) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast64(const uint64_t* lhs_dims, uint64_t lhs_ndims, const uint64_t* rhs_dims, uint64_t rhs_ndims, uint64_t* out_dims) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast_idx(const uint32_t* src_dims, uint32_t src_ndims, const NDIndexInt* in_idx, NDIndexInt* out_idx) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}

void __nac3_ndarray_calc_broadcast_idx64(const uint64_t* src_dims, uint64_t src_ndims, const NDIndexInt* in_idx, NDIndexInt* out_idx) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
}  // extern "C"
@ -0,0 +1,9 @@
#pragma once

#include "irrt/int_types.hpp"

template<typename SizeT>
struct CSlice {
    void* base;
    SizeT len;
};
@ -0,0 +1,25 @@
#pragma once

// Set in nac3core/build.rs
#ifdef IRRT_DEBUG_ASSERT
#define IRRT_DEBUG_ASSERT_BOOL true
#else
#define IRRT_DEBUG_ASSERT_BOOL false
#endif

#define raise_debug_assert(SizeT, msg, param1, param2, param3) \
    raise_exception(SizeT, EXN_ASSERTION_ERROR, "IRRT debug assert failed: " msg, param1, param2, param3)

#define debug_assert_eq(SizeT, lhs, rhs) \
    if constexpr (IRRT_DEBUG_ASSERT_BOOL) { \
        if ((lhs) != (rhs)) { \
            raise_debug_assert(SizeT, "LHS = {0}. RHS = {1}", lhs, rhs, NO_PARAM); \
        } \
    }

#define debug_assert(SizeT, expr) \
    if constexpr (IRRT_DEBUG_ASSERT_BOOL) { \
        if (!(expr)) { \
            raise_debug_assert(SizeT, "Got false.", NO_PARAM, NO_PARAM, NO_PARAM); \
        } \
    }
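As a rough illustration of how these macros are meant to be used, here is a minimal hypothetical caller (not part of this diff; `raise_exception`, `EXN_ASSERTION_ERROR` and `NO_PARAM` come from irrt/exception.hpp):

// Hypothetical IRRT helper: copy between two lists that must have equal length.
// `debug_assert_eq` only emits the check when IRRT_DEBUG_ASSERT is defined by
// build.rs; otherwise the `if constexpr` branch compiles away entirely.
template<typename SizeT>
void list_copy_exact(SizeT dst_len, uint8_t* dst, SizeT src_len, const uint8_t* src, SizeT itemsize) {
    debug_assert_eq(SizeT, dst_len, src_len);  // raises EXN_ASSERTION_ERROR on mismatch
    __builtin_memcpy(dst, src, src_len * itemsize);
}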
@ -1,94 +0,0 @@
#pragma once

#include <irrt/artiq_defs.hpp>
#include <irrt/int_defs.hpp>
#include <irrt/utils.hpp>

namespace {
/**
 * @brief A (limited) set of known Error IDs
 */
struct ErrorIds {
    ExceptionId index_error;
    ExceptionId value_error;
    ExceptionId assertion_error;
    ExceptionId runtime_error;
    ExceptionId type_error;
};

/**
 * @brief The IRRT error context object
 *
 * This object contains all the details needed to propagate Python-like Exceptions in
 * IRRT - within IRRT itself or propagated out of extern calls from nac3core.
 */
struct ErrorContext {
    /**
     * @brief The set of all known error IDs.
     */
    const ErrorIds *error_ids;

    // Error thrown by IRRT
    ExceptionId error_id;
    const char *message_template;
    uint64_t param1;
    uint64_t param2;
    uint64_t param3;

    void initialize(const ErrorIds *error_ids) {
        this->error_ids = error_ids;
        clear_error();
    }

    void clear_error() {
        // Point message_template to an empty str. Don't set it to nullptr
        // as a sentinel.
        this->message_template = "";
    }

    void set_error(ExceptionId error_id, const char *message,
                   uint64_t param1 = 0, uint64_t param2 = 0, uint64_t param3 = 0) {
        this->error_id = error_id;
        this->message_template = message;
        this->param1 = param1;
        this->param2 = param2;
        this->param3 = param3;
    }

    bool has_error() { return !cstr_utils::is_empty(message_template); }

    template <typename SizeT>
    void get_error_str(CSlice<SizeT> *dst_str) {
        dst_str->base = message_template;
        dst_str->len = (SizeT)cstr_utils::length(message_template);
    }
};
}  // namespace

extern "C" {
void __nac3_error_context_initialize(ErrorContext *errctx, ErrorIds *error_ids) {
    errctx->initialize(error_ids);
}

bool __nac3_error_context_has_error(ErrorContext *errctx) {
    return errctx->has_error();
}

void __nac3_error_context_get_error_str(ErrorContext *errctx, CSlice<int32_t> *dst_str) {
    errctx->get_error_str<int32_t>(dst_str);
}

void __nac3_error_context_get_error_str64(ErrorContext *errctx, CSlice<int64_t> *dst_str) {
    errctx->get_error_str<int64_t>(dst_str);
}

// Used for testing
void __nac3_error_dummy_raise(ErrorContext *errctx) {
    errctx->set_error(errctx->error_ids->runtime_error,
                      "Error thrown from __nac3_error_dummy_raise");
}
}
@ -0,0 +1,85 @@
#pragma once

#include "irrt/cslice.hpp"
#include "irrt/int_types.hpp"

/**
 * @brief The int type of ARTIQ exception IDs.
 */
using ExceptionId = int32_t;

/*
 * Set of exceptions C++ IRRT can use.
 * Must be synchronized with `setup_irrt_exceptions` in `nac3core/src/codegen/irrt/mod.rs`.
 */
extern "C" {
ExceptionId EXN_INDEX_ERROR;
ExceptionId EXN_VALUE_ERROR;
ExceptionId EXN_ASSERTION_ERROR;
ExceptionId EXN_TYPE_ERROR;
}

/**
 * @brief Extern function to `__nac3_raise`
 *
 * The parameter `err` could be `Exception<int32_t>` or `Exception<int64_t>`. The caller
 * must make sure to pass `Exception`s with the correct `SizeT` depending on the `size_t` of the runtime.
 */
extern "C" void __nac3_raise(void* err);

namespace {
/**
 * @brief NAC3's Exception struct
 */
template<typename SizeT>
struct Exception {
    ExceptionId id;
    CSlice<SizeT> filename;
    int32_t line;
    int32_t column;
    CSlice<SizeT> function;
    CSlice<SizeT> msg;
    int64_t params[3];
};

constexpr int64_t NO_PARAM = 0;

template<typename SizeT>
void _raise_exception_helper(ExceptionId id, const char* filename, int32_t line,
                             const char* function, const char* msg,
                             int64_t param0, int64_t param1, int64_t param2) {
    Exception<SizeT> e = {
        .id = id,
        .filename = {.base = reinterpret_cast<void*>(const_cast<char*>(filename)),
                     .len = static_cast<SizeT>(__builtin_strlen(filename))},
        .line = line,
        .column = 0,
        .function = {.base = reinterpret_cast<void*>(const_cast<char*>(function)),
                     .len = static_cast<SizeT>(__builtin_strlen(function))},
        .msg = {.base = reinterpret_cast<void*>(const_cast<char*>(msg)),
                .len = static_cast<SizeT>(__builtin_strlen(msg))},
    };
    e.params[0] = param0;
    e.params[1] = param1;
    e.params[2] = param2;
    __nac3_raise(reinterpret_cast<void*>(&e));
    __builtin_unreachable();
}
}  // namespace

/**
 * @brief Raise an exception with location details (location in the IRRT source files).
 * @param SizeT The runtime `size_t` type.
 * @param id The ID of the exception to raise.
 * @param msg A global constant C-string of the error message.
 *
 * `param0` to `param2` are optional format arguments of `msg`. They should be set to
 * `NO_PARAM` to indicate they are unused.
 */
#define raise_exception(SizeT, id, msg, param0, param1, param2) \
    _raise_exception_helper<SizeT>(id, __FILE__, __LINE__, __FUNCTION__, msg, param0, param1, param2)
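For illustration, a minimal sketch of how an IRRT routine might use `raise_exception` for a bounds check (hypothetical function, not part of this diff; the real callers live in the other headers of this change set):

// Hypothetical bounds check: raise IndexError when `i` falls outside [0, len).
// The `{0}`/`{1}` placeholders in the message are filled from the param slots
// when nac3core formats the exception on the host side.
template<typename SizeT>
SizeT checked_index(SizeT i, SizeT len) {
    if (i < 0 || i >= len) {
        raise_exception(SizeT, EXN_INDEX_ERROR, "index {0} is out of bounds for length {1}", i, len, NO_PARAM);
    }
    return i;
}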
@ -1,12 +0,0 @@
#pragma once

// This is made toggleable since `irrt_test.cpp` itself would include
// headers that define these typedefs
#ifdef IRRT_DEFINE_TYPEDEF_INTS
using int8_t = _BitInt(8);
using uint8_t = unsigned _BitInt(8);
using int32_t = _BitInt(32);
using uint32_t = unsigned _BitInt(32);
using int64_t = _BitInt(64);
using uint64_t = unsigned _BitInt(64);
#endif
@ -0,0 +1,27 @@
#pragma once

#if __STDC_VERSION__ >= 202000
using int8_t = _BitInt(8);
using uint8_t = unsigned _BitInt(8);
using int32_t = _BitInt(32);
using uint32_t = unsigned _BitInt(32);
using int64_t = _BitInt(64);
using uint64_t = unsigned _BitInt(64);
#else

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-type"
using int8_t = _ExtInt(8);
using uint8_t = unsigned _ExtInt(8);
using int32_t = _ExtInt(32);
using uint32_t = unsigned _ExtInt(32);
using int64_t = _ExtInt(64);
using uint64_t = unsigned _ExtInt(64);
#pragma clang diagnostic pop

#endif

// NDArray indices are always `uint32_t`.
using NDIndex = uint32_t;
// The type of an index or a value describing the length of a range/slice is always `int32_t`.
using SliceIndex = int32_t;
@ -0,0 +1,81 @@
#pragma once

#include "irrt/int_types.hpp"
#include "irrt/math_util.hpp"

extern "C" {
// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - All indices must *not* be out-of-bounds or negative,
// - The end index is *inclusive*,
// - The lengths of the src and dest slices should already
//   be checked: if dest.step == 1 then len(src) <= len(dest), else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(SliceIndex dest_start,
                                             SliceIndex dest_end,
                                             SliceIndex dest_step,
                                             void* dest_arr,
                                             SliceIndex dest_arr_len,
                                             SliceIndex src_start,
                                             SliceIndex src_end,
                                             SliceIndex src_step,
                                             void* src_arr,
                                             SliceIndex src_arr_len,
                                             const SliceIndex size) {
    /* if dest_arr_len == 0, do nothing since we do not support extending list */
    if (dest_arr_len == 0)
        return dest_arr_len;
    /* if both steps are 1, memmove directly, handle the dropping of the list, and shrink size */
    if (src_step == dest_step && dest_step == 1) {
        const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
        const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
        if (src_len > 0) {
            __builtin_memmove(static_cast<uint8_t*>(dest_arr) + dest_start * size,
                              static_cast<uint8_t*>(src_arr) + src_start * size, src_len * size);
        }
        if (dest_len > 0) {
            /* dropping */
            __builtin_memmove(static_cast<uint8_t*>(dest_arr) + (dest_start + src_len) * size,
                              static_cast<uint8_t*>(dest_arr) + (dest_end + 1) * size,
                              (dest_arr_len - dest_end - 1) * size);
        }
        /* shrink size */
        return dest_arr_len - (dest_len - src_len);
    }
    /* if the two ranges overlap, we need alloca */
    uint8_t need_alloca = (dest_arr == src_arr)
                          && !(max(dest_start, dest_end) < min(src_start, src_end)
                               || max(src_start, src_end) < min(dest_start, dest_end));
    if (need_alloca) {
        void* tmp = __builtin_alloca(src_arr_len * size);
        __builtin_memcpy(tmp, src_arr, src_arr_len * size);
        src_arr = tmp;
    }
    SliceIndex src_ind = src_start;
    SliceIndex dest_ind = dest_start;
    for (; (src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end); src_ind += src_step, dest_ind += dest_step) {
        /* for constant optimization */
        if (size == 1) {
            __builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind, static_cast<uint8_t*>(src_arr) + src_ind, 1);
        } else if (size == 4) {
            __builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * 4,
                             static_cast<uint8_t*>(src_arr) + src_ind * 4, 4);
        } else if (size == 8) {
            __builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * 8,
                             static_cast<uint8_t*>(src_arr) + src_ind * 8, 8);
        } else {
            /* memcpy for var size, cannot overlap after previous alloca */
            __builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * size,
                             static_cast<uint8_t*>(src_arr) + src_ind * size, size);
        }
    }
    /* only when dest_step == 1 can we shrink the dest list. */
    /* size should be ensured prior to calling this function */
    if (dest_step == 1 && dest_end >= dest_start) {
        __builtin_memmove(static_cast<uint8_t*>(dest_arr) + dest_ind * size,
                          static_cast<uint8_t*>(dest_arr) + (dest_end + 1) * size,
                          (dest_arr_len - dest_end - 1) * size);
        return dest_arr_len - (dest_end - dest_ind) - 1;
    }
    return dest_arr_len;
}
}  // extern "C"
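A standalone sketch of what the dest_step == src_step == 1 fast path above computes, using plain libc calls instead of the builtins (hypothetical example values, not part of the diff): assigning a 1-element source into the inclusive dest slice [1, 2] of a 5-element list moves the tail up and shrinks the list by one element, mirroring Python's `l[1:3] = [x]`.

#include <cstring>
#include <cassert>

int main() {
    unsigned char dest[5] = {10, 20, 30, 40, 50};
    unsigned char src[1] = {99};
    int dest_start = 1, dest_end = 2, dest_len = 5, src_len = 1, size = 1;
    std::memmove(dest + dest_start * size, src, src_len * size);              // copy the source in
    std::memmove(dest + (dest_start + src_len) * size,
                 dest + (dest_end + 1) * size,
                 (dest_len - dest_end - 1) * size);                           // drop the leftover gap
    int new_len = dest_len - ((dest_end - dest_start + 1) - src_len);         // shrink
    assert(new_len == 4 && dest[0] == 10 && dest[1] == 99 && dest[2] == 40 && dest[3] == 50);
    return 0;
}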
@ -0,0 +1,93 @@
#pragma once

namespace {
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template<typename T>
T __nac3_int_exp_impl(T base, T exp) {
    T res = 1;
    /* repeated squaring method */
    do {
        if (exp & 1) {
            res *= base; /* for n odd */
        }
        exp >>= 1;
        base *= base;
    } while (exp);
    return res;
}
}  // namespace

#define DEF_nac3_int_exp_(T) \
    T __nac3_int_exp_##T(T base, T exp) { \
        return __nac3_int_exp_impl(base, exp); \
    }

extern "C" {

// Putting semicolons here to make clang-format not reformat this into
// a stair shape.
DEF_nac3_int_exp_(int32_t);
DEF_nac3_int_exp_(int64_t);
DEF_nac3_int_exp_(uint32_t);
DEF_nac3_int_exp_(uint64_t);

int32_t __nac3_isinf(double x) {
    return __builtin_isinf(x);
}

int32_t __nac3_isnan(double x) {
    return __builtin_isnan(x);
}

double tgamma(double arg);

double __nac3_gamma(double z) {
    // Handling for denormals
    //     | x                 | Python gamma(x) | C tgamma(x) |
    // --- | ----------------- | --------------- | ----------- |
    // (1) | nan               | nan             | nan         |
    // (2) | -inf              | -inf            | inf         |
    // (3) | inf               | inf             | inf         |
    // (4) | 0.0               | inf             | inf         |
    // (5) | {-1.0, -2.0, ...} | inf             | nan         |

    // (1)-(3)
    if (__builtin_isinf(z) || __builtin_isnan(z)) {
        return z;
    }

    double v = tgamma(z);

    // (4)-(5)
    return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}

double lgamma(double arg);

double __nac3_gammaln(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: gammaln(-inf) -> -inf
    // - libm : lgamma(-inf) -> inf

    if (__builtin_isinf(x)) {
        return x;
    }

    return lgamma(x);
}

double j0(double x);

double __nac3_j0(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: j0(inf) -> nan
    // - libm : j0(inf) -> 0.0

    if (__builtin_isinf(x)) {
        return __builtin_nan("");
    }

    return j0(x);
}
}  // extern "C"
@ -0,0 +1,13 @@
#pragma once

namespace {
template<typename T>
const T& max(const T& a, const T& b) {
    return a > b ? a : b;
}

template<typename T>
const T& min(const T& a, const T& b) {
    return a > b ? b : a;
}
}  // namespace
@ -0,0 +1,144 @@
#pragma once

#include "irrt/int_types.hpp"

namespace {
template<typename SizeT>
SizeT __nac3_ndarray_calc_size_impl(const SizeT* list_data, SizeT list_len, SizeT begin_idx, SizeT end_idx) {
    __builtin_assume(end_idx <= list_len);

    SizeT num_elems = 1;
    for (SizeT i = begin_idx; i < end_idx; ++i) {
        SizeT val = list_data[i];
        __builtin_assume(val > 0);
        num_elems *= val;
    }
    return num_elems;
}

template<typename SizeT>
void __nac3_ndarray_calc_nd_indices_impl(SizeT index, const SizeT* dims, SizeT num_dims, NDIndex* idxs) {
    SizeT stride = 1;
    for (SizeT dim = 0; dim < num_dims; dim++) {
        SizeT i = num_dims - dim - 1;
        __builtin_assume(dims[i] > 0);
        idxs[i] = (index / stride) % dims[i];
        stride *= dims[i];
    }
}

template<typename SizeT>
SizeT __nac3_ndarray_flatten_index_impl(const SizeT* dims, SizeT num_dims, const NDIndex* indices, SizeT num_indices) {
    SizeT idx = 0;
    SizeT stride = 1;
    for (SizeT i = 0; i < num_dims; ++i) {
        SizeT ri = num_dims - i - 1;
        if (ri < num_indices) {
            idx += stride * indices[ri];
        }

        __builtin_assume(dims[i] > 0);
        stride *= dims[ri];
    }
    return idx;
}

template<typename SizeT>
void __nac3_ndarray_calc_broadcast_impl(const SizeT* lhs_dims,
                                        SizeT lhs_ndims,
                                        const SizeT* rhs_dims,
                                        SizeT rhs_ndims,
                                        SizeT* out_dims) {
    SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;

    for (SizeT i = 0; i < max_ndims; ++i) {
        const SizeT* lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
        const SizeT* rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;
        SizeT* out_dim = &out_dims[max_ndims - i - 1];

        if (lhs_dim_sz == nullptr) {
            *out_dim = *rhs_dim_sz;
        } else if (rhs_dim_sz == nullptr) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == 1) {
            *out_dim = *rhs_dim_sz;
        } else if (*rhs_dim_sz == 1) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == *rhs_dim_sz) {
            *out_dim = *lhs_dim_sz;
        } else {
            __builtin_unreachable();
        }
    }
}

template<typename SizeT>
void __nac3_ndarray_calc_broadcast_idx_impl(const SizeT* src_dims,
                                            SizeT src_ndims,
                                            const NDIndex* in_idx,
                                            NDIndex* out_idx) {
    for (SizeT i = 0; i < src_ndims; ++i) {
        SizeT src_i = src_ndims - i - 1;
        out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
    }
}
}  // namespace

extern "C" {
uint32_t __nac3_ndarray_calc_size(const uint32_t* list_data, uint32_t list_len, uint32_t begin_idx, uint32_t end_idx) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

uint64_t
__nac3_ndarray_calc_size64(const uint64_t* list_data, uint64_t list_len, uint64_t begin_idx, uint64_t end_idx) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

void __nac3_ndarray_calc_nd_indices(uint32_t index, const uint32_t* dims, uint32_t num_dims, NDIndex* idxs) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

void __nac3_ndarray_calc_nd_indices64(uint64_t index, const uint64_t* dims, uint64_t num_dims, NDIndex* idxs) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

uint32_t
__nac3_ndarray_flatten_index(const uint32_t* dims, uint32_t num_dims, const NDIndex* indices, uint32_t num_indices) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

uint64_t
__nac3_ndarray_flatten_index64(const uint64_t* dims, uint64_t num_dims, const NDIndex* indices, uint64_t num_indices) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

void __nac3_ndarray_calc_broadcast(const uint32_t* lhs_dims,
                                   uint32_t lhs_ndims,
                                   const uint32_t* rhs_dims,
                                   uint32_t rhs_ndims,
                                   uint32_t* out_dims) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast64(const uint64_t* lhs_dims,
                                     uint64_t lhs_ndims,
                                     const uint64_t* rhs_dims,
                                     uint64_t rhs_ndims,
                                     uint64_t* out_dims) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast_idx(const uint32_t* src_dims,
                                       uint32_t src_ndims,
                                       const NDIndex* in_idx,
                                       NDIndex* out_idx) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}

void __nac3_ndarray_calc_broadcast_idx64(const uint64_t* src_dims,
                                         uint64_t src_ndims,
                                         const NDIndex* in_idx,
                                         NDIndex* out_idx) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
}  // extern "C"
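A standalone sketch of the index arithmetic used by `__nac3_ndarray_calc_nd_indices_impl` above, with ordinary <cstdint> types instead of the IRRT typedefs (illustrative only): in a C-order 2x3 array, flat index 4 maps to the multi-index (1, 1).

#include <cstdint>
#include <cassert>

int main() {
    uint32_t dims[2] = {2, 3};
    uint32_t idxs[2];
    uint32_t index = 4, stride = 1;
    // Same loop structure as the IRRT implementation: walk axes from last to first.
    for (uint32_t dim = 0; dim < 2; dim++) {
        uint32_t i = 2 - dim - 1;
        idxs[i] = (index / stride) % dims[i];
        stride *= dims[i];
    }
    assert(idxs[0] == 1 && idxs[1] == 1);
    return 0;
}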
@ -1,305 +0,0 @@
#pragma once

#include <irrt/error_context.hpp>
#include <irrt/int_defs.hpp>
#include <irrt/ndarray/def.hpp>

namespace {
namespace ndarray {
namespace basic {
namespace util {
/**
 * @brief Asserts that `shape` does not contain negative dimensions.
 *
 * @param ndims Number of dimensions in `shape`
 * @param shape The shape to check on
 */
template <typename SizeT>
void assert_shape_no_negative(ErrorContext* errctx, SizeT ndims, const SizeT* shape) {
    for (SizeT axis = 0; axis < ndims; axis++) {
        if (shape[axis] < 0) {
            errctx->set_error(errctx->error_ids->value_error,
                              "negative dimensions are not allowed; axis {0} has dimension {1}",
                              axis, shape[axis]);
            return;
        }
    }
}

/**
 * @brief Returns the number of elements of an ndarray given its shape.
 *
 * @param ndims Number of dimensions in `shape`
 * @param shape The shape of the ndarray
 */
template <typename SizeT>
SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
    SizeT size = 1;
    for (SizeT axis = 0; axis < ndims; axis++) size *= shape[axis];
    return size;
}

/**
 * @brief Compute the array indices of the `nth` (0-based) element of an ndarray given only its shape.
 *
 * @param ndims Number of elements in `shape` and `indices`
 * @param shape The shape of the ndarray
 * @param indices The returned indices indexing the ndarray with shape `shape`.
 * @param nth The index of the element of interest.
 */
template <typename SizeT>
void set_indices_by_nth(SizeT ndims, const SizeT* shape, SizeT* indices, SizeT nth) {
    for (int32_t i = 0; i < ndims; i++) {
        int32_t axis = ndims - i - 1;
        int32_t dim = shape[axis];

        indices[axis] = nth % dim;
        nth /= dim;
    }
}
}  // namespace util

/**
 * @brief Return the number of elements of an `ndarray`
 *
 * This function corresponds to `<an_ndarray>.size`
 */
template <typename SizeT>
SizeT size(const NDArray<SizeT>* ndarray) {
    return util::calc_size_from_shape(ndarray->ndims, ndarray->shape);
}

/**
 * @brief Return the number of bytes of the content of an `ndarray`.
 *
 * This function corresponds to `<an_ndarray>.nbytes`.
 */
template <typename SizeT>
SizeT nbytes(const NDArray<SizeT>* ndarray) {
    return size(ndarray) * ndarray->itemsize;
}

/**
 * @brief Update the strides of an ndarray given an ndarray `shape`
 * and assuming that the ndarray is fully C-contiguous.
 *
 * You might want to read https://ajcr.net/stride-guide-part-1/.
 */
template <typename SizeT>
void set_strides_by_shape(NDArray<SizeT>* ndarray) {
    SizeT stride_product = 1;
    for (SizeT i = 0; i < ndarray->ndims; i++) {
        int axis = ndarray->ndims - i - 1;
        ndarray->strides[axis] = stride_product * ndarray->itemsize;
        stride_product *= ndarray->shape[axis];
    }
}

/**
 * @brief Return the pointer to the element indexed by `indices`.
 */
template <typename SizeT>
uint8_t* get_pelement_by_indices(const NDArray<SizeT>* ndarray, const SizeT* indices) {
    uint8_t* element = ndarray->data;
    for (SizeT dim_i = 0; dim_i < ndarray->ndims; dim_i++)
        element += indices[dim_i] * ndarray->strides[dim_i];
    return element;
}

/**
 * @brief Return the pointer to the nth (0-based) element in a flattened view of `ndarray`.
 */
template <typename SizeT>
uint8_t* get_nth_pelement(const NDArray<SizeT>* ndarray, SizeT nth) {
    SizeT* indices = (SizeT*)__builtin_alloca(sizeof(SizeT) * ndarray->ndims);
    util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, nth);
    return get_pelement_by_indices(ndarray, indices);
}

/**
 * @brief Like `get_nth_pelement` but asserts that `nth` is in bounds.
 */
template <typename SizeT>
uint8_t* checked_get_nth_pelement(ErrorContext* errctx, const NDArray<SizeT>* ndarray, SizeT nth) {
    SizeT arr_size = size(ndarray);
    if (!(0 <= nth && nth < arr_size)) {
        errctx->set_error(errctx->error_ids->index_error,
                          "index {0} is out of bounds, valid range is {1} <= index < {2}",
                          nth, 0, arr_size);
        return nullptr;
    }
    return get_nth_pelement(ndarray, nth);
}

/**
 * @brief Set an element in `ndarray`.
 *
 * @param pelement Pointer to the element in `ndarray` to be set.
 * @param pvalue Pointer to the value `pelement` will be set to.
 */
template <typename SizeT>
void set_pelement_value(NDArray<SizeT>* ndarray, uint8_t* pelement, const uint8_t* pvalue) {
    __builtin_memcpy(pelement, pvalue, ndarray->itemsize);
}

/**
 * @brief Get the `len()` of an ndarray, and assert that `ndarray` is a sized object.
 *
 * This function corresponds to `<an_ndarray>.__len__`.
 *
 * @param dst_length The returned result
 */
template <typename SizeT>
void len(ErrorContext* errctx, const NDArray<SizeT>* ndarray, SliceIndex* dst_length) {
    // numpy prohibits `__len__` on unsized objects
    if (ndarray->ndims == 0) {
        errctx->set_error(errctx->error_ids->type_error, "len() of unsized object");
        return;
    }

    *dst_length = (SliceIndex)ndarray->shape[0];
}

/**
 * @brief Copy data from one ndarray to another of the exact same size and itemsize.
 *
 * Both ndarrays are viewed through their flattened views when copying the elements.
 */
template <typename SizeT>
void copy_data(const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
    __builtin_assume(src_ndarray->itemsize == dst_ndarray->itemsize);

    for (SizeT i = 0; i < size(src_ndarray); i++) {
        auto src_element = ndarray::basic::get_nth_pelement(src_ndarray, i);
        auto dst_element = ndarray::basic::get_nth_pelement(dst_ndarray, i);
        ndarray::basic::set_pelement_value(dst_ndarray, dst_element, src_element);
    }
}

/**
 * @brief Return a boolean indicating if `ndarray` is (C-)contiguous.
 *
 * You may want to see numpy's rules for C-contiguity: https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45
 */
template <typename SizeT>
bool is_c_contiguous(const NDArray<SizeT>* ndarray) {
    // Other references:
    // - tinynumpy's implementation: https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L102
    // - ndarray's flags["C_CONTIGUOUS"]: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flags.html#numpy.ndarray.flags
    // - ndarray's rules for C-contiguity: https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45

    // From https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45:
    //
    // The traditional rule is that for an array to be flagged as C contiguous,
    // the following must hold:
    //
    //     strides[-1] == itemsize
    //     strides[i] == shape[i+1] * strides[i + 1]
    //     [...]
    // According to these rules, a 0- or 1-dimensional array is either both
    // C- and F-contiguous, or neither; and an array with 2+ dimensions
    // can be C- or F- contiguous, or neither, but not both. Though there
    // are exceptions for arrays with zero or one item: in the first
    // case the check is relaxed up to and including the first dimension
    // with shape[i] == 0. In the second case `strides == itemsize` can
    // be true for all dimensions and both flags are set.

    if (ndarray->ndims == 0) {
        return true;
    }

    if (ndarray->strides[ndarray->ndims - 1] != ndarray->itemsize) {
        return false;
    }

    for (SizeT i = 1; i < ndarray->ndims; i++) {
        SizeT axis_i = ndarray->ndims - i - 1;
        if (ndarray->strides[axis_i] != ndarray->shape[axis_i + 1] * ndarray->strides[axis_i + 1]) {
            return false;
        }
    }

    return true;
}
}  // namespace basic
}  // namespace ndarray
}  // namespace

extern "C" {
using namespace ndarray::basic;

uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
    return size(ndarray);
}

uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
    return size(ndarray);
}

uint32_t __nac3_ndarray_nbytes(NDArray<int32_t>* ndarray) {
    return nbytes(ndarray);
}

uint64_t __nac3_ndarray_nbytes64(NDArray<int64_t>* ndarray) {
    return nbytes(ndarray);
}

void __nac3_ndarray_len(ErrorContext* errctx, NDArray<int32_t>* ndarray, SliceIndex* dst_len) {
    return len(errctx, ndarray, dst_len);
}

void __nac3_ndarray_len64(ErrorContext* errctx, NDArray<int64_t>* ndarray, SliceIndex* dst_len) {
    return len(errctx, ndarray, dst_len);
}

void __nac3_ndarray_util_assert_shape_no_negative(ErrorContext* errctx, int32_t ndims, int32_t* shape) {
    util::assert_shape_no_negative(errctx, ndims, shape);
}

void __nac3_ndarray_util_assert_shape_no_negative64(ErrorContext* errctx, int64_t ndims, int64_t* shape) {
    util::assert_shape_no_negative(errctx, ndims, shape);
}

void __nac3_ndarray_set_strides_by_shape(NDArray<int32_t>* ndarray) {
    set_strides_by_shape(ndarray);
}

void __nac3_ndarray_set_strides_by_shape64(NDArray<int64_t>* ndarray) {
    set_strides_by_shape(ndarray);
}

bool __nac3_ndarray_is_c_contiguous(NDArray<int32_t>* ndarray) {
    return is_c_contiguous(ndarray);
}

bool __nac3_ndarray_is_c_contiguous64(NDArray<int64_t>* ndarray) {
    return is_c_contiguous(ndarray);
}

void __nac3_ndarray_copy_data(NDArray<int32_t>* src_ndarray, NDArray<int32_t>* dst_ndarray) {
    copy_data(src_ndarray, dst_ndarray);
}

void __nac3_ndarray_copy_data64(NDArray<int64_t>* src_ndarray, NDArray<int64_t>* dst_ndarray) {
    copy_data(src_ndarray, dst_ndarray);
}
}
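A standalone sketch of the stride computation performed by `set_strides_by_shape` above (plain <cstdint> types, illustrative values): for a C-contiguous shape (2, 3, 4) with itemsize 8, the byte strides come out as (96, 32, 8).

#include <cstdint>
#include <cassert>

int main() {
    int64_t shape[3] = {2, 3, 4}, strides[3], itemsize = 8;
    int64_t stride_product = 1;
    // Walk axes from last to first, exactly like the IRRT loop.
    for (int i = 0; i < 3; i++) {
        int axis = 3 - i - 1;
        strides[axis] = stride_product * itemsize;
        stride_product *= shape[axis];
    }
    assert(strides[0] == 96 && strides[1] == 32 && strides[2] == 8);
    return 0;
}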
@ -1,44 +0,0 @@
#pragma once

namespace {
/**
 * @brief The NDArray object
 *
 * The official numpy implementation: https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst
 */
template <typename SizeT>
struct NDArray {
    /**
     * @brief The underlying data this `ndarray` is pointing to.
     *
     * Must be set to `nullptr` to indicate that this NDArray's `data` is uninitialized.
     */
    uint8_t* data;

    /**
     * @brief The number of bytes of a single element in `data`.
     */
    SizeT itemsize;

    /**
     * @brief The number of dimensions of this shape.
     */
    SizeT ndims;

    /**
     * @brief The NDArray shape, with length equal to `ndims`.
     *
     * Note that it may contain 0.
     */
    SizeT* shape;

    /**
     * @brief Array strides, with length equal to `ndims`
     *
     * The stride values are in units of bytes, not number of elements.
     *
     * Note that `strides` can have negative values.
     */
    SizeT* strides;
};
}  // namespace
@ -1,38 +0,0 @@
#pragma once

#include <irrt/ndarray/basic.hpp>
#include <irrt/ndarray/def.hpp>

namespace {
namespace ndarray {
namespace fill {

/**
 * Fill an ndarray with a value.
 *
 * @param pvalue Pointer to the fill value, and the fill value should be of `ndarray->itemsize` bytes.
 */
template <typename SizeT>
void fill_generic(NDArray<SizeT>* ndarray, const uint8_t* pvalue) {
    const SizeT size = ndarray::basic::size(ndarray);
    for (SizeT i = 0; i < size; i++) {
        uint8_t* pelement = ndarray::basic::get_nth_pelement(ndarray, i);  // No need for checked_get_nth_pelement
        ndarray::basic::set_pelement_value(ndarray, pelement, pvalue);
    }
}
}  // namespace fill
}  // namespace ndarray
}  // namespace

extern "C" {
using namespace ndarray::fill;

void __nac3_ndarray_fill_generic(NDArray<int32_t>* ndarray, uint8_t* pvalue) {
    fill_generic(ndarray, pvalue);
}

void __nac3_ndarray_fill_generic64(NDArray<int64_t>* ndarray, uint8_t* pvalue) {
    fill_generic(ndarray, pvalue);
}
}
@ -1,200 +0,0 @@
#pragma once

#include <irrt/error_context.hpp>
#include <irrt/ndarray/basic.hpp>
#include <irrt/ndarray/def.hpp>
#include <irrt/slice.hpp>

namespace {
typedef uint8_t NDIndexType;

/**
 * @brief A single element index
 *
 * See https://numpy.org/doc/stable/user/basics.indexing.html#single-element-indexing
 *
 * `data` points to a `SliceIndex`.
 */
const NDIndexType ND_INDEX_TYPE_SINGLE_ELEMENT = 0;
/**
 * @brief A slice index
 *
 * See https://numpy.org/doc/stable/user/basics.indexing.html#slicing-and-striding
 *
 * `data` points to a `UserRange`.
 */
const NDIndexType ND_INDEX_TYPE_SLICE = 1;

/**
 * @brief An index used in ndarray indexing
 */
struct NDIndex {
    /**
     * @brief Enum tag to specify the type of index.
     *
     * Please see the comments of each enum constant.
     */
    NDIndexType type;

    /**
     * @brief The accompanying data associated with `type`.
     *
     * Please see the comments of each enum constant.
     */
    uint8_t* data;
};
}  // namespace

namespace {
namespace ndarray {
namespace indexing {
namespace util {

/**
 * @brief Return the expected rank of the resulting ndarray
 * created by indexing an ndarray of rank `ndims` using `indexes`.
 */
template <typename SizeT>
void deduce_ndims_after_indexing(ErrorContext* errctx, SizeT* final_ndims, SizeT ndims,
                                 SizeT num_indexes, const NDIndex* indexes) {
    if (num_indexes > ndims) {
        errctx->set_error(errctx->error_ids->index_error,
                          "too many indices for array: array is {0}-dimensional, but {1} were indexed",
                          ndims, num_indexes);
        return;
    }

    *final_ndims = ndims;
    for (SizeT i = 0; i < num_indexes; i++) {
        if (indexes[i].type == ND_INDEX_TYPE_SINGLE_ELEMENT) {
            // An index demotes the rank by 1
            (*final_ndims)--;
        }
    }
}
}  // namespace util

/**
 * @brief Perform ndarray "basic indexing" (https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing)
 *
 * This function is very similar to performing `dst_ndarray = src_ndarray[indexes]` in Python (where the variables
 * can all be found in the parameters of this function).
 *
 * In other words, this function takes in an ndarray (`src_ndarray`), indexes it with `indexes`, and returns the
 * indexed array (by writing the result to `dst_ndarray`).
 *
 * This function also does proper assertions on `indexes`.
 *
 * # Notes on `dst_ndarray`
 * The caller is responsible for allocating space for the resulting ndarray.
 * Here is what this function expects from `dst_ndarray` when called:
 * - `dst_ndarray->data` does not have to be initialized.
 * - `dst_ndarray->itemsize` does not have to be initialized.
 * - `dst_ndarray->ndims` must be initialized, and it must be equal to the expected `ndims` of the `dst_ndarray` after
 *   indexing `src_ndarray` with `indexes`.
 * - `dst_ndarray->shape` must be allocated, though it can contain uninitialized values.
 * - `dst_ndarray->strides` must be allocated, though it can contain uninitialized values.
 * When this function call ends:
 * - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view of `src_ndarray`)
 * - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize`
 * - `dst_ndarray->ndims` is unchanged.
 * - `dst_ndarray->shape` is updated according to how `src_ndarray` is indexed.
 * - `dst_ndarray->strides` is updated accordingly by how ndarray indexing works.
 *
 * @param indexes Indexes to index `src_ndarray`, ordered in the same way you would write them in Python.
 * @param src_ndarray The NDArray to be indexed.
 * @param dst_ndarray The resulting NDArray after indexing. Further details in the comments above.
 */
template <typename SizeT>
void index(ErrorContext* errctx, SizeT num_indexes, const NDIndex* indexes,
           const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
    // Reference code: https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652

    dst_ndarray->data = src_ndarray->data;
    dst_ndarray->itemsize = src_ndarray->itemsize;

    SizeT src_axis = 0;
    SizeT dst_axis = 0;

    for (SliceIndex i = 0; i < num_indexes; i++) {
        const NDIndex* index = &indexes[i];
        if (index->type == ND_INDEX_TYPE_SINGLE_ELEMENT) {
            SliceIndex input = *((SliceIndex*)index->data);
            SliceIndex k = slice::resolve_index_in_length(src_ndarray->shape[src_axis], input);

            if (k == slice::OUT_OF_BOUNDS) {
                errctx->set_error(errctx->error_ids->index_error,
                                  "index {0} is out of bounds for axis {1} with size {2}",
                                  input, src_axis, src_ndarray->shape[src_axis]);
                return;
            }

            dst_ndarray->data += k * src_ndarray->strides[src_axis];

            src_axis++;
        } else if (index->type == ND_INDEX_TYPE_SLICE) {
            UserSlice* input = (UserSlice*)index->data;

            Slice slice;
            input->indices_checked(errctx, src_ndarray->shape[src_axis], &slice);
            if (errctx->has_error()) {
                return;
            }

            dst_ndarray->data += (SizeT)slice.start * src_ndarray->strides[src_axis];
            dst_ndarray->strides[dst_axis] = ((SizeT)slice.step) * src_ndarray->strides[src_axis];
            dst_ndarray->shape[dst_axis] = (SizeT)slice.len();

            dst_axis++;
            src_axis++;
        } else {
            __builtin_unreachable();
        }
    }

    for (; dst_axis < dst_ndarray->ndims; dst_axis++, src_axis++) {
        dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis];
        dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis];
    }
}
}  // namespace indexing
}  // namespace ndarray
}  // namespace

extern "C" {
using namespace ndarray::indexing;

void __nac3_ndarray_indexing_deduce_ndims_after_indexing(ErrorContext* errctx, int32_t* result, int32_t ndims,
                                                          int32_t num_indexes, const NDIndex* indexes) {
    ndarray::indexing::util::deduce_ndims_after_indexing(errctx, result, ndims, num_indexes, indexes);
}

void __nac3_ndarray_indexing_deduce_ndims_after_indexing64(ErrorContext* errctx, int64_t* result, int64_t ndims,
                                                           int64_t num_indexes, const NDIndex* indexes) {
    ndarray::indexing::util::deduce_ndims_after_indexing(errctx, result, ndims, num_indexes, indexes);
}

void __nac3_ndarray_index(ErrorContext* errctx, int32_t num_indexes, NDIndex* indexes,
                          NDArray<int32_t>* src_ndarray, NDArray<int32_t>* dst_ndarray) {
    index(errctx, num_indexes, indexes, src_ndarray, dst_ndarray);
}

void __nac3_ndarray_index64(ErrorContext* errctx, int64_t num_indexes, NDIndex* indexes,
                            NDArray<int64_t>* src_ndarray, NDArray<int64_t>* dst_ndarray) {
    index(errctx, num_indexes, indexes, src_ndarray, dst_ndarray);
}
}
@ -1,116 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <irrt/error_context.hpp>
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/ndarray/def.hpp>
|
||||
|
||||
namespace {
|
||||
namespace ndarray {
|
||||
namespace reshape {
|
||||
namespace util {
|
||||
|
||||
/**
|
||||
* @brief Perform assertions on and resolve unknown dimensions in `new_shape` in `np.reshape(<ndarray>, new_shape)`
|
||||
*
|
||||
* If `new_shape` indeed contains unknown dimensions (specified with `-1`, just like numpy), `new_shape` will be
|
||||
* modified to contain the resolved dimension.
|
||||
*
|
||||
* To perform assertions on and resolve unknown dimensions in `new_shape`, we don't need the actual
|
||||
* `<ndarray>` object itself, but only the `.size` of the `<ndarray>`.
|
||||
*
|
||||
* @param size The `.size` of `<ndarray>`
|
||||
* @param new_ndims Number of elements in `new_shape`
|
||||
* @param new_shape Target shape to reshape to
|
||||
*/
|
||||
template <typename SizeT>
|
||||
void resolve_and_check_new_shape(ErrorContext* errctx, SizeT size,
|
||||
SizeT new_ndims, SizeT* new_shape) {
|
||||
// Is there a -1 in `new_shape`?
|
||||
bool neg1_exists = false;
|
||||
// Location of -1, only initialized if `neg1_exists` is true
|
||||
SizeT neg1_axis_i;
|
||||
// The computed ndarray size of `new_shape`
|
||||
SizeT new_size = 1;
|
||||
|
||||
for (SizeT axis_i = 0; axis_i < new_ndims; axis_i++) {
|
||||
SizeT dim = new_shape[axis_i];
|
||||
if (dim < 0) {
|
||||
if (dim == -1) {
|
||||
if (neg1_exists) {
|
||||
// Multiple `-1` found. Throw an error.
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"can only specify one unknown dimension");
|
||||
return;
|
||||
} else {
|
||||
neg1_exists = true;
|
||||
neg1_axis_i = axis_i;
|
||||
}
|
||||
} else {
|
||||
// TODO: What? In `np.reshape` any negative dimensions is
|
||||
// treated like its `-1`.
|
||||
//
|
||||
// Try running `np.zeros((3, 4)).reshape((-999, 2))`
|
||||
//
|
||||
// It is not documented by numpy.
|
||||
// Throw an error for now...
|
||||
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"Found negative dimension {0} on axis {1}",
|
||||
dim, axis_i);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
new_size *= dim;
|
||||
}
|
||||
}
|
||||
|
||||
bool can_reshape;
|
||||
if (neg1_exists) {
|
||||
// Let `x` be the unknown dimension
|
||||
// solve `x * <new_size> = <size>`
|
||||
if (new_size == 0 && size == 0) {
|
||||
// `x` has infinitely many solutions
|
||||
can_reshape = false;
|
||||
} else if (new_size == 0 && size != 0) {
|
||||
// `x` has no solutions
|
||||
can_reshape = false;
|
||||
} else if (size % new_size != 0) {
|
||||
// `x` has no integer solutions
|
||||
can_reshape = false;
|
||||
} else {
|
||||
can_reshape = true;
|
||||
new_shape[neg1_axis_i] = size / new_size; // Resolve dimension
|
||||
}
|
||||
} else {
|
||||
can_reshape = (new_size == size);
|
||||
}
|
||||
|
||||
if (!can_reshape) {
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"cannot reshape array of size {0} into given shape",
|
||||
size);
|
||||
return;
|
||||
}
|
||||
}
|
||||
} // namespace util
|
||||
} // namespace reshape
|
||||
} // namespace ndarray
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
|
||||
void __nac3_ndarray_resolve_and_check_new_shape(ErrorContext* errctx,
|
||||
int32_t size, int32_t new_ndims,
|
||||
int32_t* new_shape) {
|
||||
ndarray::reshape::util::resolve_and_check_new_shape(errctx, size, new_ndims,
|
||||
new_shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_resolve_and_check_new_shape64(ErrorContext* errctx,
|
||||
int64_t size,
|
||||
int64_t new_ndims,
|
||||
int64_t* new_shape) {
|
||||
ndarray::reshape::util::resolve_and_check_new_shape(errctx, size, new_ndims,
|
||||
new_shape);
|
||||
}
|
||||
}
|
|
@ -1,162 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/ndarray/def.hpp>
|
||||
#include <irrt/slice.hpp>
|
||||
|
||||
/*
|
||||
* Notes on `np.transpose(<array>, <axes>)`
|
||||
*
|
||||
* TODO: `axes`, if specified, can actually contain negative indices,
|
||||
* but it is not documented in numpy.
|
||||
*
|
||||
* Supporting it for now.
|
||||
*/
|
||||
|
||||
namespace {
|
||||
namespace ndarray {
|
||||
namespace transpose {
|
||||
namespace util {
|
||||
|
||||
/**
|
||||
* @brief Do assertions on `<axes>` in `np.transpose(<array>, <axes>)`.
|
||||
*
|
||||
* Note that `np.transpose`'s `<axes>` argument is optional. If the argument
|
||||
* is specified by the user, use this function to do assertions on it.
|
||||
*
|
||||
* @param ndims The number of dimensions of `<array>`
|
||||
* @param num_axes Number of elements in `<axes>` as specified by the user.
|
||||
* This should be equal to `ndims`. If not, a "ValueError: axes don't match array" is thrown.
|
||||
* @param axes The user specified `<axes>`.
|
||||
*/
|
||||
template <typename SizeT>
|
||||
void assert_transpose_axes(ErrorContext* errctx, SizeT ndims, SizeT num_axes,
|
||||
const SizeT* axes) {
|
||||
/*
|
||||
* TODO: `axes` can actually contain negative indices, but it is not documented in numpy.
|
||||
*
|
||||
* Supporting it for now.
|
||||
*/
|
||||
|
||||
if (ndims != num_axes) {
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"axes don't match array");
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO: Optimize this
|
||||
bool* axe_specified = (bool*)__builtin_alloca(sizeof(bool) * ndims);
|
||||
for (SizeT i = 0; i < ndims; i++) axe_specified[i] = false;
|
||||
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
SizeT axis = slice::resolve_index_in_length(ndims, axes[i]);
|
||||
if (axis == slice::OUT_OF_BOUNDS) {
|
||||
// TODO: numpy actually throws a `numpy.exceptions.AxisError`
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"axis {0} is out of bounds for array of dimension {1}", axis,
|
||||
ndims);
|
||||
return;
|
||||
}
|
||||
|
||||
if (axe_specified[axis]) {
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"repeated axis in transpose");
|
||||
return;
|
||||
}
|
||||
|
||||
axe_specified[axis] = true;
|
||||
}
|
||||
}
|
||||
} // namespace util
|
||||
|
||||
/**
|
||||
* @brief Create a transpose view of `src_ndarray` and perform proper assertions.
|
||||
*
|
||||
* This function is very similar to doing `dst_ndarray = np.transpose(src_ndarray, <axes>)`.
|
||||
* If `<axes>` is supposed to be `None`, caller can pass in a `nullptr` to `<axes>`.
|
||||
*
|
||||
* The transpose view created is returned by modifying `dst_ndarray`.
|
||||
*
|
||||
* The caller is responsible for setting up `dst_ndarray` before calling this function.
|
||||
* Here is what this function expects from `dst_ndarray` when called:
|
||||
* - `dst_ndarray->data` does not have to be initialized.
|
||||
* - `dst_ndarray->itemsize` does not have to be initialized.
|
||||
* - `dst_ndarray->ndims` must be initialized, must be equal to `src_ndarray->ndims`.
|
||||
* - `dst_ndarray->shape` must be allocated, though it may contain uninitialized values.
|
||||
* - `dst_ndarray->strides` must be allocated, though it may contain uninitialized values.
|
||||
* When this function call ends:
|
||||
* - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`)
|
||||
* - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize`
|
||||
* - `dst_ndarray->ndims` is unchanged
|
||||
* - `dst_ndarray->shape` is updated according to how `np.transpose` works
|
||||
* - `dst_ndarray->strides` is updated according to how `np.transpose` works
|
||||
*
|
||||
* @param src_ndarray The NDArray to build a transpose view on
|
||||
* @param dst_ndarray The resulting NDArray after transpose. Further details in the comments above.
|
||||
* @param num_axes Number of elements in axes, can be undefined if `axes` is nullptr.
|
||||
* @param axes Axes permutation. Set it to `nullptr` if `<axes>` is supposed to be `None`.
|
||||
*/
|
||||
template <typename SizeT>
|
||||
void transpose(ErrorContext* errctx, const NDArray<SizeT>* src_ndarray,
|
||||
NDArray<SizeT>* dst_ndarray, SizeT num_axes, const SizeT* axes) {
|
||||
__builtin_assume(src_ndarray->ndims == dst_ndarray->ndims);
|
||||
const auto ndims = src_ndarray->ndims;
|
||||
|
||||
if (axes != nullptr) {
|
||||
util::assert_transpose_axes(errctx, ndims, num_axes, axes);
|
||||
if (errctx->has_error()) return;
|
||||
}
|
||||
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
// Check out https://ajcr.net/stride-guide-part-2/ to see how `np.transpose` works behind the scenes.
|
||||
if (axes == nullptr) {
|
||||
// `np.transpose(<array>, axes=None)`
|
||||
|
||||
/*
|
||||
* Minor note: `np.transpose(<array>, axes=None)` is equivalent to
|
||||
* `np.transpose(<array>, axes=[N-1, N-2, ..., 0])` - basically it
|
||||
* is reversing the order of strides and shape.
|
||||
*
|
||||
* This is a fast implementation to handle this special (but very common) case.
|
||||
*/
|
||||
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
dst_ndarray->shape[axis] = src_ndarray->shape[ndims - axis - 1];
|
||||
dst_ndarray->strides[axis] = src_ndarray->strides[ndims - axis - 1];
|
||||
}
|
||||
} else {
|
||||
// `np.transpose(<array>, <axes>)`
|
||||
|
||||
// Permute strides and shape according to `axes`, while resolving negative indices in `axes`
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
// `i` cannot be OUT_OF_BOUNDS because of assertions
|
||||
SizeT i = slice::resolve_index_in_length(ndims, axes[axis]);
|
||||
|
||||
dst_ndarray->shape[axis] = src_ndarray->shape[i];
|
||||
dst_ndarray->strides[axis] = src_ndarray->strides[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace transpose
|
||||
} // namespace ndarray
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::transpose;
|
||||
void __nac3_ndarray_transpose(ErrorContext* errctx,
|
||||
const NDArray<int32_t>* src_ndarray,
|
||||
NDArray<int32_t>* dst_ndarray, int32_t num_axes,
|
||||
const int32_t* axes) {
|
||||
transpose(errctx, src_ndarray, dst_ndarray, num_axes, axes);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_transpose64(ErrorContext* errctx,
|
||||
const NDArray<int64_t>* src_ndarray,
|
||||
NDArray<int64_t>* dst_ndarray, int64_t num_axes,
|
||||
const int64_t* axes) {
|
||||
transpose(errctx, src_ndarray, dst_ndarray, num_axes, axes);
|
||||
}
|
||||
}
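
As a quick illustration of the axes=None fast path above, the following self-contained sketch shows that transposing a C-contiguous (3, 4) float64 array simply reverses its shape and byte strides. The concrete values here are assumptions chosen for the example, not taken from the IRRT tests.

#include <cassert>
#include <cstdint>

int main() {
    const int32_t ndims = 2;
    int32_t shape[ndims] = {3, 4};
    int32_t strides[ndims] = {32, 8};  // row-major byte strides for itemsize 8

    int32_t t_shape[ndims];
    int32_t t_strides[ndims];
    for (int32_t axis = 0; axis < ndims; axis++) {
        // Same reversal as the axes=None branch of transpose() above
        t_shape[axis] = shape[ndims - axis - 1];
        t_strides[axis] = strides[ndims - axis - 1];
    }

    assert(t_shape[0] == 4 && t_shape[1] == 3);
    assert(t_strides[0] == 8 && t_strides[1] == 32);
    return 0;
}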
|
|
@ -1,34 +1,22 @@
|
|||
#pragma once
|
||||
|
||||
#include <irrt/error_context.hpp>
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/slice.hpp>
|
||||
#include <irrt/utils.hpp>
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
// The type of an index or a value describing the length of a
|
||||
// range/slice is always `int32_t`.
|
||||
using SliceIndex = int32_t;
|
||||
extern "C" {
|
||||
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
|
||||
if (i < 0) {
|
||||
i = len + i;
|
||||
}
|
||||
if (i < 0) {
|
||||
return 0;
|
||||
} else if (i > len) {
|
||||
return len;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
/**
|
||||
* @brief A Python-like slice with resolved indices.
|
||||
*
|
||||
* "Resolved indices" means that `start` and `stop` must be positive and are
|
||||
* against a known length (Python-style negative indices have been converted).
|
||||
*/
|
||||
struct Slice {
|
||||
SliceIndex start;
|
||||
SliceIndex stop;
|
||||
SliceIndex step;
|
||||
|
||||
/**
|
||||
* @brief Calculate and return the length, i.e., the number of elements, of the slice.
|
||||
*
|
||||
* If this were a Python range, this function would be `len(range(start, stop, step))`.
|
||||
*/
|
||||
SliceIndex len() {
|
||||
SliceIndex diff = stop - start;
|
||||
SliceIndex __nac3_range_slice_len(const SliceIndex start, const SliceIndex end, const SliceIndex step) {
|
||||
SliceIndex diff = end - start;
|
||||
if (diff > 0 && step > 0) {
|
||||
return ((diff - 1) / step) + 1;
|
||||
} else if (diff < 0 && step < 0) {
|
||||
|
@ -36,130 +24,5 @@ struct Slice {
|
|||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
};
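
For a quick check of the length formula above, here is a hedged standalone computation (not part of the IRRT sources; the negative-step branch is filled in from Python's range semantics):

#include <cassert>
#include <cstdint>

// Standalone equivalent of len(range(start, stop, step)) for step != 0.
static int32_t range_len(int32_t start, int32_t stop, int32_t step) {
    int32_t diff = stop - start;
    if (diff > 0 && step > 0) return (diff - 1) / step + 1;
    if (diff < 0 && step < 0) return (diff + 1) / step + 1;
    return 0;
}

int main() {
    assert(range_len(0, 10, 3) == 4);   // range(0, 10, 3) -> 0, 3, 6, 9
    assert(range_len(10, 0, -2) == 5);  // range(10, 0, -2) -> 10, 8, 6, 4, 2
    assert(range_len(5, 5, 1) == 0);    // empty range
    return 0;
}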
|
||||
|
||||
namespace slice {
|
||||
/**
|
||||
* @brief Resolve a slice index under a given length like Python indexing.
|
||||
*
|
||||
* In Python, if you have a `list` of length 100, `list[-1]` resolves to
|
||||
* `list[99]`, so `resolve_index_in_length_clamped(100, -1)` returns `99`.
|
||||
*
|
||||
* If `length` is 0, 0 is returned for any value of `index`.
|
||||
*
|
||||
* If `index` is out of bounds, clamps the returned value between `0` and
|
||||
* `length` (inclusive).
|
||||
*
|
||||
*/
|
||||
SliceIndex resolve_index_in_length_clamped(SliceIndex length,
|
||||
SliceIndex index) {
|
||||
if (index < 0) {
|
||||
return max<SliceIndex>(length + index, 0);
|
||||
} else {
|
||||
return min<SliceIndex>(length, index);
|
||||
}
|
||||
}
|
||||
|
||||
const SliceIndex OUT_OF_BOUNDS = -1;
|
||||
|
||||
/**
|
||||
* @brief Like `resolve_index_in_length_clamped`, but returns `OUT_OF_BOUNDS`
|
||||
* if `index` is out of bounds.
|
||||
*/
|
||||
SliceIndex resolve_index_in_length(SliceIndex length, SliceIndex index) {
|
||||
SliceIndex resolved = index < 0 ? length + index : index;
|
||||
if (0 <= resolved && resolved < length) {
|
||||
return resolved;
|
||||
} else {
|
||||
return OUT_OF_BOUNDS;
|
||||
}
|
||||
}
|
||||
} // namespace slice
|
||||
|
||||
/**
|
||||
* @brief A Python-like slice with **unresolved** indices.
|
||||
*/
|
||||
struct UserSlice {
|
||||
bool start_defined;
|
||||
SliceIndex start;
|
||||
|
||||
bool stop_defined;
|
||||
SliceIndex stop;
|
||||
|
||||
bool step_defined;
|
||||
SliceIndex step;
|
||||
|
||||
UserSlice() { this->reset(); }
|
||||
|
||||
void reset() {
|
||||
this->start_defined = false;
|
||||
this->stop_defined = false;
|
||||
this->step_defined = false;
|
||||
}
|
||||
|
||||
void set_start(SliceIndex start) {
|
||||
this->start_defined = true;
|
||||
this->start = start;
|
||||
}
|
||||
|
||||
void set_stop(SliceIndex stop) {
|
||||
this->stop_defined = true;
|
||||
this->stop = stop;
|
||||
}
|
||||
|
||||
void set_step(SliceIndex step) {
|
||||
this->step_defined = true;
|
||||
this->step = step;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Resolve this slice.
|
||||
*
|
||||
* In Python, this would be `slice(start, stop, step).indices(length)`.
|
||||
*
|
||||
* @return A `Slice` with the resolved indices.
|
||||
*/
|
||||
Slice indices(SliceIndex length) {
|
||||
Slice result;
|
||||
|
||||
result.step = step_defined ? step : 1;
|
||||
bool step_is_negative = result.step < 0;
|
||||
|
||||
if (start_defined) {
|
||||
result.start =
|
||||
slice::resolve_index_in_length_clamped(length, start);
|
||||
} else {
|
||||
result.start = step_is_negative ? length - 1 : 0;
|
||||
}
|
||||
|
||||
if (stop_defined) {
|
||||
result.stop = slice::resolve_index_in_length_clamped(length, stop);
|
||||
} else {
|
||||
result.stop = step_is_negative ? -1 : length;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Like `.indices()` but with assertions.
|
||||
*/
|
||||
void indices_checked(ErrorContext* errctx, SliceIndex length,
|
||||
Slice* result) {
|
||||
if (length < 0) {
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"length should not be negative, got {0}", length);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->step_defined && this->step == 0) {
|
||||
errctx->set_error(errctx->error_ids->value_error,
|
||||
"slice step cannot be zero");
|
||||
return;
|
||||
}
|
||||
|
||||
*result = this->indices(length);
|
||||
}
|
||||
};
|
||||
} // namespace
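
To make the clamping behaviour described above concrete, here is a minimal standalone sketch (independent of the IRRT headers, with a hypothetical helper name) that mirrors Python's index resolution for a length-100 sequence:

#include <cassert>
#include <cstdint>

// Hypothetical standalone version of resolve_index_in_length_clamped.
static int32_t resolve_clamped(int32_t length, int32_t index) {
    if (index < 0) {
        int32_t resolved = length + index;
        return resolved > 0 ? resolved : 0;
    }
    return index < length ? index : length;
}

int main() {
    assert(resolve_clamped(100, -1) == 99);    // list[-1] resolves to list[99]
    assert(resolve_clamped(100, 400) == 100);  // too-large indices clamp to length
    assert(resolve_clamped(100, -400) == 0);   // far-negative indices clamp to 0
    assert(resolve_clamped(0, 5) == 0);        // length 0 always resolves to 0
    return 0;
}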
|
|
@ -1,104 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
namespace {
|
||||
template <typename T>
|
||||
const T& max(const T& a, const T& b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
const T& min(const T& a, const T& b) {
|
||||
return a > b ? b : a;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Compare contents of two arrays with the same length.
|
||||
*/
|
||||
template <typename T>
|
||||
bool arrays_match(int len, T* as, T* bs) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (as[i] != bs[i]) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
namespace cstr_utils {
|
||||
/**
|
||||
* @brief Return true if `str` is empty.
|
||||
*/
|
||||
bool is_empty(const char* str) { return str[0] == '\0'; }
|
||||
|
||||
/**
|
||||
* @brief Implementation of `strcmp()`
|
||||
*/
|
||||
int8_t compare(const char* a, const char* b) {
|
||||
uint32_t i = 0;
|
||||
while (true) {
|
||||
if (a[i] < b[i]) {
|
||||
return -1;
|
||||
} else if (a[i] > b[i]) {
|
||||
return 1;
|
||||
} else {
|
||||
if (a[i] == '\0') {
|
||||
return 0;
|
||||
} else {
|
||||
i++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return true if two strings have the same content.
|
||||
*/
|
||||
int8_t equal(const char* a, const char* b) { return compare(a, b) == 0; }
|
||||
|
||||
/**
|
||||
* @brief Implementation of `strlen()`.
|
||||
*/
|
||||
uint32_t length(const char* str) {
|
||||
uint32_t length = 0;
|
||||
while (*str != '\0') {
|
||||
length++;
|
||||
str++;
|
||||
}
|
||||
return length;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Copy a null-terminated string to a buffer with limited size and guaranteed null-termination.
|
||||
*
|
||||
* `dst_max_size` must be greater than 0, otherwise this function has undefined behavior.
|
||||
*
|
||||
* This function attempts to copy everything from `src` to `dst`, and *always* null-terminates `dst`.
|
||||
*
|
||||
* If the size of `dst` is too small, the final byte (`dst[dst_max_size - 1]`) of `dst` will be set to
|
||||
* the null terminator.
|
||||
*
|
||||
* @param src String to copy from.
|
||||
* @param dst Buffer to copy string to.
|
||||
* @param dst_max_size
|
||||
* Number of bytes of this buffer, including the space needed for the null terminator.
|
||||
* Must be greater than 0.
|
||||
* @return `true` if the whole of `src` (including the null terminator) was copied into `dst`; `false` if `dst` was too small and the copy was truncated.
|
||||
*/
|
||||
bool copy(const char* src, char* dst, uint32_t dst_max_size) {
|
||||
for (uint32_t i = 0; i < dst_max_size; i++) {
|
||||
bool is_last = i + 1 == dst_max_size;
|
||||
if (is_last && src[i] != '\0') {
|
||||
dst[i] = '\0';
|
||||
return false;
|
||||
}
|
||||
|
||||
if (src[i] == '\0') {
|
||||
dst[i] = '\0';
|
||||
return true;
|
||||
}
|
||||
|
||||
dst[i] = src[i];
|
||||
}
|
||||
|
||||
__builtin_unreachable();
|
||||
}
|
||||
} // namespace cstr_utils
|
||||
} // namespace
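
A brief, self-contained sketch of the truncating-copy contract documented above; the helper name is hypothetical, and the logic mirrors the described behaviour of `cstr_utils::copy` (always null-terminate, report whether the whole source fit):

#include <cassert>
#include <cstdint>
#include <cstring>

static bool bounded_copy(const char* src, char* dst, uint32_t dst_max_size) {
    uint32_t i = 0;
    for (; i + 1 < dst_max_size && src[i] != '\0'; i++) dst[i] = src[i];
    dst[i] = '\0';          // the destination is always null-terminated
    return src[i] == '\0';  // true only if everything (incl. '\0') fitted
}

int main() {
    char small[4];
    assert(!bounded_copy("hello", small, sizeof(small)));  // truncated to "hel"
    assert(strcmp(small, "hel") == 0);

    char big[16];
    assert(bounded_copy("hello", big, sizeof(big)));
    assert(strcmp(big, "hello") == 0);
    return 0;
}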
|
|
@ -1,14 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <irrt/artiq_defs.hpp>
|
||||
#include <irrt/core.hpp>
|
||||
#include <irrt/error_context.hpp>
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/ndarray/basic.hpp>
|
||||
#include <irrt/ndarray/def.hpp>
|
||||
#include <irrt/ndarray/fill.hpp>
|
||||
#include <irrt/ndarray/indexing.hpp>
|
||||
#include <irrt/ndarray/reshape.hpp>
|
||||
#include <irrt/ndarray/transpose.hpp>
|
||||
#include <irrt/slice.hpp>
|
||||
#include <irrt/utils.hpp>
|
|
@ -1,18 +0,0 @@
|
|||
// This file will be compiled like a real C++ program,
|
||||
// so we have the luxury of using the standard libraries.
|
||||
// That is, if the nix flakes do not have issues... especially on msys2.
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <test/test_core.hpp>
|
||||
#include <test/test_ndarray_basic.hpp>
|
||||
#include <test/test_ndarray_indexing.hpp>
|
||||
#include <test/test_slice.hpp>
|
||||
|
||||
int main() {
|
||||
test::core::run();
|
||||
test::slice::run();
|
||||
test::ndarray_basic::run();
|
||||
test::ndarray_indexing::run();
|
||||
return 0;
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <irrt_everything.hpp>
|
||||
#include <test/util.hpp>
|
||||
|
||||
/*
|
||||
Include this header for every test_*.cpp
|
||||
*/
|
|
@ -1,16 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <test/includes.hpp>
|
||||
|
||||
namespace test {
|
||||
namespace core {
|
||||
void test_int_exp() {
|
||||
BEGIN_TEST();
|
||||
|
||||
assert_values_match(125, __nac3_int_exp_impl<int32_t>(5, 3));
|
||||
assert_values_match(3125, __nac3_int_exp_impl<int32_t>(5, 5));
|
||||
}
|
||||
|
||||
void run() { test_int_exp(); }
|
||||
} // namespace core
|
||||
} // namespace test
|
|
@ -1,30 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <test/includes.hpp>
|
||||
|
||||
namespace test {
|
||||
namespace ndarray_basic {
|
||||
void test_calc_size_from_shape_normal() {
|
||||
// Test shapes with normal values
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = {2, 3, 5, 7};
|
||||
assert_values_match(
|
||||
210, ndarray::basic::util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_has_zero() {
|
||||
// Test shapes with 0 in them
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = {2, 0, 5, 7};
|
||||
assert_values_match(
|
||||
0, ndarray::basic::util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void run() {
|
||||
test_calc_size_from_shape_normal();
|
||||
test_calc_size_from_shape_has_zero();
|
||||
}
|
||||
} // namespace ndarray_basic
|
||||
} // namespace test
|
|
@ -1,220 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <test/includes.hpp>
|
||||
|
||||
namespace test {
|
||||
namespace ndarray_indexing {
|
||||
void test_normal_1() {
|
||||
/*
|
||||
Reference Python code:
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4));
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[-2:, 1::2]
|
||||
# array([[ 5., 7.],
|
||||
# [ 9., 11.]])
|
||||
|
||||
assert dst_ndarray.shape == (2, 2)
|
||||
assert dst_ndarray.strides == (32, 16)
|
||||
assert dst_ndarray[0, 0] == 5.0
|
||||
assert dst_ndarray[0, 1] == 7.0
|
||||
assert dst_ndarray[1, 0] == 9.0
|
||||
assert dst_ndarray[1, 1] == 11.0
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
double src_data[12] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0,
|
||||
6.0, 7.0, 8.0, 9.0, 10.0, 11.0};
|
||||
int32_t src_itemsize = sizeof(double);
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = {3, 4};
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {.data = (uint8_t *)src_data,
|
||||
.itemsize = src_itemsize,
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 2;
|
||||
int32_t dst_shape[dst_ndims] = {999, 999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999, 999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides};
|
||||
|
||||
// Create the subscripts in `ndarray[-2::, 1::2]`
|
||||
UserSlice subscript_1;
|
||||
subscript_1.set_start(-2);
|
||||
|
||||
UserSlice subscript_2;
|
||||
subscript_2.set_start(1);
|
||||
subscript_2.set_step(2);
|
||||
|
||||
const int32_t num_indexes = 2;
|
||||
NDIndex indexes[num_indexes] = {
|
||||
{.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_1},
|
||||
{.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_2}};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::indexing::index(&errctx, num_indexes, indexes, &src_ndarray,
|
||||
&dst_ndarray);
|
||||
assert_errctx_no_error(&errctx);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = {2, 2};
|
||||
int32_t expected_strides[dst_ndims] = {32, 16};
|
||||
|
||||
assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
// dst_ndarray[0, 0]
|
||||
assert_values_match(5.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){0, 0})));
|
||||
// dst_ndarray[0, 1]
|
||||
assert_values_match(7.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){0, 1})));
|
||||
// dst_ndarray[1, 0]
|
||||
assert_values_match(9.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){1, 0})));
|
||||
// dst_ndarray[1, 1]
|
||||
assert_values_match(11.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){1, 1})));
|
||||
}
|
||||
|
||||
void test_normal_2() {
|
||||
/*
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4))
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[2, ::-2]
|
||||
# array([11., 9.])
|
||||
|
||||
assert dst_ndarray.shape == (2,)
|
||||
assert dst_ndarray.strides == (-16,)
|
||||
assert dst_ndarray[0] == 11.0
|
||||
assert dst_ndarray[1] == 9.0
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
double src_data[12] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0,
|
||||
6.0, 7.0, 8.0, 9.0, 10.0, 11.0};
|
||||
int32_t src_itemsize = sizeof(double);
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = {3, 4};
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {.data = (uint8_t *)src_data,
|
||||
.itemsize = src_itemsize,
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 1;
|
||||
int32_t dst_shape[dst_ndims] = {999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides};
|
||||
|
||||
// Create the subscripts in `ndarray[2, ::-2]`
|
||||
int32_t subscript_1 = 2;
|
||||
|
||||
UserSlice subscript_2;
|
||||
subscript_2.set_step(-2);
|
||||
|
||||
const int32_t num_indexes = 2;
|
||||
NDIndex indexes[num_indexes] = {
|
||||
{.type = ND_INDEX_TYPE_SINGLE_ELEMENT, .data = (uint8_t *)&subscript_1},
|
||||
{.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_2}};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::indexing::index(&errctx, num_indexes, indexes, &src_ndarray,
|
||||
&dst_ndarray);
|
||||
assert_errctx_no_error(&errctx);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = {2};
|
||||
int32_t expected_strides[dst_ndims] = {-16};
|
||||
assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
assert_values_match(11.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){0})));
|
||||
assert_values_match(9.0,
|
||||
*((double *)ndarray::basic::get_pelement_by_indices(
|
||||
&dst_ndarray, (int32_t[dst_ndims]){1})));
|
||||
}
|
||||
|
||||
void test_index_subscript_out_of_bounds() {
|
||||
/*
|
||||
# Consider `my_array`
|
||||
|
||||
print(my_array.shape)
|
||||
# (4, 5, 6)
|
||||
|
||||
my_array[2, 100] # error, index subscript at axis 1 is out of bounds
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = {3, 4};
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {
|
||||
.data = (uint8_t *)nullptr, // placeholder, we won't access it
|
||||
.itemsize = sizeof(double), // placeholder
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Create the subscripts in `my_array[2, 100]`
|
||||
int32_t subscript_1 = 2;
|
||||
int32_t subscript_2 = 100;
|
||||
|
||||
const int32_t num_indexes = 2;
|
||||
NDIndex indexes[num_indexes] = {
|
||||
{.type = ND_INDEX_TYPE_SINGLE_ELEMENT, .data = (uint8_t *)&subscript_1},
|
||||
{.type = ND_INDEX_TYPE_SINGLE_ELEMENT,
|
||||
.data = (uint8_t *)&subscript_2}};
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 0;
|
||||
int32_t dst_shape[dst_ndims] = {};
|
||||
int32_t dst_strides[dst_ndims] = {};
|
||||
NDArray<int32_t> dst_ndarray = {.data = nullptr, // placeholder
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::indexing::index(&errctx, num_indexes, indexes, &src_ndarray,
|
||||
&dst_ndarray);
|
||||
assert_errctx_has_error(&errctx, errctx.error_ids->index_error);
|
||||
}
|
||||
|
||||
void run() {
|
||||
test_normal_1();
|
||||
test_normal_2();
|
||||
test_index_subscript_out_of_bounds();
|
||||
}
|
||||
} // namespace ndarray_indexing
|
||||
} // namespace test
|
|
@ -1,92 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <irrt_everything.hpp>
|
||||
#include <test/includes.hpp>
|
||||
|
||||
namespace test {
|
||||
namespace slice {
|
||||
void test_slice_normal() {
|
||||
// Normal situation
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice;
|
||||
user_slice.set_stop(5);
|
||||
|
||||
Slice slice = user_slice.indices(100);
|
||||
|
||||
printf("%d, %d, %d\n", slice.start, slice.stop, slice.step);
|
||||
|
||||
assert_values_match(0, slice.start);
|
||||
assert_values_match(5, slice.stop);
|
||||
assert_values_match(1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_start_too_large() {
|
||||
// Start is too large and should be clamped to length
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice;
|
||||
user_slice.set_start(400);
|
||||
|
||||
Slice slice = user_slice.indices(100);
|
||||
|
||||
assert_values_match(100, slice.start);
|
||||
assert_values_match(100, slice.stop);
|
||||
assert_values_match(1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_negative_start_stop() {
|
||||
// Negative start/stop should be resolved
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice;
|
||||
user_slice.set_start(-10);
|
||||
user_slice.set_stop(-5);
|
||||
|
||||
Slice slice = user_slice.indices(100);
|
||||
|
||||
assert_values_match(90, slice.start);
|
||||
assert_values_match(95, slice.stop);
|
||||
assert_values_match(1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_only_negative_step() {
|
||||
// Things like `[::-5]` should be handled correctly
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice;
|
||||
user_slice.set_step(-5);
|
||||
|
||||
Slice slice = user_slice.indices(100);
|
||||
|
||||
assert_values_match(99, slice.start);
|
||||
assert_values_match(-1, slice.stop);
|
||||
assert_values_match(-5, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_step_zero() {
|
||||
// Step = 0 is a value error
|
||||
BEGIN_TEST();
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
|
||||
UserSlice user_slice;
|
||||
user_slice.set_start(2);
|
||||
user_slice.set_stop(12);
|
||||
user_slice.set_step(0);
|
||||
|
||||
Slice slice;
|
||||
user_slice.indices_checked(&errctx, 100, &slice);
|
||||
|
||||
assert_errctx_has_error(&errctx, errctx.error_ids->value_error);
|
||||
}
|
||||
|
||||
void run() {
|
||||
test_slice_normal();
|
||||
test_slice_start_too_large();
|
||||
test_slice_negative_start_stop();
|
||||
test_slice_only_negative_step();
|
||||
test_slice_step_zero();
|
||||
}
|
||||
} // namespace slice
|
||||
} // namespace test
|
|
@ -1,179 +0,0 @@
|
|||
#pragma once
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
template <class T>
|
||||
void print_value(const T& value);
|
||||
|
||||
template <>
|
||||
void print_value(const int8_t& value) {
|
||||
printf("%d", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const int32_t& value) {
|
||||
printf("%d", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const uint8_t& value) {
|
||||
printf("%u", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const uint32_t& value) {
|
||||
printf("%u", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const float& value) {
|
||||
printf("%f", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const double& value) {
|
||||
printf("%f", value);
|
||||
}
|
||||
|
||||
void __begin_test(const char* function_name, const char* file, int line) {
|
||||
printf("######### Running %s @ %s:%d\n", function_name, file, line);
|
||||
}
|
||||
|
||||
#define BEGIN_TEST() __begin_test(__FUNCTION__, __FILE__, __LINE__)
|
||||
|
||||
void test_fail() {
|
||||
printf("[!] Test failed. Exiting with status code 1.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void debug_print_array(int len, const T* as) {
|
||||
printf("[");
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (i != 0) printf(", ");
|
||||
print_value(as[i]);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
void print_assertion_passed(const char* file, int line) {
|
||||
printf("[*] Assertion passed on %s:%d\n", file, line);
|
||||
}
|
||||
|
||||
void print_assertion_failed(const char* file, int line) {
|
||||
printf("[!] Assertion failed on %s:%d\n", file, line);
|
||||
}
|
||||
|
||||
void __assert_true(const char* file, int line, bool cond) {
|
||||
if (cond) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_true(cond) __assert_true(__FILE__, __LINE__, cond)
|
||||
|
||||
template <typename T>
|
||||
void __assert_arrays_match(const char* file, int line, int len,
|
||||
const T* expected, const T* got) {
|
||||
if (arrays_match(len, expected, got)) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expect = ");
|
||||
debug_print_array(len, expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
debug_print_array(len, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_arrays_match(len, expected, got) \
|
||||
__assert_arrays_match(__FILE__, __LINE__, len, expected, got)
|
||||
|
||||
template <typename T>
|
||||
void __assert_values_match(const char* file, int line, T expected, T got) {
|
||||
if (expected == got) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expect = ");
|
||||
print_value(expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
print_value(got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_values_match(expected, got) \
|
||||
__assert_values_match(__FILE__, __LINE__, expected, got)
|
||||
|
||||
// A fake set of ErrorIds for testing only
|
||||
const ErrorIds TEST_ERROR_IDS = {
|
||||
.index_error = 0,
|
||||
.value_error = 1,
|
||||
.assertion_error = 2,
|
||||
.runtime_error = 3,
|
||||
.type_error = 4,
|
||||
};
|
||||
|
||||
ErrorContext create_testing_errctx() {
|
||||
// Everything is global so it is fine to directly return a struct
|
||||
// ErrorContext
|
||||
ErrorContext errctx;
|
||||
errctx.initialize(&TEST_ERROR_IDS);
|
||||
return errctx;
|
||||
}
|
||||
|
||||
void print_errctx_content(ErrorContext* errctx) {
|
||||
if (errctx->has_error()) {
|
||||
printf(
|
||||
"(Error ID %d): %s ... where param1 = %ld, param2 = %ld, param3 = "
|
||||
"%ld\n",
|
||||
errctx->error_id, errctx->message_template, errctx->param1,
|
||||
errctx->param2, errctx->param3);
|
||||
} else {
|
||||
printf("<no error>\n");
|
||||
}
|
||||
}
|
||||
|
||||
void __assert_errctx_no_error(const char* file, int line,
|
||||
ErrorContext* errctx) {
|
||||
if (errctx->has_error()) {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expecting no error but caught the following:\n\n");
|
||||
print_errctx_content(errctx);
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_errctx_no_error(errctx) \
|
||||
__assert_errctx_no_error(__FILE__, __LINE__, errctx)
|
||||
|
||||
void __assert_errctx_has_error(const char* file, int line, ErrorContext* errctx,
|
||||
ExceptionId expected_error_id) {
|
||||
if (errctx->has_error()) {
|
||||
if (errctx->error_id != expected_error_id) {
|
||||
print_assertion_failed(file, line);
|
||||
printf(
|
||||
"Expecting error id %d but got error id %d. Error caught:\n\n",
|
||||
expected_error_id, errctx->error_id);
|
||||
print_errctx_content(errctx);
|
||||
test_fail();
|
||||
}
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expecting an error, but there is none.");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_errctx_has_error(errctx, expected_error_id) \
|
||||
__assert_errctx_has_error(__FILE__, __LINE__, errctx, expected_error_id)
|
|
@ -0,0 +1,21 @@
|
|||
[package]
|
||||
name = "nac3core_derive"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[[test]]
|
||||
name = "structfields_tests"
|
||||
path = "tests/structfields_test.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
nac3core = { path = ".." }
|
||||
trybuild = { version = "1.0", features = ["diff"] }
|
||||
|
||||
[dependencies]
|
||||
proc-macro2 = "1.0"
|
||||
proc-macro-error = "1.0"
|
||||
syn = "2.0"
|
||||
quote = "1.0"
|
|
@ -0,0 +1,320 @@
|
|||
use proc_macro::TokenStream;
|
||||
use proc_macro_error::{abort, proc_macro_error};
|
||||
use quote::quote;
|
||||
use syn::{
|
||||
parse_macro_input, spanned::Spanned, Data, DataStruct, Expr, ExprField, ExprMethodCall,
|
||||
ExprPath, GenericArgument, Ident, LitStr, Path, PathArguments, Type, TypePath,
|
||||
};
|
||||
|
||||
/// Extracts all generic arguments of a [`Type`] into a [`Vec`].
|
||||
///
|
||||
/// Returns [`Some`] of a possibly-empty [`Vec`] if the path of `ty` matches with
|
||||
/// `expected_ty_name`, otherwise returns [`None`].
|
||||
fn extract_generic_args(expected_ty_name: &'static str, ty: &Type) -> Option<Vec<GenericArgument>> {
|
||||
let Type::Path(TypePath { qself: None, path, .. }) = ty else {
|
||||
return None;
|
||||
};
|
||||
|
||||
let segments = &path.segments;
|
||||
if segments.len() != 1 {
|
||||
return None;
|
||||
};
|
||||
|
||||
let segment = segments.iter().next().unwrap();
|
||||
if segment.ident != expected_ty_name {
|
||||
return None;
|
||||
}
|
||||
|
||||
let PathArguments::AngleBracketed(path_args) = &segment.arguments else {
|
||||
return Some(Vec::new());
|
||||
};
|
||||
let args = &path_args.args;
|
||||
|
||||
Some(args.iter().cloned().collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
/// Maps a `path` matching one of the `target_idents` into the `replacement` [`Ident`].
|
||||
fn map_path_to_ident(path: &Path, target_idents: &[&str], replacement: &str) -> Option<Ident> {
|
||||
path.require_ident()
|
||||
.ok()
|
||||
.filter(|ident| target_idents.iter().any(|target| ident == target))
|
||||
.map(|ident| Ident::new(replacement, ident.span()))
|
||||
}
|
||||
|
||||
/// Extracts the left-hand side of a dot-expression.
|
||||
fn extract_dot_operand(expr: &Expr) -> Option<&Expr> {
|
||||
match expr {
|
||||
Expr::MethodCall(ExprMethodCall { receiver: operand, .. })
|
||||
| Expr::Field(ExprField { base: operand, .. }) => Some(operand),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Replaces the top-level receiver of a dot-expression with an [`Ident`], returning `Some(&mut expr)` if the
|
||||
/// replacement is performed.
|
||||
///
|
||||
/// The top-level receiver is the left-most receiver expression, e.g. the top-level receiver of `a.b.c.foo()` is `a`.
|
||||
fn replace_top_level_receiver(expr: &mut Expr, ident: Ident) -> Option<&mut Expr> {
|
||||
if let Expr::MethodCall(ExprMethodCall { receiver: operand, .. })
|
||||
| Expr::Field(ExprField { base: operand, .. }) = expr
|
||||
{
|
||||
return if extract_dot_operand(operand).is_some() {
|
||||
if replace_top_level_receiver(operand, ident).is_some() {
|
||||
Some(expr)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
*operand = Box::new(Expr::Path(ExprPath {
|
||||
attrs: Vec::default(),
|
||||
qself: None,
|
||||
path: ident.into(),
|
||||
}));
|
||||
|
||||
Some(expr)
|
||||
};
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Iterates all operands to the left-hand side of the `.` of an [expression][`Expr`], i.e. the container operand of all
|
||||
/// [`Expr::Field`] and the receiver operand of all [`Expr::MethodCall`].
|
||||
///
|
||||
/// The iterator will return the operand expressions in reverse order of appearance. For example, `a.b.c.func()` will
|
||||
/// return `vec![c, b, a]`.
|
||||
fn iter_dot_operands(expr: &Expr) -> impl Iterator<Item = &Expr> {
|
||||
let mut o = extract_dot_operand(expr);
|
||||
|
||||
std::iter::from_fn(move || {
|
||||
let this = o;
|
||||
o = o.as_ref().and_then(|o| extract_dot_operand(o));
|
||||
|
||||
this
|
||||
})
|
||||
}
|
||||
|
||||
/// Normalizes a value expression for use when creating an instance of this structure, returning a
|
||||
/// [`proc_macro2::TokenStream`] of tokens representing the normalized expression.
|
||||
fn normalize_value_expr(expr: &Expr) -> proc_macro2::TokenStream {
|
||||
match &expr {
|
||||
Expr::Path(ExprPath { qself: None, path, .. }) => {
|
||||
if let Some(ident) = map_path_to_ident(path, &["usize", "size_t"], "llvm_usize") {
|
||||
quote! { #ident }
|
||||
} else {
|
||||
abort!(
|
||||
path,
|
||||
format!(
|
||||
"Expected one of `size_t`, `usize`, or an implicit call expression in #[value_type(...)], found {}",
|
||||
quote!(#expr).to_string(),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
Expr::Call(_) => {
|
||||
quote! { ctx.#expr }
|
||||
}
|
||||
|
||||
Expr::MethodCall(_) => {
|
||||
let base_receiver = iter_dot_operands(expr).last();
|
||||
|
||||
match base_receiver {
|
||||
// `usize.{...}`, `size_t.{...}` -> Rewrite the identifiers to `llvm_usize`
|
||||
Some(Expr::Path(ExprPath { qself: None, path, .. }))
|
||||
if map_path_to_ident(path, &["usize", "size_t"], "llvm_usize").is_some() =>
|
||||
{
|
||||
let ident =
|
||||
map_path_to_ident(path, &["usize", "size_t"], "llvm_usize").unwrap();
|
||||
|
||||
let mut expr = expr.clone();
|
||||
let expr = replace_top_level_receiver(&mut expr, ident).unwrap();
|
||||
|
||||
quote!(#expr)
|
||||
}
|
||||
|
||||
// `ctx.{...}`, `context.{...}` -> Rewrite the identifiers to `ctx`
|
||||
Some(Expr::Path(ExprPath { qself: None, path, .. }))
|
||||
if map_path_to_ident(path, &["ctx", "context"], "ctx").is_some() =>
|
||||
{
|
||||
let ident = map_path_to_ident(path, &["ctx", "context"], "ctx").unwrap();
|
||||
|
||||
let mut expr = expr.clone();
|
||||
let expr = replace_top_level_receiver(&mut expr, ident).unwrap();
|
||||
|
||||
quote!(#expr)
|
||||
}
|
||||
|
||||
// No reserved identifier prefix -> Prepend `ctx.` to the entire expression
|
||||
_ => quote! { ctx.#expr },
|
||||
}
|
||||
}
|
||||
|
||||
_ => {
|
||||
abort!(
|
||||
expr,
|
||||
format!(
|
||||
"Expected one of `size_t`, `usize`, or an implicit call expression in #[value_type(...)], found {}",
|
||||
quote!(#expr).to_string(),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Derives an implementation of `codegen::types::structure::StructFields`.
|
||||
///
|
||||
/// The benefit of using `#[derive(StructFields)]` is that all index- or order-dependent logic required by
|
||||
/// `impl StructFields` is automatically generated by this implementation, including the field index as required by
|
||||
/// `StructField::new` and the fields as returned by `StructFields::to_vec`.
|
||||
///
|
||||
/// # Prerequisites
|
||||
///
|
||||
/// In order to derive from [`StructFields`], you must implement (or derive) [`Eq`] and [`Copy`] as required by
|
||||
/// `StructFields`.
|
||||
///
|
||||
/// Moreover, `#[derive(StructFields)]` can only be used for `struct`s with named fields, and may only contain fields
|
||||
/// with either `StructField` or [`PhantomData`] types.
|
||||
///
|
||||
/// # Attributes for [`StructFields`]
|
||||
///
|
||||
/// Each `StructField` field must be declared with the `#[value_type(...)]` attribute. The argument of `value_type`
|
||||
/// accepts one of the following:
|
||||
///
|
||||
/// - An expression returning an instance of `inkwell::types::BasicType` (with or without the receiver `ctx`/`context`).
|
||||
/// For example, `context.i8_type()`, `ctx.i8_type()`, and `i8_type()` all refer to `i8`.
|
||||
/// - The reserved identifiers `usize` and `size_t` referring to an `inkwell::types::IntType` of the platform-dependent
|
||||
/// integer size. `usize` and `size_t` can also be used as the receiver to other method calls, e.g.
|
||||
/// `usize.array_type(3)`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// The following is an example of an LLVM slice implemented using `#[derive(StructFields)]`.
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// use nac3core::{
|
||||
/// codegen::types::structure::StructField,
|
||||
/// inkwell::{
|
||||
/// values::{IntValue, PointerValue},
|
||||
/// AddressSpace,
|
||||
/// },
|
||||
/// };
|
||||
/// use nac3core_derive::StructFields;
|
||||
///
|
||||
/// // All classes that implement StructFields must also implement Eq and Copy
|
||||
/// #[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
/// pub struct SliceValue<'ctx> {
|
||||
/// // Declares `ptr` to have a value type of i8*
|
||||
/// //
|
||||
/// // Can also be written as `ctx.i8_type().ptr_type(...)` or `context.i8_type().ptr_type(...)`
|
||||
/// #[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
/// ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
///
|
||||
/// // Declares `len` to have a value type of usize, depending on the target compilation platform
|
||||
/// #[value_type(usize)]
|
||||
/// len: StructField<'ctx, IntValue<'ctx>>,
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_derive(StructFields, attributes(value_type))]
|
||||
#[proc_macro_error]
|
||||
pub fn derive(input: TokenStream) -> TokenStream {
|
||||
let input = parse_macro_input!(input as syn::DeriveInput);
|
||||
let ident = &input.ident;
|
||||
|
||||
let Data::Struct(DataStruct { fields, .. }) = &input.data else {
|
||||
abort!(input, "Only structs with named fields are supported");
|
||||
};
|
||||
if let Err(err_span) =
|
||||
fields
|
||||
.iter()
|
||||
.try_for_each(|field| if field.ident.is_some() { Ok(()) } else { Err(field.span()) })
|
||||
{
|
||||
abort!(err_span, "Only structs with named fields are supported");
|
||||
};
|
||||
|
||||
// Check if struct<'ctx>
|
||||
if input.generics.params.len() != 1 {
|
||||
abort!(input.generics, "Expected exactly 1 generic parameter")
|
||||
}
|
||||
|
||||
let phantom_info = fields
|
||||
.iter()
|
||||
.filter(|field| extract_generic_args("PhantomData", &field.ty).is_some())
|
||||
.map(|field| field.ident.as_ref().unwrap())
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let field_info = fields
|
||||
.iter()
|
||||
.filter(|field| extract_generic_args("PhantomData", &field.ty).is_none())
|
||||
.map(|field| {
|
||||
let ident = field.ident.as_ref().unwrap();
|
||||
let ty = &field.ty;
|
||||
|
||||
let Some(_) = extract_generic_args("StructField", ty) else {
|
||||
abort!(field, "Only StructField and PhantomData are allowed")
|
||||
};
|
||||
|
||||
let attrs = &field.attrs;
|
||||
let Some(value_type_attr) =
|
||||
attrs.iter().find(|attr| attr.path().is_ident("value_type"))
|
||||
else {
|
||||
abort!(field, "Expected #[value_type(...)] attribute for field");
|
||||
};
|
||||
|
||||
let Ok(value_type_expr) = value_type_attr.parse_args::<Expr>() else {
|
||||
abort!(value_type_attr, "Expected expression in #[value_type(...)]");
|
||||
};
|
||||
|
||||
let value_expr_toks = normalize_value_expr(&value_type_expr);
|
||||
|
||||
(ident.clone(), value_expr_toks)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// `<*>::new` impl of `StructField` and `PhantomData` for `StructFields::new`
|
||||
let phantoms_create = phantom_info
|
||||
.iter()
|
||||
.map(|id| quote! { #id: ::std::marker::PhantomData })
|
||||
.collect::<Vec<_>>();
|
||||
let fields_create = field_info
|
||||
.iter()
|
||||
.map(|(id, ty)| {
|
||||
let id_lit = LitStr::new(&id.to_string(), id.span());
|
||||
quote! {
|
||||
#id: ::nac3core::codegen::types::structure::StructField::create(
|
||||
&mut counter,
|
||||
#id_lit,
|
||||
#ty,
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// `.into()` impl of `StructField` for `StructFields::to_vec`
|
||||
let fields_into =
|
||||
field_info.iter().map(|(id, _)| quote! { self.#id.into() }).collect::<Vec<_>>();
|
||||
|
||||
let impl_block = quote! {
|
||||
impl<'ctx> ::nac3core::codegen::types::structure::StructFields<'ctx> for #ident<'ctx> {
|
||||
fn new(ctx: impl ::nac3core::inkwell::context::AsContextRef<'ctx>, llvm_usize: ::nac3core::inkwell::types::IntType<'ctx>) -> Self {
|
||||
let ctx = unsafe { ::nac3core::inkwell::context::ContextRef::new(ctx.as_ctx_ref()) };
|
||||
|
||||
let mut counter = ::nac3core::codegen::types::structure::FieldIndexCounter::default();
|
||||
|
||||
#ident {
|
||||
#(#fields_create),*
|
||||
#(#phantoms_create),*
|
||||
}
|
||||
}
|
||||
|
||||
fn to_vec(&self) -> ::std::vec::Vec<(&'static str, ::nac3core::inkwell::types::BasicTypeEnum<'ctx>)> {
|
||||
vec![
|
||||
#(#fields_into),*
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
impl_block.into()
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
use nac3core_derive::StructFields;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct EmptyValue<'ctx> {
|
||||
_phantom: PhantomData<&'ctx ()>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,20 @@
|
|||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct NDArrayValue<'ctx> {
|
||||
#[value_type(usize)]
|
||||
ndims: StructField<'ctx, IntValue<'ctx>>,
|
||||
#[value_type(usize.ptr_type(AddressSpace::default()))]
|
||||
shape: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
data: StructField<'ctx, PointerValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,18 @@
|
|||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,18 @@
|
|||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(context.i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,18 @@
|
|||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(ctx.i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,18 @@
|
|||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(size_t)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
|
@ -0,0 +1,10 @@
|
|||
#[test]
|
||||
fn test_parse_empty() {
|
||||
let t = trybuild::TestCases::new();
|
||||
t.pass("tests/structfields_empty.rs");
|
||||
t.pass("tests/structfields_slice.rs");
|
||||
t.pass("tests/structfields_slice_ctx.rs");
|
||||
t.pass("tests/structfields_slice_context.rs");
|
||||
t.pass("tests/structfields_slice_sizet.rs");
|
||||
t.pass("tests/structfields_ndarray.rs");
|
||||
}
|
|
@ -1,26 +1,106 @@
|
|||
use inkwell::types::BasicTypeEnum;
|
||||
use inkwell::values::BasicValueEnum;
|
||||
use inkwell::{FloatPredicate, IntPredicate, OptimizationLevel};
|
||||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValue, BasicValueEnum, IntValue, PointerValue},
|
||||
FloatPredicate, IntPredicate, OptimizationLevel,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
|
||||
use crate::codegen::classes::{NDArrayValue, ProxyValue, UntypedArrayLikeAccessor};
|
||||
use crate::codegen::numpy::ndarray_elementwise_unaryop_impl;
|
||||
use crate::codegen::stmt::gen_for_callback_incrementing;
|
||||
use crate::codegen::{extern_fns, irrt, llvm_intrinsics, numpy, CodeGenContext, CodeGenerator};
|
||||
use crate::toplevel::helper::PrimDef;
|
||||
use crate::toplevel::numpy::unpack_ndarray_var_tys;
|
||||
use crate::typecheck::typedef::Type;
|
||||
use super::{
|
||||
expr::destructure_range,
|
||||
extern_fns, irrt,
|
||||
irrt::calculate_len_for_slice_range,
|
||||
llvm_intrinsics,
|
||||
macros::codegen_unreachable,
|
||||
numpy,
|
||||
numpy::ndarray_elementwise_unaryop_impl,
|
||||
stmt::gen_for_callback_incrementing,
|
||||
values::{
|
||||
ArrayLikeValue, NDArrayValue, ProxyValue, RangeValue, TypedArrayLikeAccessor,
|
||||
UntypedArrayLikeAccessor, UntypedArrayLikeMutator,
|
||||
},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
use crate::{
|
||||
toplevel::{
|
||||
helper::{arraylike_flatten_element_type, PrimDef},
|
||||
numpy::unpack_ndarray_var_tys,
|
||||
},
|
||||
typecheck::typedef::{Type, TypeEnum},
|
||||
};
|
||||
|
||||
/// Shorthand for [`unreachable!()`] when a type of argument is not supported.
|
||||
///
|
||||
/// The generated message will contain the function name and the name of the unsupported type.
|
||||
fn unsupported_type(ctx: &CodeGenContext<'_, '_>, fn_name: &str, tys: &[Type]) -> ! {
|
||||
unreachable!(
|
||||
codegen_unreachable!(
|
||||
ctx,
|
||||
"{fn_name}() not supported for '{}'",
|
||||
tys.iter().map(|ty| format!("'{}'", ctx.unifier.stringify(*ty))).join(", "),
|
||||
)
|
||||
}
|
||||
|
||||
/// Invokes the `len` builtin function.
|
||||
pub fn call_len<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
n: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<IntValue<'ctx>, String> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let range_ty = ctx.primitives.range;
|
||||
let (arg_ty, arg) = n;
|
||||
|
||||
Ok(if ctx.unifier.unioned(arg_ty, range_ty) {
|
||||
let arg = RangeValue::from_pointer_value(arg.into_pointer_value(), Some("range"));
|
||||
let (start, end, step) = destructure_range(ctx, arg);
|
||||
calculate_len_for_slice_range(generator, ctx, start, end, step)
|
||||
} else {
|
||||
match &*ctx.unifier.get_ty_immutable(arg_ty) {
|
||||
TypeEnum::TTuple { ty, .. } => llvm_i32.const_int(ty.len() as u64, false),
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::List.id() => {
|
||||
let zero = llvm_i32.const_zero();
|
||||
let len = ctx
|
||||
.build_gep_and_load(
|
||||
arg.into_pointer_value(),
|
||||
&[zero, llvm_i32.const_int(1, false)],
|
||||
None,
|
||||
)
|
||||
.into_int_value();
|
||||
ctx.builder.build_int_truncate_or_bit_cast(len, llvm_i32, "len").unwrap()
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
let elem_ty = arraylike_flatten_element_type(&mut ctx.unifier, arg_ty);
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let arg = NDArrayValue::from_pointer_value(
|
||||
arg.into_pointer_value(),
|
||||
ctx.get_llvm_type(generator, elem_ty),
|
||||
llvm_usize,
|
||||
None,
|
||||
);
|
||||
|
||||
let ndims = arg.shape().size(ctx, generator);
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx.builder
|
||||
.build_int_compare(IntPredicate::NE, ndims, llvm_usize.const_zero(), "")
|
||||
.unwrap(),
|
||||
"0:TypeError",
|
||||
"len() of unsized object",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
let len = unsafe {
|
||||
arg.shape().get_typed_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
|
||||
ctx.builder.build_int_truncate_or_bit_cast(len, llvm_i32, "len").unwrap()
|
||||
}
|
||||
_ => codegen_unreachable!(ctx),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Invokes the `int32` builtin function.
|
||||
pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
|
@ -31,7 +111,6 @@ pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
|
|||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let (n_ty, n) = n;
|
||||
|
||||
Ok(match n {
|
||||
BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => {
|
||||
debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.bool));
|
||||
|
@ -67,13 +146,14 @@ pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.int32,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_int32(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -129,13 +209,14 @@ pub fn call_int64<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.int64,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_int64(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -207,13 +288,14 @@ pub fn call_uint32<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.uint32,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_uint32(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -274,13 +356,14 @@ pub fn call_uint64<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.uint64,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_uint64(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -340,13 +423,14 @@ pub fn call_float<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.float,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_float(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -386,13 +470,14 @@ pub fn call_round<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ret_elem_ty,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_round(generator, ctx, (elem_ty, val), ret_elem_ty),
|
||||
)?;
|
||||
|
||||
|
@ -426,13 +511,14 @@ pub fn call_numpy_round<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.float,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_numpy_round(generator, ctx, (elem_ty, val)),
|
||||
)?;
|
||||
|
||||
|
@ -491,13 +577,14 @@ pub fn call_bool<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.bool,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| {
|
||||
let elem = call_bool(generator, ctx, (elem_ty, val))?;
|
||||
|
||||
|
@ -545,13 +632,14 @@ pub fn call_floor<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ret_elem_ty,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_floor(generator, ctx, (elem_ty, val), ret_elem_ty),
|
||||
)?;
|
||||
|
||||
|
@ -595,14 +683,15 @@ pub fn call_ceil<'ctx, G: CodeGenerator + ?Sized>(
|
|||
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ret_elem_ty,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(n, llvm_usize, None),
|
||||
|generator, ctx, val| call_floor(generator, ctx, (elem_ty, val), ret_elem_ty),
|
||||
NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, val| call_ceil(generator, ctx, (elem_ty, val), ret_elem_ty),
|
||||
)?;
|
||||
|
||||
ndarray.as_base_value().into()
|
||||
|
@ -719,7 +808,7 @@ pub fn call_numpy_minimum<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -730,8 +819,8 @@ pub fn call_numpy_minimum<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_minimum(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -823,17 +912,17 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
|
|||
match fn_name {
|
||||
"np_argmin" | "np_argmax" => llvm_int64.const_zero().into(),
|
||||
"np_max" | "np_min" => a,
|
||||
_ => unreachable!(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
}
|
||||
}
|
||||
BasicValueEnum::PointerValue(n)
|
||||
if a_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
|
||||
{
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, a_ty);
|
||||
let llvm_ndarray_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let n = NDArrayValue::from_ptr_val(n, llvm_usize, None);
|
||||
let n_sz = irrt::call_ndarray_calc_size(generator, ctx, &n.dim_sizes(), (None, None));
|
||||
let n = NDArrayValue::from_pointer_value(n, llvm_elem_ty, llvm_usize, None);
|
||||
let n_sz = irrt::call_ndarray_calc_size(generator, ctx, &n.shape(), (None, None));
|
||||
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
|
||||
let n_sz_eqz = ctx
|
||||
.builder
|
||||
|
@ -850,7 +939,7 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
|
|||
);
|
||||
}
|
||||
|
||||
let accumulator_addr = generator.gen_var_alloc(ctx, llvm_ndarray_ty, None)?;
|
||||
let accumulator_addr = generator.gen_var_alloc(ctx, llvm_elem_ty, None)?;
|
||||
let res_idx = generator.gen_var_alloc(ctx, llvm_int64.into(), None)?;
|
||||
|
||||
unsafe {
|
||||
|
@ -863,6 +952,7 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
|
|||
gen_for_callback_incrementing(
|
||||
generator,
|
||||
ctx,
|
||||
None,
|
||||
llvm_int64.const_int(1, false),
|
||||
(n_sz, false),
|
||||
|generator, ctx, _, idx| {
|
||||
|
@ -877,7 +967,7 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
|
|||
"np_argmax" | "np_max" => {
|
||||
call_max(ctx, (elem_ty, accumulator), (elem_ty, elem))
|
||||
}
|
||||
_ => unreachable!(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
};
|
||||
|
||||
let updated_idx = match (accumulator, result) {
|
||||
|
@ -914,7 +1004,7 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
|
|||
match fn_name {
|
||||
"np_argmin" | "np_argmax" => ctx.builder.build_load(res_idx, "").unwrap(),
|
||||
"np_max" | "np_min" => ctx.builder.build_load(accumulator_addr, "").unwrap(),
|
||||
_ => unreachable!(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -980,7 +1070,7 @@ pub fn call_numpy_maximum<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -991,8 +1081,8 @@ pub fn call_numpy_maximum<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_maximum(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1037,6 +1127,7 @@ where
|
|||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let (arg_elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, arg_ty);
|
||||
let llvm_arg_elem_ty = ctx.get_llvm_type(generator, arg_elem_ty);
|
||||
let ret_elem_ty = get_ret_elem_type(ctx, arg_elem_ty);
|
||||
|
||||
let ndarray = ndarray_elementwise_unaryop_impl(
|
||||
|
@ -1044,7 +1135,7 @@ where
|
|||
ctx,
|
||||
ret_elem_ty,
|
||||
None,
|
||||
NDArrayValue::from_ptr_val(x, llvm_usize, None),
|
||||
NDArrayValue::from_pointer_value(x, llvm_arg_elem_ty, llvm_usize, None),
|
||||
|generator, ctx, elem_val| {
|
||||
helper_call_numpy_unary_elementwise(
|
||||
generator,
|
||||
|
@ -1420,7 +1511,7 @@ pub fn call_numpy_arctan2<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1431,8 +1522,8 @@ pub fn call_numpy_arctan2<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_arctan2(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1487,7 +1578,7 @@ pub fn call_numpy_copysign<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1498,8 +1589,8 @@ pub fn call_numpy_copysign<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_copysign(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1554,7 +1645,7 @@ pub fn call_numpy_fmax<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1565,8 +1656,8 @@ pub fn call_numpy_fmax<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_fmax(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1621,7 +1712,7 @@ pub fn call_numpy_fmin<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1632,8 +1723,8 @@ pub fn call_numpy_fmin<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_fmin(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1688,8 +1779,8 @@ pub fn call_numpy_ldexp<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_ldexp(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1744,7 +1835,7 @@ pub fn call_numpy_hypot<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1755,8 +1846,8 @@ pub fn call_numpy_hypot<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_hypot(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1811,7 +1902,7 @@ pub fn call_numpy_nextafter<'ctx, G: CodeGenerator + ?Sized>(
|
|||
} else if is_ndarray2 {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0
|
||||
} else {
|
||||
unreachable!()
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty };
|
||||
|
@ -1822,8 +1913,8 @@ pub fn call_numpy_nextafter<'ctx, G: CodeGenerator + ?Sized>(
|
|||
ctx,
|
||||
dtype,
|
||||
None,
|
||||
(x1, !is_ndarray1),
|
||||
(x2, !is_ndarray2),
|
||||
(x1_ty, x1, !is_ndarray1),
|
||||
(x2_ty, x2, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
call_numpy_nextafter(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs))
|
||||
},
|
||||
|
@ -1835,3 +1926,501 @@ pub fn call_numpy_nextafter<'ctx, G: CodeGenerator + ?Sized>(
|
|||
_ => unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]),
|
||||
})
|
||||
}
|
||||
|
||||
/// Allocates a struct with the fields specified by `out_matrices` and returns a pointer to it
|
||||
fn build_output_struct<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
out_matrices: Vec<BasicValueEnum<'ctx>>,
|
||||
) -> PointerValue<'ctx> {
|
||||
let field_ty =
|
||||
out_matrices.iter().map(BasicValueEnum::get_type).collect::<Vec<BasicTypeEnum>>();
|
||||
let out_ty = ctx.ctx.struct_type(&field_ty, false);
|
||||
let out_ptr = ctx.builder.build_alloca(out_ty, "").unwrap();
|
||||
|
||||
for (i, v) in out_matrices.into_iter().enumerate() {
|
||||
unsafe {
|
||||
let ptr = ctx
|
||||
.builder
|
||||
.build_in_bounds_gep(
|
||||
out_ptr,
|
||||
&[
|
||||
ctx.ctx.i32_type().const_zero(),
|
||||
ctx.ctx.i32_type().const_int(i as u64, false),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.builder.build_store(ptr, v).unwrap();
|
||||
}
|
||||
}
|
||||
out_ptr
|
||||
}
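// Typical use, as in the QR/SVD/LU/Schur wrappers below: pack the output matrices
// into the struct and load it back so the aggregate can be returned by value.
// A minimal sketch, assuming `out_q` and `out_r` were allocated beforehand:
//
//     let out_ptr = build_output_struct(ctx, vec![out_q, out_r]);
//     let qr_result = ctx.builder.build_load(out_ptr, "QR_Factorization_result").unwrap();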
|
||||
|
||||
/// Invokes the `np_linalg_cholesky` linalg function
|
||||
pub fn call_np_linalg_cholesky<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_cholesky";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
|
||||
let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_cholesky(ctx, x1, out, None);
|
||||
Ok(out)
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_qr` linalg function
|
||||
pub fn call_np_linalg_qr<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_qr";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unimplemented!("{FN_NAME} operates on float type NdArrays only");
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let k = llvm_intrinsics::call_int_smin(ctx, dim0, dim1, None);
|
||||
|
||||
let out_q = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, k])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_r = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[k, dim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_qr(ctx, x1, out_q, out_r, None);
|
||||
|
||||
let out_ptr = build_output_struct(ctx, vec![out_q, out_r]);
|
||||
|
||||
Ok(ctx.builder.build_load(out_ptr, "QR_Factorization_result").map(Into::into).unwrap())
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_svd` linalg function
|
||||
pub fn call_np_linalg_svd<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_svd";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let k = llvm_intrinsics::call_int_smin(ctx, dim0, dim1, None);
|
||||
|
||||
let out_u = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_s = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[k])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_vh = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim1, dim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_svd(ctx, x1, out_u, out_s, out_vh, None);
|
||||
|
||||
let out_ptr = build_output_struct(ctx, vec![out_u, out_s, out_vh]);
|
||||
|
||||
Ok(ctx.builder.build_load(out_ptr, "SVD_Factorization_result").map(Into::into).unwrap())
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_inv` linalg function
|
||||
pub fn call_np_linalg_inv<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_inv";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
|
||||
let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_inv(ctx, x1, out, None);
|
||||
Ok(out)
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_pinv` linalg function
|
||||
pub fn call_np_linalg_pinv<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_pinv";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
|
||||
let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim1, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_pinv(ctx, x1, out, None);
|
||||
Ok(out)
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `sp_linalg_lu` linalg function
|
||||
pub fn call_sp_linalg_lu<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "sp_linalg_lu";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let dim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let k = llvm_intrinsics::call_int_smin(ctx, dim0, dim1, None);
|
||||
|
||||
let out_l = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, k])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_u = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[k, dim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_sp_linalg_lu(ctx, x1, out_l, out_u, None);
|
||||
|
||||
let out_ptr = build_output_struct(ctx, vec![out_l, out_u]);
|
||||
Ok(ctx.builder.build_load(out_ptr, "LU_Factorization_result").map(Into::into).unwrap())
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_matrix_power` linalg function
|
||||
pub fn call_np_linalg_matrix_power<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
x2: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_matrix_power";
|
||||
let (x1_ty, x1) = x1;
|
||||
let (x2_ty, x2) = x2;
|
||||
let x2 = call_float(generator, ctx, (x2_ty, x2)).unwrap();
|
||||
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
if let (BasicValueEnum::PointerValue(n1), BasicValueEnum::FloatValue(n2)) = (x1, x2) {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
// Convert the second parameter to a 1-element `NDArray` for uniformity in the extern function call
|
||||
let n2_array = numpy::create_ndarray_const_shape(
|
||||
generator,
|
||||
ctx,
|
||||
elem_ty,
|
||||
&[llvm_usize.const_int(1, false)],
|
||||
)
|
||||
.unwrap();
|
||||
unsafe {
|
||||
n2_array.data().set_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_zero(),
|
||||
n2.as_basic_value_enum(),
|
||||
);
|
||||
};
|
||||
let n2_array = n2_array.as_base_value().as_basic_value_enum();
|
||||
|
||||
let outdim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let outdim1 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None)
|
||||
.into_int_value()
|
||||
};
|
||||
|
||||
let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[outdim0, outdim1])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_np_linalg_matrix_power(ctx, x1, n2_array, out, None);
|
||||
Ok(out)
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `np_linalg_det` linalg function
|
||||
pub fn call_np_linalg_det<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "np_linalg_matrix_power";
|
||||
let (x1_ty, x1) = x1;
|
||||
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
if let BasicValueEnum::PointerValue(_) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
// The scalar result is stored in a 1-element `NDArray` for uniformity in the extern function call
|
||||
let out = numpy::create_ndarray_const_shape(
|
||||
generator,
|
||||
ctx,
|
||||
elem_ty,
|
||||
&[llvm_usize.const_int(1, false)],
|
||||
)
|
||||
.unwrap();
|
||||
extern_fns::call_np_linalg_det(ctx, x1, out.as_base_value().as_basic_value_enum(), None);
|
||||
let res =
|
||||
unsafe { out.data().get_unchecked(ctx, generator, &llvm_usize.const_zero(), None) };
|
||||
Ok(res)
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `sp_linalg_schur` linalg function
|
||||
pub fn call_sp_linalg_schur<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "sp_linalg_schur";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let out_t = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_z = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
|
||||
extern_fns::call_sp_linalg_schur(ctx, x1, out_t, out_z, None);
|
||||
|
||||
let out_ptr = build_output_struct(ctx, vec![out_t, out_z]);
|
||||
Ok(ctx.builder.build_load(out_ptr, "Schur_Factorization_result").map(Into::into).unwrap())
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
||||
/// Invokes the `sp_linalg_hessenberg` linalg function
|
||||
pub fn call_sp_linalg_hessenberg<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
x1: (Type, BasicValueEnum<'ctx>),
|
||||
) -> Result<BasicValueEnum<'ctx>, String> {
|
||||
const FN_NAME: &str = "sp_linalg_hessenberg";
|
||||
let (x1_ty, x1) = x1;
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
if let BasicValueEnum::PointerValue(n1) = x1 {
|
||||
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
|
||||
let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
|
||||
let BasicTypeEnum::FloatType(_) = n1_elem_ty else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty]);
|
||||
};
|
||||
|
||||
let n1 = NDArrayValue::from_pointer_value(n1, n1_elem_ty, llvm_usize, None);
|
||||
|
||||
let dim0 = unsafe {
|
||||
n1.shape()
|
||||
.get_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
.into_int_value()
|
||||
};
|
||||
let out_h = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
let out_q = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[dim0, dim0])
|
||||
.unwrap()
|
||||
.as_base_value()
|
||||
.as_basic_value_enum();
|
||||
extern_fns::call_sp_linalg_hessenberg(ctx, x1, out_h, out_q, None);
|
||||
|
||||
let out_ptr = build_output_struct(ctx, vec![out_h, out_q]);
|
||||
Ok(ctx
|
||||
.builder
|
||||
.build_load(out_ptr, "Hessenberg_decomposition_result")
|
||||
.map(Into::into)
|
||||
.unwrap())
|
||||
} else {
|
||||
unsupported_type(ctx, FN_NAME, &[x1_ty])
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -1,3 +1,9 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
use indexmap::IndexMap;
|
||||
|
||||
use nac3parser::ast::StrRef;
|
||||
|
||||
use crate::{
|
||||
symbol_resolver::SymbolValue,
|
||||
toplevel::DefinitionId,
|
||||
|
@ -9,10 +15,6 @@ use crate::{
|
|||
},
|
||||
};
|
||||
|
||||
use indexmap::IndexMap;
|
||||
use nac3parser::ast::StrRef;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub struct ConcreteTypeStore {
|
||||
store: Vec<ConcreteTypeEnum>,
|
||||
}
|
||||
|
@ -25,6 +27,7 @@ pub struct ConcreteFuncArg {
|
|||
pub name: StrRef,
|
||||
pub ty: ConcreteType,
|
||||
pub default_value: Option<SymbolValue>,
|
||||
pub is_vararg: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
|
@ -46,6 +49,7 @@ pub enum ConcreteTypeEnum {
|
|||
TPrimitive(Primitive),
|
||||
TTuple {
|
||||
ty: Vec<ConcreteType>,
|
||||
is_vararg_ctx: bool,
|
||||
},
|
||||
TObj {
|
||||
obj_id: DefinitionId,
|
||||
|
@ -102,8 +106,16 @@ impl ConcreteTypeStore {
|
|||
.iter()
|
||||
.map(|arg| ConcreteFuncArg {
|
||||
name: arg.name,
|
||||
ty: self.from_unifier_type(unifier, primitives, arg.ty, cache),
|
||||
ty: if arg.is_vararg {
|
||||
let tuple_ty = unifier
|
||||
.add_ty(TypeEnum::TTuple { ty: vec![arg.ty], is_vararg_ctx: true });
|
||||
|
||||
self.from_unifier_type(unifier, primitives, tuple_ty, cache)
|
||||
} else {
|
||||
self.from_unifier_type(unifier, primitives, arg.ty, cache)
|
||||
},
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: arg.is_vararg,
|
||||
})
|
||||
.collect(),
|
||||
ret: self.from_unifier_type(unifier, primitives, signature.ret, cache),
|
||||
|
@ -158,11 +170,12 @@ impl ConcreteTypeStore {
|
|||
cache.insert(ty, None);
|
||||
let ty_enum = unifier.get_ty(ty);
|
||||
let result = match &*ty_enum {
|
||||
TypeEnum::TTuple { ty } => ConcreteTypeEnum::TTuple {
|
||||
TypeEnum::TTuple { ty, is_vararg_ctx } => ConcreteTypeEnum::TTuple {
|
||||
ty: ty
|
||||
.iter()
|
||||
.map(|t| self.from_unifier_type(unifier, primitives, *t, cache))
|
||||
.collect(),
|
||||
is_vararg_ctx: *is_vararg_ctx,
|
||||
},
|
||||
TypeEnum::TObj { obj_id, fields, params } => ConcreteTypeEnum::TObj {
|
||||
obj_id: *obj_id,
|
||||
|
@ -248,11 +261,12 @@ impl ConcreteTypeStore {
|
|||
*cache.get_mut(&cty).unwrap() = Some(ty);
|
||||
return ty;
|
||||
}
|
||||
ConcreteTypeEnum::TTuple { ty } => TypeEnum::TTuple {
|
||||
ConcreteTypeEnum::TTuple { ty, is_vararg_ctx } => TypeEnum::TTuple {
|
||||
ty: ty
|
||||
.iter()
|
||||
.map(|cty| self.to_unifier_type(unifier, primitives, *cty, cache))
|
||||
.collect(),
|
||||
is_vararg_ctx: *is_vararg_ctx,
|
||||
},
|
||||
ConcreteTypeEnum::TVirtual { ty } => {
|
||||
TypeEnum::TVirtual { ty: self.to_unifier_type(unifier, primitives, *ty, cache) }
|
||||
|
@ -277,6 +291,7 @@ impl ConcreteTypeStore {
|
|||
name: arg.name,
|
||||
ty: self.to_unifier_type(unifier, primitives, arg.ty, cache),
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: false,
|
||||
})
|
||||
.collect(),
|
||||
ret: self.to_unifier_type(unifier, primitives, *ret, cache),
|
||||
|
File diff suppressed because it is too large
@ -1,8 +1,10 @@
|
|||
use inkwell::attributes::{Attribute, AttributeLoc};
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue};
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use crate::codegen::CodeGenContext;
|
||||
use super::CodeGenContext;
|
||||
|
||||
/// Macro to generate extern function
|
||||
/// Both function return type and function parameter type are `FloatValue`
|
||||
|
@ -13,8 +15,8 @@ use crate::codegen::CodeGenContext;
|
|||
/// * `$extern_fn:literal`: Name of underlying extern function
|
||||
///
|
||||
/// Optional Arguments:
|
||||
/// * `$(,$attributes:literal)*)`: Attributes linked with the extern function
|
||||
/// The default attributes are "mustprogress", "nofree", "nounwind", "willreturn", and "writeonly"
|
||||
/// * `$(,$attributes:literal)*)`: Attributes linked with the extern function.
|
||||
/// The default attributes are "mustprogress", "nofree", "nounwind", "willreturn", and "writeonly".
|
||||
/// These will be used unless other attributes are specified
|
||||
/// * `$(,$args:ident)*`: Operands of the extern function
|
||||
/// The data type of these operands will be set to `FloatValue`
|
||||
|
@ -130,3 +132,62 @@ pub fn call_ldexp<'ctx>(
|
|||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Macro to generate `np_linalg` and `sp_linalg` functions.
/// The function takes `NDArray`s as input and returns `()`.
///
/// Arguments:
/// * `$fn_name:ident`: The identifier of the Rust function to be generated
/// * `$extern_fn:literal`: Name of the underlying extern function
/// * (2/3/4): Number of `NDArray`s that the function takes as input
///
/// Note:
/// The operand and result `NDArray`s are all passed as inputs to the function.
/// It is the responsibility of the caller to ensure that the output `NDArray` is properly allocated on the stack.
/// The function changes the content of the output `NDArray` in-place.
macro_rules! generate_linalg_extern_fn {
|
||||
($fn_name:ident, $extern_fn:literal, 2) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal, 3) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2, mat3);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal, 4) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2, mat3, mat4);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal $(,$input_matrix:ident)*) => {
|
||||
#[doc = concat!("Invokes the linalg `", stringify!($extern_fn), " function." )]
|
||||
pub fn $fn_name<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>
|
||||
$(,$input_matrix: BasicValueEnum<'ctx>)*,
|
||||
name: Option<&str>,
|
||||
){
|
||||
const FN_NAME: &str = $extern_fn;
|
||||
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let fn_type = ctx.ctx.void_type().fn_type(&[$($input_matrix.get_type().into()),*], false);
|
||||
|
||||
let func = ctx.module.add_function(FN_NAME, fn_type, None);
|
||||
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
|
||||
func.add_attribute(
|
||||
AttributeLoc::Function,
|
||||
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0),
|
||||
);
|
||||
}
|
||||
func
|
||||
});
|
||||
|
||||
ctx.builder.build_call(extern_fn, &[$($input_matrix.into(),)*], name.unwrap_or_default()).unwrap();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
generate_linalg_extern_fn!(call_np_linalg_cholesky, "np_linalg_cholesky", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_qr, "np_linalg_qr", 3);
|
||||
generate_linalg_extern_fn!(call_np_linalg_svd, "np_linalg_svd", 4);
|
||||
generate_linalg_extern_fn!(call_np_linalg_inv, "np_linalg_inv", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_pinv, "np_linalg_pinv", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_matrix_power, "np_linalg_matrix_power", 3);
|
||||
generate_linalg_extern_fn!(call_np_linalg_det, "np_linalg_det", 2);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_lu, "sp_linalg_lu", 3);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_schur, "sp_linalg_schur", 3);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_hessenberg, "sp_linalg_hessenberg", 3);
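// For reference, a rough sketch of what the two-matrix form above expands to
// (illustrative only, based on the macro body; not the literal macro output):
//
//     pub fn call_np_linalg_cholesky<'ctx>(
//         ctx: &mut CodeGenContext<'ctx, '_>,
//         mat1: BasicValueEnum<'ctx>,
//         mat2: BasicValueEnum<'ctx>,
//         name: Option<&str>,
//     ) {
//         const FN_NAME: &str = "np_linalg_cholesky";
//         // Declare the extern symbol on first use, with the default attribute set.
//         let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
//             let fn_type = ctx
//                 .ctx
//                 .void_type()
//                 .fn_type(&[mat1.get_type().into(), mat2.get_type().into()], false);
//             let func = ctx.module.add_function(FN_NAME, fn_type, None);
//             for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
//                 func.add_attribute(
//                     AttributeLoc::Function,
//                     ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0),
//                 );
//             }
//             func
//         });
//         ctx.builder
//             .build_call(extern_fn, &[mat1.into(), mat2.into()], name.unwrap_or_default())
//             .unwrap();
//     }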
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
use crate::{
|
||||
codegen::{bool_to_i1, bool_to_i8, classes::ArraySliceValue, expr::*, stmt::*, CodeGenContext},
|
||||
symbol_resolver::ValueEnum,
|
||||
toplevel::{DefinitionId, TopLevelDef},
|
||||
typecheck::typedef::{FunSignature, Type},
|
||||
};
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicTypeEnum, IntType},
|
||||
values::{BasicValueEnum, IntValue, PointerValue},
|
||||
};
|
||||
|
||||
use nac3parser::ast::{Expr, Stmt, StrRef};
|
||||
|
||||
use super::model::SizeTModel;
|
||||
use super::{bool_to_i1, bool_to_i8, expr::*, stmt::*, values::ArraySliceValue, CodeGenContext};
|
||||
use crate::{
|
||||
symbol_resolver::ValueEnum,
|
||||
toplevel::{DefinitionId, TopLevelDef},
|
||||
typecheck::typedef::{FunSignature, Type},
|
||||
};
|
||||
|
||||
pub trait CodeGenerator {
|
||||
/// Return the module name for the code generator.
|
||||
|
@ -19,10 +19,6 @@ pub trait CodeGenerator {
|
|||
|
||||
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx>;
|
||||
|
||||
fn get_sizet<'ctx>(&self, ctx: &'ctx Context) -> SizeTModel<'ctx> {
|
||||
SizeTModel(self.get_size_type(ctx))
|
||||
}
|
||||
|
||||
/// Generate function call and returns the function return value.
|
||||
/// - obj: Optional object for method call.
|
||||
/// - fun: Function signature and definition ID.
|
||||
|
@ -63,6 +59,7 @@ pub trait CodeGenerator {
|
|||
/// - fun: Function signature, definition ID and the substitution key.
|
||||
/// - params: Function parameters. Note that this does not include the object even if the
|
||||
/// function is a class method.
|
||||
///
|
||||
/// Note that this function should check if the function is generated in another thread (due to
|
||||
/// possible race condition), see the default implementation for an example.
|
||||
fn gen_func_instance<'ctx>(
|
||||
|
@ -129,11 +126,45 @@ pub trait CodeGenerator {
|
|||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_assign(self, ctx, target, value)
|
||||
gen_assign(self, ctx, target, value, value_ty)
|
||||
}
|
||||
|
||||
/// Generate code for an assignment expression where LHS is a `"target_list"`.
|
||||
///
|
||||
/// See <https://docs.python.org/3/reference/simple_stmts.html#assignment-statements>.
|
||||
fn gen_assign_target_list<'ctx>(
|
||||
&mut self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
targets: &Vec<Expr<Option<Type>>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_assign_target_list(self, ctx, targets, value, value_ty)
|
||||
}
|
||||
|
||||
/// Generate code for an item assignment.
|
||||
///
|
||||
/// i.e., `target[key] = value`
|
||||
fn gen_setitem<'ctx>(
|
||||
&mut self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
key: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_setitem(self, ctx, target, key, value, value_ty)
|
||||
}
|
||||
|
||||
/// Generate code for a while expression.
|
||||
|
|
|
@ -1,195 +0,0 @@
|
|||
use crate::codegen::{model::*, structs::cslice::CSlice, CodeGenContext, CodeGenerator};
|
||||
|
||||
use super::util::get_sized_dependent_function_name;
|
||||
|
||||
/// The [`IntModel`] of nac3core's error ID.
|
||||
///
|
||||
/// It is always [`Int32`].
|
||||
type ErrorId = Int32;
|
||||
|
||||
#[allow(clippy::struct_field_names)]
|
||||
pub struct ErrorIdsFields {
|
||||
pub index_error: Field<NIntModel<ErrorId>>,
|
||||
pub value_error: Field<NIntModel<ErrorId>>,
|
||||
pub assertion_error: Field<NIntModel<ErrorId>>,
|
||||
pub runtime_error: Field<NIntModel<ErrorId>>,
|
||||
pub type_error: Field<NIntModel<ErrorId>>,
|
||||
}
|
||||
|
||||
/// Corresponds to IRRT's `struct ErrorIds`
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct ErrorIds;
|
||||
|
||||
impl<'ctx> StructKind<'ctx> for ErrorIds {
|
||||
type Fields = ErrorIdsFields;
|
||||
|
||||
fn struct_name(&self) -> &'static str {
|
||||
"ErrorIds"
|
||||
}
|
||||
|
||||
fn build_fields(&self, builder: &mut FieldBuilder) -> Self::Fields {
|
||||
Self::Fields {
|
||||
index_error: builder.add_field_auto("index_error"),
|
||||
value_error: builder.add_field_auto("value_error"),
|
||||
assertion_error: builder.add_field_auto("assertion_error"),
|
||||
runtime_error: builder.add_field_auto("runtime_error"),
|
||||
type_error: builder.add_field_auto("type_error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ErrorContextFields {
|
||||
pub error_ids: Field<PointerModel<StructModel<ErrorIds>>>,
|
||||
pub error_id: Field<NIntModel<ErrorId>>,
|
||||
pub message_template: Field<PointerModel<NIntModel<Byte>>>,
|
||||
pub param1: Field<NIntModel<Int64>>,
|
||||
pub param2: Field<NIntModel<Int64>>,
|
||||
pub param3: Field<NIntModel<Int64>>,
|
||||
}
|
||||
|
||||
/// Corresponds to IRRT's `struct ErrorContext`
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct ErrorContext;
|
||||
|
||||
impl<'ctx> StructKind<'ctx> for ErrorContext {
|
||||
type Fields = ErrorContextFields;
|
||||
|
||||
fn struct_name(&self) -> &'static str {
|
||||
"ErrorIds"
|
||||
}
|
||||
|
||||
fn build_fields(&self, builder: &mut FieldBuilder) -> Self::Fields {
|
||||
Self::Fields {
|
||||
error_ids: builder.add_field_auto("error_ids"),
|
||||
error_id: builder.add_field_auto("error_id"),
|
||||
message_template: builder.add_field_auto("message_template"),
|
||||
param1: builder.add_field_auto("param1"),
|
||||
param2: builder.add_field_auto("param2"),
|
||||
param3: builder.add_field_auto("param3"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare ErrorIds
|
||||
fn build_error_ids<'ctx>(ctx: &CodeGenContext<'ctx, '_>) -> Pointer<'ctx, StructModel<ErrorIds>> {
|
||||
// ErrorIdsLens.get_fields(ctx.ctx).assertion_error.
|
||||
let error_ids = StructModel(ErrorIds).alloca(ctx, "error_ids");
|
||||
let i32_model = NIntModel(Int32);
|
||||
// i32_model.make_constant()
|
||||
|
||||
let get_string_id =
|
||||
|string_id| i32_model.constant(ctx.ctx, ctx.resolver.get_string_id(string_id) as u64);
|
||||
|
||||
error_ids.gep(ctx, |f| f.index_error).store(ctx, get_string_id("0:IndexError"));
|
||||
error_ids.gep(ctx, |f| f.value_error).store(ctx, get_string_id("0:ValueError"));
|
||||
error_ids.gep(ctx, |f| f.assertion_error).store(ctx, get_string_id("0:AssertionError"));
|
||||
error_ids.gep(ctx, |f| f.runtime_error).store(ctx, get_string_id("0:RuntimeError"));
|
||||
error_ids.gep(ctx, |f| f.type_error).store(ctx, get_string_id("0:TypeError"));
|
||||
|
||||
error_ids
|
||||
}
|
||||
|
||||
pub fn call_nac3_error_context_initialize<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
perrctx: Pointer<'ctx, StructModel<ErrorContext>>,
|
||||
perror_ids: Pointer<'ctx, StructModel<ErrorIds>>,
|
||||
) {
|
||||
FunctionBuilder::begin(ctx, "__nac3_error_context_initialize")
|
||||
.arg("errctx", perrctx)
|
||||
.arg("error_ids", perror_ids)
|
||||
.returning_void();
|
||||
}
|
||||
|
||||
pub fn call_nac3_error_context_has_error<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
errctx: Pointer<'ctx, StructModel<ErrorContext>>,
|
||||
) -> NInt<'ctx, Bool> {
|
||||
FunctionBuilder::begin(ctx, "__nac3_error_context_has_error")
|
||||
.arg("errctx", errctx)
|
||||
.returning("has_error", NIntModel(Bool))
|
||||
}
|
||||
|
||||
pub fn call_nac3_error_context_get_error_str<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
errctx: Pointer<'ctx, StructModel<ErrorContext>>,
|
||||
dst_str: Pointer<'ctx, StructModel<CSlice<'ctx>>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_error_context_get_error_str"),
|
||||
)
|
||||
.arg("errctx", errctx)
|
||||
.arg("dst_str", dst_str)
|
||||
.returning_void();
|
||||
}
|
||||
|
||||
/// Set up an [`ErrorContext`] that can be passed to IRRT functions
/// taking an `ErrorContext* errctx` parameter, for error reporting purposes.
|
||||
///
|
||||
/// Also see: [`check_error_context`]
|
||||
pub fn setup_error_context<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
) -> Pointer<'ctx, StructModel<ErrorContext>> {
|
||||
let error_ids = build_error_ids(ctx);
|
||||
let errctx_ptr = StructModel(ErrorContext).alloca(ctx, "errctx");
|
||||
call_nac3_error_context_initialize(ctx, errctx_ptr, error_ids);
|
||||
errctx_ptr
|
||||
}
|
||||
|
||||
/// Check an [`ErrorContext`] to see if it contains an error.
|
||||
///
|
||||
/// If there is an error, an LLVM exception will be raised at runtime.
|
||||
pub fn check_error_context<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
errctx_ptr: Pointer<'ctx, StructModel<ErrorContext>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
let cslice_model = StructModel(CSlice { sizet });
|
||||
|
||||
let current_bb = ctx.builder.get_insert_block().unwrap();
|
||||
let irrt_has_error_bb = ctx.ctx.insert_basic_block_after(current_bb, "irrt_has_error");
|
||||
let end_bb = ctx.ctx.insert_basic_block_after(irrt_has_error_bb, "end");
|
||||
|
||||
// Inserting into `current_bb`
|
||||
let has_error = call_nac3_error_context_has_error(ctx, errctx_ptr);
|
||||
ctx.builder.build_conditional_branch(has_error.value, irrt_has_error_bb, end_bb).unwrap();
|
||||
|
||||
// Inserting into `irrt_has_error_bb`
|
||||
ctx.builder.position_at_end(irrt_has_error_bb);
|
||||
|
||||
// Load all the values for `ctx.make_assert_impl_by_id`
|
||||
let pstr = cslice_model.alloca(ctx, "error_str");
|
||||
call_nac3_error_context_get_error_str(generator, ctx, errctx_ptr, pstr);
|
||||
|
||||
let error_id = errctx_ptr.gep(ctx, |f| f.error_id).load(ctx, "error_id");
|
||||
let msg = pstr.load(ctx, "msg");
|
||||
let param1 = errctx_ptr.gep(ctx, |f| f.param1).load(ctx, "param1");
|
||||
let param2 = errctx_ptr.gep(ctx, |f| f.param2).load(ctx, "param2");
|
||||
let param3 = errctx_ptr.gep(ctx, |f| f.param3).load(ctx, "param3");
|
||||
|
||||
ctx.raise_exn_impl(
|
||||
generator,
|
||||
error_id,
|
||||
msg,
|
||||
[Some(param1), Some(param2), Some(param3)],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
// Position to `end_bb` for continuation
|
||||
ctx.builder.position_at_end(end_bb);
|
||||
}
|
||||
|
||||
pub fn call_nac3_dummy_raise<G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext,
|
||||
) {
|
||||
let errctx = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(ctx, "__nac3_error_dummy_raise").arg("errctx", errctx).returning_void();
|
||||
check_error_context(generator, ctx, errctx);
|
||||
}
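// The call pattern this (now removed) module provided, sketched for context.
// The IRRT symbol name below is hypothetical:
//
//     let errctx = setup_error_context(ctx);
//     // An IRRT call that reports failures through `errctx`.
//     FunctionBuilder::begin(ctx, "__nac3_example_irrt_call")
//         .arg("errctx", errctx)
//         .returning_void();
//     // Raises an exception at runtime if the IRRT call recorded an error.
//     check_error_context(generator, ctx, errctx);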
|
|
@ -0,0 +1,162 @@
|
|||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValueEnum, CallSiteValue, IntValue},
|
||||
AddressSpace, IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use super::calculate_len_for_slice_range;
|
||||
use crate::codegen::{
|
||||
macros::codegen_unreachable,
|
||||
values::{ArrayLikeValue, ListValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// This function handles 'end' **inclusively**.
|
||||
/// The `dest_idx` and `src_idx` tuples are ordered as ('start', 'end', 'step').
|
||||
/// Negative indices should be handled before entering this function.
|
||||
pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ty: BasicTypeEnum<'ctx>,
|
||||
dest_arr: ListValue<'ctx>,
|
||||
dest_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
src_arr: ListValue<'ctx>,
|
||||
src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
) {
|
||||
let size_ty = generator.get_size_type(ctx.ctx);
|
||||
let int8_ptr = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
let int32 = ctx.ctx.i32_type();
|
||||
let (fun_symbol, elem_ptr_type) = ("__nac3_list_slice_assign_var_size", int8_ptr);
|
||||
let slice_assign_fun = {
|
||||
let ty_vec = vec![
|
||||
int32.into(), // dest start idx
|
||||
int32.into(), // dest end idx
|
||||
int32.into(), // dest step
|
||||
elem_ptr_type.into(), // dest arr ptr
|
||||
int32.into(), // dest arr len
|
||||
int32.into(), // src start idx
|
||||
int32.into(), // src end idx
|
||||
int32.into(), // src step
|
||||
elem_ptr_type.into(), // src arr ptr
|
||||
int32.into(), // src arr len
|
||||
int32.into(), // size
|
||||
];
|
||||
ctx.module.get_function(fun_symbol).unwrap_or_else(|| {
|
||||
let fn_t = int32.fn_type(ty_vec.as_slice(), false);
|
||||
ctx.module.add_function(fun_symbol, fn_t, None)
|
||||
})
|
||||
};
|
||||
|
||||
let zero = int32.const_zero();
|
||||
let one = int32.const_int(1, false);
|
||||
let dest_arr_ptr = dest_arr.data().base_ptr(ctx, generator);
|
||||
let dest_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(dest_arr_ptr, elem_ptr_type, "dest_arr_ptr_cast").unwrap();
|
||||
let dest_len = dest_arr.load_size(ctx, Some("dest.len"));
|
||||
let dest_len = ctx.builder.build_int_truncate_or_bit_cast(dest_len, int32, "srclen32").unwrap();
|
||||
let src_arr_ptr = src_arr.data().base_ptr(ctx, generator);
|
||||
let src_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(src_arr_ptr, elem_ptr_type, "src_arr_ptr_cast").unwrap();
|
||||
let src_len = src_arr.load_size(ctx, Some("src.len"));
|
||||
let src_len = ctx.builder.build_int_truncate_or_bit_cast(src_len, int32, "srclen32").unwrap();
|
||||
|
||||
// Bounds and sign normalization of the indices should already have been done.
// Assert that if dest.step == 1 then len(src) <= len(dest), otherwise len(src) == len(dest),
// and throw an exception if the condition is not satisfied.
|
||||
let src_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, src_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(src_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(src_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let dest_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, dest_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(dest_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(dest_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let src_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, src_idx.0, src_end, src_idx.2);
|
||||
let dest_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, dest_idx.0, dest_end, dest_idx.2);
|
||||
let src_eq_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, src_slice_len, dest_slice_len, "slice_src_eq_dest")
|
||||
.unwrap();
|
||||
let src_slt_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::SLT, src_slice_len, dest_slice_len, "slice_src_slt_dest")
|
||||
.unwrap();
|
||||
let dest_step_eq_one = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::EQ,
|
||||
dest_idx.2,
|
||||
dest_idx.2.get_type().const_int(1, false),
|
||||
"slice_dest_step_eq_one",
|
||||
)
|
||||
.unwrap();
|
||||
let cond_1 = ctx.builder.build_and(dest_step_eq_one, src_slt_dest, "slice_cond_1").unwrap();
|
||||
let cond = ctx.builder.build_or(src_eq_dest, cond_1, "slice_cond").unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
cond,
|
||||
"0:ValueError",
|
||||
"attempt to assign sequence of size {0} to slice of size {1} with step size {2}",
|
||||
[Some(src_slice_len), Some(dest_slice_len), Some(dest_idx.2)],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
let new_len = {
|
||||
let args = vec![
|
||||
dest_idx.0.into(), // dest start idx
|
||||
dest_idx.1.into(), // dest end idx
|
||||
dest_idx.2.into(), // dest step
|
||||
dest_arr_ptr.into(), // dest arr ptr
|
||||
dest_len.into(), // dest arr len
|
||||
src_idx.0.into(), // src start idx
|
||||
src_idx.1.into(), // src end idx
|
||||
src_idx.2.into(), // src step
|
||||
src_arr_ptr.into(), // src arr ptr
|
||||
src_len.into(), // src arr len
|
||||
{
|
||||
let s = match ty {
|
||||
BasicTypeEnum::FloatType(t) => t.size_of(),
|
||||
BasicTypeEnum::IntType(t) => t.size_of(),
|
||||
BasicTypeEnum::PointerType(t) => t.size_of(),
|
||||
BasicTypeEnum::StructType(t) => t.size_of().unwrap(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
};
|
||||
ctx.builder.build_int_truncate_or_bit_cast(s, int32, "size").unwrap()
|
||||
}
|
||||
.into(),
|
||||
];
|
||||
ctx.builder
|
||||
.build_call(slice_assign_fun, args.as_slice(), "slice_assign")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
};
|
||||
// update length
|
||||
let need_update =
|
||||
ctx.builder.build_int_compare(IntPredicate::NE, new_len, dest_len, "need_update").unwrap();
|
||||
let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
|
||||
let update_bb = ctx.ctx.append_basic_block(current, "update");
|
||||
let cont_bb = ctx.ctx.append_basic_block(current, "cont");
|
||||
ctx.builder.build_conditional_branch(need_update, update_bb, cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(update_bb);
|
||||
let new_len = ctx.builder.build_int_z_extend_or_bit_cast(new_len, size_ty, "new_len").unwrap();
|
||||
dest_arr.store_size(ctx, generator, new_len);
|
||||
ctx.builder.build_unconditional_branch(cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(cont_bb);
|
||||
}
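// A minimal usage sketch, with hypothetical operand names; indices are i32 IntValues
// assumed to be already normalized (non-negative, in bounds, 'end' inclusive):
//
//     // assign a 5-element slice: dest indices 2..=6, src indices 0..=4, step 1
//     list_slice_assignment(
//         generator,
//         ctx,
//         ctx.ctx.i32_type().into(),
//         dest_list,
//         (dest_start, dest_end, dest_step),
//         src_list,
//         (src_start, src_end, src_step),
//     );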
|
|
@@ -0,0 +1,152 @@
use inkwell::{
    values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
    IntPredicate,
};
use itertools::Either;

use crate::codegen::{
    macros::codegen_unreachable,
    {CodeGenContext, CodeGenerator},
};

// repeated squaring method adapted from GNU Scientific Library:
// https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    base: IntValue<'ctx>,
    exp: IntValue<'ctx>,
    signed: bool,
) -> IntValue<'ctx> {
    let symbol = match (base.get_type().get_bit_width(), exp.get_type().get_bit_width(), signed) {
        (32, 32, true) => "__nac3_int_exp_int32_t",
        (64, 64, true) => "__nac3_int_exp_int64_t",
        (32, 32, false) => "__nac3_int_exp_uint32_t",
        (64, 64, false) => "__nac3_int_exp_uint64_t",
        _ => codegen_unreachable!(ctx),
    };
    let base_type = base.get_type();
    let pow_fun = ctx.module.get_function(symbol).unwrap_or_else(|| {
        let fn_type = base_type.fn_type(&[base_type.into(), base_type.into()], false);
        ctx.module.add_function(symbol, fn_type, None)
    });
    // throw exception when exp < 0
    let ge_zero = ctx
        .builder
        .build_int_compare(
            IntPredicate::SGE,
            exp,
            exp.get_type().const_zero(),
            "assert_int_pow_ge_0",
        )
        .unwrap();
    ctx.make_assert(
        generator,
        ge_zero,
        "0:ValueError",
        "integer power must be positive or zero",
        [None, None, None],
        ctx.current_loc,
    );
    ctx.builder
        .build_call(pow_fun, &[base.into(), exp.into()], "call_int_pow")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_int_value))
        .map(Either::unwrap_left)
        .unwrap()
}
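
As the comment notes, the `__nac3_int_exp_*` symbols resolved above are expected to implement exponentiation by repeated squaring, following the GNU Scientific Library routine. A minimal Rust sketch of that scalar algorithm, shown only as an illustration of the technique and not as the IRRT implementation itself:

// Exponentiation by squaring: O(log exp) multiplications.
// Hypothetical reference version; the real routine lives in the IRRT bitcode.
fn int_exp(mut base: u64, mut exp: u64) -> u64 {
    let mut result = 1u64;
    while exp != 0 {
        if exp & 1 == 1 {
            result = result.wrapping_mul(base);
        }
        exp >>= 1;
        base = base.wrapping_mul(base);
    }
    result
}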

/// Generates a call to `isinf` in IR. Returns an `i1` representing the result.
pub fn call_isinf<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &CodeGenContext<'ctx, '_>,
    v: FloatValue<'ctx>,
) -> IntValue<'ctx> {
    let intrinsic_fn = ctx.module.get_function("__nac3_isinf").unwrap_or_else(|| {
        let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
        ctx.module.add_function("__nac3_isinf", fn_type, None)
    });

    let ret = ctx
        .builder
        .build_call(intrinsic_fn, &[v.into()], "isinf")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_int_value))
        .map(Either::unwrap_left)
        .unwrap();

    generator.bool_to_i1(ctx, ret)
}

/// Generates a call to `isnan` in IR. Returns an `i1` representing the result.
pub fn call_isnan<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &CodeGenContext<'ctx, '_>,
    v: FloatValue<'ctx>,
) -> IntValue<'ctx> {
    let intrinsic_fn = ctx.module.get_function("__nac3_isnan").unwrap_or_else(|| {
        let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
        ctx.module.add_function("__nac3_isnan", fn_type, None)
    });

    let ret = ctx
        .builder
        .build_call(intrinsic_fn, &[v.into()], "isnan")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_int_value))
        .map(Either::unwrap_left)
        .unwrap();

    generator.bool_to_i1(ctx, ret)
}
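
Both wrappers receive the raw `i32` produced by the runtime call and only then narrow it to an `i1` via `bool_to_i1`. What the two runtime symbols compute can be stated in plain Rust as follows; this is an illustrative sketch of the expected semantics, the actual definitions live in the IRRT bitcode:

// Reference semantics of the __nac3_isinf / __nac3_isnan calls: a C-style int
// result where any non-zero value means "true".
fn isinf(v: f64) -> i32 { i32::from(v.is_infinite()) }
fn isnan(v: f64) -> i32 { i32::from(v.is_nan()) }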

/// Generates a call to `gamma` in IR. Returns an `f64` representing the result.
pub fn call_gamma<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
    let llvm_f64 = ctx.ctx.f64_type();

    let intrinsic_fn = ctx.module.get_function("__nac3_gamma").unwrap_or_else(|| {
        let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
        ctx.module.add_function("__nac3_gamma", fn_type, None)
    });

    ctx.builder
        .build_call(intrinsic_fn, &[v.into()], "gamma")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_float_value))
        .map(Either::unwrap_left)
        .unwrap()
}

/// Generates a call to `gammaln` in IR. Returns an `f64` representing the result.
pub fn call_gammaln<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
    let llvm_f64 = ctx.ctx.f64_type();

    let intrinsic_fn = ctx.module.get_function("__nac3_gammaln").unwrap_or_else(|| {
        let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
        ctx.module.add_function("__nac3_gammaln", fn_type, None)
    });

    ctx.builder
        .build_call(intrinsic_fn, &[v.into()], "gammaln")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_float_value))
        .map(Either::unwrap_left)
        .unwrap()
}

/// Generates a call to `j0` in IR. Returns an `f64` representing the result.
pub fn call_j0<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
    let llvm_f64 = ctx.ctx.f64_type();

    let intrinsic_fn = ctx.module.get_function("__nac3_j0").unwrap_or_else(|| {
        let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
        ctx.module.add_function("__nac3_j0", fn_type, None)
    });

    ctx.builder
        .build_call(intrinsic_fn, &[v.into()], "j0")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_float_value))
        .map(Either::unwrap_left)
        .unwrap()
}

@@ -1,34 +1,28 @@
use crate::typecheck::typedef::Type;

mod error_context;
pub mod ndarray;
pub mod slice;
mod test;
mod util;

use super::{
    classes::{
        ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue, NDArrayValue,
        TypedArrayLikeAdapter, UntypedArrayLikeAccessor,
    },
    llvm_intrinsics, CodeGenContext, CodeGenerator, Int, Int64, NIntModel,
};
use crate::codegen::classes::TypedArrayLikeAccessor;
use crate::codegen::stmt::gen_for_callback_incrementing;
use inkwell::{
    attributes::{Attribute, AttributeLoc},
    context::Context,
    memory_buffer::MemoryBuffer,
    module::Module,
    types::{BasicTypeEnum, IntType},
    values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
    AddressSpace, IntPredicate,
    values::{BasicValue, BasicValueEnum, IntValue},
    IntPredicate,
};
use itertools::Either;

use nac3parser::ast::Expr;

use super::{CodeGenContext, CodeGenerator};
use crate::{symbol_resolver::SymbolResolver, typecheck::typedef::Type};
pub use list::*;
pub use math::*;
pub use ndarray::*;
pub use slice::*;

mod list;
mod math;
mod ndarray;
mod slice;

#[must_use]
pub fn load_irrt(ctx: &Context) -> Module {
pub fn load_irrt<'ctx>(ctx: &'ctx Context, symbol_resolver: &dyn SymbolResolver) -> Module<'ctx> {
    let bitcode_buf = MemoryBuffer::create_from_memory_range(
        include_bytes!(concat!(env!("OUT_DIR"), "/irrt.bc")),
        "irrt_bitcode_buffer",
@@ -44,91 +38,28 @@ pub fn load_irrt(ctx: &Context) -> Module {
        let function = irrt_mod.get_function(symbol).unwrap();
        function.add_attribute(AttributeLoc::Function, ctx.create_enum_attribute(inline_attr, 0));
    }

    // Initialize all global `EXN_*` exception IDs in IRRT with the [`SymbolResolver`].
    let exn_id_type = ctx.i32_type();
    let errors = &[
        ("EXN_INDEX_ERROR", "0:IndexError"),
        ("EXN_VALUE_ERROR", "0:ValueError"),
        ("EXN_ASSERTION_ERROR", "0:AssertionError"),
        ("EXN_TYPE_ERROR", "0:TypeError"),
    ];
    for (irrt_name, symbol_name) in errors {
        let exn_id = symbol_resolver.get_string_id(symbol_name);
        let exn_id = exn_id_type.const_int(exn_id as u64, false).as_basic_value_enum();

        let global = irrt_mod.get_global(irrt_name).unwrap_or_else(|| {
            panic!("Exception symbol name '{irrt_name}' should exist in the IRRT LLVM module")
        });
        global.set_initializer(&exn_id);
    }

    irrt_mod
}

// repeated squaring method adapted from GNU Scientific Library:
// https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    base: IntValue<'ctx>,
    exp: IntValue<'ctx>,
    signed: bool,
) -> IntValue<'ctx> {
    let symbol = match (base.get_type().get_bit_width(), exp.get_type().get_bit_width(), signed) {
        (32, 32, true) => "__nac3_int_exp_int32_t",
        (64, 64, true) => "__nac3_int_exp_int64_t",
        (32, 32, false) => "__nac3_int_exp_uint32_t",
        (64, 64, false) => "__nac3_int_exp_uint64_t",
        _ => unreachable!(),
    };
    let base_type = base.get_type();
    let pow_fun = ctx.module.get_function(symbol).unwrap_or_else(|| {
        let fn_type = base_type.fn_type(&[base_type.into(), base_type.into()], false);
        ctx.module.add_function(symbol, fn_type, None)
    });
    // throw exception when exp < 0
    let ge_zero = ctx
        .builder
        .build_int_compare(
            IntPredicate::SGE,
            exp,
            exp.get_type().const_zero(),
            "assert_int_pow_ge_0",
        )
        .unwrap();
    ctx.make_assert(
        generator,
        ge_zero,
        "0:ValueError",
        "integer power must be positive or zero",
        [None, None, None],
        ctx.current_loc,
    );
    ctx.builder
        .build_call(pow_fun, &[base.into(), exp.into()], "call_int_pow")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_int_value))
        .map(Either::unwrap_left)
        .unwrap()
}

pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    start: IntValue<'ctx>,
    end: IntValue<'ctx>,
    step: IntValue<'ctx>,
) -> IntValue<'ctx> {
    const SYMBOL: &str = "__nac3_range_slice_len";
    let len_func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
        let i32_t = ctx.ctx.i32_type();
        let fn_t = i32_t.fn_type(&[i32_t.into(), i32_t.into(), i32_t.into()], false);
        ctx.module.add_function(SYMBOL, fn_t, None)
    });

    // assert step != 0, throw exception if not
    let not_zero = ctx
        .builder
        .build_int_compare(IntPredicate::NE, step, step.get_type().const_zero(), "range_step_ne")
        .unwrap();
    ctx.make_assert(
        generator,
        not_zero,
        "0:ValueError",
        "step must not be zero",
        [None, None, None],
        ctx.current_loc,
    );
    ctx.builder
        .build_call(len_func, &[start.into(), end.into(), step.into()], "calc_len")
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_int_value))
        .map(Either::unwrap_left)
        .unwrap()
}
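
`__nac3_range_slice_len` returns how many indices `start:end:step` yields once the bounds have been normalised; the caller in `list_slice_assignment` converts its inclusive end into an exclusive one before calling it. A hedged reference version of that computation, mirroring CPython's slice-length rule rather than the actual IRRT source:

// Number of indices produced by range(start, end, step), with step != 0 and an
// exclusive end. Hypothetical reference version for illustration only.
fn range_slice_len(start: i32, end: i32, step: i32) -> i32 {
    if step > 0 {
        if end > start { (end - start + step - 1) / step } else { 0 }
    } else {
        if start > end { (start - end - step - 1) / (-step) } else { 0 }
    }
}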

/// NOTE: the end index returned by this function must be compared ***inclusively***,
/// because Python allows `a[2::-1]`, whose semantics are `[a[2], a[1], a[0]]`; there is
/// no numeric (exclusive) end index in Python that expresses this slice.
@@ -294,655 +225,3 @@ pub fn handle_slice_indices<'ctx, G: CodeGenerator>(
}
|
||||
}))
|
||||
}
|
||||
|
||||
/// This function allows out-of-range indices, since Python
/// permits them in slices (`a = [1,2,3]; a[1:10] == [2,3]`).
|
||||
pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>(
|
||||
i: &Expr<Option<Type>>,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
generator: &mut G,
|
||||
length: IntValue<'ctx>,
|
||||
) -> Result<Option<IntValue<'ctx>>, String> {
|
||||
const SYMBOL: &str = "__nac3_slice_index_bound";
|
||||
let func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
|
||||
let i32_t = ctx.ctx.i32_type();
|
||||
let fn_t = i32_t.fn_type(&[i32_t.into(), i32_t.into()], false);
|
||||
ctx.module.add_function(SYMBOL, fn_t, None)
|
||||
});
|
||||
|
||||
let i = if let Some(v) = generator.gen_expr(ctx, i)? {
|
||||
v.to_basic_value_enum(ctx, generator, i.custom.unwrap())?
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
Ok(Some(
|
||||
ctx.builder
|
||||
.build_call(func, &[i.into(), length.into()], "bounded_ind")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap(),
|
||||
))
|
||||
}
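
`__nac3_slice_index_bound` brings a possibly negative or oversized slice bound into `[0, length]`, which is how Python treats bounds for positive-step slices. A sketch of that behaviour, stated as an assumption about the runtime function (the real definition is in the IRRT bitcode and may differ, in particular for negative steps):

// Hypothetical reference clamping for positive-step slice bounds: negative
// indices count from the end, and anything outside [0, length] is clamped.
fn slice_index_bound(i: i32, length: i32) -> i32 {
    let i = if i < 0 { i + length } else { i };
    i.clamp(0, length)
}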
|
||||
|
||||
/// This function handles 'end' **inclusively**.
|
||||
/// Order of tuples `assign_idx` and `value_idx` is ('start', 'end', 'step').
|
||||
/// Negative index should be handled before entering this function
|
||||
pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ty: BasicTypeEnum<'ctx>,
|
||||
dest_arr: ListValue<'ctx>,
|
||||
dest_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
src_arr: ListValue<'ctx>,
|
||||
src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
) {
|
||||
let size_ty = generator.get_size_type(ctx.ctx);
|
||||
let int8_ptr = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
let int32 = ctx.ctx.i32_type();
|
||||
let (fun_symbol, elem_ptr_type) = ("__nac3_list_slice_assign_var_size", int8_ptr);
|
||||
let slice_assign_fun = {
|
||||
let ty_vec = vec![
|
||||
int32.into(), // dest start idx
|
||||
int32.into(), // dest end idx
|
||||
int32.into(), // dest step
|
||||
elem_ptr_type.into(), // dest arr ptr
|
||||
int32.into(), // dest arr len
|
||||
int32.into(), // src start idx
|
||||
int32.into(), // src end idx
|
||||
int32.into(), // src step
|
||||
elem_ptr_type.into(), // src arr ptr
|
||||
int32.into(), // src arr len
|
||||
int32.into(), // size
|
||||
];
|
||||
ctx.module.get_function(fun_symbol).unwrap_or_else(|| {
|
||||
let fn_t = int32.fn_type(ty_vec.as_slice(), false);
|
||||
ctx.module.add_function(fun_symbol, fn_t, None)
|
||||
})
|
||||
};
|
||||
|
||||
let zero = int32.const_zero();
|
||||
let one = int32.const_int(1, false);
|
||||
let dest_arr_ptr = dest_arr.data().base_ptr(ctx, generator);
|
||||
let dest_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(dest_arr_ptr, elem_ptr_type, "dest_arr_ptr_cast").unwrap();
|
||||
let dest_len = dest_arr.load_size(ctx, Some("dest.len"));
|
||||
let dest_len = ctx.builder.build_int_truncate_or_bit_cast(dest_len, int32, "srclen32").unwrap();
|
||||
let src_arr_ptr = src_arr.data().base_ptr(ctx, generator);
|
||||
let src_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(src_arr_ptr, elem_ptr_type, "src_arr_ptr_cast").unwrap();
|
||||
let src_len = src_arr.load_size(ctx, Some("src.len"));
|
||||
let src_len = ctx.builder.build_int_truncate_or_bit_cast(src_len, int32, "srclen32").unwrap();
|
||||
|
||||
// index in bound and positive should be done
|
||||
// assert if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest), and
|
||||
// throw exception if not satisfied
|
||||
let src_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, src_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(src_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(src_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let dest_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, dest_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(dest_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(dest_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let src_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, src_idx.0, src_end, src_idx.2);
|
||||
let dest_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, dest_idx.0, dest_end, dest_idx.2);
|
||||
let src_eq_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, src_slice_len, dest_slice_len, "slice_src_eq_dest")
|
||||
.unwrap();
|
||||
let src_slt_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::SLT, src_slice_len, dest_slice_len, "slice_src_slt_dest")
|
||||
.unwrap();
|
||||
let dest_step_eq_one = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::EQ,
|
||||
dest_idx.2,
|
||||
dest_idx.2.get_type().const_int(1, false),
|
||||
"slice_dest_step_eq_one",
|
||||
)
|
||||
.unwrap();
|
||||
let cond_1 = ctx.builder.build_and(dest_step_eq_one, src_slt_dest, "slice_cond_1").unwrap();
|
||||
let cond = ctx.builder.build_or(src_eq_dest, cond_1, "slice_cond").unwrap();
|
||||
|
||||
// TODO: Temporary fix. Rewrite `list_slice_assignment` later
|
||||
// Exception params should have been i64
|
||||
{
|
||||
let param_model = NIntModel(Int64);
|
||||
|
||||
let src_slice_len =
|
||||
Int::from(src_slice_len).s_extend_or_bit_cast(ctx, param_model, "src_slice_len");
|
||||
let dest_slice_len =
|
||||
Int::from(dest_slice_len).s_extend_or_bit_cast(ctx, param_model, "dest_slice_len");
|
||||
let dest_idx_2 = Int::from(dest_idx.2).s_extend_or_bit_cast(ctx, param_model, "dest_idx_2");
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
cond,
|
||||
"0:ValueError",
|
||||
"attempt to assign sequence of size {0} to slice of size {1} with step size {2}",
|
||||
[Some(src_slice_len.value), Some(dest_slice_len.value), Some(dest_idx_2.value)],
|
||||
ctx.current_loc,
|
||||
);
|
||||
}
|
||||
|
||||
let new_len = {
|
||||
let args = vec![
|
||||
dest_idx.0.into(), // dest start idx
|
||||
dest_idx.1.into(), // dest end idx
|
||||
dest_idx.2.into(), // dest step
|
||||
dest_arr_ptr.into(), // dest arr ptr
|
||||
dest_len.into(), // dest arr len
|
||||
src_idx.0.into(), // src start idx
|
||||
src_idx.1.into(), // src end idx
|
||||
src_idx.2.into(), // src step
|
||||
src_arr_ptr.into(), // src arr ptr
|
||||
src_len.into(), // src arr len
|
||||
{
|
||||
let s = match ty {
|
||||
BasicTypeEnum::FloatType(t) => t.size_of(),
|
||||
BasicTypeEnum::IntType(t) => t.size_of(),
|
||||
BasicTypeEnum::PointerType(t) => t.size_of(),
|
||||
BasicTypeEnum::StructType(t) => t.size_of().unwrap(),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
ctx.builder.build_int_truncate_or_bit_cast(s, int32, "size").unwrap()
|
||||
}
|
||||
.into(),
|
||||
];
|
||||
ctx.builder
|
||||
.build_call(slice_assign_fun, args.as_slice(), "slice_assign")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
};
|
||||
// update length
|
||||
let need_update =
|
||||
ctx.builder.build_int_compare(IntPredicate::NE, new_len, dest_len, "need_update").unwrap();
|
||||
let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
|
||||
let update_bb = ctx.ctx.append_basic_block(current, "update");
|
||||
let cont_bb = ctx.ctx.append_basic_block(current, "cont");
|
||||
ctx.builder.build_conditional_branch(need_update, update_bb, cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(update_bb);
|
||||
let new_len = ctx.builder.build_int_z_extend_or_bit_cast(new_len, size_ty, "new_len").unwrap();
|
||||
dest_arr.store_size(ctx, generator, new_len);
|
||||
ctx.builder.build_unconditional_branch(cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(cont_bb);
|
||||
}
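
The size assertion inside `list_slice_assignment` permits the assignment only when the source and destination slices select the same number of elements, or when the destination step is 1 and the source is strictly shorter (the list is then shrunk, and the IRRT call returns the new length). Note that this is stricter than CPython, which also lets a step-1 slice assignment grow the list. The generated condition, written out as plain Rust for clarity:

// cond = src_eq_dest || (dest_step_eq_one && src_slt_dest)
fn slice_assign_allowed(src_len: i32, dest_len: i32, dest_step: i32) -> bool {
    src_len == dest_len || (dest_step == 1 && src_len < dest_len)
}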
|
||||
|
||||
/// Generates a call to `isinf` in IR. Returns an `i1` representing the result.
|
||||
pub fn call_isinf<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
v: FloatValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_isinf").unwrap_or_else(|| {
|
||||
let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
|
||||
ctx.module.add_function("__nac3_isinf", fn_type, None)
|
||||
});
|
||||
|
||||
let ret = ctx
|
||||
.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "isinf")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
generator.bool_to_i1(ctx, ret)
|
||||
}
|
||||
|
||||
/// Generates a call to `isnan` in IR. Returns an `i1` representing the result.
|
||||
pub fn call_isnan<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
v: FloatValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_isnan").unwrap_or_else(|| {
|
||||
let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
|
||||
ctx.module.add_function("__nac3_isnan", fn_type, None)
|
||||
});
|
||||
|
||||
let ret = ctx
|
||||
.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "isnan")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
generator.bool_to_i1(ctx, ret)
|
||||
}
|
||||
|
||||
/// Generates a call to `gamma` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_gamma<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_gamma").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_gamma", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "gamma")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `gammaln` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_gammaln<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_gammaln").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_gammaln", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "gammaln")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `j0` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_j0<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_j0").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_j0", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "j0")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_size`. Returns an [`IntValue`] representing the
|
||||
/// calculated total size.
|
||||
///
|
||||
/// * `dims` - An [`ArrayLikeIndexer`] containing the size of each dimension.
|
||||
/// * `range` - The dimension index to begin and end (exclusively) calculating the dimensions for,
|
||||
/// or [`None`] if starting from the first dimension and ending at the last dimension respectively.
|
||||
pub fn call_ndarray_calc_size<'ctx, G, Dims>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dims: &Dims,
|
||||
(begin, end): (Option<IntValue<'ctx>>, Option<IntValue<'ctx>>),
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Dims: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_size_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_size",
|
||||
64 => "__nac3_ndarray_calc_size64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_size_fn_t = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_usize.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
let ndarray_calc_size_fn =
|
||||
ctx.module.get_function(ndarray_calc_size_fn_name).unwrap_or_else(|| {
|
||||
ctx.module.add_function(ndarray_calc_size_fn_name, ndarray_calc_size_fn_t, None)
|
||||
});
|
||||
|
||||
let begin = begin.unwrap_or_else(|| llvm_usize.const_zero());
|
||||
let end = end.unwrap_or_else(|| dims.size(ctx, generator));
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_size_fn,
|
||||
&[
|
||||
dims.base_ptr(ctx, generator).into(),
|
||||
dims.size(ctx, generator).into(),
|
||||
begin.into(),
|
||||
end.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_nd_indices`. Returns a [`TypedArrayLikeAdapter`]
/// containing `i32` indices of the flattened index.
|
||||
///
|
||||
/// * `index` - The index to compute the multidimensional index for.
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
pub fn call_ndarray_calc_nd_indices<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
index: IntValue<'ctx>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_nd_indices_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_nd_indices",
|
||||
64 => "__nac3_ndarray_calc_nd_indices64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_nd_indices_fn =
|
||||
ctx.module.get_function(ndarray_calc_nd_indices_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_void.fn_type(
|
||||
&[llvm_usize.into(), llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_nd_indices_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.dim_sizes();
|
||||
|
||||
let indices = ctx.builder.build_array_alloca(llvm_i32, ndarray_num_dims, "").unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_nd_indices_fn,
|
||||
&[
|
||||
index.into(),
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(indices, ndarray_num_dims, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
||||
|
||||
fn call_ndarray_flatten_index_impl<'ctx, G, Indices>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Indices,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Indices: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
debug_assert_eq!(
|
||||
IntType::try_from(indices.element_type(ctx, generator))
|
||||
.map(IntType::get_bit_width)
|
||||
.unwrap_or_default(),
|
||||
llvm_i32.get_bit_width(),
|
||||
"Expected i32 value for argument `indices` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
debug_assert_eq!(
|
||||
indices.size(ctx, generator).get_type().get_bit_width(),
|
||||
llvm_usize.get_bit_width(),
|
||||
"Expected usize integer value for argument `indices_size` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
|
||||
let ndarray_flatten_index_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_flatten_index",
|
||||
64 => "__nac3_ndarray_flatten_index64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_flatten_index_fn =
|
||||
ctx.module.get_function(ndarray_flatten_index_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_flatten_index_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.dim_sizes();
|
||||
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_call(
|
||||
ndarray_flatten_index_fn,
|
||||
&[
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.base_ptr(ctx, generator).into(),
|
||||
indices.size(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_flatten_index`. Returns the flattened index for the
|
||||
/// multidimensional index.
|
||||
///
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
/// * `indices` - The multidimensional index to compute the flattened index for.
|
||||
pub fn call_ndarray_flatten_index<'ctx, G, Index>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Index,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Index: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
call_ndarray_flatten_index_impl(generator, ctx, ndarray, indices)
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast`. Returns a tuple containing the number of
/// dimensions and the size of each dimension of the resultant `ndarray`.
|
||||
pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
lhs: NDArrayValue<'ctx>,
|
||||
rhs: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast",
|
||||
64 => "__nac3_ndarray_calc_broadcast64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let min_ndims = llvm_intrinsics::call_int_umin(ctx, lhs_ndims, rhs_ndims, None);
|
||||
|
||||
gen_for_callback_incrementing(
|
||||
generator,
|
||||
ctx,
|
||||
llvm_usize.const_zero(),
|
||||
(min_ndims, false),
|
||||
|generator, ctx, _, idx| {
|
||||
let idx = ctx.builder.build_int_sub(min_ndims, idx, "").unwrap();
|
||||
let (lhs_dim_sz, rhs_dim_sz) = unsafe {
|
||||
(
|
||||
lhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
rhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
)
|
||||
};
|
||||
|
||||
let llvm_usize_const_one = llvm_usize.const_int(1, false);
|
||||
let lhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let rhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, rhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let lhs_or_rhs_eqz = ctx.builder.build_or(lhs_eqz, rhs_eqz, "").unwrap();
|
||||
|
||||
let lhs_eq_rhs = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, rhs_dim_sz, "")
|
||||
.unwrap();
|
||||
|
||||
let is_compatible = ctx.builder.build_or(lhs_or_rhs_eqz, lhs_eq_rhs, "").unwrap();
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
is_compatible,
|
||||
"0:ValueError",
|
||||
"operands could not be broadcast together",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
},
|
||||
llvm_usize.const_int(1, false),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let max_ndims = llvm_intrinsics::call_int_umax(ctx, lhs_ndims, rhs_ndims, None);
|
||||
let lhs_dims = lhs.dim_sizes().base_ptr(ctx, generator);
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_dims = rhs.dim_sizes().base_ptr(ctx, generator);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let out_dims = ctx.builder.build_array_alloca(llvm_usize, max_ndims, "").unwrap();
|
||||
let out_dims = ArraySliceValue::from_ptr_val(out_dims, max_ndims, None);
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[
|
||||
lhs_dims.into(),
|
||||
lhs_ndims.into(),
|
||||
rhs_dims.into(),
|
||||
rhs_ndims.into(),
|
||||
out_dims.base_ptr(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
out_dims,
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns an [`ArrayAllocaValue`]
|
||||
/// containing the indices used for accessing `array` corresponding to the index of the broadcasted
|
||||
/// array `broadcast_idx`.
|
||||
pub fn call_ndarray_calc_broadcast_index<
|
||||
'ctx,
|
||||
G: CodeGenerator + ?Sized,
|
||||
BroadcastIdx: UntypedArrayLikeAccessor<'ctx>,
|
||||
>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
array: NDArrayValue<'ctx>,
|
||||
broadcast_idx: &BroadcastIdx,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast_idx",
|
||||
64 => "__nac3_ndarray_calc_broadcast_idx64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let broadcast_size = broadcast_idx.size(ctx, generator);
|
||||
let out_idx = ctx.builder.build_array_alloca(llvm_i32, broadcast_size, "").unwrap();
|
||||
|
||||
let array_dims = array.dim_sizes().base_ptr(ctx, generator);
|
||||
let array_ndims = array.load_ndims(ctx);
|
||||
let broadcast_idx_ptr = unsafe {
|
||||
broadcast_idx.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[array_dims.into(), array_ndims.into(), broadcast_idx_ptr.into(), out_idx.into()],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(out_idx, broadcast_size, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,384 @@
use inkwell::{
|
||||
types::IntType,
|
||||
values::{BasicValueEnum, CallSiteValue, IntValue},
|
||||
AddressSpace, IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use crate::codegen::{
|
||||
llvm_intrinsics,
|
||||
macros::codegen_unreachable,
|
||||
stmt::gen_for_callback_incrementing,
|
||||
values::{
|
||||
ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, NDArrayValue, TypedArrayLikeAccessor,
|
||||
TypedArrayLikeAdapter, UntypedArrayLikeAccessor,
|
||||
},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_size`. Returns an [`IntValue`] representing the
|
||||
/// calculated total size.
|
||||
///
|
||||
/// * `dims` - An [`ArrayLikeIndexer`] containing the size of each dimension.
|
||||
/// * `range` - The dimension index to begin and end (exclusively) calculating the dimensions for,
|
||||
/// or [`None`] if starting from the first dimension and ending at the last dimension
|
||||
/// respectively.
|
||||
pub fn call_ndarray_calc_size<'ctx, G, Dims>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dims: &Dims,
|
||||
(begin, end): (Option<IntValue<'ctx>>, Option<IntValue<'ctx>>),
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Dims: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_size_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_size",
|
||||
64 => "__nac3_ndarray_calc_size64",
|
||||
bw => codegen_unreachable!(ctx, "Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_size_fn_t = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_usize.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
let ndarray_calc_size_fn =
|
||||
ctx.module.get_function(ndarray_calc_size_fn_name).unwrap_or_else(|| {
|
||||
ctx.module.add_function(ndarray_calc_size_fn_name, ndarray_calc_size_fn_t, None)
|
||||
});
|
||||
|
||||
let begin = begin.unwrap_or_else(|| llvm_usize.const_zero());
|
||||
let end = end.unwrap_or_else(|| dims.size(ctx, generator));
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_size_fn,
|
||||
&[
|
||||
dims.base_ptr(ctx, generator).into(),
|
||||
dims.size(ctx, generator).into(),
|
||||
begin.into(),
|
||||
end.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
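
The runtime call reduces to a product over the selected range of dimension sizes. For reference, an illustrative sketch of the expected result (not the IRRT implementation):

// Total element count of dims[begin..end): the product of the dimension sizes.
fn ndarray_calc_size(dims: &[usize], begin: usize, end: usize) -> usize {
    dims[begin..end].iter().product()
}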
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_nd_indices`. Returns a [`TypedArrayLikeAdapter`]
/// containing `i32` indices of the flattened index.
|
||||
///
|
||||
/// * `index` - The index to compute the multidimensional index for.
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
pub fn call_ndarray_calc_nd_indices<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
index: IntValue<'ctx>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_nd_indices_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_nd_indices",
|
||||
64 => "__nac3_ndarray_calc_nd_indices64",
|
||||
bw => codegen_unreachable!(ctx, "Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_nd_indices_fn =
|
||||
ctx.module.get_function(ndarray_calc_nd_indices_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_void.fn_type(
|
||||
&[llvm_usize.into(), llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_nd_indices_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.shape();
|
||||
|
||||
let indices = ctx.builder.build_array_alloca(llvm_i32, ndarray_num_dims, "").unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_nd_indices_fn,
|
||||
&[
|
||||
index.into(),
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(indices, ndarray_num_dims, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
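
Assuming C (row-major) element order, which matches the `is_c_contiguous` notion used elsewhere in this module, the nd-indices computation is the usual "unravel" of a flat index. A hedged sketch of the expected semantics:

// Row-major unravel: the last dimension varies fastest.
fn calc_nd_indices(mut index: usize, shape: &[usize]) -> Vec<u32> {
    let mut indices = vec![0u32; shape.len()];
    for (i, &dim) in shape.iter().enumerate().rev() {
        indices[i] = (index % dim) as u32;
        index /= dim;
    }
    indices
}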
|
||||
|
||||
fn call_ndarray_flatten_index_impl<'ctx, G, Indices>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Indices,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Indices: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
debug_assert_eq!(
|
||||
IntType::try_from(indices.element_type(ctx, generator))
|
||||
.map(IntType::get_bit_width)
|
||||
.unwrap_or_default(),
|
||||
llvm_i32.get_bit_width(),
|
||||
"Expected i32 value for argument `indices` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
debug_assert_eq!(
|
||||
indices.size(ctx, generator).get_type().get_bit_width(),
|
||||
llvm_usize.get_bit_width(),
|
||||
"Expected usize integer value for argument `indices_size` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
|
||||
let ndarray_flatten_index_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_flatten_index",
|
||||
64 => "__nac3_ndarray_flatten_index64",
|
||||
bw => codegen_unreachable!(ctx, "Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_flatten_index_fn =
|
||||
ctx.module.get_function(ndarray_flatten_index_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_flatten_index_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.shape();
|
||||
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_call(
|
||||
ndarray_flatten_index_fn,
|
||||
&[
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.base_ptr(ctx, generator).into(),
|
||||
indices.size(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_flatten_index`. Returns the flattened index for the
|
||||
/// multidimensional index.
|
||||
///
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
/// * `indices` - The multidimensional index to compute the flattened index for.
|
||||
pub fn call_ndarray_flatten_index<'ctx, G, Index>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Index,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Index: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
call_ndarray_flatten_index_impl(generator, ctx, ndarray, indices)
|
||||
}
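
Flattening goes the other way under the same row-major assumption; a sketch for illustration only:

// Row-major ravel: flat = ((i0 * d1 + i1) * d2 + i2) * ...
fn flatten_index(shape: &[usize], indices: &[u32]) -> usize {
    indices.iter().zip(shape).fold(0usize, |acc, (&i, &dim)| acc * dim + i as usize)
}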
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast`. Returns a tuple containing the number of
/// dimensions and the size of each dimension of the resultant `ndarray`.
|
||||
pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
lhs: NDArrayValue<'ctx>,
|
||||
rhs: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast",
|
||||
64 => "__nac3_ndarray_calc_broadcast64",
|
||||
bw => codegen_unreachable!(ctx, "Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let min_ndims = llvm_intrinsics::call_int_umin(ctx, lhs_ndims, rhs_ndims, None);
|
||||
|
||||
gen_for_callback_incrementing(
|
||||
generator,
|
||||
ctx,
|
||||
None,
|
||||
llvm_usize.const_zero(),
|
||||
(min_ndims, false),
|
||||
|generator, ctx, _, idx| {
|
||||
let idx = ctx.builder.build_int_sub(min_ndims, idx, "").unwrap();
|
||||
let (lhs_dim_sz, rhs_dim_sz) = unsafe {
|
||||
(
|
||||
lhs.shape().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
rhs.shape().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
)
|
||||
};
|
||||
|
||||
let llvm_usize_const_one = llvm_usize.const_int(1, false);
|
||||
let lhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let rhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, rhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let lhs_or_rhs_eqz = ctx.builder.build_or(lhs_eqz, rhs_eqz, "").unwrap();
|
||||
|
||||
let lhs_eq_rhs = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, rhs_dim_sz, "")
|
||||
.unwrap();
|
||||
|
||||
let is_compatible = ctx.builder.build_or(lhs_or_rhs_eqz, lhs_eq_rhs, "").unwrap();
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
is_compatible,
|
||||
"0:ValueError",
|
||||
"operands could not be broadcast together",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
},
|
||||
llvm_usize.const_int(1, false),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let max_ndims = llvm_intrinsics::call_int_umax(ctx, lhs_ndims, rhs_ndims, None);
|
||||
let lhs_dims = lhs.shape().base_ptr(ctx, generator);
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_dims = rhs.shape().base_ptr(ctx, generator);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let out_dims = ctx.builder.build_array_alloca(llvm_usize, max_ndims, "").unwrap();
|
||||
let out_dims = ArraySliceValue::from_ptr_val(out_dims, max_ndims, None);
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[
|
||||
lhs_dims.into(),
|
||||
lhs_ndims.into(),
|
||||
rhs_dims.into(),
|
||||
rhs_ndims.into(),
|
||||
out_dims.base_ptr(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
out_dims,
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
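
The per-dimension check above is the standard NumPy broadcasting rule: two sizes are compatible when they are equal or either of them is 1, and the broadcast size is the larger of the two. A minimal sketch of the rule for a single dimension pair:

// NumPy-style broadcasting of one dimension pair.
fn broadcast_dim(lhs: usize, rhs: usize) -> Option<usize> {
    if lhs == rhs || lhs == 1 || rhs == 1 {
        Some(lhs.max(rhs))
    } else {
        None // "operands could not be broadcast together"
    }
}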
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns an [`ArrayAllocaValue`]
|
||||
/// containing the indices used for accessing `array` corresponding to the index of the broadcasted
|
||||
/// array `broadcast_idx`.
|
||||
pub fn call_ndarray_calc_broadcast_index<
|
||||
'ctx,
|
||||
G: CodeGenerator + ?Sized,
|
||||
BroadcastIdx: UntypedArrayLikeAccessor<'ctx>,
|
||||
>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
array: NDArrayValue<'ctx>,
|
||||
broadcast_idx: &BroadcastIdx,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast_idx",
|
||||
64 => "__nac3_ndarray_calc_broadcast_idx64",
|
||||
bw => codegen_unreachable!(ctx, "Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let broadcast_size = broadcast_idx.size(ctx, generator);
|
||||
let out_idx = ctx.builder.build_array_alloca(llvm_i32, broadcast_size, "").unwrap();
|
||||
|
||||
let array_dims = array.shape().base_ptr(ctx, generator);
|
||||
let array_ndims = array.load_ndims(ctx);
|
||||
let broadcast_idx_ptr = unsafe {
|
||||
broadcast_idx.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[array_dims.into(), array_ndims.into(), broadcast_idx_ptr.into(), out_idx.into()],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(out_idx, broadcast_size, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
|
@@ -1,93 +0,0 @@
use crate::codegen::model::*;
|
||||
use crate::codegen::util::array_writer::ArrayWriter;
|
||||
use crate::codegen::{structs::ndarray::NpArray, CodeGenContext, CodeGenerator};
|
||||
|
||||
use super::basic::{
|
||||
call_nac3_ndarray_nbytes, call_nac3_ndarray_set_strides_by_shape,
|
||||
call_nac3_ndarray_util_assert_shape_no_negative,
|
||||
};
|
||||
|
||||
/**
|
||||
Allocate an ndarray on the stack given its `ndims`.
|
||||
|
||||
`shape` and `strides` will be automatically allocated on the stack.
|
||||
|
||||
The returned ndarray's content will be:
|
||||
- `data`: `nullptr`
|
||||
- `itemsize`: **uninitialized** value
|
||||
- `ndims`: initialized value, set to the input `ndims`
|
||||
- `shape`: initialized pointer to an allocated stack with **uninitialized** values
|
||||
- `strides`: initialized pointer to an allocated stack with **uninitialized** values
|
||||
*/
|
||||
pub fn alloca_ndarray<'ctx, G>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndims: SizeT<'ctx>,
|
||||
name: &str,
|
||||
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
{
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
let ndarray_model = StructModel(NpArray { sizet });
|
||||
let ndarray_data_model = PointerModel(NIntModel(Byte));
|
||||
|
||||
// Allocate ndarray
|
||||
let ndarray_ptr = ndarray_model.alloca(ctx, name);
|
||||
|
||||
// Set data to nullptr
|
||||
ndarray_ptr.gep(ctx, |f| f.data).store(ctx, ndarray_data_model.nullptr(ctx.ctx));
|
||||
|
||||
// Set ndims
|
||||
ndarray_ptr.gep(ctx, |f| f.ndims).store(ctx, ndims);
|
||||
|
||||
// Allocate and set shape
|
||||
let shape_array = sizet.array_alloca(ctx, ndims, "shape");
|
||||
ndarray_ptr.gep(ctx, |f| f.shape).store(ctx, shape_array.pointer);
|
||||
|
||||
// Allocate and set strides
|
||||
let strides_array = sizet.array_alloca(ctx, ndims, "strides");
|
||||
ndarray_ptr.gep(ctx, |f| f.strides).store(ctx, strides_array.pointer);
|
||||
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
|
||||
/// Initializes an ndarray's `shape`, then asserts on the written `shape` values to
/// prohibit illegal inputs such as negative dimensions.
|
||||
pub fn init_ndarray_shape<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
shape_writer: &ArrayWriter<'ctx, G, SizeTModel<'ctx>, SizeTModel<'ctx>>,
|
||||
) -> Result<(), String> {
|
||||
(shape_writer.write)(generator, ctx, &ndarray_ptr.shape_slice(ctx))?;
|
||||
|
||||
call_nac3_ndarray_util_assert_shape_no_negative(
|
||||
generator,
|
||||
ctx,
|
||||
shape_writer.count,
|
||||
ndarray_ptr.gep(ctx, |f| f.shape).load(ctx, "shape"),
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Initialize an ndarray's `data` by allocating a buffer on the stack.
|
||||
/// The allocated data buffer is considered to be *owned* by the ndarray.
|
||||
///
|
||||
/// `strides` of the ndarray will also be updated with `set_strides_by_shape`.
|
||||
///
|
||||
/// `shape` and `itemsize` of the ndarray ***must*** be initialized first.
|
||||
pub fn init_ndarray_data_by_alloca<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) {
|
||||
let ndarray_nbytes = call_nac3_ndarray_nbytes(generator, ctx, ndarray_ptr); // Needs `itemsize` initialized
|
||||
|
||||
let data_array = NIntModel(Byte).array_alloca(ctx, ndarray_nbytes, "data");
|
||||
ndarray_ptr.gep(ctx, |f| f.data).store(ctx, data_array.pointer);
|
||||
|
||||
call_nac3_ndarray_set_strides_by_shape(generator, ctx, ndarray_ptr);
|
||||
}
|
|
@@ -1,117 +0,0 @@
use crate::codegen::irrt::error_context::{check_error_context, setup_error_context};
|
||||
use crate::codegen::irrt::slice::SliceIndex;
|
||||
use crate::codegen::irrt::util::get_sized_dependent_function_name;
|
||||
use crate::codegen::model::*;
|
||||
use crate::codegen::{structs::ndarray::NpArray, CodeGenContext, CodeGenerator};
|
||||
|
||||
pub fn call_nac3_ndarray_size<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> SizeT<'ctx> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_size"))
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.returning("size", sizet)
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_nbytes<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> SizeT<'ctx> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_nbytes"))
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.returning("nbytes", sizet)
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_len<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> NInt<'ctx, SliceIndex> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
let slice_index_model = NIntModel(SliceIndex::default());
|
||||
|
||||
let dst_len = slice_index_model.alloca(ctx, "dst_len");
|
||||
|
||||
let errctx = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_len"))
|
||||
.arg("errctx", errctx)
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.arg("dst_len", dst_len)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, errctx);
|
||||
|
||||
dst_len.load(ctx, "len")
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndims: SizeT<'ctx>,
|
||||
shape_ptr: Pointer<'ctx, SizeTModel<'ctx>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
let errctx = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_util_assert_shape_no_negative"),
|
||||
)
|
||||
.arg("errctx", errctx)
|
||||
.arg("ndims", ndims)
|
||||
.arg("shape", shape_ptr)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, errctx);
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_set_strides_by_shape<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_set_strides_by_shape"),
|
||||
)
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.returning_void();
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_is_c_contiguous<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> NInt<'ctx, Bool> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_is_c_contiguous"),
|
||||
)
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.returning("is_c_contiguous", NIntModel(Bool))
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_copy_data<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
dst_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> NInt<'ctx, Bool> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_copy_data"),
|
||||
)
|
||||
.arg("src_ndarray", src_ndarray)
|
||||
.arg("dst_ndarray", dst_ndarray)
|
||||
.returning("is_c_contiguous", NIntModel(Bool))
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
use crate::codegen::{
|
||||
irrt::util::get_sized_dependent_function_name, model::*, structs::ndarray::NpArray,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
pub fn call_nac3_ndarray_fill_generic<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
fill_value_ptr: Pointer<'ctx, ByteModel>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_fill_generic"),
|
||||
)
|
||||
.arg("ndarray", ndarray_ptr)
|
||||
.arg("pvalue", fill_value_ptr)
|
||||
.returning_void();
|
||||
}
|
|
@ -1,169 +0,0 @@
|
|||
use crate::codegen::{
|
||||
irrt::{
|
||||
error_context::{check_error_context, setup_error_context},
|
||||
slice::{RustUserSlice, SliceIndex, UserSlice},
|
||||
util::get_sized_dependent_function_name,
|
||||
},
|
||||
model::*,
|
||||
structs::ndarray::NpArray,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NDIndexFields {
|
||||
pub type_: Field<ByteModel>, // Defined to be uint8_t in IRRT
|
||||
pub data: Field<PointerModel<ByteModel>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct NDIndex;
|
||||
|
||||
impl<'ctx> StructKind<'ctx> for NDIndex {
|
||||
type Fields = NDIndexFields;
|
||||
|
||||
fn struct_name(&self) -> &'static str {
|
||||
"NDIndex"
|
||||
}
|
||||
|
||||
fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
|
||||
Self::Fields { type_: builder.add_field_auto("type"), data: builder.add_field_auto("data") }
|
||||
}
|
||||
}
|
||||
|
||||
// An enum to store the content
|
||||
// and type of an NDIndex at a high level.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum RustNDIndex<'ctx> {
|
||||
SingleElement(NInt<'ctx, SliceIndex>),
|
||||
Slice(RustUserSlice<'ctx>),
|
||||
}
|
||||
|
||||
impl<'ctx> RustNDIndex<'ctx> {
|
||||
fn irrt_ndindex_id(&self) -> u64 {
|
||||
// Defined in IRRT, must be in sync
|
||||
match self {
|
||||
RustNDIndex::SingleElement(_) => 0,
|
||||
RustNDIndex::Slice(_) => 1,
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to_ndindex(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dst_ndindex_ptr: Pointer<'ctx, StructModel<NDIndex>>,
|
||||
) {
|
||||
let byte_model = ByteModel::default();
|
||||
let slice_index_model = NIntModel(SliceIndex::default());
|
||||
let user_slice_model = StructModel(UserSlice);
|
||||
|
||||
// Set `dst_ndindex_ptr->type`
|
||||
dst_ndindex_ptr
|
||||
.gep(ctx, |f| f.type_)
|
||||
.store(ctx, byte_model.constant(ctx.ctx, self.irrt_ndindex_id()));
|
||||
|
||||
// Set `dst_ndindex_ptr->data`
|
||||
let data = match self {
|
||||
RustNDIndex::SingleElement(in_index) => {
|
||||
let index_ptr = slice_index_model.alloca(ctx, "index");
|
||||
index_ptr.store(ctx, *in_index);
|
||||
index_ptr.cast_to(ctx, NIntModel(Byte), "")
|
||||
}
|
||||
RustNDIndex::Slice(in_rust_slice) => {
|
||||
let user_slice_ptr = user_slice_model.alloca(ctx, "user_slice");
|
||||
in_rust_slice.write_to_user_slice(ctx, user_slice_ptr);
|
||||
user_slice_ptr.cast_to(ctx, NIntModel(Byte), "")
|
||||
}
|
||||
};
|
||||
dst_ndindex_ptr.gep(ctx, |f| f.data).store(ctx, data);
|
||||
}
|
||||
|
||||
// Allocate an array of `NDIndex`es onto the stack and return its stack pointer
|
||||
pub fn alloca_ndindexes<G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndindexes: &[RustNDIndex<'ctx>],
|
||||
) -> ArraySlice<'ctx, SizeTModel<'ctx>, StructModel<NDIndex>> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
let ndindex_model = StructModel(NDIndex);
|
||||
let ndindex_array = ndindex_model.array_alloca(
|
||||
ctx,
|
||||
sizet.constant(ctx.ctx, ndindexes.len() as u64),
|
||||
"ndindexs",
|
||||
);
|
||||
|
||||
for (i, rust_ndindex) in ndindexes.iter().enumerate() {
|
||||
let ndindex_ptr =
|
||||
ndindex_array.ix_unchecked(ctx, sizet.constant(ctx.ctx, i as u64), "");
|
||||
rust_ndindex.write_to_ndindex(ctx, ndindex_ptr);
|
||||
}
|
||||
|
||||
ndindex_array
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn deduce_ndims_after_slicing(slices: &[RustNDIndex], original_ndims: i32) -> i32 {
|
||||
let mut final_ndims: i32 = original_ndims;
|
||||
for slice in slices {
|
||||
match slice {
|
||||
RustNDIndex::SingleElement(_) => {
|
||||
final_ndims -= 1;
|
||||
}
|
||||
RustNDIndex::Slice(_) => {}
|
||||
}
|
||||
}
|
||||
final_ndims
|
||||
}
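    // Example (assumed semantics, mirroring NumPy): indexing a 3-D array with
    // `a[0, :, 2]` maps to [SingleElement, Slice, SingleElement], so the result
    // has `3 - 2 = 1` dimension; slices never remove a dimension.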
|
||||
}
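/// Illustrative sketch (not part of the original API): ties the pieces above together by
/// lowering a list of `RustNDIndex` and invoking the IRRT indexing routine. `src_ndarray`
/// and `dst_ndarray` are assumed to be already-initialized ndarray structs.
pub fn index_ndarray_sketch<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    ndindexes: &[RustNDIndex<'ctx>],
    src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
    dst_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
) {
    // Lower the high-level indices into an on-stack array of IRRT `NDIndex` structs.
    let ndindex_array = RustNDIndex::alloca_ndindexes(generator, ctx, ndindexes);
    // Hand the array over to IRRT, which writes the resulting view into `dst_ndarray`.
    call_nac3_ndarray_index(
        generator,
        ctx,
        ndindex_array.num_elements,
        ndindex_array.pointer,
        src_ndarray,
        dst_ndarray,
    );
}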
|
||||
|
||||
pub fn call_nac3_ndarray_indexing_deduce_ndims_after_indexing<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndims: Int<'ctx>,
|
||||
num_ndindexs: SizeT<'ctx>,
|
||||
ndindexs: Pointer<'ctx, StructModel<NDIndex>>,
|
||||
) -> SizeT<'ctx> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
let final_ndims = sizet.alloca(ctx, "result");
|
||||
|
||||
let errctx_ptr = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(
|
||||
sizet,
|
||||
"__nac3_ndarray_indexing_deduce_ndims_after_indexing",
|
||||
),
|
||||
)
|
||||
.arg("errctx", errctx_ptr)
|
||||
.arg("result", final_ndims)
|
||||
.arg("ndims", ndims)
|
||||
.arg("num_ndindexs", num_ndindexs)
|
||||
.arg("ndindexs", ndindexs)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, errctx_ptr);
|
||||
|
||||
final_ndims.load(ctx, "final_ndims")
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_index<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
num_indexes: SizeT<'ctx>,
|
||||
indexes: Pointer<'ctx, StructModel<NDIndex>>,
|
||||
src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
dst_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
let errctx_ptr = setup_error_context(ctx);
|
||||
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_index"))
|
||||
.arg("errctx", errctx_ptr)
|
||||
.arg("num_indexes", num_indexes)
|
||||
.arg("indexes", indexes)
|
||||
.arg("src_ndarray", src_ndarray)
|
||||
.arg("dst_ndarray", dst_ndarray)
|
||||
.returning_void();
|
||||
|
||||
check_error_context(generator, ctx, errctx_ptr);
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
pub mod allocation;
|
||||
pub mod basic;
|
||||
pub mod fill;
|
||||
pub mod indexing;
|
||||
pub mod reshape;
|
||||
pub mod transpose;
|
|
@ -1,30 +0,0 @@
|
|||
use crate::codegen::{
|
||||
irrt::{
|
||||
error_context::{check_error_context, setup_error_context},
|
||||
util::get_sized_dependent_function_name,
|
||||
},
|
||||
model::*,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
pub fn call_nac3_ndarray_resolve_and_check_new_shape<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
size: SizeT<'ctx>,
|
||||
new_ndims: SizeT<'ctx>,
|
||||
new_shape: Pointer<'ctx, SizeTModel<'ctx>>,
|
||||
) {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
|
||||
let perrctx = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_resolve_and_check_new_shape"),
|
||||
)
|
||||
.arg("errctx", perrctx)
|
||||
.arg("size", size)
|
||||
.arg("new_ndims", new_ndims)
|
||||
.arg("new_shape", new_shape)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, perrctx);
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
use crate::codegen::{
|
||||
irrt::{
|
||||
error_context::{check_error_context, setup_error_context},
|
||||
util::get_sized_dependent_function_name,
|
||||
},
|
||||
model::*,
|
||||
structs::ndarray::NpArray,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
pub fn call_nac3_ndarray_transpose<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
dst_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
axes_or_none: Option<ArraySlice<'ctx, SizeTModel<'ctx>, SizeTModel<'ctx>>>,
|
||||
) -> Pointer<'ctx, StructModel<NpArray<'ctx>>> {
|
||||
let sizet = generator.get_sizet(ctx.ctx);
|
||||
let axes_model = PointerModel(sizet);
|
||||
|
||||
let (num_axes, axes) = match axes_or_none {
|
||||
Some(axes) => (axes.num_elements, axes.pointer),
|
||||
None => {
|
||||
// Please refer to the comment in the IRRT implementation
|
||||
(sizet.constant(ctx.ctx, 0), axes_model.nullptr(ctx.ctx))
|
||||
}
|
||||
};
|
||||
|
||||
let perrctx = setup_error_context(ctx);
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_transpose"),
|
||||
)
|
||||
.arg("errctx", perrctx)
|
||||
.arg("src_ndarray", src_ndarray)
|
||||
.arg("dst_ndarray", dst_ndarray)
|
||||
.arg("num_axes", num_axes)
|
||||
.arg("axes", axes)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, perrctx);
|
||||
|
||||
dst_ndarray
|
||||
}
|
|
@ -1,84 +1,76 @@
|
|||
use crate::codegen::{model::*, CodeGenContext};
|
||||
use inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue, IntValue},
|
||||
IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
use nac3parser::ast::Expr;
|
||||
|
||||
// nac3core's slicing index/length values are always int32_t
|
||||
pub type SliceIndex = Int32;
|
||||
use crate::{
|
||||
codegen::{CodeGenContext, CodeGenerator},
|
||||
typecheck::typedef::Type,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct UserSliceFields {
|
||||
pub start_defined: Field<BoolModel>,
|
||||
pub start: Field<NIntModel<SliceIndex>>,
|
||||
pub stop_defined: Field<BoolModel>,
|
||||
pub stop: Field<NIntModel<SliceIndex>>,
|
||||
pub step_defined: Field<BoolModel>,
|
||||
pub step: Field<NIntModel<SliceIndex>>,
|
||||
/// This function allows the index to be out of range, since Python
|
||||
/// allows out-of-range indices in slices (`a = [1,2,3]; a[1:10] == [2,3]`).
|
||||
pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>(
|
||||
i: &Expr<Option<Type>>,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
generator: &mut G,
|
||||
length: IntValue<'ctx>,
|
||||
) -> Result<Option<IntValue<'ctx>>, String> {
|
||||
const SYMBOL: &str = "__nac3_slice_index_bound";
|
||||
let func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
|
||||
let i32_t = ctx.ctx.i32_type();
|
||||
let fn_t = i32_t.fn_type(&[i32_t.into(), i32_t.into()], false);
|
||||
ctx.module.add_function(SYMBOL, fn_t, None)
|
||||
});
|
||||
|
||||
let i = if let Some(v) = generator.gen_expr(ctx, i)? {
|
||||
v.to_basic_value_enum(ctx, generator, i.custom.unwrap())?
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
Ok(Some(
|
||||
ctx.builder
|
||||
.build_call(func, &[i.into(), length.into()], "bounded_ind")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap(),
|
||||
))
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct UserSlice;
|
||||
pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
start: IntValue<'ctx>,
|
||||
end: IntValue<'ctx>,
|
||||
step: IntValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
const SYMBOL: &str = "__nac3_range_slice_len";
|
||||
let len_func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
|
||||
let i32_t = ctx.ctx.i32_type();
|
||||
let fn_t = i32_t.fn_type(&[i32_t.into(), i32_t.into(), i32_t.into()], false);
|
||||
ctx.module.add_function(SYMBOL, fn_t, None)
|
||||
});
|
||||
|
||||
impl<'ctx> StructKind<'ctx> for UserSlice {
|
||||
type Fields = UserSliceFields;
|
||||
|
||||
fn struct_name(&self) -> &'static str {
|
||||
"UserSlice"
|
||||
}
|
||||
|
||||
fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
|
||||
Self::Fields {
|
||||
start_defined: builder.add_field_auto("start_defined"),
|
||||
start: builder.add_field_auto("start"),
|
||||
stop_defined: builder.add_field_auto("stop_defined"),
|
||||
stop: builder.add_field_auto("stop"),
|
||||
step_defined: builder.add_field_auto("step_defined"),
|
||||
step: builder.add_field_auto("step"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RustUserSlice<'ctx> {
|
||||
pub start: Option<NInt<'ctx, SliceIndex>>,
|
||||
pub stop: Option<NInt<'ctx, SliceIndex>>,
|
||||
pub step: Option<NInt<'ctx, SliceIndex>>,
|
||||
}
|
||||
|
||||
impl<'ctx> RustUserSlice<'ctx> {
|
||||
// Set the values of an LLVM UserSlice
|
||||
// in the format of Python's `slice()`
|
||||
pub fn write_to_user_slice(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dst_slice_ptr: Pointer<'ctx, StructModel<UserSlice>>,
|
||||
) {
|
||||
// TODO: make this neater, with a helper lambda?
|
||||
|
||||
let bool_model = BoolModel::default();
|
||||
|
||||
let false_ = bool_model.constant(ctx.ctx, 0);
|
||||
let true_ = bool_model.constant(ctx.ctx, 1);
|
||||
|
||||
match self.start {
|
||||
Some(start) => {
|
||||
dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, true_);
|
||||
dst_slice_ptr.gep(ctx, |f| f.start).store(ctx, start);
|
||||
}
|
||||
None => dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, false_),
|
||||
}
|
||||
|
||||
match self.stop {
|
||||
Some(stop) => {
|
||||
dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, true_);
|
||||
dst_slice_ptr.gep(ctx, |f| f.stop).store(ctx, stop);
|
||||
}
|
||||
None => dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, false_),
|
||||
}
|
||||
|
||||
match self.step {
|
||||
Some(step) => {
|
||||
dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, true_);
|
||||
dst_slice_ptr.gep(ctx, |f| f.step).store(ctx, step);
|
||||
}
|
||||
None => dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, false_),
|
||||
}
|
||||
}
|
||||
// assert step != 0, throw exception if not
|
||||
let not_zero = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::NE, step, step.get_type().const_zero(), "range_step_ne")
|
||||
.unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
not_zero,
|
||||
"0:ValueError",
|
||||
"step must not be zero",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
ctx.builder
|
||||
.build_call(len_func, &[start.into(), end.into(), step.into()], "calc_len")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{path::Path, process::Command};
|
||||
|
||||
#[test]
|
||||
fn run_irrt_test() {
|
||||
assert!(
|
||||
cfg!(feature = "test"),
|
||||
"Please do `cargo test -F test` to compile `irrt_test.out` and run test"
|
||||
);
|
||||
|
||||
let irrt_test_out_path = Path::new(concat!(env!("OUT_DIR"), "/irrt_test.out"));
|
||||
let output = Command::new(irrt_test_out_path.to_str().unwrap()).output().unwrap();
|
||||
|
||||
if !output.status.success() {
|
||||
eprintln!("irrt_test failed with status {}:", output.status);
|
||||
eprintln!("====== stdout ======");
|
||||
eprintln!("{}", String::from_utf8(output.stdout).unwrap());
|
||||
eprintln!("====== stderr ======");
|
||||
eprintln!("{}", String::from_utf8(output.stderr).unwrap());
|
||||
eprintln!("====================");
|
||||
|
||||
panic!("irrt_test failed");
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
use crate::codegen::model::*;
|
||||
|
||||
#[must_use]
|
||||
pub fn get_sized_dependent_function_name(sizet: SizeTModel<'_>, fn_name: &str) -> String {
|
||||
// When it's 32 bits, the function name is "{fn_name}"
|
||||
// When it's 64 bits, the function name is "{fn_name}64"
|
||||
let mut fn_name = fn_name.to_owned();
|
||||
match sizet.0.get_bit_width() {
|
||||
32 => {}
|
||||
64 => fn_name.push_str("64"),
|
||||
bit_width => {
|
||||
panic!("Unsupported int type bit width {bit_width}, must be either 32-bits or 64-bits")
|
||||
}
|
||||
}
|
||||
fn_name
|
||||
}
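// Illustrative examples of the naming scheme above (`sizet32`/`sizet64` are hypothetical
// 32-bit and 64-bit `SizeTModel`s):
//
//     get_sized_dependent_function_name(sizet32, "__nac3_ndarray_size") == "__nac3_ndarray_size"
//     get_sized_dependent_function_name(sizet64, "__nac3_ndarray_size") == "__nac3_ndarray_size64"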
|
|
@ -1,12 +1,14 @@
|
|||
use crate::codegen::CodeGenContext;
|
||||
use inkwell::context::Context;
|
||||
use inkwell::intrinsics::Intrinsic;
|
||||
use inkwell::types::AnyTypeEnum::IntType;
|
||||
use inkwell::types::FloatType;
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue, PointerValue};
|
||||
use inkwell::AddressSpace;
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
intrinsics::Intrinsic,
|
||||
types::{AnyTypeEnum::IntType, FloatType},
|
||||
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use super::CodeGenContext;
|
||||
|
||||
/// Returns the string representation for the floating-point type `ft` when used in intrinsic
|
||||
/// functions.
|
||||
fn get_float_intrinsic_repr(ctx: &Context, ft: FloatType) -> &'static str {
|
||||
|
@ -35,6 +37,40 @@ fn get_float_intrinsic_repr(ctx: &Context, ft: FloatType) -> &'static str {
|
|||
unreachable!()
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.va_start`](https://llvm.org/docs/LangRef.html#llvm-va-start-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_va_start<'ctx>(ctx: &CodeGenContext<'ctx, '_>, arglist: PointerValue<'ctx>) {
|
||||
const FN_NAME: &str = "llvm.va_start";
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_p0i8.into()], false);
|
||||
|
||||
ctx.module.add_function(FN_NAME, fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder.build_call(intrinsic_fn, &[arglist.into()], "").unwrap();
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.va_end`](https://llvm.org/docs/LangRef.html#llvm-va-end-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_va_end<'ctx>(ctx: &CodeGenContext<'ctx, '_>, arglist: PointerValue<'ctx>) {
|
||||
const FN_NAME: &str = "llvm.va_end";
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_p0i8.into()], false);
|
||||
|
||||
ctx.module.add_function(FN_NAME, fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder.build_call(intrinsic_fn, &[arglist.into()], "").unwrap();
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.stacksave`](https://llvm.org/docs/LangRef.html#llvm-stacksave-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_stacksave<'ctx>(
|
||||
|
@ -149,7 +185,7 @@ pub fn call_memcpy_generic<'ctx>(
|
|||
dest
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bitcast(dest, llvm_p0i8, "")
|
||||
.build_bit_cast(dest, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
|
@ -157,7 +193,7 @@ pub fn call_memcpy_generic<'ctx>(
|
|||
src
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bitcast(src, llvm_p0i8, "")
|
||||
.build_bit_cast(src, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
|
@ -171,8 +207,9 @@ pub fn call_memcpy_generic<'ctx>(
|
|||
/// * `$ctx:ident`: Reference to the current Code Generation Context
|
||||
/// * `$name:ident`: Optional name to be assigned to the llvm build call (Option<&str>)
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function
|
||||
/// * `$map_fn:ident`: Mapping function to be applied on `BasicValue` (`BasicValue` -> Function Return Type)
|
||||
/// Use `BasicValueEnum::into_int_value` for Integer return type and `BasicValueEnum::into_float_value` for Float return type
|
||||
/// * `$map_fn:ident`: Mapping function to be applied on `BasicValue` (`BasicValue` -> Function Return Type).
|
||||
/// Use `BasicValueEnum::into_int_value` for Integer return type and
|
||||
/// `BasicValueEnum::into_float_value` for Float return type
|
||||
/// * `$llvm_ty:ident`: Type of first operand
|
||||
/// * `,($val:ident)*`: Comma separated list of operands
|
||||
macro_rules! generate_llvm_intrinsic_fn_body {
|
||||
|
@ -188,7 +225,7 @@ macro_rules! generate_llvm_intrinsic_fn_body {
|
|||
/// Arguments:
|
||||
/// * `float/int`: Indicates the return and argument type of the function
|
||||
/// * `$fn_name:ident`: The identifier of the rust function to be generated
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function.
|
||||
/// Omit "llvm." prefix from the function name i.e. use "ceil" instead of "llvm.ceil"
|
||||
/// * `$val:ident`: The operand for unary operations
|
||||
/// * `$val1:ident`, `$val2:ident`: The operands for binary operations
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
use crate::{
|
||||
codegen::classes::{ListType, ProxyType, RangeType},
|
||||
symbol_resolver::{StaticValue, SymbolResolver},
|
||||
toplevel::{helper::PrimDef, TopLevelContext, TopLevelDef},
|
||||
typecheck::{
|
||||
type_inferencer::{CodeLocation, PrimitiveStore},
|
||||
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
},
|
||||
thread,
|
||||
};
|
||||
|
||||
use crossbeam::channel::{unbounded, Receiver, Sender};
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
|
@ -24,37 +24,52 @@ use inkwell::{
|
|||
AddressSpace, IntPredicate, OptimizationLevel,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use model::*;
|
||||
use nac3parser::ast::{Location, Stmt, StrRef};
|
||||
use parking_lot::{Condvar, Mutex};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
|
||||
use nac3parser::ast::{Location, Stmt, StrRef};
|
||||
|
||||
use crate::{
|
||||
symbol_resolver::{StaticValue, SymbolResolver},
|
||||
toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, TopLevelContext, TopLevelDef},
|
||||
typecheck::{
|
||||
type_inferencer::{CodeLocation, PrimitiveStore},
|
||||
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
|
||||
},
|
||||
};
|
||||
use std::thread;
|
||||
use structs::{cslice::CSlice, exception::Exception, ndarray::NpArray};
|
||||
use concrete_type::{ConcreteType, ConcreteTypeEnum, ConcreteTypeStore};
|
||||
pub use generator::{CodeGenerator, DefaultCodeGenerator};
|
||||
use types::{ListType, NDArrayType, ProxyType, RangeType};
|
||||
|
||||
pub mod builtin_fns;
|
||||
pub mod classes;
|
||||
pub mod concrete_type;
|
||||
pub mod expr;
|
||||
pub mod extern_fns;
|
||||
mod generator;
|
||||
pub mod irrt;
|
||||
pub mod llvm_intrinsics;
|
||||
pub mod model;
|
||||
pub mod numpy;
|
||||
pub mod numpy_new;
|
||||
pub mod stmt;
|
||||
pub mod structs;
|
||||
pub mod types;
|
||||
pub mod values;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
pub mod util;
|
||||
|
||||
use concrete_type::{ConcreteType, ConcreteTypeEnum, ConcreteTypeStore};
|
||||
pub use generator::{CodeGenerator, DefaultCodeGenerator};
|
||||
mod macros {
|
||||
/// Codegen-variant of [`std::unreachable`] which accepts an instance of [`CodeGenContext`] as
|
||||
/// its first argument to provide Python source information to indicate the codegen location
|
||||
/// causing the assertion.
|
||||
macro_rules! codegen_unreachable {
|
||||
($ctx:expr $(,)?) => {
|
||||
std::unreachable!("unreachable code while processing {}", &$ctx.current_loc)
|
||||
};
|
||||
($ctx:expr, $($arg:tt)*) => {
|
||||
std::unreachable!("unreachable code while processing {}: {}", &$ctx.current_loc, std::format!("{}", std::format_args!($($arg)+)))
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use codegen_unreachable;
|
||||
}
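// Illustrative call site (assumed, not from the original sources): using the macro instead
// of `std::unreachable!` attaches the current Python source location to the panic message:
//
//     _ => codegen_unreachable!(ctx, "unsupported operand type"),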
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct StaticValueStore {
|
||||
|
@ -74,6 +89,16 @@ pub struct CodeGenLLVMOptions {
|
|||
pub target: CodeGenTargetMachineOptions,
|
||||
}
|
||||
|
||||
impl CodeGenLLVMOptions {
|
||||
/// Creates a [`TargetMachine`] using the target options specified by this struct.
|
||||
///
|
||||
/// See [`Target::create_target_machine`].
|
||||
#[must_use]
|
||||
pub fn create_target_machine(&self) -> Option<TargetMachine> {
|
||||
self.target.create_target_machine(self.opt_level)
|
||||
}
|
||||
}
|
||||
|
||||
/// Additional options for code generation for the target machine.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct CodeGenTargetMachineOptions {
|
||||
|
@ -164,11 +189,11 @@ pub struct CodeGenContext<'ctx, 'a> {
|
|||
pub registry: &'a WorkerRegistry,
|
||||
|
||||
/// Cache for constant strings.
|
||||
pub const_strings: HashMap<String, Struct<'ctx, CSlice<'ctx>>>,
|
||||
pub const_strings: HashMap<String, BasicValueEnum<'ctx>>,
|
||||
|
||||
/// [`BasicBlock`] containing all `alloca` statements for the current function.
|
||||
pub init_bb: BasicBlock<'ctx>,
|
||||
pub exception_val: Option<Pointer<'ctx, StructModel<Exception<'ctx>>>>,
|
||||
pub exception_val: Option<PointerValue<'ctx>>,
|
||||
|
||||
/// The header and exit basic blocks of a loop in this context. See
|
||||
/// <https://llvm.org/docs/LoopTerminology.html> for explanation of these terminology.
|
||||
|
@ -344,6 +369,10 @@ impl WorkerRegistry {
|
|||
let mut builder = context.create_builder();
|
||||
let mut module = context.create_module(generator.get_name());
|
||||
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
|
||||
module.add_basic_value_flag(
|
||||
"Debug Info Version",
|
||||
inkwell::module::FlagBehavior::Warning,
|
||||
|
@ -367,6 +396,10 @@ impl WorkerRegistry {
|
|||
errors.insert(e);
|
||||
// create a new empty module just to continue codegen and collect errors
|
||||
module = context.create_module(&format!("{}_recover", generator.get_name()));
|
||||
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
}
|
||||
}
|
||||
*self.task_count.lock() -= 1;
|
||||
|
@ -432,7 +465,7 @@ pub struct CodeGenTask {
|
|||
fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
ctx: &'ctx Context,
|
||||
module: &Module<'ctx>,
|
||||
generator: &mut G,
|
||||
generator: &G,
|
||||
unifier: &mut Unifier,
|
||||
top_level: &TopLevelContext,
|
||||
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
|
||||
|
@ -477,9 +510,12 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
|||
}
|
||||
|
||||
TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
let sizet = generator.get_sizet(ctx);
|
||||
let pndarray_model = PointerModel(StructModel(NpArray { sizet }));
|
||||
pndarray_model.get_type(ctx).into()
|
||||
let (dtype, _) = unpack_ndarray_var_tys(unifier, ty);
|
||||
let element_type = get_llvm_type(
|
||||
ctx, module, generator, unifier, top_level, type_cache, dtype,
|
||||
);
|
||||
|
||||
NDArrayType::new(generator, ctx, element_type).as_base_type().into()
|
||||
}
|
||||
|
||||
_ => unreachable!(
|
||||
|
@ -523,8 +559,10 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
|||
};
|
||||
return ty;
|
||||
}
|
||||
TTuple { ty } => {
|
||||
TTuple { ty, is_vararg_ctx } => {
|
||||
// a struct with fields in the order present in the tuple
|
||||
assert!(!is_vararg_ctx, "Tuples in vararg context must be instantiated with the correct number of arguments before calling get_llvm_type");
|
||||
|
||||
let fields = ty
|
||||
.iter()
|
||||
.map(|ty| {
|
||||
|
@ -554,7 +592,7 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
|||
fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
ctx: &'ctx Context,
|
||||
module: &Module<'ctx>,
|
||||
generator: &mut G,
|
||||
generator: &G,
|
||||
unifier: &mut Unifier,
|
||||
top_level: &TopLevelContext,
|
||||
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
|
||||
|
@ -563,11 +601,11 @@ fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
|
|||
) -> BasicTypeEnum<'ctx> {
|
||||
// If the type is used in the definition of a function, return `i1` instead of `i8` for ABI
|
||||
// consistency.
|
||||
return if unifier.unioned(ty, primitives.bool) {
|
||||
if unifier.unioned(ty, primitives.bool) {
|
||||
ctx.bool_type().into()
|
||||
} else {
|
||||
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, ty)
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether `sret` is needed for a return value with type `ty`.
|
||||
|
@ -592,6 +630,40 @@ fn need_sret(ty: BasicTypeEnum) -> bool {
|
|||
need_sret_impl(ty, true)
|
||||
}
|
||||
|
||||
/// Returns the [`BasicTypeEnum`] representing a `va_list` struct for variadic arguments.
|
||||
fn get_llvm_valist_type<'ctx>(ctx: &'ctx Context, triple: &TargetTriple) -> BasicTypeEnum<'ctx> {
|
||||
let triple = TargetMachine::normalize_triple(triple);
|
||||
let triple = triple.as_str().to_str().unwrap();
|
||||
let arch = triple.split('-').next().unwrap();
|
||||
|
||||
let llvm_pi8 = ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
|
||||
// Referenced from parseArch() in llvm/lib/Support/Triple.cpp
|
||||
match arch {
|
||||
"i386" | "i486" | "i586" | "i686" | "riscv32" => {
|
||||
ctx.i8_type().ptr_type(AddressSpace::default()).into()
|
||||
}
|
||||
"amd64" | "x86_64" | "x86_64h" => {
|
||||
let llvm_i32 = ctx.i32_type();
|
||||
|
||||
let va_list_tag = ctx.opaque_struct_type("struct.__va_list_tag");
|
||||
va_list_tag.set_body(
|
||||
&[llvm_i32.into(), llvm_i32.into(), llvm_pi8.into(), llvm_pi8.into()],
|
||||
false,
|
||||
);
|
||||
va_list_tag.into()
|
||||
}
|
||||
"armv7" => {
|
||||
let va_list = ctx.opaque_struct_type("struct.__va_list");
|
||||
va_list.set_body(&[llvm_pi8.into()], false);
|
||||
va_list.into()
|
||||
}
|
||||
triple => {
|
||||
todo!("Unsupported platform for varargs: {triple}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation for generating LLVM IR for a function.
|
||||
pub fn gen_func_impl<
|
||||
'ctx,
|
||||
|
@ -649,24 +721,47 @@ pub fn gen_func_impl<
|
|||
..primitives
|
||||
};
|
||||
|
||||
let sizet = generator.get_sizet(context);
|
||||
let cslice_type = StructModel(CSlice { sizet });
|
||||
let pexception_type = PointerModel(StructModel(Exception { sizet }));
|
||||
|
||||
let mut type_cache: HashMap<_, BasicTypeEnum<'ctx>> = [
|
||||
let mut type_cache: HashMap<_, _> = [
|
||||
(primitives.int32, context.i32_type().into()),
|
||||
(primitives.int64, context.i64_type().into()),
|
||||
(primitives.uint32, context.i32_type().into()),
|
||||
(primitives.uint64, context.i64_type().into()),
|
||||
(primitives.float, context.f64_type().into()),
|
||||
(primitives.bool, context.i8_type().into()),
|
||||
(primitives.str, cslice_type.get_type(context).into()),
|
||||
(primitives.str, {
|
||||
let name = "str";
|
||||
match module.get_struct_type(name) {
|
||||
None => {
|
||||
let str_type = context.opaque_struct_type("str");
|
||||
let fields = [
|
||||
context.i8_type().ptr_type(AddressSpace::default()).into(),
|
||||
generator.get_size_type(context).into(),
|
||||
];
|
||||
str_type.set_body(&fields, false);
|
||||
str_type.into()
|
||||
}
|
||||
Some(t) => t.as_basic_type_enum(),
|
||||
}
|
||||
}),
|
||||
(primitives.range, RangeType::new(context).as_base_type().into()),
|
||||
(primitives.exception, pexception_type.get_type(context).into()),
|
||||
(primitives.exception, {
|
||||
let name = "Exception";
|
||||
if let Some(t) = module.get_struct_type(name) {
|
||||
t.ptr_type(AddressSpace::default()).as_basic_type_enum()
|
||||
} else {
|
||||
let exception = context.opaque_struct_type("Exception");
|
||||
let int32 = context.i32_type().into();
|
||||
let int64 = context.i64_type().into();
|
||||
let str_ty = module.get_struct_type("str").unwrap().as_basic_type_enum();
|
||||
let fields = [int32, str_ty, int32, int32, str_ty, str_ty, int64, int64, int64];
|
||||
exception.set_body(&fields, false);
|
||||
exception.ptr_type(AddressSpace::default()).as_basic_type_enum()
|
||||
}
|
||||
}),
|
||||
]
|
||||
.into_iter()
|
||||
.iter()
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
// NOTE: special handling of option cannot use this type cache since it contains type var,
|
||||
// handled inside get_llvm_type instead
|
||||
|
||||
|
@ -680,6 +775,7 @@ pub fn gen_func_impl<
|
|||
name: arg.name,
|
||||
ty: task.store.to_unifier_type(&mut unifier, &primitives, arg.ty, &mut cache),
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: arg.is_vararg,
|
||||
})
|
||||
.collect_vec(),
|
||||
task.store.to_unifier_type(&mut unifier, &primitives, *ret, &mut cache),
|
||||
|
@ -702,7 +798,10 @@ pub fn gen_func_impl<
|
|||
let has_sret = ret_type.map_or(false, |ty| need_sret(ty));
|
||||
let mut params = args
|
||||
.iter()
|
||||
.filter(|arg| !arg.is_vararg)
|
||||
.map(|arg| {
|
||||
debug_assert!(!arg.is_vararg);
|
||||
|
||||
get_llvm_abi_type(
|
||||
context,
|
||||
&module,
|
||||
|
@ -721,9 +820,12 @@ pub fn gen_func_impl<
|
|||
params.insert(0, ret_type.unwrap().ptr_type(AddressSpace::default()).into());
|
||||
}
|
||||
|
||||
debug_assert!(matches!(args.iter().filter(|arg| arg.is_vararg).count(), 0..=1));
|
||||
let vararg_arg = args.iter().find(|arg| arg.is_vararg);
|
||||
|
||||
let fn_type = match ret_type {
|
||||
Some(ret_type) if !has_sret => ret_type.fn_type(¶ms, false),
|
||||
_ => context.void_type().fn_type(¶ms, false),
|
||||
Some(ret_type) if !has_sret => ret_type.fn_type(¶ms, vararg_arg.is_some()),
|
||||
_ => context.void_type().fn_type(¶ms, vararg_arg.is_some()),
|
||||
};
|
||||
|
||||
let symbol = &task.symbol_name;
|
||||
|
@ -751,9 +853,10 @@ pub fn gen_func_impl<
|
|||
builder.position_at_end(init_bb);
|
||||
let body_bb = context.append_basic_block(fn_val, "body");
|
||||
|
||||
// Store non-vararg argument values into local variables
|
||||
let mut var_assignment = HashMap::new();
|
||||
let offset = u32::from(has_sret);
|
||||
for (n, arg) in args.iter().enumerate() {
|
||||
for (n, arg) in args.iter().enumerate().filter(|(_, arg)| !arg.is_vararg) {
|
||||
let param = fn_val.get_nth_param((n as u32) + offset).unwrap();
|
||||
let local_type = get_llvm_type(
|
||||
context,
|
||||
|
@ -786,6 +889,8 @@ pub fn gen_func_impl<
|
|||
var_assignment.insert(arg.name, (alloca, None, 0));
|
||||
}
|
||||
|
||||
// TODO: Save vararg parameters as list
|
||||
|
||||
let return_buffer = if has_sret {
|
||||
Some(fn_val.get_nth_param(0).unwrap().into_pointer_value())
|
||||
} else {
|
||||
|
@ -1008,3 +1113,9 @@ fn gen_in_range_check<'ctx>(
|
|||
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, lo, hi, "cmp").unwrap()
|
||||
}
|
||||
|
||||
/// Returns the internal name for the `va_count` argument, used to indicate the number of arguments
|
||||
/// passed to the variadic function.
|
||||
fn get_va_count_arg_name(arg_name: StrRef) -> StrRef {
|
||||
format!("__{}_va_count", &arg_name).into()
|
||||
}
|
||||
|
|
|
@ -1,204 +0,0 @@
|
|||
use core::fmt;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicType, BasicTypeEnum, IntType},
|
||||
values::{BasicValue, IntValue, PointerValue},
|
||||
};
|
||||
|
||||
use crate::codegen::{CodeGenContext, CodeGenerator};
|
||||
|
||||
use super::{ArraySlice, Pointer, PointerModel};
|
||||
|
||||
/*
|
||||
TODO: UPDATE when the Model finally stabilizes
|
||||
Explanation on the abstraction:
|
||||
|
||||
In LLVM, there are TYPES and VALUES.
|
||||
|
||||
Inkwell gives us TYPES [`BasicTypeEnum<'ctx>`] and VALUES [`BasicValueEnum<'ctx>`],
|
||||
but by themselves, they lack a lot of Rust compile-time known info.
|
||||
|
||||
e.g., You did `let ptr = builder.build_alloca(my_llvm_ndarray_struct_ty)`,
|
||||
but `ptr` is just a `PointerValue<'ctx>`, almost everything about the
|
||||
underlying `my_llvm_ndarray_struct_ty` is gone.
|
||||
|
||||
The `Model` abstraction is a wrapper around inkwell TYPES and VALUES but with
|
||||
a richer interface.
|
||||
|
||||
`Model<'ctx>` is a wrapper around an inkwell TYPE:
|
||||
- `NIntModel<Byte>` is a i8.
|
||||
- `NIntModel<Int32>` is a i32.
|
||||
- `NIntModel<Int64>` is a i64.
|
||||
- `IntModel` is a carrier for an inkwell `IntType<'ctx>`,
|
||||
used when the type is dynamic/cannot be specified in Rust compile-time.
|
||||
- `PointerModel<'ctx, E>` is a wrapper for `PointerType<'ctx>`,
|
||||
where `E` is another `Model<'ctx>` that describes the element type of the pointer.
|
||||
- `StructModel<'ctx, NDArray>` is a wrapper for `StructType<'ctx>`,
|
||||
with additional information encoded within `NDArray`. (See `IsStruct<'ctx>`)
|
||||
|
||||
`Model<'ctx>::Value`/`ModelValue<'ctx>` is a wrapper around an inkwell VALUE:
|
||||
- `NInt<'ctx, T>` is a value of `NIntModel<'ctx, T>`,
|
||||
where `T` could be `Byte`, `Int32`, or `Int64`.
|
||||
- `Pointer<'ctx, E>` is a value of `PointerModel<'ctx, E>`.
|
||||
|
||||
Other interesting utilities:
|
||||
- Given a `Model<'ctx>`, say, `let ndarray_model = StructModel<'ctx, NDArray>`,
|
||||
  you can do `ndarray_model.alloca(ctx, "my_ndarray")` to get a `Pointer<'ctx, Struct<'ctx, NDArray>>`,
|
||||
  notice that all LLVM type information is preserved.
|
||||
- For a `let my_ndarray = Pointer<'ctx, StructModel<NDArray>>`, you can access a field by doing
|
||||
`my_ndarray.gep(ctx, |f| f.itemsize).load() // or .store()`, and you can chain them
|
||||
together for nested structures.
|
||||
|
||||
A brief summary of the `Model<'ctx>` and `ModelValue<'ctx>` traits:
|
||||
- Model<'ctx>
|
||||
// The associated ModelValue of this Model
|
||||
- type Value: ModelValue<'ctx>
|
||||
|
||||
// Get the LLVM type of this Model
|
||||
- fn get_llvm_type(&self)
|
||||
|
||||
// Check if the input type is equal to the LLVM type of this Model
|
||||
    // NOTE: this function is provided through `CanCheckLLVMType<'ctx>`
|
||||
- fn check_llvm_type(&self, ty) -> Result<(), String>
|
||||
|
||||
// Check if the input value's type is equal to the LLVM type of this Model.
|
||||
//
|
||||
// If so, wrap it with `Self::Value`.
|
||||
    - fn review_value<V: BasicValue<'ctx>>(&self, value: V) -> Result<Instance<'ctx, Self>, ModelError>
|
||||
|
||||
- ModelValue<'ctx>
|
||||
// get the LLVM value of this ModelValue
|
||||
- fn get_llvm_value(&self) -> BasicValueEnum<'ctx>
|
||||
*/
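// A minimal end-to-end sketch of the flow described above (assumes a `ctx: &CodeGenContext`
// and a `sizet` obtained from the `CodeGenerator`; identifiers are illustrative only):
//
//     let ndarray_model = StructModel(NpArray { sizet });            // a Model for the struct type
//     let ndarray = ndarray_model.alloca(ctx, "my_ndarray");         // Pointer<'ctx, StructModel<NpArray>>
//     let itemsize = ndarray.gep(ctx, |f| f.itemsize).load(ctx, ""); // typed field access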
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ModelError(pub String);
|
||||
|
||||
// NOTE: Should have been within [`Model<'ctx>`],
|
||||
// but Rust object-safety requirements made it necessary to
|
||||
// split the trait.
|
||||
pub trait CanCheckLLVMType<'ctx> {
|
||||
/// See [`Model::check_llvm_type`]
|
||||
fn check_llvm_type_impl(
|
||||
&self,
|
||||
ctx: &'ctx Context,
|
||||
ty: BasicTypeEnum<'ctx>,
|
||||
) -> Result<(), ModelError>;
|
||||
}
|
||||
|
||||
pub trait Model<'ctx>: fmt::Debug + Clone + Copy + CanCheckLLVMType<'ctx> + Sized + Eq {
|
||||
/// The corresponding LLVM [`BasicValue<'ctx>`] of this Model.
|
||||
type Value: BasicValue<'ctx>;
|
||||
/// The corresponding LLVM [`BasicType<'ctx>`] of this Model.
|
||||
type Type: BasicType<'ctx>;
|
||||
|
||||
/// Get the LLVM type of this [`Model<'ctx>`]
|
||||
fn get_type(&self, ctx: &'ctx Context) -> Self::Type;
|
||||
|
||||
/// Check if the input type is equal to the LLVM type of this Model.
|
||||
///
|
||||
/// If it doesn't match, an [`Err`] with a human-readable message is
|
||||
    /// returned explaining *how* it differs. Meant for debugging.
|
||||
fn check_type<T: BasicType<'ctx>>(&self, ctx: &'ctx Context, ty: T) -> Result<(), ModelError> {
|
||||
self.check_llvm_type_impl(ctx, ty.as_basic_type_enum())
|
||||
}
|
||||
|
||||
/// Check if an LLVM value's type is equal to the LLVM type of this [`Model`].
|
||||
/// If so, wrap it with [`Instance`].
|
||||
fn review_value<V: BasicValue<'ctx>>(
|
||||
&self,
|
||||
ctx: &'ctx Context,
|
||||
value: V,
|
||||
) -> Result<Instance<'ctx, Self>, ModelError>;
|
||||
|
||||
/// Directly create an [`Instance`] of this [`Model`].
|
||||
///
|
||||
/// It is assumed that the LLVM type of `value` has been checked.
|
||||
///
|
||||
/// It is recommended that you use [`Model::review_value`] instead in order to
|
||||
/// catch bugs.
|
||||
fn believe_value(&self, value: Self::Value) -> Instance<'ctx, Self> {
|
||||
Instance { model: *self, value, _phantom: PhantomData }
|
||||
}
|
||||
|
||||
/// Build an instruction to allocate a value with the LLVM type of this [`Model<'ctx>`].
|
||||
fn alloca(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> Pointer<'ctx, Self> {
|
||||
let ptr_model = PointerModel(*self);
|
||||
let ptr = ctx.builder.build_alloca(self.get_type(ctx.ctx), name).unwrap();
|
||||
ptr_model.believe_value(ptr)
|
||||
}
|
||||
|
||||
/// Build an instruction to allocate an array of the LLVM type of this [`Model<'ctx>`].
|
||||
fn array_alloca<N>(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
num_elements: Instance<'ctx, N>,
|
||||
name: &str,
|
||||
) -> ArraySlice<'ctx, N, Self>
|
||||
where
|
||||
N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
|
||||
{
|
||||
let ptr_model = PointerModel(*self);
|
||||
let ptr = ctx
|
||||
.builder
|
||||
.build_array_alloca(
|
||||
self.get_type(ctx.ctx).as_basic_type_enum(),
|
||||
num_elements.value,
|
||||
name,
|
||||
)
|
||||
.unwrap();
|
||||
let pointer = ptr_model.believe_value(ptr);
|
||||
|
||||
ArraySlice { pointer, num_elements }
|
||||
}
|
||||
|
||||
/// Do [`CodeGenerator::gen_var_alloc`] with the LLVM type of this [`Model<'ctx>`].
|
||||
fn var_alloc<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&str>,
|
||||
) -> Result<Pointer<'ctx, Self>, String> {
|
||||
let ptr_model = PointerModel(*self);
|
||||
let ptr =
|
||||
generator.gen_var_alloc(ctx, self.get_type(ctx.ctx).as_basic_type_enum(), name)?;
|
||||
Ok(ptr_model.believe_value(ptr))
|
||||
}
|
||||
|
||||
/// Do [`CodeGenerator::gen_array_var_alloc`] with the LLVM type of this [`Model<'ctx>`].
|
||||
fn array_var_alloc<G: CodeGenerator + ?Sized, N>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
num_elements: Instance<'ctx, N>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> Result<ArraySlice<'ctx, N, Self>, String>
|
||||
where
|
||||
N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
|
||||
{
|
||||
let ptr_model = PointerModel(*self);
|
||||
|
||||
// TODO: Remove ProxyType ArraySlice
|
||||
let ptr = ptr_model.believe_value(PointerValue::from(generator.gen_array_var_alloc(
|
||||
ctx,
|
||||
self.get_type(ctx.ctx).as_basic_type_enum(),
|
||||
num_elements.value,
|
||||
name,
|
||||
)?));
|
||||
|
||||
Ok(ArraySlice { num_elements, pointer: ptr })
|
||||
}
|
||||
}
|
||||
|
||||
/// An LLVM value of a type of a [`Model<'ctx>`].
|
||||
///
|
||||
/// It is guaranteed that [`Instance::value`]'s LLVM type
|
||||
/// has been *checked* to match [`Instance::model`].
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Instance<'ctx, M: Model<'ctx>> {
|
||||
pub model: M,
|
||||
pub value: M::Value,
|
||||
_phantom: PhantomData<&'ctx ()>,
|
||||
}
|
|
@ -1,161 +0,0 @@
|
|||
use core::fmt;
|
||||
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicTypeEnum, IntType},
|
||||
values::{BasicValue, IntValue},
|
||||
};
|
||||
|
||||
use super::{
|
||||
core::*,
|
||||
int_util::{check_int_llvm_type, int_constant, review_int_llvm_value},
|
||||
Int, IntModel,
|
||||
};
|
||||
|
||||
/// A marker trait to mark a singleton struct that describes a particular fixed integer type.
|
||||
/// See [`Bool`], [`Byte`], [`Int32`], etc.
|
||||
///
|
||||
/// The [`Default`] trait is to enable auto-instantiations.
|
||||
pub trait NIntKind: fmt::Debug + Clone + Copy + Default + PartialEq + Eq {
|
||||
/// Get the [`IntType<'ctx>`] of this [`NIntKind`].
|
||||
fn get_int_type(ctx: &Context) -> IntType<'_>;
|
||||
|
||||
    /// Get the bit width of the [`IntType<'ctx>`] of this [`NIntKind`].
|
||||
///
|
||||
/// Compared to using [`NIntKind::get_int_type`], this
|
||||
/// function does not require [`Context`].
|
||||
fn get_bit_width() -> u32;
|
||||
}
|
||||
|
||||
/// A [`Model`] representing an [`IntType<'ctx>`] of a specified bit width.
|
||||
///
|
||||
/// Also see [`IntModel`], which is less constrained than [`NIntModel`],
|
||||
/// but enables one to handle dynamic [`IntType<'ctx>`] at runtime.
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct NIntModel<T: NIntKind>(pub T);
|
||||
pub type NInt<'ctx, T> = Instance<'ctx, NIntModel<T>>;
|
||||
|
||||
impl<'ctx, T: NIntKind> CanCheckLLVMType<'ctx> for NIntModel<T> {
|
||||
fn check_llvm_type_impl(
|
||||
&self,
|
||||
ctx: &'ctx Context,
|
||||
ty: BasicTypeEnum<'ctx>,
|
||||
) -> Result<(), ModelError> {
|
||||
check_int_llvm_type(ty, T::get_int_type(ctx))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx, T: NIntKind> Model<'ctx> for NIntModel<T> {
|
||||
type Type = IntType<'ctx>;
|
||||
type Value = IntValue<'ctx>;
|
||||
|
||||
fn get_type(&self, ctx: &'ctx Context) -> Self::Type {
|
||||
T::get_int_type(ctx)
|
||||
}
|
||||
|
||||
fn review_value<V: BasicValue<'ctx>>(
|
||||
&self,
|
||||
ctx: &'ctx Context,
|
||||
value: V,
|
||||
) -> Result<NInt<'ctx, T>, ModelError> {
|
||||
let value = review_int_llvm_value(value.as_basic_value_enum(), T::get_int_type(ctx))?;
|
||||
Ok(self.believe_value(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: NIntKind> NIntModel<T> {
|
||||
/// "Demote" this [`NIntModel<T>`] to an [`IntModel`].
|
||||
///
|
||||
/// Information about the [`NIntKind`] will be lost.
|
||||
pub fn to_int_model(self, ctx: &Context) -> IntModel<'_> {
|
||||
IntModel(T::get_int_type(ctx))
|
||||
}
|
||||
|
||||
/// Create an unsigned constant of this [`NIntModel`].
|
||||
pub fn constant<'ctx>(&self, ctx: &'ctx Context, value: u64) -> NInt<'ctx, T> {
|
||||
int_constant(ctx, *self, value)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx, T: NIntKind> NInt<'ctx, T> {
|
||||
/// "Demote" this [`NInt<T>`] to an [`Int`].
|
||||
///
|
||||
/// Information about the [`NIntKind`] will be lost.
|
||||
pub fn to_int(self, ctx: &'ctx Context) -> Int<'ctx> {
|
||||
let int_model = self.model.to_int_model(ctx);
|
||||
int_model.believe_value(self.value)
|
||||
}
|
||||
}
|
||||
|
||||
// Some pre-defined fixed integer types
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct Bool;
|
||||
pub type BoolModel = NIntModel<Bool>;
|
||||
|
||||
impl NIntKind for Bool {
|
||||
fn get_int_type(ctx: &Context) -> IntType<'_> {
|
||||
ctx.bool_type()
|
||||
}
|
||||
|
||||
fn get_bit_width() -> u32 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
// Extra utilities for [`Bool`]
|
||||
impl NIntModel<Bool> {
|
||||
/// Create a constant `false`
|
||||
#[must_use]
|
||||
pub fn const_false<'ctx>(&self, ctx: &'ctx Context) -> NInt<'ctx, Bool> {
|
||||
self.constant(ctx, 0)
|
||||
}
|
||||
|
||||
/// Create a constant `true`
|
||||
#[must_use]
|
||||
pub fn const_true<'ctx>(&self, ctx: &'ctx Context) -> NInt<'ctx, Bool> {
|
||||
self.constant(ctx, 1)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct Byte;
|
||||
pub type ByteModel = NIntModel<Byte>;
|
||||
|
||||
impl NIntKind for Byte {
|
||||
fn get_int_type(ctx: &Context) -> IntType<'_> {
|
||||
ctx.i8_type()
|
||||
}
|
||||
|
||||
fn get_bit_width() -> u32 {
|
||||
8
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct Int32;
|
||||
pub type Int32Model = NIntModel<Int32>;
|
||||
|
||||
impl NIntKind for Int32 {
|
||||
fn get_int_type(ctx: &Context) -> IntType<'_> {
|
||||
ctx.i32_type()
|
||||
}
|
||||
|
||||
fn get_bit_width() -> u32 {
|
||||
32
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
|
||||
pub struct Int64;
|
||||
pub type Int64Model = NIntModel<Int64>;
|
||||
|
||||
impl NIntKind for Int64 {
|
||||
fn get_int_type(ctx: &Context) -> IntType<'_> {
|
||||
ctx.i64_type()
|
||||
}
|
||||
|
||||
fn get_bit_width() -> u32 {
|
||||
64
|
||||
}
|
||||
}
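// Illustrative constants built from the fixed-width models above (assumes a
// `ctx: &CodeGenContext` in scope; the variable names are hypothetical):
//
//     let zero_byte = NIntModel(Byte).constant(ctx.ctx, 0);     // i8 0
//     let truthy = BoolModel::default().const_true(ctx.ctx);    // i1 1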
|
|
@ -1,66 +0,0 @@
|
|||
use inkwell::{
|
||||
types::{BasicMetadataTypeEnum, BasicType},
|
||||
values::{AnyValue, BasicMetadataValueEnum, BasicValue, BasicValueEnum},
|
||||
};
|
||||
|
||||
use crate::codegen::{model::*, CodeGenContext};
|
||||
|
||||
// TODO: Variadic argument?
|
||||
pub struct FunctionBuilder<'ctx, 'a> {
|
||||
ctx: &'a CodeGenContext<'ctx, 'a>,
|
||||
fn_name: &'a str,
|
||||
arguments: Vec<(BasicMetadataTypeEnum<'ctx>, BasicMetadataValueEnum<'ctx>)>,
|
||||
}
|
||||
|
||||
impl<'ctx, 'a> FunctionBuilder<'ctx, 'a> {
|
||||
pub fn begin(ctx: &'a CodeGenContext<'ctx, 'a>, fn_name: &'a str) -> Self {
|
||||
FunctionBuilder { ctx, fn_name, arguments: Vec::new() }
|
||||
}
|
||||
|
||||
// NOTE: `_name` is for self-documentation
|
||||
#[must_use]
|
||||
#[allow(clippy::needless_pass_by_value)]
|
||||
pub fn arg<M: Model<'ctx>>(mut self, _name: &'static str, arg: Instance<'ctx, M>) -> Self {
|
||||
self.arguments.push((
|
||||
arg.model.get_type(self.ctx.ctx).as_basic_type_enum().into(),
|
||||
arg.value.as_basic_value_enum().into(),
|
||||
));
|
||||
self
|
||||
}
|
||||
|
||||
pub fn returning<M: Model<'ctx>>(
|
||||
self,
|
||||
name: &'static str,
|
||||
return_model: M,
|
||||
) -> Instance<'ctx, M> {
|
||||
let (param_tys, param_vals): (Vec<_>, Vec<_>) = self.arguments.into_iter().unzip();
|
||||
|
||||
// Get the LLVM function, create (by declaring) the function if it doesn't exist in `ctx.module`.
|
||||
let function = self.ctx.module.get_function(self.fn_name).unwrap_or_else(|| {
|
||||
let fn_type = return_model.get_type(self.ctx.ctx).fn_type(¶m_tys, false);
|
||||
self.ctx.module.add_function(self.fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
// Build call
|
||||
let ret = self.ctx.builder.build_call(function, ¶m_vals, name).unwrap();
|
||||
|
||||
// Check the return value/type
|
||||
let Ok(ret) = BasicValueEnum::try_from(ret.as_any_value_enum()) else {
|
||||
panic!("Return type is not a BasicValue");
|
||||
};
|
||||
return_model.review_value(self.ctx.ctx, ret).unwrap()
|
||||
}
|
||||
|
||||
    // TODO: Code duplication, but otherwise `returning<M: Model<'ctx>>` cannot resolve `M` when no return model is given
|
||||
pub fn returning_void(self) {
|
||||
let (param_tys, param_vals): (Vec<_>, Vec<_>) = self.arguments.into_iter().unzip();
|
||||
|
||||
let function = self.ctx.module.get_function(self.fn_name).unwrap_or_else(|| {
|
||||
let return_type = self.ctx.ctx.void_type();
|
||||
let fn_type = return_type.fn_type(¶m_tys, false);
|
||||
self.ctx.module.add_function(self.fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
self.ctx.builder.build_call(function, ¶m_vals, "").unwrap();
|
||||
}
|
||||
}
|
|
@@ -1,107 +0,0 @@
use inkwell::{
    context::Context,
    types::{BasicTypeEnum, IntType},
    values::{BasicValue, IntValue},
};

use super::{
    core::*,
    int_util::{check_int_llvm_type, int_constant, review_int_llvm_value},
};

/// A model representing an [`IntType<'ctx>`].
///
/// Also see [`NIntModel`][`super::NIntModel`], which is more constrained than [`IntModel`]
/// but provides more type-safe mechanisms and even auto-derivation of [`BasicTypeEnum<'ctx>`]
/// for creating LLVM structures.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct IntModel<'ctx>(pub IntType<'ctx>);

pub type Int<'ctx> = Instance<'ctx, IntModel<'ctx>>;

impl<'ctx> CanCheckLLVMType<'ctx> for IntModel<'ctx> {
    fn check_llvm_type_impl(
        &self,
        _ctx: &'ctx Context,
        ty: BasicTypeEnum<'ctx>,
    ) -> Result<(), ModelError> {
        check_int_llvm_type(ty, self.0)
    }
}

impl<'ctx> Model<'ctx> for IntModel<'ctx> {
    type Value = IntValue<'ctx>;
    type Type = IntType<'ctx>;

    fn get_type(&self, _ctx: &'ctx Context) -> Self::Type {
        self.0
    }

    fn review_value<V: BasicValue<'ctx>>(
        &self,
        _ctx: &'ctx Context,
        value: V,
    ) -> Result<Int<'ctx>, ModelError> {
        let value = review_int_llvm_value(value.as_basic_value_enum(), self.0)?;
        Ok(self.believe_value(value))
    }
}

impl<'ctx> IntModel<'ctx> {
    /// Create a constant value that inhabits this [`IntModel<'ctx>`].
    #[must_use]
    pub fn constant(&self, ctx: &'ctx Context, value: u64) -> Int<'ctx> {
        int_constant(ctx, *self, value)
    }
}

impl<'ctx> From<IntValue<'ctx>> for Int<'ctx> {
    fn from(value: IntValue<'ctx>) -> Self {
        let model = IntModel(value.get_type());
        model.believe_value(value)
    }
}

/// A model representing an [`IntType<'ctx>`] that happens to be defined as `size_t`.
///
/// This is specifically created to guide developers to write `size_t`-dependent code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SizeTModel<'ctx>(pub IntType<'ctx>);

pub type SizeT<'ctx> = Instance<'ctx, SizeTModel<'ctx>>;

impl<'ctx> CanCheckLLVMType<'ctx> for SizeTModel<'ctx> {
    fn check_llvm_type_impl(
        &self,
        _ctx: &'ctx Context,
        ty: BasicTypeEnum<'ctx>,
    ) -> Result<(), ModelError> {
        check_int_llvm_type(ty, self.0)
    }
}

impl<'ctx> Model<'ctx> for SizeTModel<'ctx> {
    type Value = IntValue<'ctx>;
    type Type = IntType<'ctx>;

    fn get_type(&self, _ctx: &'ctx Context) -> Self::Type {
        self.0
    }

    fn review_value<V: BasicValue<'ctx>>(
        &self,
        _ctx: &'ctx Context,
        value: V,
    ) -> Result<SizeT<'ctx>, ModelError> {
        let value = review_int_llvm_value(value.as_basic_value_enum(), self.0)?;
        Ok(self.believe_value(value))
    }
}

impl<'ctx> SizeTModel<'ctx> {
    /// Create a constant value that inhabits this [`SizeTModel<'ctx>`].
    #[must_use]
    pub fn constant(&self, ctx: &'ctx Context, value: u64) -> SizeT<'ctx> {
        int_constant(ctx, *self, value)
    }
}
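For orientation, here is a minimal sketch of how the two models above are meant to be used. It only needs an inkwell `Context` and assumes the `IntModel`/`SizeTModel` definitions from this deleted file are in scope; the 64-bit `size_t` width and the `demo_int_models` helper are assumptions for illustration only.

```rust
use inkwell::context::Context;

// Hypothetical helper, not part of the codebase.
fn demo_int_models<'ctx>(ctx: &'ctx Context) {
    // Wrap a concrete inkwell IntType in a model, then mint constants of that exact width.
    let i32_model = IntModel(ctx.i32_type());
    let forty_two = i32_model.constant(ctx, 42); // Int<'ctx> carrying an i32 constant

    // SizeTModel has the same shape, but documents that the width is the target's size_t.
    let sizet_model = SizeTModel(ctx.i64_type()); // assumption: 64-bit size_t
    let zero_len = sizet_model.constant(ctx, 0);

    let _ = (forty_two, zero_len);
}
```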
@@ -1,87 +0,0 @@
use inkwell::{
    context::Context,
    types::{BasicType, BasicTypeEnum, IntType},
    values::{BasicValueEnum, IntValue},
};

use crate::codegen::CodeGenContext;

use super::{Instance, Model, ModelError};

/// Helper function to check if `scrutinee` is the same as `expected_int_type`
pub fn check_int_llvm_type<'ctx>(
    ty: BasicTypeEnum<'ctx>,
    expected_int_type: IntType<'ctx>,
) -> Result<(), ModelError> {
    // Check if llvm_type is an int type
    let BasicTypeEnum::IntType(ty) = ty else {
        return Err(ModelError(format!("Expecting an int type but got {ty:?}")));
    };

    // Check bit width
    if ty.get_bit_width() != expected_int_type.get_bit_width() {
        return Err(ModelError(format!(
            "Expecting an int type of {}-bit(s) but got int type {}-bit(s)",
            expected_int_type.get_bit_width(),
            ty.get_bit_width()
        )));
    }

    Ok(())
}

/// Helper function to cast `scrutinee` into an [`IntValue<'ctx>`].
/// The LLVM type of `scrutinee` will be checked with [`check_int_llvm_type`].
pub fn review_int_llvm_value<'ctx>(
    value: BasicValueEnum<'ctx>,
    expected_int_type: IntType<'ctx>,
) -> Result<IntValue<'ctx>, ModelError> {
    // Check if value is of int type, error if it is anything else
    check_int_llvm_type(value.get_type().as_basic_type_enum(), expected_int_type)?;

    // Ok, it must be an int
    Ok(value.into_int_value())
}

pub fn int_constant<'ctx, M>(ctx: &'ctx Context, model: M, value: u64) -> Instance<'ctx, M>
where
    M: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
{
    let value = model.get_type(ctx).const_int(value, false);
    model.believe_value(value)
}

impl<'ctx, M> Instance<'ctx, M>
where
    M: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
{
    pub fn s_extend_or_bit_cast<N>(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        to_model: N,
        name: &str,
    ) -> Instance<'ctx, N>
    where
        N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
    {
        let value = ctx
            .builder
            .build_int_s_extend_or_bit_cast(self.value, to_model.get_type(ctx.ctx), name)
            .unwrap();
        to_model.believe_value(value)
    }

    pub fn truncate<N>(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        to_model: N,
        name: &str,
    ) -> Instance<'ctx, N>
    where
        N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
    {
        let value =
            ctx.builder.build_int_truncate(self.value, to_model.get_type(ctx.ctx), name).unwrap();
        to_model.believe_value(value)
    }
}
@@ -1,18 +0,0 @@
pub mod core;
pub mod fixed_int;
pub mod function_builder;
pub mod int;
mod int_util;
pub mod opaque;
pub mod pointer;
pub mod slice;
pub mod structure;

pub use core::*;
pub use fixed_int::*;
pub use function_builder::*;
pub use int::*;
pub use opaque::*;
pub use pointer::*;
pub use slice::*;
pub use structure::*;
@@ -1,57 +0,0 @@
use inkwell::{
    context::Context,
    types::BasicTypeEnum,
    values::{BasicValue, BasicValueEnum},
};

use super::*;

/// A [`Model`] that holds an arbitrary [`BasicTypeEnum`].
///
/// Use this and [`Opaque`] when you are dealing with a [`BasicTypeEnum<'ctx>`]
/// at runtime and there is no way to abstract your implementation
/// with [`Model`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct OpaqueModel<'ctx>(pub BasicTypeEnum<'ctx>);

impl<'ctx> CanCheckLLVMType<'ctx> for OpaqueModel<'ctx> {
    fn check_llvm_type_impl(
        &self,
        _ctx: &'ctx Context,
        ty: BasicTypeEnum<'ctx>,
    ) -> Result<(), ModelError> {
        if ty == self.0 {
            Ok(())
        } else {
            Err(ModelError(format!("Expecting {}, but got {}", self.0, ty)))
        }
    }
}

impl<'ctx> Model<'ctx> for OpaqueModel<'ctx> {
    type Value = BasicValueEnum<'ctx>;
    type Type = BasicTypeEnum<'ctx>;

    fn get_type(&self, _ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        self.0
    }

    fn review_value<V: BasicValue<'ctx>>(
        &self,
        ctx: &'ctx Context,
        value: V,
    ) -> Result<Opaque<'ctx>, ModelError> {
        let value = value.as_basic_value_enum();
        self.check_type(ctx, value.get_type())?;
        Ok(self.believe_value(value))
    }
}

pub type Opaque<'ctx> = Instance<'ctx, OpaqueModel<'ctx>>;

impl<'ctx> From<BasicValueEnum<'ctx>> for Opaque<'ctx> {
    fn from(value: BasicValueEnum<'ctx>) -> Self {
        let model = OpaqueModel(value.get_type());
        model.believe_value(value)
    }
}
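A small, hedged sketch of the intended call pattern for the opaque model (it assumes the `OpaqueModel` definition above is in scope; the `f64` type and the `demo_opaque` helper are arbitrary choices for illustration):

```rust
use inkwell::{context::Context, types::BasicType};

// Illustration only: wrap an arbitrary BasicTypeEnum and check a value against it.
fn demo_opaque<'ctx>(ctx: &'ctx Context) {
    let f64_model = OpaqueModel(ctx.f64_type().as_basic_type_enum());
    let v = ctx.f64_type().const_float(1.0);

    // review_value() verifies the LLVM type before wrapping the value into an Instance.
    let opaque = f64_model.review_value(ctx, v).unwrap();
    let _ = opaque;
}
```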
@@ -1,126 +0,0 @@
use inkwell::{
    context::Context,
    types::{BasicType, BasicTypeEnum, PointerType},
    values::{BasicValue, PointerValue},
    AddressSpace,
};

use crate::codegen::{model::*, CodeGenContext};

use super::{core::*, OpaqueModel};

/// A [`Model<'ctx>`] representing an LLVM [`PointerType<'ctx>`]
/// with *full* information on the element type.
///
/// [`self.0`] contains the [`Model<'ctx>`] that represents the
/// LLVM type of the element the [`PointerType<'ctx>`] is pointing at
/// (like `PointerType<'ctx>::get_element_type()`, but abstracted as a [`Model<'ctx>`]).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct PointerModel<E>(pub E);

pub type Pointer<'ctx, E> = Instance<'ctx, PointerModel<E>>;

impl<'ctx, E: Model<'ctx>> CanCheckLLVMType<'ctx> for PointerModel<E> {
    fn check_llvm_type_impl(
        &self,
        ctx: &'ctx Context,
        ty: BasicTypeEnum<'ctx>,
    ) -> Result<(), ModelError> {
        // Check if scrutinee is even a PointerType
        let BasicTypeEnum::PointerType(ty) = ty else {
            return Err(ModelError(format!("Expecting a pointer value, but got {ty:?}")));
        };

        // Check the type of what the pointer is pointing at
        // TODO: This will be deprecated by inkwell > llvm14 because `get_element_type()` will be gone
        let Ok(element_ty) = BasicTypeEnum::try_from(ty.get_element_type()) else {
            return Err(ModelError(format!(
                "Expecting pointer to point to an inkwell BasicValue, but got {ty:?}"
            )));
        };

        self.0.check_type(ctx, element_ty) // TODO: Include backtrace?
    }
}

impl<'ctx, E: Model<'ctx>> Model<'ctx> for PointerModel<E> {
    type Value = PointerValue<'ctx>;
    type Type = PointerType<'ctx>;

    fn get_type(&self, ctx: &'ctx Context) -> Self::Type {
        self.0.get_type(ctx).ptr_type(AddressSpace::default())
    }

    fn review_value<V: BasicValue<'ctx>>(
        &self,
        ctx: &'ctx Context,
        value: V,
    ) -> Result<Pointer<'ctx, E>, ModelError> {
        let value = value.as_basic_value_enum();
        self.check_type(ctx, value.get_type())?;
        Ok(self.believe_value(value.into_pointer_value()))
    }
}

impl<'ctx, E: Model<'ctx>> PointerModel<E> {
    /// Create a null [`Pointer`] of this [`PointerModel`]
    pub fn nullptr(&self, ctx: &'ctx Context) -> Pointer<'ctx, E> {
        let nullptr = self.get_type(ctx).const_null();
        self.believe_value(nullptr)
    }
}

impl<'ctx, E: Model<'ctx>> Pointer<'ctx, E> {
    /// Build an instruction to store a value into this pointer
    pub fn store(&self, ctx: &CodeGenContext<'ctx, '_>, instance: Instance<'ctx, E>) {
        assert_eq!(
            self.model.0, instance.model,
            "Attempting to store an Instance of a different type"
        );
        ctx.builder.build_store(self.value, instance.value).unwrap();
    }

    /// Build an instruction to load a value from this pointer
    pub fn load(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> Instance<'ctx, E> {
        let value = ctx.builder.build_load(self.value, name).unwrap();
        self.model.0.review_value(ctx.ctx, value).unwrap() // If unwrap() panics, there is a logic error in your code.
    }

    /// "Demote" the [`Model`] of the thing this pointer is pointing to.
    pub fn cast_to_opaque(self, ctx: &'ctx Context) -> Pointer<'ctx, OpaqueModel<'ctx>> {
        let ptr_model = PointerModel(OpaqueModel(self.model.get_type(ctx).as_basic_type_enum()));
        ptr_model.believe_value(self.value)
    }

    /// Cast the [`Model`] of the thing this pointer is pointing to,
    /// and use inkwell's [`Builder::build_pointer_cast`] to cast the LLVM pointer type.
    pub fn cast_to<K: Model<'ctx>>(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        element: K,
        name: &str,
    ) -> Pointer<'ctx, K> {
        let casted_ptr_model = PointerModel(element);
        let casted_ptr = ctx
            .builder
            .build_pointer_cast(
                self.value,
                element.get_type(ctx.ctx).ptr_type(AddressSpace::default()),
                name,
            )
            .unwrap();
        casted_ptr_model.believe_value(casted_ptr)
    }

    pub fn is_null(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> NInt<'ctx, Bool> {
        let model = NIntModel(Bool);
        let value = ctx.builder.build_is_null(self.value, name).unwrap();
        model.believe_value(value)
    }

    pub fn is_not_null(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> NInt<'ctx, Bool> {
        let model = NIntModel(Bool);
        let value = ctx.builder.build_is_not_null(self.value, name).unwrap();
        model.believe_value(value)
    }
}
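As a quick sketch of the pointer model (assuming the definitions above plus `IntModel` from `int.rs` are in scope), building a typed null pointer only needs a `Context`; the `demo_nullptr` helper is hypothetical:

```rust
use inkwell::context::Context;

// Illustration only: a pointer-to-i8 model, e.g. for raw byte buffers.
fn demo_nullptr<'ctx>(ctx: &'ctx Context) {
    let pbyte_model = PointerModel(IntModel(ctx.i8_type()));
    let null = pbyte_model.nullptr(ctx); // Pointer<'ctx, IntModel<'ctx>> holding `const null`
    let _ = null;
}
```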
@@ -1,94 +0,0 @@
use inkwell::{types::IntType, values::IntValue};

use crate::codegen::{CodeGenContext, CodeGenerator};

use super::{int_util::int_constant, Instance, Model, Pointer};

/// An LLVM "slice" - literally just a pointer and a length value.
/// The pointer points to a location with `num_elements` **contiguously** placed
/// values of [`E`][`Model<ctx>`] in memory.
///
/// NOTE: This is NOT a [`Model`]! This is simply a helper
/// structure to aggregate a length value and a pointer together.
pub struct ArraySlice<'ctx, N, E>
where
    N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
    E: Model<'ctx>,
{
    pub pointer: Pointer<'ctx, E>,
    pub num_elements: Instance<'ctx, N>,
}

impl<'ctx, N, E> ArraySlice<'ctx, N, E>
where
    N: Model<'ctx, Value = IntValue<'ctx>, Type = IntType<'ctx>>,
    E: Model<'ctx>,
{
    /// Get the [Model][`super::Model`] of the element type of this [`ArraySlice`]
    pub fn get_element_model(&self) -> E {
        self.pointer.model.0
    }

    /// Get the `idx`-th element of this [`ArraySlice`],
    /// without asserting whether `idx` is out of bounds.
    ///
    /// Also see [`ArraySlice::ix`].
    pub fn ix_unchecked(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        idx: Instance<'ctx, N>,
        name: &str,
    ) -> Pointer<'ctx, E> {
        assert_eq!(idx.model, self.num_elements.model);
        let element_ptr = unsafe {
            ctx.builder.build_in_bounds_gep(self.pointer.value, &[idx.value], name).unwrap()
        };
        self.pointer.model.review_value(ctx.ctx, element_ptr).unwrap()
    }

    /// Like [`ArraySlice::ix_unchecked`], but
    /// checks if `idx` is in bounds; otherwise
    /// a runtime `IndexError` will be thrown.
    pub fn ix<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        idx: Instance<'ctx, N>,
        name: &str,
    ) -> Pointer<'ctx, E> {
        assert_eq!(idx.model, self.num_elements.model);
        let int_type = self.num_elements.model;

        // Assert `0 <= idx < length` and throw an Exception if `idx` is out of bounds
        let lower_bounded = ctx
            .builder
            .build_int_compare(
                inkwell::IntPredicate::SLE,
                int_constant(ctx.ctx, int_type, 0).value,
                idx.value,
                "lower_bounded",
            )
            .unwrap();
        let upper_bounded = ctx
            .builder
            .build_int_compare(
                inkwell::IntPredicate::SLT,
                idx.value,
                self.num_elements.value,
                "upper_bounded",
            )
            .unwrap();
        let bounded = ctx.builder.build_and(lower_bounded, upper_bounded, "bounded").unwrap();
        ctx.make_assert(
            generator,
            bounded,
            "0:IndexError",
            "nac3core LLVM codegen attempting to access out of bounds array index {0}. Must satisfy 0 <= index < {1}",
            [Some(idx.value), Some(self.num_elements.value), None],
            ctx.current_loc,
        );

        self.ix_unchecked(ctx, idx, name)
    }
}
@@ -1,384 +0,0 @@
use core::fmt;

use inkwell::{
    context::Context,
    types::{BasicType, BasicTypeEnum, StructType},
    values::{BasicValue, StructValue},
};
use itertools::{izip, Itertools};

use crate::codegen::CodeGenContext;

use super::{core::CanCheckLLVMType, Instance, Model, ModelError, Pointer, PointerModel};

/// An LLVM struct's "field".
#[derive(Debug, Clone, Copy)]
pub struct Field<E> {
    /// The GEP index of this field.
    pub gep_index: u64,

    /// The name of this field. Generally named
    /// after how the field is named in ARTIQ or IRRT.
    ///
    /// NOTE: This is only used for debugging.
    pub name: &'static str,

    /// The [`Model`] of this field.
    pub model: E,
}

// A helper struct for [`FieldBuilder`]
struct FieldLLVM<'ctx> {
    gep_index: u64,
    name: &'ctx str,

    // Only CanCheckLLVMType is needed, don't use `Model<'ctx>`
    llvm_type_model: Box<dyn CanCheckLLVMType<'ctx> + 'ctx>,
    llvm_type: BasicTypeEnum<'ctx>,
}

/// A helper struct to create [`Field`]-s in [`StructKind::build_fields`].
///
/// See [`StructKind`] for more details and see how [`FieldBuilder`] is put
/// into action.
pub struct FieldBuilder<'ctx> {
    /// The [`Context`] this [`FieldBuilder`] is under.
    ///
    /// Can be used in [`StructKind::build_fields`].
    /// See [`StructKind`] for more details and see how [`FieldBuilder`] is put
    /// into action.
    pub ctx: &'ctx Context,

    /// An incrementing counter for GEP indices when
    /// doing [`FieldBuilder::add_field`] or [`FieldBuilder::add_field_auto`].
    gep_index_counter: u64,

    /// Name of the `struct` this [`FieldBuilder`] is currently
    /// building.
    ///
    /// NOTE: This is only used for debugging.
    struct_name: &'ctx str,

    /// The fields added so far.
    fields: Vec<FieldLLVM<'ctx>>,
}

impl<'ctx> FieldBuilder<'ctx> {
    #[must_use]
    pub fn new(ctx: &'ctx Context, struct_name: &'ctx str) -> Self {
        FieldBuilder { ctx, gep_index_counter: 0, struct_name, fields: Vec::new() }
    }

    fn next_gep_index(&mut self) -> u64 {
        let index = self.gep_index_counter;
        self.gep_index_counter += 1;
        index
    }

    /// Add a new field.
    ///
    /// - `name`: The name of the field. See [`Field::name`].
    /// - `element`: The [`Model`] of the type of the field. See [`Field::model`].
    pub fn add_field<E: Model<'ctx> + 'ctx>(&mut self, name: &'static str, element: E) -> Field<E> {
        let gep_index = self.next_gep_index();

        self.fields.push(FieldLLVM {
            gep_index,
            name,
            llvm_type: element.get_type(self.ctx).as_basic_type_enum(),
            llvm_type_model: Box::new(element),
        });

        Field { gep_index, name, model: element }
    }

    /// Like [`FieldBuilder::add_field`] but `element` can be **automatically derived**
    /// if it has a [`Default`] instance.
    ///
    /// Certain [`Model`]s have a [`Default`] implementation - [`Model`]s that are just singletons.
    /// By deriving the [`Default`] trait on those [`Model`]s, Rust can automatically
    /// construct the [`Model`] with [`Default::default`].
    ///
    /// This function is equivalent to
    /// ```ignore
    /// self.add_field(name, E::default())
    /// ```
    pub fn add_field_auto<E: Model<'ctx> + Default + 'ctx>(
        &mut self,
        name: &'static str,
    ) -> Field<E> {
        self.add_field(name, E::default())
    }
}

/// A marker trait to mark a singleton struct that
/// describes a particular LLVM structure.
///
/// It is a powerful inkwell abstraction that can reduce
/// a lot of inkwell boilerplate when dealing with LLVM structs,
/// `getelementptr`, `load`-ing and `store`-ing fields.
///
/// ### Usage
pub trait StructKind<'ctx>: fmt::Debug + Clone + Copy + PartialEq + Eq {
    /// The type of the Rust `struct` that holds all the fields of this LLVM struct.
    type Fields;

    // TODO:
    /// The name of this [`StructKind`].
    ///
    /// The name should match the name of the corresponding
    /// `struct` in IRRT or ARTIQ's definition.
    fn struct_name(&self) -> &'static str;

    /// Define the [`Field`]s of this [`StructKind`].
    ///
    /// ### Syntax
    ///
    /// Suppose you want to define the following C++ `struct`s in `nac3core`:
    /// ```cpp
    /// template <typename SizeT>
    /// struct Str {
    ///     uint8_t* content; // NOTE: could be `void *`
    ///     SizeT length;
    /// }
    ///
    /// template <typename SizeT>
    /// struct Exception {
    ///     uint32_t id;
    ///     Str message;
    ///     uint64_t param0;
    ///     uint64_t param1;
    ///     uint64_t param2;
    /// }
    /// ```
    ///
    /// You write this in nac3core:
    /// ```ignore
    /// struct Str<'ctx> {
    ///     sizet: IntModel<'ctx>,
    /// }
    ///
    /// struct StrFields<'ctx> {
    ///     content: Field<PointerModel<ByteModel>>, // equivalent to `NIntModel<Byte>`.
    ///     length: Field<IntModel<'ctx>>, // `SizeT` is only known at runtime - `CodeGenerator::get_size_type()`.
    /// }
    ///
    /// impl StructKind<'ctx> for Str<'ctx> {
    ///     fn struct_name(&self) -> &'static str {
    ///         "Str"
    ///     }
    ///
    ///     fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
    ///         // THE order of `builder.add_field*` is IMPORTANT!!!
    ///         // so the GEP indices would be correct.
    ///         StrFields {
    ///             content: builder.add_field_auto("content"), // `PointerModel<ByteModel>` has `Default` trait.
    ///             length: builder.add_field("length", IntModel(self.sizet)),
    ///         }
    ///     }
    /// }
    ///
    /// struct Exception<'ctx> {
    ///     sizet: IntModel<'ctx>,
    /// }
    ///
    /// struct ExceptionFields<'ctx> {
    ///     id: Field<NIntModel<Int32>>,
    ///     message: Field<StructModel<Str>>,
    ///     param0: Field<NIntModel<Int64>>,
    ///     param1: Field<NIntModel<Int64>>,
    ///     param2: Field<NIntModel<Int64>>,
    /// }
    ///
    /// impl StructKind<'ctx> for Exception<'ctx> {
    ///     fn struct_name(&self) -> &'static str {
    ///         "Exception"
    ///     }
    ///
    ///     fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
    ///         // THE order of `builder.add_field*` is IMPORTANT!!!
    ///         // so the GEP indices would be correct.
    ///         ExceptionFields {
    ///             id: builder.add_field_auto("id"), // `NIntModel<Int32>` has `Default` trait.
    ///             message: builder.add_field("message", StructModel(Str { sizet: self.sizet })),
    ///             param0: builder.add_field_auto("param0"), // has `Default` trait
    ///             param1: builder.add_field_auto("param1"), // has `Default` trait
    ///             param2: builder.add_field_auto("param2"), // has `Default` trait
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// Then to `alloca` an `Exception`, do this:
    /// ```ignore
    /// let generator: dyn CodeGenerator<'ctx>;
    /// let ctx: &CodeGenContext<'ctx, '_>;
    /// let sizet = generator.get_size_type();
    /// let exn_model = StructModel(Exception { sizet });
    /// let exn = exn_model.alloca(ctx, "my_exception"); // Every [`Model<'ctx>`] has an `.alloca()` function.
    /// // exn: Pointer<'ctx, StructModel<Exception>>
    /// ```
    ///
    /// NOTE: In fact, it is possible to define `Str` and `Exception` like this:
    /// ```ignore
    /// struct Str<SizeT: NIntModel> {
    ///     _phantom: PhantomData<SizeT>,
    /// }
    ///
    /// struct Exception<SizeT: NIntModel> {
    ///     _phantom: PhantomData<SizeT>,
    /// }
    /// ```
    /// But issues arise because you don't know the nac3core
    /// `CodeGenerator`'s `get_size_type()` beforehand.
    fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields;
}

/// A [`Model<'ctx>`] that represents an LLVM struct.
///
/// `self.0` contains a [`StructKind<'ctx>`] that gives the details of the LLVM struct.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct StructModel<S>(pub S);
pub type Struct<'ctx, S> = Instance<'ctx, StructModel<S>>;

impl<'ctx, S: StructKind<'ctx>> CanCheckLLVMType<'ctx> for StructModel<S> {
    fn check_llvm_type_impl(
        &self,
        ctx: &'ctx Context,
        ty: BasicTypeEnum<'ctx>,
    ) -> Result<(), ModelError> {
        // Check if scrutinee is even a struct type
        let BasicTypeEnum::StructType(ty) = ty else {
            return Err(ModelError(format!("Expecting a struct type, but got {ty:?}")));
        };

        // Ok, now check the struct type thoroughly
        self.check_struct_type(ctx, ty)
    }
}

impl<'ctx, S: StructKind<'ctx>> Model<'ctx> for StructModel<S> {
    type Value = StructValue<'ctx>;
    type Type = StructType<'ctx>;

    fn get_type(&self, ctx: &'ctx Context) -> Self::Type {
        self.get_struct_type(ctx)
    }

    fn review_value<V: BasicValue<'ctx>>(
        &self,
        ctx: &'ctx Context,
        value: V,
    ) -> Result<Struct<'ctx, S>, ModelError> {
        let value = value.as_basic_value_enum();
        self.check_type(ctx, value.get_type())?;
        Ok(self.believe_value(value.into_struct_value()))
    }
}

impl<'ctx, S: StructKind<'ctx>> StructModel<S> {
    /// Get the [`S::Fields`] of this [`StructModel`].
    pub fn get_fields(&self, ctx: &'ctx Context) -> S::Fields {
        let mut builder = FieldBuilder::new(ctx, self.0.struct_name());
        self.0.build_fields(&mut builder)
    }

    /// Get the LLVM struct type this [`StructModel<'ctx>`] is representing.
    pub fn get_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
        let mut builder = FieldBuilder::new(ctx, self.0.struct_name());
        self.0.build_fields(&mut builder); // Self::Fields is discarded

        let field_types = builder.fields.iter().map(|f| f.llvm_type).collect_vec();
        ctx.struct_type(&field_types, false)
    }

    /// Check if `scrutinee` matches the [`StructType<'ctx>`] this [`StructModel<'ctx>`] is representing.
    pub fn check_struct_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: StructType<'ctx>,
    ) -> Result<(), ModelError> {
        // Details about scrutinee
        let scrutinee_field_types = scrutinee.get_field_types();

        // Details about the defined specifications of this struct
        // We will access them through builder
        let mut builder = FieldBuilder::new(ctx, self.0.struct_name());
        self.0.build_fields(&mut builder);

        // Check # of fields
        if builder.fields.len() != scrutinee_field_types.len() {
            return Err(ModelError(format!(
                "Expecting struct to have {} field(s), but scrutinee has {} field(s)",
                builder.fields.len(),
                scrutinee_field_types.len()
            )));
        }

        // Check the types of each field
        // TODO: Traceback?
        for (f, scrutinee_field_type) in izip!(builder.fields, scrutinee_field_types) {
            f.llvm_type_model
                .check_llvm_type_impl(ctx, scrutinee_field_type.as_basic_type_enum())?;
        }

        Ok(())
    }
}

impl<'ctx, S: StructKind<'ctx>> Pointer<'ctx, StructModel<S>> {
    /// Build an instruction that does `getelementptr` on an LLVM structure referenced by this pointer.
    ///
    /// This provides a nice syntax to chain up `getelementptr` in an intuitive and type-safe way:
    ///
    /// ```ignore
    /// let ctx: &CodeGenContext<'ctx, '_>;
    /// let ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>;
    /// ndarray.gep(ctx, |f| f.ndims).store();
    /// ```
    ///
    /// You might even write chains of `gep`, i.e.,
    /// ```ignore
    /// let exn_ptr: Pointer<'ctx, StructModel<Exception>>;
    /// let my_value: Int<'ctx>; // Suppose it has the correct inkwell `IntType<'ctx>`.
    ///
    /// // To do `exn.message.length = my_value`:
    /// let exn_message_ptr = exn_ptr.gep(ctx, |f| f.message);
    /// let exn_message_length_ptr = exn_message_ptr.gep(ctx, |f| f.length);
    /// exn_message_length_ptr.store(ctx, my_value);
    ///
    /// // or simply:
    /// exn_ptr
    ///     .gep(ctx, |f| f.message)
    ///     .gep(ctx, |f| f.length)
    ///     .store(ctx, my_value) // Equivalent to `exn.message.length = my_value`
    /// ```
    pub fn gep<E, GetFieldFn>(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        get_field: GetFieldFn,
    ) -> Pointer<'ctx, E>
    where
        E: Model<'ctx>,
        GetFieldFn: FnOnce(S::Fields) -> Field<E>,
    {
        let fields = self.model.0.get_fields(ctx.ctx);
        let field = get_field(fields);

        // TODO: I think I'm not supposed to *just* use i32 for GEP like that
        let llvm_i32 = ctx.ctx.i32_type();

        let ptr_model = PointerModel(field.model);
        let ptr = unsafe {
            ctx.builder
                .build_in_bounds_gep(
                    self.value,
                    &[llvm_i32.const_zero(), llvm_i32.const_int(field.gep_index, false)],
                    field.name,
                )
                .unwrap()
        };
        ptr_model.believe_value(ptr)
    }
}
File diff suppressed because it is too large
@@ -1,213 +0,0 @@
use inkwell::{
    types::BasicType,
    values::{BasicValue, BasicValueEnum, PointerValue},
};
use nac3parser::ast::StrRef;

use crate::{
    codegen::{
        irrt::ndarray::{
            allocation::{alloca_ndarray, init_ndarray_data_by_alloca, init_ndarray_shape},
            fill::call_nac3_ndarray_fill_generic,
        },
        model::*,
        structs::ndarray::NpArray,
        util::shape::parse_input_shape_arg,
        CodeGenContext, CodeGenerator,
    },
    symbol_resolver::ValueEnum,
    toplevel::DefinitionId,
    typecheck::typedef::{FunSignature, Type},
};

/// Helper function to create an ndarray with uninitialized values
///
/// * `elem_ty` - The [`Type`] of the ndarray elements
/// * `shape` - The user input shape argument
/// * `shape_ty` - The [`Type`] of the shape argument
/// * `name` - LLVM IR name of the returned ndarray
fn create_empty_ndarray<'ctx, G>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    elem_ty: Type,
    shape: BasicValueEnum<'ctx>,
    shape_ty: Type,
    name: &str,
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
where
    G: CodeGenerator + ?Sized,
{
    let sizet = generator.get_sizet(ctx.ctx);

    let shape_writer = parse_input_shape_arg(generator, ctx, shape, shape_ty);
    let ndims = shape_writer.count;

    let ndarray = alloca_ndarray(generator, ctx, ndims, name)?;
    init_ndarray_shape(generator, ctx, ndarray, &shape_writer)?;

    let itemsize = sizet
        .review_value(ctx.ctx, ctx.get_llvm_type(generator, elem_ty).size_of().unwrap())
        .unwrap();
    ndarray.gep(ctx, |f| f.itemsize).store(ctx, itemsize);

    init_ndarray_data_by_alloca(generator, ctx, ndarray); // Needs `itemsize` and `shape` initialized first

    Ok(ndarray)
}

/// Helper function to create an ndarray full of a value.
///
/// * `elem_ty` - The [`Type`] of the ndarray elements and the fill value
/// * `shape` - The user input shape argument
/// * `shape_ty` - The [`Type`] of the shape argument
/// * `fill_value` - The user specified fill value
/// * `name` - LLVM IR name of the returned ndarray
fn create_full_ndarray<'ctx, G>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    elem_ty: Type,
    shape: BasicValueEnum<'ctx>,
    shape_ty: Type,
    fill_value: BasicValueEnum<'ctx>,
    name: &str,
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
where
    G: CodeGenerator + ?Sized,
{
    let byte_model = NIntModel(Byte);
    let fill_value_model = OpaqueModel(fill_value.get_type());

    // Caller has to put fill_value on the stack and pass its address
    let fill_value_ptr = fill_value_model.alloca(ctx, "fill_value_ptr");
    fill_value_ptr.store(ctx, fill_value_model.believe_value(fill_value));
    let fill_value_ptr = fill_value_ptr.cast_to(ctx, byte_model, "fill_value_bytes_ptr");

    let ndarray_ptr = create_empty_ndarray(generator, ctx, elem_ty, shape, shape_ty, name)?;
    call_nac3_ndarray_fill_generic(generator, ctx, ndarray_ptr, fill_value_ptr);

    Ok(ndarray_ptr)
}

/// Generates LLVM IR for `np.empty`.
pub fn gen_ndarray_empty<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert_eq!(args.len(), 1);

    // Parse arguments
    let shape_ty = fun.0.args[0].ty;
    let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;

    // Implementation
    let ndarray_ptr = create_empty_ndarray(
        generator,
        context,
        context.primitives.float,
        shape,
        shape_ty,
        "ndarray",
    )?;
    Ok(ndarray_ptr.value)
}

/// Generates LLVM IR for `np.zeros`.
pub fn gen_ndarray_zeros<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert_eq!(args.len(), 1);

    // Parse arguments
    let shape_ty = fun.0.args[0].ty;
    let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;

    // Implementation
    // NOTE: Currently nac3's `np.zeros` is always `float64`.
    let float64_ty = context.primitives.float;
    let float64_llvm_type = context.get_llvm_type(generator, float64_ty).into_float_type();

    let ndarray_ptr = create_full_ndarray(
        generator,
        context,
        float64_ty, // `elem_ty` is always `float64`
        shape,
        shape_ty,
        float64_llvm_type.const_zero().as_basic_value_enum(),
        "ndarray",
    )?;
    Ok(ndarray_ptr.value)
}

/// Generates LLVM IR for `np.ones`.
pub fn gen_ndarray_ones<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert_eq!(args.len(), 1);

    // Parse arguments
    let shape_ty = fun.0.args[0].ty;
    let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;

    // Implementation
    // NOTE: Currently nac3's `np.ones` is always `float64`.
    let float64_ty = context.primitives.float;
    let float64_llvm_type = context.get_llvm_type(generator, float64_ty).into_float_type();

    let ndarray_ptr = create_full_ndarray(
        generator,
        context,
        float64_ty, // `elem_ty` is always `float64`
        shape,
        shape_ty,
        float64_llvm_type.const_float(1.0).as_basic_value_enum(),
        "ndarray",
    )?;
    Ok(ndarray_ptr.value)
}

/// Generates LLVM IR for `np.full`.
pub fn gen_ndarray_full<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert_eq!(args.len(), 2);

    // Parse argument #1 shape
    let shape_ty = fun.0.args[0].ty;
    let shape_arg = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;

    // Parse argument #2 fill_value
    let fill_value_ty = fun.0.args[1].ty;
    let fill_value_arg =
        args[1].1.clone().to_basic_value_enum(context, generator, fill_value_ty)?;

    // Implementation
    let ndarray_ptr = create_full_ndarray(
        generator,
        context,
        fill_value_ty,
        shape_arg,
        shape_ty,
        fill_value_arg,
        "ndarray",
    )?;
    Ok(ndarray_ptr.value)
}
@@ -1,2 +0,0 @@
pub mod factory;
pub mod view;
@@ -1,169 +0,0 @@
use inkwell::values::PointerValue;
use nac3parser::ast::StrRef;

use crate::{
    codegen::{
        irrt::ndarray::{
            allocation::{alloca_ndarray, init_ndarray_shape},
            basic::{
                call_nac3_ndarray_is_c_contiguous, call_nac3_ndarray_nbytes,
                call_nac3_ndarray_set_strides_by_shape, call_nac3_ndarray_size,
            },
            reshape::call_nac3_ndarray_resolve_and_check_new_shape,
            transpose::call_nac3_ndarray_transpose,
        },
        model::*,
        structs::{list::List, ndarray::NpArray},
        util::{array_writer::ArrayWriter, shape::parse_input_shape_arg},
        CodeGenContext, CodeGenerator,
    },
    symbol_resolver::ValueEnum,
    toplevel::DefinitionId,
    typecheck::typedef::{FunSignature, Type},
};

fn reshape_ndarray_or_copy<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
    new_shape: &ArrayWriter<'ctx, G, SizeTModel<'ctx>, SizeTModel<'ctx>>,
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String> {
    let byte_model = NIntModel(Byte);

    /*
    Reference pseudo-code:
    ```c
    NDArray<SizeT>* src_ndarray;

    NDArray<SizeT>* dst_ndarray = __builtin_alloca(...);
    dst_ndarray->ndims = ...
    dst_ndarray->strides = __builtin_alloca(...);
    dst_ndarray->shape = ... // Directly set by user, may contain -1, or even illegal values.
    dst_ndarray->itemsize = src_ndarray->itemsize;
    set_strides_by_shape(dst_ndarray);

    // Do assertions on `dst_ndarray->shape` and resolve -1

    resolve_and_check_new_shape(ndarray_size(src_ndarray), dst_ndarray->shape);

    if (is_c_contiguous(src_ndarray)) {
        dst_ndarray->data = src_ndarray->data;
    } else {
        dst_ndarray->data = __builtin_alloca( ndarray_nbytes(dst_ndarray) );
        copy_data(src_ndarray, dst_ndarray);
    }

    return dst_ndarray;
    ```
    */

    let current_bb = ctx.builder.get_insert_block().unwrap();
    let then_bb = ctx.ctx.insert_basic_block_after(current_bb, "then");
    let else_bb = ctx.ctx.insert_basic_block_after(then_bb, "else_bb");
    let end_bb = ctx.ctx.insert_basic_block_after(else_bb, "end_bb");

    // current_bb
    let dst_ndarray = alloca_ndarray(generator, ctx, new_shape.count, "ndarray").unwrap();

    init_ndarray_shape(generator, ctx, dst_ndarray, new_shape)?;
    dst_ndarray
        .gep(ctx, |f| f.itemsize)
        .store(ctx, src_ndarray.gep(ctx, |f| f.itemsize).load(ctx, "itemsize"));

    call_nac3_ndarray_set_strides_by_shape(generator, ctx, dst_ndarray);

    let src_ndarray_size = call_nac3_ndarray_size(generator, ctx, src_ndarray);
    call_nac3_ndarray_resolve_and_check_new_shape(
        generator,
        ctx,
        src_ndarray_size,
        dst_ndarray.gep(ctx, |f| f.ndims).load(ctx, "ndims"),
        dst_ndarray.gep(ctx, |f| f.shape).load(ctx, "shape"),
    );

    let is_c_contiguous = call_nac3_ndarray_is_c_contiguous(generator, ctx, src_ndarray);
    ctx.builder.build_conditional_branch(is_c_contiguous.value, then_bb, else_bb).unwrap();

    // then_bb: reshape is possible without copying
    ctx.builder.position_at_end(then_bb);
    dst_ndarray.gep(ctx, |f| f.data).store(ctx, src_ndarray.gep(ctx, |f| f.data).load(ctx, "data"));
    ctx.builder.build_unconditional_branch(end_bb).unwrap();

    // else_bb: reshape is impossible without copying
    ctx.builder.position_at_end(else_bb);
    let dst_ndarray_nbytes = call_nac3_ndarray_nbytes(generator, ctx, dst_ndarray);
    let data = byte_model.array_alloca(ctx, dst_ndarray_nbytes, "new_data").pointer;
    dst_ndarray.gep(ctx, |f| f.data).store(ctx, data);
    ctx.builder.build_unconditional_branch(end_bb).unwrap();

    // Reposition for continuation
    ctx.builder.position_at_end(end_bb);

    Ok(dst_ndarray)
}

/// Generates LLVM IR for `np.reshape`.
pub fn gen_ndarray_reshape<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert_eq!(args.len(), 2);

    // Parse argument #1 ndarray
    let ndarray_ty = fun.0.args[0].ty;
    let ndarray_arg = args[0].1.clone().to_basic_value_enum(context, generator, ndarray_ty)?;

    // Parse argument #2 shape
    let shape_ty = fun.0.args[1].ty;
    let shape_arg = args[1].1.clone().to_basic_value_enum(context, generator, shape_ty)?;

    let sizet = generator.get_sizet(context.ctx);
    let pndarray_model = PointerModel(StructModel(NpArray { sizet }));

    let src_ndarray = pndarray_model.review_value(context.ctx, ndarray_arg).unwrap();
    let new_shape = parse_input_shape_arg(generator, context, shape_arg, shape_ty);

    let reshaped_ndarray = reshape_ndarray_or_copy(generator, context, src_ndarray, &new_shape)?;
    Ok(reshaped_ndarray.value)
}

pub fn gen_ndarray_transpose<'ctx>(
    context: &mut CodeGenContext<'ctx, '_>,
    obj: &Option<(Type, ValueEnum<'ctx>)>,
    fun: (&FunSignature, DefinitionId),
    args: &[(Option<StrRef>, ValueEnum<'ctx>)],
    generator: &mut dyn CodeGenerator,
) -> Result<PointerValue<'ctx>, String> {
    assert!(obj.is_none());
    assert!(matches!(args.len(), 1 | 2));

    let sizet = generator.get_sizet(context.ctx);
    let in_axes_model = PointerModel(StructModel(List { sizet, element: NIntModel(Int32) }));

    // Parse argument #1 ndarray
    let ndarray_ty = fun.0.args[0].ty;
    let ndarray_arg = args[0].1.clone().to_basic_value_enum(context, generator, ndarray_ty)?;

    // Parse argument #2 axes (optional)
    let in_axes = if args.len() == 2 {
        let in_shape_ty = fun.0.args[1].ty;
        let in_shape_arg =
            args[1].1.clone().to_basic_value_enum(context, generator, in_shape_ty)?;

        let in_shape = in_axes_model.review_value(context.ctx, in_shape_arg).unwrap();

        let num_axes = in_shape.gep(context, |f| f.size).load(context, "num_axes");
        let axes = sizet.array_alloca(context, num_axes, "num_axes");

        Some((in_shape_ty, in_shape_arg))
    } else {
        None
    };
    // call_nac3_ndarray_transpose(generator, ctx, src_ndarray, dst_ndarray, axes_or_none)

    todo!()
}
@ -1,23 +1,3 @@
|
|||
use super::{
|
||||
super::symbol_resolver::ValueEnum,
|
||||
expr::destructure_range,
|
||||
irrt::{handle_slice_indices, list_slice_assignment},
|
||||
model::*,
|
||||
structs::{cslice::CSlice, exception::Exception},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
use crate::{
|
||||
codegen::{
|
||||
classes::{ArrayLikeIndexer, ArraySliceValue, ListValue, RangeValue},
|
||||
expr::gen_binop_expr,
|
||||
gen_in_range_check,
|
||||
},
|
||||
toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, DefinitionId, TopLevelDef},
|
||||
typecheck::{
|
||||
magic_methods::Binop,
|
||||
typedef::{FunSignature, Type, TypeEnum},
|
||||
},
|
||||
};
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
basic_block::BasicBlock,
|
||||
|
@ -25,10 +5,28 @@ use inkwell::{
|
|||
values::{BasicValue, BasicValueEnum, FunctionValue, IntValue, PointerValue},
|
||||
IntPredicate,
|
||||
};
|
||||
use itertools::{izip, Itertools};
|
||||
|
||||
use nac3parser::ast::{
|
||||
Constant, ExcepthandlerKind, Expr, ExprKind, Location, Stmt, StmtKind, StrRef,
|
||||
};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use super::{
|
||||
expr::{destructure_range, gen_binop_expr},
|
||||
gen_in_range_check,
|
||||
irrt::{handle_slice_indices, list_slice_assignment},
|
||||
macros::codegen_unreachable,
|
||||
values::{ArrayLikeIndexer, ArraySliceValue, ListValue, RangeValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
use crate::{
|
||||
symbol_resolver::ValueEnum,
|
||||
toplevel::{DefinitionId, TopLevelDef},
|
||||
typecheck::{
|
||||
magic_methods::Binop,
|
||||
typedef::{iter_type_vars, FunSignature, Type, TypeEnum},
|
||||
},
|
||||
};
|
||||
|
||||
/// See [`CodeGenerator::gen_var_alloc`].
|
||||
pub fn gen_var<'ctx>(
|
||||
|
@ -99,8 +97,6 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
|
|||
pattern: &Expr<Option<Type>>,
|
||||
name: Option<&str>,
|
||||
) -> Result<Option<PointerValue<'ctx>>, String> {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
// very similar to gen_expr, but we don't do an extra load at the end
|
||||
// and we flatten nested tuples
|
||||
Ok(Some(match &pattern.node {
|
||||
|
@ -125,7 +121,7 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
|
|||
return Ok(None);
|
||||
};
|
||||
let BasicValueEnum::PointerValue(ptr) = val else {
|
||||
unreachable!();
|
||||
codegen_unreachable!(ctx);
|
||||
};
|
||||
unsafe {
|
||||
ctx.builder.build_in_bounds_gep(
|
||||
|
@ -139,66 +135,7 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
|
|||
}
|
||||
.unwrap()
|
||||
}
|
||||
ExprKind::Subscript { value, slice, .. } => {
|
||||
match ctx.unifier.get_ty_immutable(value.custom.unwrap()).as_ref() {
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::List.id() => {
|
||||
let v = generator
|
||||
.gen_expr(ctx, value)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, value.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let v = ListValue::from_ptr_val(v, llvm_usize, None);
|
||||
let len = v.load_size(ctx, Some("len"));
|
||||
let raw_index = generator
|
||||
.gen_expr(ctx, slice)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
|
||||
.into_int_value();
|
||||
let raw_index = ctx
|
||||
.builder
|
||||
.build_int_s_extend(raw_index, generator.get_size_type(ctx.ctx), "sext")
|
||||
.unwrap();
|
||||
// handle negative index
|
||||
let is_negative = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::SLT,
|
||||
raw_index,
|
||||
generator.get_size_type(ctx.ctx).const_zero(),
|
||||
"is_neg",
|
||||
)
|
||||
.unwrap();
|
||||
let adjusted = ctx.builder.build_int_add(raw_index, len, "adjusted").unwrap();
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_select(is_negative, adjusted, raw_index, "index")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
// unsigned less than is enough, because negative index after adjustment is
|
||||
// bigger than the length (for unsigned cmp)
|
||||
let bound_check = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::ULT, index, len, "inbound")
|
||||
.unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
bound_check,
|
||||
"0:IndexError",
|
||||
"index {0} out of bounds 0:{1}",
|
||||
[Some(raw_index), Some(len), None],
|
||||
slice.location,
|
||||
);
|
||||
v.data().ptr_offset(ctx, generator, &index, name)
|
||||
}
|
||||
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
todo!()
|
||||
}
|
||||
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
}))
|
||||
}
|
||||
|
||||
|
@ -208,70 +145,20 @@ pub fn gen_assign<'ctx, G: CodeGenerator>(
|
|||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String> {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
// See https://docs.python.org/3/reference/simple_stmts.html#assignment-statements.
|
||||
match &target.node {
|
||||
ExprKind::Tuple { elts, .. } => {
|
||||
let BasicValueEnum::StructValue(v) =
|
||||
value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
for (i, elt) in elts.iter().enumerate() {
|
||||
let v = ctx
|
||||
.builder
|
||||
.build_extract_value(v, u32::try_from(i).unwrap(), "struct_elem")
|
||||
.unwrap();
|
||||
generator.gen_assign(ctx, elt, v.into())?;
|
||||
ExprKind::Subscript { value: target, slice: key, .. } => {
|
||||
// Handle "slicing" or "subscription"
|
||||
generator.gen_setitem(ctx, target, key, value, value_ty)?;
|
||||
}
|
||||
}
|
||||
ExprKind::Subscript { value: ls, slice, .. }
|
||||
if matches!(&slice.node, ExprKind::Slice { .. }) =>
|
||||
{
|
||||
let ExprKind::Slice { lower, upper, step } = &slice.node else { unreachable!() };
|
||||
|
||||
let ls = generator
|
||||
.gen_expr(ctx, ls)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ls.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let ls = ListValue::from_ptr_val(ls, llvm_usize, None);
|
||||
let Some((start, end, step)) =
|
||||
handle_slice_indices(lower, upper, step, ctx, generator, ls.load_size(ctx, None))?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
let value = value
|
||||
.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let value = ListValue::from_ptr_val(value, llvm_usize, None);
|
||||
let ty = match &*ctx.unifier.get_ty_immutable(target.custom.unwrap()) {
|
||||
TypeEnum::TObj { obj_id, params, .. } if *obj_id == PrimDef::List.id() => {
|
||||
*params.iter().next().unwrap().1
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, target.custom.unwrap()).0
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let ty = ctx.get_llvm_type(generator, ty);
|
||||
let Some(src_ind) = handle_slice_indices(
|
||||
&None,
|
||||
&None,
|
||||
&None,
|
||||
ctx,
|
||||
generator,
|
||||
value.load_size(ctx, None),
|
||||
)?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
list_slice_assignment(generator, ctx, ty, ls, (start, end, step), value, src_ind);
|
||||
ExprKind::Tuple { elts, .. } | ExprKind::List { elts, .. } => {
|
||||
// Fold on `"[" [target_list] "]"` and `"(" [target_list] ")"`
|
||||
generator.gen_assign_target_list(ctx, elts, value, value_ty)?;
|
||||
}
|
||||
_ => {
|
||||
// Handle attribute and direct variable assignments.
|
||||
let name = if let ExprKind::Name { id, .. } = &target.node {
|
||||
format!("{id}.addr")
|
||||
} else {
|
||||
|
@ -289,19 +176,259 @@ pub fn gen_assign<'ctx, G: CodeGenerator>(
|
|||
}
|
||||
}
|
||||
let val = value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?;
|
||||
|
||||
// Perform i1 <-> i8 conversion as needed
|
||||
let val = if ctx.unifier.unioned(target.custom.unwrap(), ctx.primitives.bool) {
|
||||
generator.bool_to_i8(ctx, val.into_int_value()).into()
|
||||
} else {
|
||||
val
|
||||
};
|
||||
|
||||
ctx.builder.build_store(ptr, val).unwrap();
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// See [`CodeGenerator::gen_assign_target_list`].
|
||||
pub fn gen_assign_target_list<'ctx, G: CodeGenerator>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
targets: &Vec<Expr<Option<Type>>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String> {
|
||||
// Deconstruct the tuple `value`
|
||||
let BasicValueEnum::StructValue(tuple) = value.to_basic_value_enum(ctx, generator, value_ty)?
|
||||
else {
|
||||
codegen_unreachable!(ctx)
|
||||
};
|
||||
|
||||
// NOTE: Currently, RHS's type is forced to be a Tuple by the type inferencer.
|
||||
let TypeEnum::TTuple { ty: tuple_tys, .. } = &*ctx.unifier.get_ty(value_ty) else {
|
||||
codegen_unreachable!(ctx);
|
||||
};
|
||||
|
||||
assert_eq!(tuple.get_type().count_fields() as usize, tuple_tys.len());
|
||||
|
||||
let tuple = (0..tuple.get_type().count_fields())
|
||||
.map(|i| ctx.builder.build_extract_value(tuple, i, "item").unwrap())
|
||||
.collect_vec();
|
||||
|
||||
// Find the starred target if it exists.
|
||||
let mut starred_target_index: Option<usize> = None; // Index of the "starred" target. If it exists, there may only be one.
|
||||
for (i, target) in targets.iter().enumerate() {
|
||||
if matches!(target.node, ExprKind::Starred { .. }) {
|
||||
assert!(starred_target_index.is_none()); // The typechecker ensures this
|
||||
starred_target_index = Some(i);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(starred_target_index) = starred_target_index {
|
||||
assert!(tuple_tys.len() >= targets.len() - 1); // The typechecker ensures this
|
||||
|
||||
let a = starred_target_index; // Number of RHS values before the starred target
|
||||
let b = tuple_tys.len() - (targets.len() - 1 - starred_target_index); // Number of RHS values after the starred target
|
||||
// Thus `tuple[a..b]` is assigned to the starred target.
|
||||
|
||||
// Handle assignment before the starred target
|
||||
for (target, val, val_ty) in
|
||||
izip!(&targets[..starred_target_index], &tuple[..a], &tuple_tys[..a])
|
||||
{
|
||||
generator.gen_assign(ctx, target, ValueEnum::Dynamic(*val), *val_ty)?;
|
||||
}
|
||||
|
||||
// Handle assignment to the starred target
|
||||
if let ExprKind::Starred { value: target, .. } = &targets[starred_target_index].node {
|
||||
let vals = &tuple[a..b];
|
||||
let val_tys = &tuple_tys[a..b];
|
||||
|
||||
// Create a sub-tuple from `value` for the starred target.
|
||||
let sub_tuple_ty = ctx
|
||||
.ctx
|
||||
.struct_type(&vals.iter().map(BasicValueEnum::get_type).collect_vec(), false);
|
||||
let psub_tuple_val =
|
||||
ctx.builder.build_alloca(sub_tuple_ty, "starred_target_value_ptr").unwrap();
|
||||
for (i, val) in vals.iter().enumerate() {
|
||||
let pitem = ctx
|
||||
.builder
|
||||
.build_struct_gep(psub_tuple_val, i as u32, "starred_target_value_item")
|
||||
.unwrap();
|
||||
ctx.builder.build_store(pitem, *val).unwrap();
|
||||
}
|
||||
let sub_tuple_val =
|
||||
ctx.builder.build_load(psub_tuple_val, "starred_target_value").unwrap();
|
||||
|
||||
// Create the typechecker type of the sub-tuple
|
||||
let sub_tuple_ty =
|
||||
ctx.unifier.add_ty(TypeEnum::TTuple { ty: val_tys.to_vec(), is_vararg_ctx: false });
|
||||
|
||||
// Now assign with that sub-tuple to the starred target.
|
||||
generator.gen_assign(ctx, target, ValueEnum::Dynamic(sub_tuple_val), sub_tuple_ty)?;
|
||||
} else {
|
||||
codegen_unreachable!(ctx) // The typechecker ensures this
|
||||
}
|
||||
|
||||
// Handle assignment after the starred target
|
||||
for (target, val, val_ty) in
|
||||
izip!(&targets[starred_target_index + 1..], &tuple[b..], &tuple_tys[b..])
|
||||
{
|
||||
generator.gen_assign(ctx, target, ValueEnum::Dynamic(*val), *val_ty)?;
|
||||
}
|
||||
} else {
|
||||
assert_eq!(tuple_tys.len(), targets.len()); // The typechecker ensures this
|
||||
|
||||
for (target, val, val_ty) in izip!(targets, tuple, tuple_tys) {
|
||||
generator.gen_assign(ctx, target, ValueEnum::Dynamic(val), *val_ty)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
/// See [`CodeGenerator::gen_setitem`].
pub fn gen_setitem<'ctx, G: CodeGenerator>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
target: &Expr<Option<Type>>,
key: &Expr<Option<Type>>,
value: ValueEnum<'ctx>,
value_ty: Type,
) -> Result<(), String> {
let target_ty = target.custom.unwrap();
let key_ty = key.custom.unwrap();

match &*ctx.unifier.get_ty(target_ty) {
TypeEnum::TObj { obj_id, params: list_params, .. }
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
{
// Handle list item assignment
let llvm_usize = generator.get_size_type(ctx.ctx);
let target_item_ty = iter_type_vars(list_params).next().unwrap().ty;

let target = generator
.gen_expr(ctx, target)?
.unwrap()
.to_basic_value_enum(ctx, generator, target_ty)?
.into_pointer_value();
let target = ListValue::from_pointer_value(target, llvm_usize, None);

if let ExprKind::Slice { .. } = &key.node {
// Handle assigning to a slice
let ExprKind::Slice { lower, upper, step } = &key.node else {
codegen_unreachable!(ctx)
};
let Some((start, end, step)) = handle_slice_indices(
lower,
upper,
step,
ctx,
generator,
target.load_size(ctx, None),
)?
else {
return Ok(());
};

let value =
value.to_basic_value_enum(ctx, generator, value_ty)?.into_pointer_value();
let value = ListValue::from_pointer_value(value, llvm_usize, None);

let target_item_ty = ctx.get_llvm_type(generator, target_item_ty);
let Some(src_ind) = handle_slice_indices(
&None,
&None,
&None,
ctx,
generator,
value.load_size(ctx, None),
)?
else {
return Ok(());
};
list_slice_assignment(
generator,
ctx,
target_item_ty,
target,
(start, end, step),
value,
src_ind,
);
} else {
// Handle assigning to an index
let len = target.load_size(ctx, Some("len"));

let index = generator
.gen_expr(ctx, key)?
.unwrap()
.to_basic_value_enum(ctx, generator, key_ty)?
.into_int_value();
let index = ctx
.builder
.build_int_s_extend(index, generator.get_size_type(ctx.ctx), "sext")
.unwrap();

// handle negative index
let is_negative = ctx
.builder
.build_int_compare(
IntPredicate::SLT,
index,
generator.get_size_type(ctx.ctx).const_zero(),
"is_neg",
)
.unwrap();
let adjusted = ctx.builder.build_int_add(index, len, "adjusted").unwrap();
let index = ctx
.builder
.build_select(is_negative, adjusted, index, "index")
.map(BasicValueEnum::into_int_value)
.unwrap();

// unsigned less than is enough, because negative index after adjustment is
// bigger than the length (for unsigned cmp)
let bound_check = ctx
.builder
.build_int_compare(IntPredicate::ULT, index, len, "inbound")
.unwrap();
ctx.make_assert(
generator,
bound_check,
"0:IndexError",
"index {0} out of bounds 0:{1}",
[Some(index), Some(len), None],
key.location,
);

// Write value to index on list
let item_ptr =
target.data().ptr_offset(ctx, generator, &index, Some("list_item_ptr"));
let value = value.to_basic_value_enum(ctx, generator, value_ty)?;
ctx.builder.build_store(item_ptr, value).unwrap();
}
}
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
{
// Handle NDArray item assignment
todo!("ndarray subscript assignment is not yet implemented");
}
_ => {
panic!("encountered unknown target type: {}", ctx.unifier.stringify(target_ty));
}
}
Ok(())
}
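The negative-index handling above adds `len` to a negative index and then relies on a single unsigned comparison for the bounds check: any index that is still negative after adjustment (or was too large to begin with) wraps to a value at or above `len` when viewed as unsigned. Below is a minimal host-side sketch of the same rule on plain integers, independent of the LLVM builder calls; the function name and signature are illustrative and not part of nac3core.

```rust
/// Mirror of the index adjustment emitted above, on plain integers.
/// Returns the in-bounds element index, or None when an IndexError would be raised.
fn checked_list_index(index: i64, len: usize) -> Option<usize> {
    // Negative indices count from the end, as in Python.
    let adjusted = if index < 0 { index + len as i64 } else { index };
    // One unsigned comparison covers both "still negative" and "too large":
    // a negative value reinterpreted as unsigned exceeds any valid length.
    if (adjusted as u64) < len as u64 {
        Some(adjusted as usize)
    } else {
        None
    }
}

fn main() {
    assert_eq!(checked_list_index(-1, 4), Some(3));
    assert_eq!(checked_list_index(4, 4), None);
    assert_eq!(checked_list_index(-5, 4), None);
}
```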
/// See [`CodeGenerator::gen_for`].
pub fn gen_for<G: CodeGenerator>(
generator: &mut G,
ctx: &mut CodeGenContext<'_, '_>,
stmt: &Stmt<Option<Type>>,
) -> Result<(), String> {
let StmtKind::For { iter, target, body, orelse, .. } = &stmt.node else { unreachable!() };
let StmtKind::For { iter, target, body, orelse, .. } = &stmt.node else {
codegen_unreachable!(ctx)
};

// var_assignment static values may be changed in another branch
// if so, remove the static value as it may not be correct in this branch
@@ -317,9 +444,6 @@ pub fn gen_for<G: CodeGenerator>(
let orelse_bb =
if orelse.is_empty() { cont_bb } else { ctx.ctx.append_basic_block(current, "for.orelse") };

// Whether the iterable is a range() expression
let is_iterable_range_expr = ctx.unifier.unioned(iter.custom.unwrap(), ctx.primitives.range);

// The BB containing the increment expression
let incr_bb = ctx.ctx.append_basic_block(current, "for.incr");
// The BB containing the loop condition check
@@ -328,27 +452,36 @@ pub fn gen_for<G: CodeGenerator>(
// store loop bb information and restore it later
let loop_bb = ctx.loop_target.replace((incr_bb, cont_bb));

let iter_ty = iter.custom.unwrap();
let iter_val = if let Some(v) = generator.gen_expr(ctx, iter)? {
v.to_basic_value_enum(ctx, generator, iter.custom.unwrap())?
v.to_basic_value_enum(ctx, generator, iter_ty)?
} else {
return Ok(());
};
if is_iterable_range_expr {
let iter_val = RangeValue::from_ptr_val(iter_val.into_pointer_value(), Some("range"));

match &*ctx.unifier.get_ty(iter_ty) {
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.range.obj_id(&ctx.unifier).unwrap() =>
{
let iter_val =
RangeValue::from_pointer_value(iter_val.into_pointer_value(), Some("range"));
// Internal variable for loop; Cannot be assigned
let i = generator.gen_var_alloc(ctx, int32.into(), Some("for.i.addr"))?;
// Variable declared in "target" expression of the loop; Can be reassigned *or* shadowed
let Some(target_i) = generator.gen_store_target(ctx, target, Some("for.target.addr"))?
let Some(target_i) =
generator.gen_store_target(ctx, target, Some("for.target.addr"))?
else {
unreachable!()
codegen_unreachable!(ctx)
};
let (start, stop, step) = destructure_range(ctx, iter_val);

ctx.builder.build_store(i, start).unwrap();

// Check "If step is zero, ValueError is raised."
let rangenez =
ctx.builder.build_int_compare(IntPredicate::NE, step, int32.const_zero(), "").unwrap();
let rangenez = ctx
.builder
.build_int_compare(IntPredicate::NE, step, int32.const_zero(), "")
.unwrap();
ctx.make_assert(
generator,
rangenez,
@@ -365,7 +498,10 @@ pub fn gen_for<G: CodeGenerator>(
.build_conditional_branch(
gen_in_range_check(
ctx,
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
ctx.builder
.build_load(i, "")
.map(BasicValueEnum::into_int_value)
.unwrap(),
stop,
step,
),
@@ -395,7 +531,10 @@ pub fn gen_for<G: CodeGenerator>(
)
.unwrap();
generator.gen_block(ctx, body.iter())?;
} else {
}
TypeEnum::TObj { obj_id, params: list_params, .. }
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
{
let index_addr = generator.gen_var_alloc(ctx, size_t.into(), Some("for.index.addr"))?;
ctx.builder.build_store(index_addr, size_t.const_zero()).unwrap();
let len = ctx
@@ -433,9 +572,14 @@ pub fn gen_for<G: CodeGenerator>(
.map(BasicValueEnum::into_int_value)
.unwrap();
let val = ctx.build_gep_and_load(arr_ptr, &[index], Some("val"));
generator.gen_assign(ctx, target, val.into())?;
let val_ty = iter_type_vars(list_params).next().unwrap().ty;
generator.gen_assign(ctx, target, val.into(), val_ty)?;
generator.gen_block(ctx, body.iter())?;
}
_ => {
panic!("unsupported for loop iterator type: {}", ctx.unifier.stringify(iter_ty));
}
}

for (k, (_, _, counter)) in &var_assignment {
let (_, static_val, counter2) = ctx.var_assignment.get_mut(k).unwrap();
@@ -496,6 +640,7 @@ pub struct BreakContinueHooks<'ctx> {
pub fn gen_for_callback<'ctx, 'a, G, I, InitFn, CondFn, BodyFn, UpdateFn>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, 'a>,
label: Option<&str>,
init: InitFn,
cond: CondFn,
body: BodyFn,
@@ -506,18 +651,24 @@ where
I: Clone,
InitFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result<I, String>,
CondFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, I) -> Result<IntValue<'ctx>, String>,
BodyFn:
FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, BreakContinueHooks, I) -> Result<(), String>,
BodyFn: FnOnce(
&mut G,
&mut CodeGenContext<'ctx, 'a>,
BreakContinueHooks<'ctx>,
I,
) -> Result<(), String>,
UpdateFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, I) -> Result<(), String>,
{
let label = label.unwrap_or("for");

let current_bb = ctx.builder.get_insert_block().unwrap();
let init_bb = ctx.ctx.insert_basic_block_after(current_bb, "for.init");
let init_bb = ctx.ctx.insert_basic_block_after(current_bb, &format!("{label}.init"));
// The BB containing the loop condition check
let cond_bb = ctx.ctx.insert_basic_block_after(init_bb, "for.cond");
let body_bb = ctx.ctx.insert_basic_block_after(cond_bb, "for.body");
let cond_bb = ctx.ctx.insert_basic_block_after(init_bb, &format!("{label}.cond"));
let body_bb = ctx.ctx.insert_basic_block_after(cond_bb, &format!("{label}.body"));
// The BB containing the increment expression
let update_bb = ctx.ctx.insert_basic_block_after(body_bb, "for.update");
let cont_bb = ctx.ctx.insert_basic_block_after(update_bb, "for.end");
let update_bb = ctx.ctx.insert_basic_block_after(body_bb, &format!("{label}.update"));
let cont_bb = ctx.ctx.insert_basic_block_after(update_bb, &format!("{label}.end"));

// store loop bb information and restore it later
let loop_bb = ctx.loop_target.replace((update_bb, cont_bb));

@@ -574,6 +725,7 @@ where
pub fn gen_for_callback_incrementing<'ctx, 'a, G, BodyFn>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, 'a>,
label: Option<&str>,
init_val: IntValue<'ctx>,
max_val: (IntValue<'ctx>, bool),
body: BodyFn,
@@ -584,7 +736,7 @@ where
BodyFn: FnOnce(
&mut G,
&mut CodeGenContext<'ctx, 'a>,
BreakContinueHooks,
BreakContinueHooks<'ctx>,
IntValue<'ctx>,
) -> Result<(), String>,
{
@@ -593,6 +745,7 @@ where
gen_for_callback(
generator,
ctx,
label,
|generator, ctx| {
let i_addr = generator.gen_var_alloc(ctx, init_val_t.into(), None)?;
ctx.builder.build_store(i_addr, init_val).unwrap();
@@ -644,9 +797,11 @@ where
/// - `step_fn`: A lambda of IR statements that retrieves the `step` value of the `range`-like
/// iterable. This value will be extended to the size of `start`.
/// - `body_fn`: A lambda of IR statements within the loop body.
#[allow(clippy::too_many_arguments)]
pub fn gen_for_range_callback<'ctx, 'a, G, StartFn, StopFn, StepFn, BodyFn>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, 'a>,
label: Option<&str>,
is_unsigned: bool,
start_fn: StartFn,
(stop_fn, stop_inclusive): (StopFn, bool),
@@ -658,13 +813,19 @@ where
StartFn: Fn(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result<IntValue<'ctx>, String>,
StopFn: Fn(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result<IntValue<'ctx>, String>,
StepFn: Fn(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result<IntValue<'ctx>, String>,
BodyFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, IntValue<'ctx>) -> Result<(), String>,
BodyFn: FnOnce(
&mut G,
&mut CodeGenContext<'ctx, 'a>,
BreakContinueHooks<'ctx>,
IntValue<'ctx>,
) -> Result<(), String>,
{
let init_val_t = start_fn(generator, ctx).map(IntValue::get_type).unwrap();

gen_for_callback(
generator,
ctx,
label,
|generator, ctx| {
let i_addr = generator.gen_var_alloc(ctx, init_val_t.into(), None)?;

@@ -722,10 +883,10 @@ where

Ok(cond)
},
|generator, ctx, _, (i_addr, _)| {
|generator, ctx, hooks, (i_addr, _)| {
let i = ctx.builder.build_load(i_addr, "").map(BasicValueEnum::into_int_value).unwrap();

body_fn(generator, ctx, i)
body_fn(generator, ctx, hooks, i)
},
|generator, ctx, (i_addr, _)| {
let i = ctx.builder.build_load(i_addr, "").map(BasicValueEnum::into_int_value).unwrap();
@@ -753,7 +914,7 @@ pub fn gen_while<G: CodeGenerator>(
ctx: &mut CodeGenContext<'_, '_>,
stmt: &Stmt<Option<Type>>,
) -> Result<(), String> {
let StmtKind::While { test, body, orelse, .. } = &stmt.node else { unreachable!() };
let StmtKind::While { test, body, orelse, .. } = &stmt.node else { codegen_unreachable!(ctx) };

// var_assignment static values may be changed in another branch
// if so, remove the static value as it may not be correct in this branch
@@ -783,7 +944,7 @@ pub fn gen_while<G: CodeGenerator>(

return Ok(());
};
let BasicValueEnum::IntValue(test) = test else { unreachable!() };
let BasicValueEnum::IntValue(test) = test else { codegen_unreachable!(ctx) };

ctx.builder
.build_conditional_branch(generator.bool_to_i1(ctx, test), body_bb, orelse_bb)
@@ -931,7 +1092,7 @@ pub fn gen_if<G: CodeGenerator>(
ctx: &mut CodeGenContext<'_, '_>,
stmt: &Stmt<Option<Type>>,
) -> Result<(), String> {
let StmtKind::If { test, body, orelse, .. } = &stmt.node else { unreachable!() };
let StmtKind::If { test, body, orelse, .. } = &stmt.node else { codegen_unreachable!(ctx) };

// var_assignment static values may be changed in another branch
// if so, remove the static value as it may not be correct in this branch
@@ -1054,11 +1215,11 @@ pub fn exn_constructor<'ctx>(
let zelf_id = if let TypeEnum::TObj { obj_id, .. } = &*ctx.unifier.get_ty(zelf_ty) {
obj_id.0
} else {
unreachable!()
codegen_unreachable!(ctx)
};
let defs = ctx.top_level.definitions.read();
let def = defs[zelf_id].read();
let TopLevelDef::Class { name: zelf_name, .. } = &*def else { unreachable!() };
let TopLevelDef::Class { name: zelf_name, .. } = &*def else { codegen_unreachable!(ctx) };
let exception_name = format!("{}:{}", ctx.resolver.get_exception_id(zelf_id), zelf_name);
unsafe {
let id_ptr = ctx.builder.build_in_bounds_gep(zelf, &[zero, zero], "exn.id").unwrap();
@@ -1115,37 +1276,47 @@ pub fn exn_constructor<'ctx>(
pub fn gen_raise<'ctx, G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
exception: Option<Pointer<'ctx, StructModel<Exception<'ctx>>>>,
exception: Option<&BasicValueEnum<'ctx>>,
loc: Location,
) {
if let Some(pexn) = exception {
let sizet = generator.get_sizet(ctx.ctx);
let i32_model = NIntModel(Int32);
let cslice_model = StructModel(CSlice { sizet });
if let Some(exception) = exception {
unsafe {
let int32 = ctx.ctx.i32_type();
let zero = int32.const_zero();
let exception = exception.into_pointer_value();
let file_ptr = ctx
.builder
.build_in_bounds_gep(exception, &[zero, int32.const_int(1, false)], "file_ptr")
.unwrap();
let filename = ctx.gen_string(generator, loc.file.0);
ctx.builder.build_store(file_ptr, filename).unwrap();
let row_ptr = ctx
.builder
.build_in_bounds_gep(exception, &[zero, int32.const_int(2, false)], "row_ptr")
.unwrap();
ctx.builder.build_store(row_ptr, int32.const_int(loc.row as u64, false)).unwrap();
let col_ptr = ctx
.builder
.build_in_bounds_gep(exception, &[zero, int32.const_int(3, false)], "col_ptr")
.unwrap();
ctx.builder.build_store(col_ptr, int32.const_int(loc.column as u64, false)).unwrap();

// Get and store filename
let filename = loc.file.0;
let filename = ctx.gen_string(generator, &String::from(filename)).value;
let filename = cslice_model.review_value(ctx.ctx, filename).unwrap();
pexn.gep(ctx, |f| f.filename).store(ctx, filename);

let row = i32_model.constant(ctx.ctx, loc.row as u64);
pexn.gep(ctx, |f| f.line).store(ctx, row);

let column = i32_model.constant(ctx.ctx, loc.column as u64);
pexn.gep(ctx, |f| f.column).store(ctx, column);

let current_fn = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
let fn_name = ctx.gen_string(generator, current_fn.get_name().to_str().unwrap());
pexn.gep(ctx, |f| f.function_name).store(ctx, fn_name);
let current_fun = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
let fun_name = ctx.gen_string(generator, current_fun.get_name().to_str().unwrap());
let name_ptr = ctx
.builder
.build_in_bounds_gep(exception, &[zero, int32.const_int(4, false)], "name_ptr")
.unwrap();
ctx.builder.build_store(name_ptr, fun_name).unwrap();
}

let raise = get_builtins(generator, ctx, "__nac3_raise");
ctx.build_call_or_invoke(raise, &[pexn.value.into()], "raise");
let exception = *exception;
ctx.build_call_or_invoke(raise, &[exception], "raise");
} else {
let resume = get_builtins(generator, ctx, "__nac3_resume");
ctx.build_call_or_invoke(resume, &[], "resume");
}

ctx.builder.build_unreachable().unwrap();
}

@@ -1156,7 +1327,7 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
target: &Stmt<Option<Type>>,
) -> Result<(), String> {
let StmtKind::Try { body, handlers, orelse, finalbody, .. } = &target.node else {
unreachable!()
codegen_unreachable!(ctx)
};

// if we need to generate anything related to exception, we must have personality defined
@@ -1233,7 +1404,7 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
if let TypeEnum::TObj { obj_id, .. } = &*ctx.unifier.get_ty(type_.custom.unwrap()) {
*obj_id
} else {
unreachable!()
codegen_unreachable!(ctx)
};
let exception_name = format!("{}:{}", ctx.resolver.get_exception_id(obj_id.0), exn_name);
let exn_id = ctx.resolver.get_string_id(&exception_name);
@@ -1505,6 +1676,23 @@ pub fn gen_return<G: CodeGenerator>(
} else {
None
};

// Remap boolean return type into i1
let value = value.map(|ret_val| {
// The "return type" of a sret function is in the first parameter
let expected_ty = if ctx.need_sret {
func.get_type().get_param_types()[0]
} else {
func.get_type().get_return_type().unwrap()
};

if matches!(expected_ty, BasicTypeEnum::IntType(ty) if ty.get_bit_width() == 1) {
generator.bool_to_i1(ctx, ret_val.into_int_value()).into()
} else {
ret_val
}
});

if let Some(return_target) = ctx.return_target {
if let Some(value) = value {
ctx.builder.build_store(ctx.return_buffer.unwrap(), value).unwrap();
@@ -1515,25 +1703,6 @@ pub fn gen_return<G: CodeGenerator>(
ctx.builder.build_store(ctx.return_buffer.unwrap(), value.unwrap()).unwrap();
ctx.builder.build_return(None).unwrap();
} else {
// Remap boolean return type into i1
let value = value.map(|v| {
let expected_ty = func.get_type().get_return_type().unwrap();
let ret_val = v.as_basic_value_enum();

if expected_ty.is_int_type() && ret_val.is_int_value() {
let ret_type = expected_ty.into_int_type();
let ret_val = ret_val.into_int_value();

if ret_type.get_bit_width() == 1 && ret_val.get_type().get_bit_width() != 1 {
generator.bool_to_i1(ctx, ret_val)
} else {
ret_val
}
.into()
} else {
ret_val
}
});
let value = value.as_ref().map(|v| v as &dyn BasicValue);
ctx.builder.build_return(value).unwrap();
}
@@ -1567,14 +1736,14 @@ pub fn gen_stmt<G: CodeGenerator>(
}
StmtKind::AnnAssign { target, value, .. } => {
if let Some(value) = value {
let Some(value) = generator.gen_expr(ctx, value)? else { return Ok(()) };
generator.gen_assign(ctx, target, value)?;
let Some(value_enum) = generator.gen_expr(ctx, value)? else { return Ok(()) };
generator.gen_assign(ctx, target, value_enum, value.custom.unwrap())?;
}
}
StmtKind::Assign { targets, value, .. } => {
let Some(value) = generator.gen_expr(ctx, value)? else { return Ok(()) };
let Some(value_enum) = generator.gen_expr(ctx, value)? else { return Ok(()) };
for target in targets {
generator.gen_assign(ctx, target, value.clone())?;
generator.gen_assign(ctx, target, value_enum.clone(), value.custom.unwrap())?;
}
}
StmtKind::Continue { .. } => {
@@ -1588,69 +1757,109 @@ pub fn gen_stmt<G: CodeGenerator>(
StmtKind::For { .. } => generator.gen_for(ctx, stmt)?,
StmtKind::With { .. } => generator.gen_with(ctx, stmt)?,
StmtKind::AugAssign { target, op, value, .. } => {
let value = gen_binop_expr(
let value_enum = gen_binop_expr(
generator,
ctx,
target,
Binop::aug_assign(*op),
value,
stmt.location,
)?;
generator.gen_assign(ctx, target, value.unwrap())?;
)?
.unwrap();
generator.gen_assign(ctx, target, value_enum, value.custom.unwrap())?;
}
StmtKind::Try { .. } => gen_try(generator, ctx, stmt)?,
StmtKind::Raise { exc, .. } => {
if let Some(exc) = exc {
// Define all used models
let sizet = generator.get_sizet(ctx.ctx);
let pexn_model = PointerModel(StructModel(Exception { sizet }));
let exn = if let ExprKind::Name { id, .. } = &exc.node {
// Handle "raise Exception" short form
let def_id = ctx.resolver.get_identifier_def(*id).map_err(|e| {
format!("{} (at {})", e.iter().next().unwrap(), exc.location)
})?;
let def = ctx.top_level.definitions.read();
let TopLevelDef::Class { constructor, .. } = *def[def_id.0].read() else {
return Err(format!("Failed to resolve symbol {id} (at {})", exc.location));
};

let Some(exn) = generator.gen_expr(ctx, exc)? else {
let TypeEnum::TFunc(signature) =
ctx.unifier.get_ty(constructor.unwrap()).as_ref().clone()
else {
return Err(format!("Failed to resolve symbol {id} (at {})", exc.location));
};

generator
.gen_call(ctx, None, (&signature, def_id), Vec::default())?
.map(Into::into)
} else {
generator.gen_expr(ctx, exc)?
};

let exc = if let Some(v) = exn {
v.to_basic_value_enum(ctx, generator, exc.custom.unwrap())?
} else {
return Ok(());
};
let pexn = exn.to_basic_value_enum(ctx, generator, ctx.primitives.exception)?;
let pexn = pexn_model.review_value(ctx.ctx, pexn).unwrap();

gen_raise(generator, ctx, Some(pexn), stmt.location);
gen_raise(generator, ctx, Some(&exc), stmt.location);
} else {
gen_raise(generator, ctx, None, stmt.location);
}
}
StmtKind::Assert { test, msg, .. } => {
// Define all used models
let sizet = generator.get_sizet(ctx.ctx);
let byte_model = NIntModel(Byte);
let cslice_model = StructModel(CSlice { sizet });

// Check `test`
let Some(test) = generator.gen_expr(ctx, test)? else {
let test = if let Some(v) = generator.gen_expr(ctx, test)? {
v.to_basic_value_enum(ctx, generator, test.custom.unwrap())?
} else {
return Ok(());
};
let test = test.to_basic_value_enum(ctx, generator, ctx.primitives.bool)?;
let test = byte_model.review_value(ctx.ctx, test).unwrap(); // Python `bool`s are represented as `i8` in nac3core

// Check `msg`
let err_msg = match msg {
Some(msg) => {
let Some(msg) = generator.gen_expr(ctx, msg)? else {
if let Some(v) = generator.gen_expr(ctx, msg)? {
v.to_basic_value_enum(ctx, generator, msg.custom.unwrap())?
} else {
return Ok(());
};

let msg = msg.to_basic_value_enum(ctx, generator, ctx.primitives.str)?;
cslice_model.review_value(ctx.ctx, msg).unwrap()
}
None => ctx.gen_string(generator, ""),
}
None => ctx.gen_string(generator, "").into(),
};

ctx.make_assert_impl(
generator,
test.value,
generator.bool_to_i1(ctx, test.into_int_value()),
"0:AssertionError",
err_msg,
[None, None, None],
stmt.location,
);
}
StmtKind::Global { names, .. } => {
let registered_globals = ctx
.top_level
.definitions
.read()
.iter()
.filter_map(|def| {
if let TopLevelDef::Variable { simple_name, ty, .. } = &*def.read() {
Some((*simple_name, *ty))
} else {
None
}
})
.collect_vec();

for id in names {
let Some((_, ty)) = registered_globals.iter().find(|(name, _)| name == id) else {
return Err(format!("{id} is not a global at {}", stmt.location));
};

let resolver = ctx.resolver.clone();
let ptr = resolver
.get_symbol_value(*id, ctx, generator)
.map(|val| val.to_basic_value_enum(ctx, generator, *ty))
.transpose()?
.map(BasicValueEnum::into_pointer_value)
.unwrap();

ctx.var_assignment.insert(*id, (ptr, None, 0));
}
}
_ => unimplemented!(),
};
Ok(())
@@ -1,50 +0,0 @@
use crate::codegen::{model::*, CodeGenContext};

/// Fields of [`CSlice<'ctx>`].
pub struct CSliceFields<'ctx> {
/// Pointer to the data.
pub base: Field<PointerModel<ByteModel>>,
/// Number of bytes of the data.
pub len: Field<SizeTModel<'ctx>>,
}

/// See <https://crates.io/crates/cslice>.
///
/// Additionally, see <https://github.com/m-labs/artiq/blob/b0d2705c385f64b6e6711c1726cd9178f40b598e/artiq/firmware/libeh/eh_artiq.rs>)
/// for ARTIQ-specific notes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CSlice<'ctx> {
pub sizet: SizeTModel<'ctx>,
}

impl<'ctx> StructKind<'ctx> for CSlice<'ctx> {
type Fields = CSliceFields<'ctx>;

fn struct_name(&self) -> &'static str {
"CSlice"
}

fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
Self::Fields {
base: builder.add_field_auto("content"),
len: builder.add_field("length", self.sizet),
}
}
}

impl<'ctx> StructModel<CSlice<'ctx>> {
/// Create a [`CSlice`].
///
/// `base` and `len` must be LLVM global constants.
pub fn create_const(
&self,
ctx: &CodeGenContext<'ctx, '_>,
base: Pointer<'ctx, ByteModel>,
len: SizeT<'ctx>,
) -> Struct<'ctx, CSlice<'ctx>> {
let value = self
.get_struct_type(ctx.ctx)
.const_named_struct(&[base.value.into(), len.value.into()]);
self.believe_value(value)
}
}
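The removed `CSlice` model above describes the `(pointer, length)` pair used by the `cslice` crate and by ARTIQ's exception runtime: a base pointer to the data and a byte count whose width matches the target's `size_t`. As a rough reference for the layout being modelled, here is a hedged sketch; the type and field names are illustrative, not the crate's exact definition.

```rust
// Approximate C-level layout modelled by the CSlice struct above.
// Field names are illustrative only.
#[repr(C)]
pub struct CSliceRepr {
    pub base: *const u8, // pointer to the first byte of the data
    pub len: usize,      // number of bytes pointed to
}
```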
@@ -1,78 +0,0 @@
use crate::codegen::model::*;

use super::cslice::CSlice;

/// The LLVM int type of an Exception ID.
pub type ExceptionId = Int32;

/// Fields of [`Exception<'ctx>`]
///
/// The definition came from `pub struct Exception<'a>` in
/// <https://github.com/m-labs/artiq/blob/master/artiq/firmware/libeh/eh_artiq.rs>.
pub struct ExceptionFields<'ctx> {
/// nac3core's ID of the exception
pub exception_id: Field<NIntModel<ExceptionId>>,
/// The name of the file this `Exception` was raised in.
pub filename: Field<StructModel<CSlice<'ctx>>>,
/// The line number in the file this `Exception` was raised in.
pub line: Field<NIntModel<Int32>>,
/// The column number in the file this `Exception` was raised in.
pub column: Field<NIntModel<Int32>>,
/// The name of the Python function this `Exception` was raised in.
pub function_name: Field<StructModel<CSlice<'ctx>>>,
/// The message of this Exception.
///
/// The message can optionally contain integer parameters `{0}`, `{1}`, and `{2}` in its string,
/// where they will be substituted by `params[0]`, `params[1]`, and `params[2]` respectively (as `int64_t`s).
/// Here is an example:
///
/// ```ignore
/// "Index {0} is out of bounds! List only has {1} element(s)."
/// ```
///
/// In this case, `params[0]` and `params[1]` must be specified, and `params[2]` is ***unused***.
/// Having only 3 parameters is a constraint in ARTIQ.
pub message: Field<StructModel<CSlice<'ctx>>>,
pub params: [Field<NIntModel<Int64>>; 3],
}

/// nac3core & ARTIQ's Exception
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Exception<'ctx> {
/// The `SizeT` type of this string.
pub sizet: SizeTModel<'ctx>,
}

impl<'ctx> StructKind<'ctx> for Exception<'ctx> {
type Fields = ExceptionFields<'ctx>;

fn struct_name(&self) -> &'static str {
"Exception"
}

fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
let str = StructModel(CSlice { sizet: self.sizet });

let exception_id = builder.add_field_auto("exception_id");
let file_name = builder.add_field("file_name", str);
let line = builder.add_field_auto("line");
let column = builder.add_field_auto("column");
let function_name = builder.add_field("function_name", str);
let message = builder.add_field("message", str);
let params = [
builder.add_field_auto("param0"),
builder.add_field_auto("param1"),
builder.add_field_auto("param2"),
];

Self::Fields {
exception_id,
filename: file_name,
line,
column,
function_name,
message,
params,
}
}
}
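For orientation, the field order built by `build_fields` above (exception_id, file_name, line, column, function_name, message, param0..param2) is what the GEP indices 1 through 4 in the rewritten `gen_raise` rely on when storing the filename, row, column, and function name. The following `#[repr(C)]`-style sketch of that layout is illustrative only (it reuses the hypothetical `CSliceRepr` from the note above and is not code from nac3core or ARTIQ):

```rust
// Illustrative layout of the Exception struct as assembled by build_fields();
// field order matters because gen_raise addresses fields by GEP index.
#[repr(C)]
pub struct ExceptionRepr {
    pub exception_id: i32,         // index 0: nac3core's exception ID
    pub filename: CSliceRepr,      // index 1: file the exception was raised in
    pub line: i32,                 // index 2: line number
    pub column: i32,               // index 3: column number
    pub function_name: CSliceRepr, // index 4: enclosing function name
    pub message: CSliceRepr,       // index 5: message, may contain {0}, {1}, {2}
    pub params: [i64; 3],          // index 6: values substituted into the message
}
```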
@@ -1,42 +0,0 @@
use crate::codegen::{model::*, CodeGenContext};

/// Fields of [`List`]
pub struct ListFields<'ctx, T: Model<'ctx>> {
/// Length of the list
pub size: Field<SizeTModel<'ctx>>,
/// Base pointer of the list
pub data: Field<PointerModel<T>>,
}

/// nac3core's `List` definition
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct List<'ctx, T: Model<'ctx>> {
pub sizet: SizeTModel<'ctx>,
pub element: T,
}

impl<'ctx, T: Model<'ctx> + 'ctx> StructKind<'ctx> for List<'ctx, T> {
type Fields = ListFields<'ctx, T>;

fn struct_name(&self) -> &'static str {
"List"
}

fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
Self::Fields {
size: builder.add_field("size", self.sizet),
data: builder.add_field("data", PointerModel(self.element)),
}
}
}

impl<'ctx, T: Model<'ctx> + 'ctx> Pointer<'ctx, StructModel<List<'ctx, T>>> {
pub fn as_slice(
&self,
ctx: &CodeGenContext<'ctx, '_>,
) -> ArraySlice<'ctx, SizeTModel<'ctx>, T> {
let num_elements = self.gep(ctx, |f| f.size).load(ctx, "num_elements");
let pointer = self.gep(ctx, |f| f.data).load(ctx, "base");
ArraySlice { num_elements, pointer }
}
}
@@ -1,4 +0,0 @@
pub mod cslice;
pub mod exception;
pub mod list;
pub mod ndarray;
@@ -1,54 +0,0 @@
use crate::codegen::*;

pub struct NpArrayFields<'ctx> {
pub data: Field<PointerModel<ByteModel>>,
pub itemsize: Field<SizeTModel<'ctx>>,
pub ndims: Field<SizeTModel<'ctx>>,
pub shape: Field<PointerModel<SizeTModel<'ctx>>>,
pub strides: Field<PointerModel<SizeTModel<'ctx>>>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct NpArray<'ctx> {
pub sizet: SizeTModel<'ctx>,
}

impl<'ctx> StructKind<'ctx> for NpArray<'ctx> {
type Fields = NpArrayFields<'ctx>;

fn struct_name(&self) -> &'static str {
"NDArray"
}

fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
NpArrayFields {
data: builder.add_field_auto("data"),
itemsize: builder.add_field("itemsize", self.sizet),
ndims: builder.add_field("ndims", self.sizet),
shape: builder.add_field("shape", PointerModel(self.sizet)),
strides: builder.add_field("strides", PointerModel(self.sizet)),
}
}
}

impl<'ctx> Pointer<'ctx, StructModel<NpArray<'ctx>>> {
/// Get an [`ArraySlice`] of [`NpArrayFields::shape`] with [`NpArrayFields::ndims`] as its length.
pub fn shape_slice(
&self,
ctx: &CodeGenContext<'ctx, '_>,
) -> ArraySlice<'ctx, SizeTModel<'ctx>, SizeTModel<'ctx>> {
let ndims = self.gep(ctx, |f| f.ndims).load(ctx, "ndims");
let shape_base_ptr = self.gep(ctx, |f| f.shape).load(ctx, "shape");
ArraySlice { num_elements: ndims, pointer: shape_base_ptr }
}

/// Get an [`ArraySlice`] of [`NpArrayFields::strides`] with [`NpArrayFields::ndims`] as its length.
pub fn strides_slice(
&self,
ctx: &CodeGenContext<'ctx, '_>,
) -> ArraySlice<'ctx, SizeTModel<'ctx>, SizeTModel<'ctx>> {
let ndims = self.gep(ctx, |f| f.ndims).load(ctx, "ndims");
let strides_base_ptr = self.gep(ctx, |f| f.strides).load(ctx, "strides");
ArraySlice { num_elements: ndims, pointer: strides_base_ptr }
}
}
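Since the removed `NpArray` model stores `data` as a byte pointer together with `itemsize` and per-dimension `strides`, the byte offset of an element is the dot product of the index vector with the strides. A small host-side sketch of that addressing rule follows; it assumes strides are expressed in bytes (as the byte-typed `data` field and separate `itemsize` suggest) and the function name is illustrative, not part of nac3core.

```rust
// Hedged sketch: byte offset of ndarray[indices] given per-dimension strides,
// assuming strides are measured in bytes.
fn byte_offset(indices: &[usize], strides: &[usize]) -> usize {
    assert_eq!(indices.len(), strides.len());
    indices.iter().zip(strides).map(|(i, s)| i * s).sum()
}

fn main() {
    // A C-contiguous 2x3 array of 8-byte elements has strides [24, 8].
    let strides = [24usize, 8];
    assert_eq!(byte_offset(&[1, 2], &strides), 40); // element (1, 2) starts 40 bytes in
}
```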