Compare commits
18 Commits
master...ndarray-st

Commits (SHA1):
3f4ee433f1
ab7ff2ae9d
e75db2c26f
635542a36d
39a05d6be6
d6451b11c1
0774dd1685
dec1658e13
c192256b78
94c547ee22
262a99ff26
5fe74303ee
84fc095800
b0f97b4d36
8f191631f2
851cef57aa
0300c81bb7
600acc8e5e
@@ -1,32 +0,0 @@
BasedOnStyle: LLVM

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -1
AlignEscapedNewlines: Left
AlwaysBreakAfterReturnType: None
AlwaysBreakTemplateDeclarations: Yes
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Inline
BinPackParameters: false
BreakBeforeBinaryOperators: NonAssignment
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: AfterColon
BreakInheritanceList: AfterColon
ColumnLimit: 120
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ContinuationIndentWidth: 4
DerivePointerAlignment: false
IndentCaseLabels: true
IndentPPDirectives: None
IndentWidth: 4
MaxEmptyLinesToKeep: 1
PointerAlignment: Left
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: true
SpaceAfterTemplateKeyword: false
SpacesBeforeTrailingComments: 2
TabWidth: 4
UseTab: Never
1 .gitignore vendored
@@ -1,4 +1,3 @@
__pycache__
/target
/nac3standalone/demo/linalg/target
nix/windows/msys2
@@ -1,24 +1,24 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks

default_stages: [pre-commit]
default_stages: [commit]

repos:
  - repo: local
    hooks:
      - id: nac3-cargo-fmt
        name: nac3 cargo format
        entry: nix
        entry: cargo
        language: system
        types: [file, rust]
        pass_filenames: false
        description: Runs cargo fmt on the codebase.
        args: [develop, -c, cargo, fmt, --all]
        args: [fmt]
      - id: nac3-cargo-clippy
        name: nac3 cargo clippy
        entry: nix
        entry: cargo
        language: system
        types: [file, rust]
        pass_filenames: false
        description: Runs cargo clippy on the codebase.
        args: [develop, -c, cargo, clippy, --tests]
        args: [clippy, --tests]
661 Cargo.lock generated
File diff suppressed because it is too large
@@ -4,7 +4,6 @@ members = [
    "nac3ast",
    "nac3parser",
    "nac3core",
    "nac3core/nac3core_derive",
    "nac3standalone",
    "nac3artiq",
    "runkernel",
6 flake.lock generated
@@ -2,11 +2,11 @@
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1736798957,
        "narHash": "sha256-qwpCtZhSsSNQtK4xYGzMiyEDhkNzOCz/Vfu4oL2ETsQ=",
        "lastModified": 1720418205,
        "narHash": "sha256-cPJoFPXU44GlhWg4pUk9oUPqurPlCFZ11ZQPk21GTPU=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "9abb87b552b7f55ac8916b6fc9e5cb486656a2f3",
        "rev": "655a58a72a6601292512670343087c2d75d859c1",
        "type": "github"
      },
      "original": {
45 flake.nix
@@ -6,7 +6,6 @@
outputs = { self, nixpkgs }:
  let
    pkgs = import nixpkgs { system = "x86_64-linux"; };
    pkgs32 = import nixpkgs { system = "i686-linux"; };
  in rec {
    packages.x86_64-linux = rec {
      llvm-nac3 = pkgs.callPackage ./nix/llvm {};
@@ -14,24 +13,9 @@
''
  mkdir -p $out/bin
  ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
  ln -s ${pkgs.llvmPackages_14.clang}/bin/clang $out/bin/clang-irrt-test
  ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
'';
demo-linalg-stub = pkgs.rustPlatform.buildRustPackage {
  name = "demo-linalg-stub";
  src = ./nac3standalone/demo/linalg;
  cargoLock = {
    lockFile = ./nac3standalone/demo/linalg/Cargo.lock;
  };
  doCheck = false;
};
demo-linalg-stub32 = pkgs32.rustPlatform.buildRustPackage {
  name = "demo-linalg-stub32";
  src = ./nac3standalone/demo/linalg;
  cargoLock = {
    lockFile = ./nac3standalone/demo/linalg/Cargo.lock;
  };
  doCheck = false;
};
nac3artiq = pkgs.python3Packages.toPythonModule (
  pkgs.rustPlatform.buildRustPackage rec {
    name = "nac3artiq";
@@ -40,8 +24,9 @@
cargoLock = {
  lockFile = ./Cargo.lock;
};
cargoTestFlags = [ "--features" "test" ];
passthru.cargoLock = cargoLock;
nativeBuildInputs = [ pkgs.python3 (pkgs.wrapClangMulti pkgs.llvmPackages_14.clang) llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
buildInputs = [ pkgs.python3 llvm-nac3 ];
checkInputs = [ (pkgs.python3.withPackages(ps: [ ps.numpy ps.scipy ])) ];
checkPhase =
@@ -49,9 +34,7 @@
echo "Checking nac3standalone demos..."
pushd nac3standalone/demo
patchShebangs .
export DEMO_LINALG_STUB=${demo-linalg-stub}/lib/liblinalg.a
export DEMO_LINALG_STUB32=${demo-linalg-stub32}/lib/liblinalg.a
./check_demos.sh -i686
./check_demos.sh
popd
echo "Running Cargo tests..."
cargoCheckHook
@@ -107,18 +90,18 @@
(pkgs.fetchFromGitHub {
  owner = "m-labs";
  repo = "sipyco";
  rev = "094a6cd63ffa980ef63698920170e50dc9ba77fd";
  sha256 = "sha256-PPnAyDedUQ7Og/Cby9x5OT9wMkNGTP8GS53V6N/dk4w=";
  rev = "939f84f9b5eef7efbf7423c735d1834783b6140e";
  sha256 = "sha256-15Nun4EY35j+6SPZkjzZtyH/ncxLS60KuGJjFh5kSTc=";
})
(pkgs.fetchFromGitHub {
  owner = "m-labs";
  repo = "artiq";
  rev = "28c9de3e251daa89a8c9fd79d5ab64a3ec03bac6";
  sha256 = "sha256-vAvpbHc5B+1wtG8zqN7j9dQE1ON+i22v+uqA+tw6Gak=";
  rev = "923ca3377d42c815f979983134ec549dc39d3ca0";
  sha256 = "sha256-oJoEeNEeNFSUyh6jXG8Tzp6qHVikeHS0CzfE+mODPgw=";
})
];
buildInputs = [
  (python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ps.jsonschema ps.lmdb ps.platformdirs nac3artiq-instrumented ]))
  (python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ps.jsonschema ps.lmdb nac3artiq-instrumented ]))
  pkgs.llvmPackages_14.llvm.out
];
phases = [ "buildPhase" "installPhase" ];
@@ -168,7 +151,7 @@
buildInputs = with pkgs; [
  # build dependencies
  packages.x86_64-linux.llvm-nac3
  (pkgs.wrapClangMulti llvmPackages_14.clang) llvmPackages_14.llvm.out # for running nac3standalone demos
  llvmPackages_14.clang llvmPackages_14.llvm.out # for running nac3standalone demos
  packages.x86_64-linux.llvm-tools-irrt
  cargo
  rustc
@@ -180,12 +163,10 @@
  clippy
  pre-commit
  rustfmt
  rust-analyzer
];
shellHook =
  ''
    export DEMO_LINALG_STUB=${packages.x86_64-linux.demo-linalg-stub}/lib/liblinalg.a
    export DEMO_LINALG_STUB32=${packages.x86_64-linux.demo-linalg-stub32}/lib/liblinalg.a
  '';
# https://nixos.wiki/wiki/Rust#Shell.nix_example
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
devShells.x86_64-linux.msys2 = pkgs.mkShell {
  name = "nac3-dev-shell-msys2";
@@ -12,10 +12,15 @@ crate-type = ["cdylib"]
itertools = "0.13"
pyo3 = { version = "0.21", features = ["extension-module", "gil-refs"] }
parking_lot = "0.12"
tempfile = "3.13"
tempfile = "3.10"
nac3parser = { path = "../nac3parser" }
nac3core = { path = "../nac3core" }
nac3ld = { path = "../nac3ld" }

[dependencies.inkwell]
version = "0.4"
default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]

[features]
init-llvm-profile = []
no-escape-analysis = ["nac3core/no-escape-analysis"]
66 nac3artiq/demo/embedding_map.py Normal file
@@ -0,0 +1,66 @@
class EmbeddingMap:
    def __init__(self):
        self.object_inverse_map = {}
        self.object_map = {}
        self.string_map = {}
        self.string_reverse_map = {}
        self.function_map = {}
        self.attributes_writeback = []

        # preallocate exception names
        self.preallocate_runtime_exception_names(["RuntimeError",
                                                  "RTIOUnderflow",
                                                  "RTIOOverflow",
                                                  "RTIODestinationUnreachable",
                                                  "DMAError",
                                                  "I2CError",
                                                  "CacheError",
                                                  "SPIError",
                                                  "0:ZeroDivisionError",
                                                  "0:IndexError",
                                                  "0:ValueError",
                                                  "0:RuntimeError",
                                                  "0:AssertionError",
                                                  "0:KeyError",
                                                  "0:NotImplementedError",
                                                  "0:OverflowError",
                                                  "0:IOError",
                                                  "0:UnwrapNoneError"])

    def preallocate_runtime_exception_names(self, names):
        for i, name in enumerate(names):
            if ":" not in name:
                name = "0:artiq.coredevice.exceptions." + name
            exn_id = self.store_str(name)
            assert exn_id == i

    def store_function(self, key, fun):
        self.function_map[key] = fun
        return key

    def store_object(self, obj):
        obj_id = id(obj)
        if obj_id in self.object_inverse_map:
            return self.object_inverse_map[obj_id]
        key = len(self.object_map) + 1
        self.object_map[key] = obj
        self.object_inverse_map[obj_id] = key
        return key

    def store_str(self, s):
        if s in self.string_reverse_map:
            return self.string_reverse_map[s]
        key = len(self.string_map)
        self.string_map[key] = s
        self.string_reverse_map[s] = key
        return key

    def retrieve_function(self, key):
        return self.function_map[key]

    def retrieve_object(self, key):
        return self.object_map[key]

    def retrieve_str(self, key):
        return self.string_map[key]
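The new demo file above moves EmbeddingMap out of min_artiq.py. Below is a minimal usage sketch, not part of the diff, that only calls methods defined in the file above and shows how the stored keys behave:

from embedding_map import EmbeddingMap

emap = EmbeddingMap()

# Exception names are preallocated in __init__, so their string keys start at 0
# and follow the order of the list passed to preallocate_runtime_exception_names.
assert emap.retrieve_str(0) == "0:artiq.coredevice.exceptions.RuntimeError"

# Objects are keyed from 1; storing the same object twice returns the same key
# because object_inverse_map remembers id(obj).
obj = object()
key = emap.store_object(obj)
assert key == emap.store_object(obj)
assert emap.retrieve_object(key) is obj

# Strings are deduplicated the same way through string_reverse_map.
s_key = emap.store_str("some_attribute")
assert emap.retrieve_str(s_key) == "some_attribute"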
@@ -6,6 +6,7 @@ from typing import Generic, TypeVar
from math import floor, ceil

import nac3artiq
from embedding_map import EmbeddingMap


__all__ = [
@@ -111,15 +112,10 @@ def extern(function):
    register_function(function)
    return function


def rpc(arg=None, flags={}):
    """Decorates a function or method to be executed on the host interpreter."""
    if arg is None:
        def inner_decorator(function):
            return rpc(function, flags)
        return inner_decorator
    register_function(arg)
    return arg
def rpc(function):
    """Decorates a function declaration defined by the core device runtime."""
    register_function(function)
    return function

def kernel(function_or_method):
    """Decorates a function or method to be executed on the core device."""
@@ -192,46 +188,6 @@ def print_int64(x: int64):
    raise NotImplementedError("syscall not simulated")


class EmbeddingMap:
    def __init__(self):
        self.object_inverse_map = {}
        self.object_map = {}
        self.string_map = {}
        self.string_reverse_map = {}
        self.function_map = {}
        self.attributes_writeback = []

    def store_function(self, key, fun):
        self.function_map[key] = fun
        return key

    def store_object(self, obj):
        obj_id = id(obj)
        if obj_id in self.object_inverse_map:
            return self.object_inverse_map[obj_id]
        key = len(self.object_map) + 1
        self.object_map[key] = obj
        self.object_inverse_map[obj_id] = key
        return key

    def store_str(self, s):
        if s in self.string_reverse_map:
            return self.string_reverse_map[s]
        key = len(self.string_map)
        self.string_map[key] = s
        self.string_reverse_map[s] = key
        return key

    def retrieve_function(self, key):
        return self.function_map[key]

    def retrieve_object(self, key):
        return self.object_map[key]

    def retrieve_str(self, key):
        return self.string_map[key]


@nac3
class Core:
    ref_period: KernelInvariant[float]
@@ -245,7 +201,7 @@ class Core:
        embedding = EmbeddingMap()

        if allow_registration:
            compiler.analyze(registered_functions, registered_classes, set())
            compiler.analyze(registered_functions, registered_classes)
            allow_registration = False

        if hasattr(method, "__self__"):
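The rpc hunk above swaps the parameterized decorator (def rpc(arg=None, flags={}), which accepts both plain @rpc and @rpc(flags={...})) for the bare single-argument form (def rpc(function)). The sketch below is hypothetical kernel code, not part of the diff; the flags={"async"} keyword is the shape that decorator_get_flags in nac3artiq/src/lib.rs extracts on the master side:

from min_artiq import rpc
from numpy import int32

# Bare form: accepted by both definitions; the function is registered directly.
@rpc
def get_sample() -> int32:
    return 42

# Parameterized form: only valid with the master-side signature, where
# rpc(arg=None, flags={...}) returns inner_decorator to register the function.
@rpc(flags={"async"})
def log_line(msg: str):
    print(msg)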
@@ -1,26 +0,0 @@
from min_artiq import *
from numpy import int32

# Global Variable Definition
X: Kernel[int32] = 1

# TopLevelFunction Defintion
@kernel
def display_X():
    print_int32(X)

# TopLevel Class Definition
@nac3
class A:
    @kernel
    def __init__(self):
        self.set_x(1)

    @kernel
    def set_x(self, new_val: int32):
        global X
        X = new_val

    @kernel
    def get_X(self) -> int32:
        return X
@@ -1,26 +0,0 @@
from min_artiq import *
import module as module_definition

@nac3
class TestModuleSupport:
    core: KernelInvariant[Core]

    def __init__(self):
        self.core = Core()

    @kernel
    def run(self):
        # Accessing classes
        obj = module_definition.A()
        obj.get_X()
        obj.set_x(2)

        # Calling functions
        module_definition.display_X()

        # Updating global variables
        module_definition.X = 9
        module_definition.display_X()

if __name__ == "__main__":
    TestModuleSupport().run()
@@ -1,29 +0,0 @@
from min_artiq import *
import numpy
from numpy import int32


@nac3
class NumpyBoolDecay:
    core: KernelInvariant[Core]
    np_true: KernelInvariant[bool]
    np_false: KernelInvariant[bool]
    np_int: KernelInvariant[int32]
    np_float: KernelInvariant[float]
    np_str: KernelInvariant[str]

    def __init__(self):
        self.core = Core()
        self.np_true = numpy.True_
        self.np_false = numpy.False_
        self.np_int = numpy.int32(0)
        self.np_float = numpy.float64(0.0)
        self.np_str = numpy.str_("")

    @kernel
    def run(self):
        pass


if __name__ == "__main__":
    NumpyBoolDecay().run()
@@ -1,26 +0,0 @@
from min_artiq import *
from numpy import ndarray, zeros as np_zeros


@nac3
class StrFail:
    core: KernelInvariant[Core]

    def __init__(self):
        self.core = Core()

    @kernel
    def hello(self, arg: str):
        pass

    @kernel
    def consume_ndarray(self, arg: ndarray[str, 1]):
        pass

    def run(self):
        self.hello("world")
        self.consume_ndarray(np_zeros([10], dtype=str))


if __name__ == "__main__":
    StrFail().run()
@@ -3,22 +3,22 @@ from numpy import int32


@nac3
class EmptyList:
class Demo:
    core: KernelInvariant[Core]
    attr1: KernelInvariant[str]
    attr2: KernelInvariant[int32]


    def __init__(self):
        self.core = Core()

    @rpc
    def get_empty(self) -> list[int32]:
        return []
        self.attr2 = 32
        self.attr1 = "SAMPLE"

    @kernel
    def run(self):
        a: list[int32] = self.get_empty()
        if a != []:
            raise ValueError
        print_int32(self.attr2)
        self.attr1


if __name__ == "__main__":
    EmptyList().run()
    Demo().run()
40 nac3artiq/demo/support_class_attr_issue102.py Normal file
@@ -0,0 +1,40 @@
from min_artiq import *
from numpy import int32


@nac3
class Demo:
    attr1: KernelInvariant[int32] = 2
    attr2: int32 = 4
    attr3: Kernel[int32]

    @kernel
    def __init__(self):
        self.attr3 = 8


@nac3
class NAC3Devices:
    core: KernelInvariant[Core]
    attr4: KernelInvariant[int32] = 16

    def __init__(self):
        self.core = Core()

    @kernel
    def run(self):
        Demo.attr1 # Supported
        # Demo.attr2 # Field not accessible on Kernel
        # Demo.attr3 # Only attributes can be accessed in this way
        # Demo.attr1 = 2 # Attributes are immutable

        self.attr4 # Attributes can be accessed within class

        obj = Demo()
        obj.attr1 # Attributes can be accessed by class objects

        NAC3Devices.attr4 # Attributes accessible for classes without __init__


if __name__ == "__main__":
    NAC3Devices().run()
File diff suppressed because it is too large
@@ -1,4 +1,10 @@
#![deny(future_incompatible, let_underscore, nonstandard_style, clippy::all)]
#![deny(
    future_incompatible,
    let_underscore,
    nonstandard_style,
    rust_2024_compatibility,
    clippy::all
)]
#![warn(clippy::pedantic)]
#![allow(
    unsafe_op_in_unsafe_fn,
@ -10,65 +16,63 @@
|
||||
clippy::wildcard_imports
|
||||
)]
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fs,
|
||||
io::Write,
|
||||
process::Command,
|
||||
rc::Rc,
|
||||
sync::Arc,
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::fs;
|
||||
use std::io::Write;
|
||||
use std::process::Command;
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use itertools::Itertools;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use pyo3::{
|
||||
create_exception, exceptions,
|
||||
prelude::*,
|
||||
types::{PyBytes, PyDict, PyNone, PySet},
|
||||
use inkwell::{
|
||||
memory_buffer::MemoryBuffer,
|
||||
module::{Linkage, Module},
|
||||
passes::PassBuilderOptions,
|
||||
support::is_multithreaded,
|
||||
targets::*,
|
||||
OptimizationLevel,
|
||||
};
|
||||
use tempfile::{self, TempDir};
|
||||
use itertools::Itertools;
|
||||
use nac3core::codegen::{gen_func_impl, CodeGenLLVMOptions, CodeGenTargetMachineOptions};
|
||||
use nac3core::toplevel::builtins::get_exn_constructor;
|
||||
use nac3core::typecheck::typedef::{TypeEnum, Unifier, VarMap};
|
||||
use nac3parser::{
|
||||
ast::{ExprKind, Stmt, StmtKind, StrRef},
|
||||
parser::parse_program,
|
||||
};
|
||||
use pyo3::create_exception;
|
||||
use pyo3::prelude::*;
|
||||
use pyo3::{exceptions, types::PyBytes, types::PyDict, types::PySet};
|
||||
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
|
||||
use nac3core::{
|
||||
codegen::{
|
||||
concrete_type::ConcreteTypeStore, gen_func_impl, irrt::load_irrt, CodeGenLLVMOptions,
|
||||
CodeGenTargetMachineOptions, CodeGenTask, CodeGenerator, WithCall, WorkerRegistry,
|
||||
},
|
||||
inkwell::{
|
||||
context::Context,
|
||||
memory_buffer::MemoryBuffer,
|
||||
module::{FlagBehavior, Linkage, Module},
|
||||
passes::PassBuilderOptions,
|
||||
support::is_multithreaded,
|
||||
targets::*,
|
||||
OptimizationLevel,
|
||||
},
|
||||
nac3parser::{
|
||||
ast::{self, Constant, ExprKind, Located, Stmt, StmtKind, StrRef},
|
||||
parser::parse_program,
|
||||
},
|
||||
codegen::irrt::load_irrt,
|
||||
codegen::{concrete_type::ConcreteTypeStore, CodeGenTask, WithCall, WorkerRegistry},
|
||||
symbol_resolver::SymbolResolver,
|
||||
toplevel::{
|
||||
builtins::get_exn_constructor,
|
||||
composer::{BuiltinFuncCreator, BuiltinFuncSpec, ComposerConfig, TopLevelComposer},
|
||||
composer::{ComposerConfig, TopLevelComposer},
|
||||
DefinitionId, GenCall, TopLevelDef,
|
||||
},
|
||||
typecheck::{
|
||||
type_inferencer::PrimitiveStore,
|
||||
typedef::{into_var_map, FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap},
|
||||
},
|
||||
typecheck::typedef::{FunSignature, FuncArg},
|
||||
typecheck::{type_inferencer::PrimitiveStore, typedef::Type},
|
||||
};
|
||||
|
||||
use nac3ld::Linker;
|
||||
|
||||
use codegen::{
|
||||
attributes_writeback, gen_core_log, gen_rtio_log, rpc_codegen_callback, ArtiqCodeGenerator,
|
||||
use tempfile::{self, TempDir};
|
||||
|
||||
use crate::codegen::attributes_writeback;
|
||||
use crate::{
|
||||
codegen::{rpc_codegen_callback, ArtiqCodeGenerator},
|
||||
symbol_resolver::{DeferredEvaluationStore, InnerResolver, PythonHelper, Resolver},
|
||||
};
|
||||
use symbol_resolver::{DeferredEvaluationStore, InnerResolver, PythonHelper, Resolver};
|
||||
use timeline::TimeFns;
|
||||
|
||||
mod codegen;
|
||||
mod symbol_resolver;
|
||||
mod timeline;
|
||||
|
||||
use timeline::TimeFns;
|
||||
|
||||
#[derive(PartialEq, Clone, Copy)]
|
||||
enum Isa {
|
||||
Host,
|
||||
@ -78,62 +82,14 @@ enum Isa {
|
||||
}
|
||||
|
||||
impl Isa {
|
||||
/// Returns the [`TargetTriple`] used for compiling to this ISA.
|
||||
pub fn get_llvm_target_triple(self) -> TargetTriple {
|
||||
match self {
|
||||
Isa::Host => TargetMachine::get_default_triple(),
|
||||
Isa::RiscV32G | Isa::RiscV32IMA => TargetTriple::create("riscv32-unknown-linux"),
|
||||
Isa::CortexA9 => TargetTriple::create("armv7-unknown-linux-gnueabihf"),
|
||||
/// Returns the number of bits in `size_t` for the [`Isa`].
|
||||
fn get_size_type(self) -> u32 {
|
||||
if self == Isa::Host {
|
||||
64u32
|
||||
} else {
|
||||
32u32
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [`String`] representing the target CPU used for compiling to this ISA.
|
||||
pub fn get_llvm_target_cpu(self) -> String {
|
||||
match self {
|
||||
Isa::Host => TargetMachine::get_host_cpu_name().to_string(),
|
||||
Isa::RiscV32G | Isa::RiscV32IMA => "generic-rv32".to_string(),
|
||||
Isa::CortexA9 => "cortex-a9".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [`String`] representing the target features used for compiling to this ISA.
|
||||
pub fn get_llvm_target_features(self) -> String {
|
||||
match self {
|
||||
Isa::Host => TargetMachine::get_host_cpu_features().to_string(),
|
||||
Isa::RiscV32G => "+a,+m,+f,+d".to_string(),
|
||||
Isa::RiscV32IMA => "+a,+m".to_string(),
|
||||
Isa::CortexA9 => "+dsp,+fp16,+neon,+vfp3,+long-calls".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an instance of [`CodeGenTargetMachineOptions`] representing the target machine
|
||||
/// options used for compiling to this ISA.
|
||||
pub fn get_llvm_target_options(self) -> CodeGenTargetMachineOptions {
|
||||
CodeGenTargetMachineOptions {
|
||||
triple: self.get_llvm_target_triple().as_str().to_string_lossy().into_owned(),
|
||||
cpu: self.get_llvm_target_cpu(),
|
||||
features: self.get_llvm_target_features(),
|
||||
reloc_mode: RelocMode::PIC,
|
||||
..CodeGenTargetMachineOptions::from_host()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an instance of [`TargetMachine`] used in compiling and linking of a program of this
|
||||
/// ISA.
|
||||
pub fn create_llvm_target_machine(self, opt_level: OptimizationLevel) -> TargetMachine {
|
||||
self.get_llvm_target_options()
|
||||
.create_target_machine(opt_level)
|
||||
.expect("couldn't create target machine")
|
||||
}
|
||||
|
||||
/// Returns the number of bits in `size_t` for this ISA.
|
||||
fn get_size_type(self, ctx: &Context) -> u32 {
|
||||
ctx.ptr_sized_int_type(
|
||||
&self.create_llvm_target_machine(OptimizationLevel::Default).get_target_data(),
|
||||
None,
|
||||
)
|
||||
.get_bit_width()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@ -159,7 +115,6 @@ pub struct PrimitivePythonId {
|
||||
generic_alias: (u64, u64),
|
||||
virtual_id: u64,
|
||||
option: u64,
|
||||
module: u64,
|
||||
}
|
||||
|
||||
type TopLevelComponent = (Stmt, String, PyObject);
|
||||
@ -171,7 +126,7 @@ struct Nac3 {
|
||||
isa: Isa,
|
||||
time_fns: &'static (dyn TimeFns + Sync),
|
||||
primitive: PrimitiveStore,
|
||||
builtins: Vec<BuiltinFuncSpec>,
|
||||
builtins: Vec<(StrRef, FunSignature, Arc<GenCall>)>,
|
||||
pyid_to_def: Arc<RwLock<HashMap<u64, DefinitionId>>>,
|
||||
primitive_ids: PrimitivePythonId,
|
||||
working_directory: TempDir,
|
||||
@ -191,32 +146,14 @@ impl Nac3 {
|
||||
module: &PyObject,
|
||||
registered_class_ids: &HashSet<u64>,
|
||||
) -> PyResult<()> {
|
||||
let (module_name, source_file, source) =
|
||||
Python::with_gil(|py| -> PyResult<(String, String, String)> {
|
||||
let module: &PyAny = module.extract(py)?;
|
||||
let source_file = module.getattr("__file__");
|
||||
let (source_file, source) = if let Ok(source_file) = source_file {
|
||||
let source_file = source_file.extract()?;
|
||||
(
|
||||
source_file,
|
||||
fs::read_to_string(source_file).map_err(|e| {
|
||||
exceptions::PyIOError::new_err(format!(
|
||||
"failed to read input file: {e}"
|
||||
))
|
||||
})?,
|
||||
)
|
||||
} else {
|
||||
// kernels submitted by content have no file
|
||||
// but still can provide source by StringLoader
|
||||
let get_src_fn = module
|
||||
.getattr("__loader__")?
|
||||
.extract::<PyObject>()?
|
||||
.getattr(py, "get_source")?;
|
||||
("<expcontent>", get_src_fn.call1(py, (PyNone::get(py),))?.extract(py)?)
|
||||
};
|
||||
Ok((module.getattr("__name__")?.extract()?, source_file.to_string(), source))
|
||||
})?;
|
||||
let (module_name, source_file) = Python::with_gil(|py| -> PyResult<(String, String)> {
|
||||
let module: &PyAny = module.extract(py)?;
|
||||
Ok((module.getattr("__name__")?.extract()?, module.getattr("__file__")?.extract()?))
|
||||
})?;
|
||||
|
||||
let source = fs::read_to_string(&source_file).map_err(|e| {
|
||||
exceptions::PyIOError::new_err(format!("failed to read input file: {e}"))
|
||||
})?;
|
||||
let parser_result = parse_program(&source, source_file.into())
|
||||
.map_err(|e| exceptions::PySyntaxError::new_err(format!("parse error: {e}")))?;
|
||||
|
||||
@ -256,8 +193,10 @@ impl Nac3 {
|
||||
body.retain(|stmt| {
|
||||
if let StmtKind::FunctionDef { ref decorator_list, .. } = stmt.node {
|
||||
decorator_list.iter().any(|decorator| {
|
||||
if let Some(id) = decorator_id_string(decorator) {
|
||||
id == "kernel" || id == "portable" || id == "rpc"
|
||||
if let ExprKind::Name { id, .. } = decorator.node {
|
||||
id.to_string() == "kernel"
|
||||
|| id.to_string() == "portable"
|
||||
|| id.to_string() == "rpc"
|
||||
} else {
|
||||
false
|
||||
}
|
||||
@ -270,17 +209,14 @@ impl Nac3 {
|
||||
}
|
||||
StmtKind::FunctionDef { ref decorator_list, .. } => {
|
||||
decorator_list.iter().any(|decorator| {
|
||||
if let Some(id) = decorator_id_string(decorator) {
|
||||
id == "extern" || id == "kernel" || id == "portable" || id == "rpc"
|
||||
if let ExprKind::Name { id, .. } = decorator.node {
|
||||
let id = id.to_string();
|
||||
id == "extern" || id == "portable" || id == "kernel" || id == "rpc"
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
}
|
||||
// Allow global variable declaration with `Kernel` type annotation
|
||||
StmtKind::AnnAssign { ref annotation, .. } => {
|
||||
matches!(&annotation.node, ExprKind::Subscript { value, .. } if matches!(&value.node, ExprKind::Name {id, ..} if id == &"Kernel".into()))
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
|
||||
@ -328,7 +264,7 @@ impl Nac3 {
|
||||
arg_names.len(),
|
||||
));
|
||||
}
|
||||
for (i, FuncArg { ty, default_value, name, .. }) in args.iter().enumerate() {
|
||||
for (i, FuncArg { ty, default_value, name }) in args.iter().enumerate() {
|
||||
let in_name = match arg_names.get(i) {
|
||||
Some(n) => n,
|
||||
None if default_value.is_none() => {
|
||||
@ -364,64 +300,6 @@ impl Nac3 {
|
||||
None
|
||||
}
|
||||
|
||||
/// Returns a [`Vec`] of builtins that needs to be initialized during method compilation time.
|
||||
fn get_lateinit_builtins() -> Vec<Box<BuiltinFuncCreator>> {
|
||||
vec![
|
||||
Box::new(|primitives, unifier| {
|
||||
let arg_ty = unifier.get_fresh_var(Some("T".into()), None);
|
||||
|
||||
(
|
||||
"core_log".into(),
|
||||
FunSignature {
|
||||
args: vec![FuncArg {
|
||||
name: "arg".into(),
|
||||
ty: arg_ty.ty,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitives.none,
|
||||
vars: into_var_map([arg_ty]),
|
||||
},
|
||||
Arc::new(GenCall::new(Box::new(move |ctx, obj, fun, args, generator| {
|
||||
gen_core_log(ctx, obj.as_ref(), fun, &args, generator)?;
|
||||
|
||||
Ok(None)
|
||||
}))),
|
||||
)
|
||||
}),
|
||||
Box::new(|primitives, unifier| {
|
||||
let arg_ty = unifier.get_fresh_var(Some("T".into()), None);
|
||||
|
||||
(
|
||||
"rtio_log".into(),
|
||||
FunSignature {
|
||||
args: vec![
|
||||
FuncArg {
|
||||
name: "channel".into(),
|
||||
ty: primitives.str,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
FuncArg {
|
||||
name: "arg".into(),
|
||||
ty: arg_ty.ty,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
],
|
||||
ret: primitives.none,
|
||||
vars: into_var_map([arg_ty]),
|
||||
},
|
||||
Arc::new(GenCall::new(Box::new(move |ctx, obj, fun, args, generator| {
|
||||
gen_rtio_log(ctx, obj.as_ref(), fun, &args, generator)?;
|
||||
|
||||
Ok(None)
|
||||
}))),
|
||||
)
|
||||
}),
|
||||
]
|
||||
}
|
||||
|
||||
fn compile_method<T>(
|
||||
&self,
|
||||
obj: &PyAny,
|
||||
@ -431,10 +309,9 @@ impl Nac3 {
|
||||
py: Python,
|
||||
link_fn: &dyn Fn(&Module) -> PyResult<T>,
|
||||
) -> PyResult<T> {
|
||||
let size_t = self.isa.get_size_type(&Context::create());
|
||||
let size_t = self.isa.get_size_type();
|
||||
let (mut composer, mut builtins_def, mut builtins_ty) = TopLevelComposer::new(
|
||||
self.builtins.clone(),
|
||||
Self::get_lateinit_builtins(),
|
||||
ComposerConfig { kernel_ann: Some("Kernel"), kernel_invariant_ann: "KernelInvariant" },
|
||||
size_t,
|
||||
);
|
||||
@ -474,14 +351,12 @@ impl Nac3 {
|
||||
];
|
||||
add_exceptions(&mut composer, &mut builtins_def, &mut builtins_ty, &exception_names);
|
||||
|
||||
// Stores a mapping from module id to attributes
|
||||
let mut module_to_resolver_cache: HashMap<u64, _> = HashMap::new();
|
||||
|
||||
let mut rpc_ids = vec![];
|
||||
for (stmt, path, module) in &self.top_levels {
|
||||
let py_module: &PyAny = module.extract(py)?;
|
||||
let module_id: u64 = id_fn.call1((py_module,))?.extract()?;
|
||||
let module_name: String = py_module.getattr("__name__")?.extract()?;
|
||||
let helper = helper.clone();
|
||||
let class_obj;
|
||||
if let StmtKind::ClassDef { name, .. } = &stmt.node {
|
||||
@ -496,7 +371,7 @@ impl Nac3 {
|
||||
} else {
|
||||
class_obj = None;
|
||||
}
|
||||
let (name_to_pyid, resolver, _, _) =
|
||||
let (name_to_pyid, resolver) =
|
||||
module_to_resolver_cache.get(&module_id).cloned().unwrap_or_else(|| {
|
||||
let mut name_to_pyid: HashMap<StrRef, u64> = HashMap::new();
|
||||
let members: &PyDict =
|
||||
@ -513,6 +388,7 @@ impl Nac3 {
|
||||
pyid_to_type: pyid_to_type.clone(),
|
||||
primitive_ids: self.primitive_ids.clone(),
|
||||
global_value_ids: global_value_ids.clone(),
|
||||
class_names: Mutex::default(),
|
||||
name_to_pyid: name_to_pyid.clone(),
|
||||
module: module.clone(),
|
||||
id_to_pyval: RwLock::default(),
|
||||
@ -525,17 +401,9 @@ impl Nac3 {
|
||||
})))
|
||||
as Arc<dyn SymbolResolver + Send + Sync>;
|
||||
let name_to_pyid = Rc::new(name_to_pyid);
|
||||
let module_location = ast::Location::new(1, 1, stmt.location.file);
|
||||
module_to_resolver_cache.insert(
|
||||
module_id,
|
||||
(
|
||||
name_to_pyid.clone(),
|
||||
resolver.clone(),
|
||||
module_name.clone(),
|
||||
Some(module_location),
|
||||
),
|
||||
);
|
||||
(name_to_pyid, resolver, module_name, Some(module_location))
|
||||
module_to_resolver_cache
|
||||
.insert(module_id, (name_to_pyid.clone(), resolver.clone()));
|
||||
(name_to_pyid, resolver)
|
||||
});
|
||||
|
||||
let (name, def_id, ty) = composer
|
||||
@ -551,25 +419,9 @@ impl Nac3 {
|
||||
|
||||
match &stmt.node {
|
||||
StmtKind::FunctionDef { decorator_list, .. } => {
|
||||
if decorator_list
|
||||
.iter()
|
||||
.any(|decorator| decorator_id_string(decorator) == Some("rpc".to_string()))
|
||||
{
|
||||
store_fun
|
||||
.call1(
|
||||
py,
|
||||
(
|
||||
def_id.0.into_py(py),
|
||||
module.getattr(py, name.to_string().as_str()).unwrap(),
|
||||
),
|
||||
)
|
||||
.unwrap();
|
||||
let is_async = decorator_list.iter().any(|decorator| {
|
||||
decorator_get_flags(decorator)
|
||||
.iter()
|
||||
.any(|constant| *constant == Constant::Str("async".into()))
|
||||
});
|
||||
rpc_ids.push((None, def_id, is_async));
|
||||
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
|
||||
store_fun.call1(py, (def_id.0.into_py(py), module.getattr(py, name.to_string().as_str()).unwrap())).unwrap();
|
||||
rpc_ids.push((None, def_id));
|
||||
}
|
||||
}
|
||||
StmtKind::ClassDef { name, body, .. } => {
|
||||
@ -577,26 +429,19 @@ impl Nac3 {
|
||||
let class_obj = module.getattr(py, class_name.as_str()).unwrap();
|
||||
for stmt in body {
|
||||
if let StmtKind::FunctionDef { name, decorator_list, .. } = &stmt.node {
|
||||
if decorator_list.iter().any(|decorator| {
|
||||
decorator_id_string(decorator) == Some("rpc".to_string())
|
||||
}) {
|
||||
let is_async = decorator_list.iter().any(|decorator| {
|
||||
decorator_get_flags(decorator)
|
||||
.iter()
|
||||
.any(|constant| *constant == Constant::Str("async".into()))
|
||||
});
|
||||
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
|
||||
if name == &"__init__".into() {
|
||||
return Err(CompileError::new_err(format!(
|
||||
"compilation failed\n----------\nThe constructor of class {} should not be decorated with rpc decorator (at {})",
|
||||
class_name, stmt.location
|
||||
)));
|
||||
}
|
||||
rpc_ids.push((Some((class_obj.clone(), *name)), def_id, is_async));
|
||||
rpc_ids.push((Some((class_obj.clone(), *name)), def_id));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
_ => ()
|
||||
}
|
||||
|
||||
let id = *name_to_pyid.get(&name).unwrap();
|
||||
@ -609,24 +454,6 @@ impl Nac3 {
|
||||
}
|
||||
}
|
||||
|
||||
// Adding top level module definitions
|
||||
for (module_id, (module_name_to_pyid, module_resolver, module_name, module_location)) in
|
||||
module_to_resolver_cache
|
||||
{
|
||||
let def_id = composer
|
||||
.register_top_level_module(
|
||||
&module_name,
|
||||
&module_name_to_pyid,
|
||||
module_resolver,
|
||||
module_location,
|
||||
)
|
||||
.map_err(|e| {
|
||||
CompileError::new_err(format!("compilation failed\n----------\n{e}"))
|
||||
})?;
|
||||
|
||||
self.pyid_to_def.write().insert(module_id, def_id);
|
||||
}
|
||||
|
||||
let id_fun = PyModule::import(py, "builtins")?.getattr("id")?;
|
||||
let mut name_to_pyid: HashMap<StrRef, u64> = HashMap::new();
|
||||
let module = PyModule::new(py, "tmp")?;
|
||||
@ -653,12 +480,13 @@ impl Nac3 {
|
||||
pyid_to_type: pyid_to_type.clone(),
|
||||
primitive_ids: self.primitive_ids.clone(),
|
||||
global_value_ids: global_value_ids.clone(),
|
||||
class_names: Mutex::default(),
|
||||
id_to_pyval: RwLock::default(),
|
||||
id_to_primitive: RwLock::default(),
|
||||
field_to_val: RwLock::default(),
|
||||
name_to_pyid,
|
||||
module: module.to_object(py),
|
||||
helper: helper.clone(),
|
||||
helper,
|
||||
string_store: self.string_store.clone(),
|
||||
exception_ids: self.exception_ids.clone(),
|
||||
deferred_eval_store: self.deferred_eval_store.clone(),
|
||||
@ -669,10 +497,6 @@ impl Nac3 {
|
||||
.register_top_level(synthesized.pop().unwrap(), Some(resolver.clone()), "", false)
|
||||
.unwrap();
|
||||
|
||||
// Process IRRT
|
||||
let context = Context::create();
|
||||
let irrt = load_irrt(&context, resolver.as_ref());
|
||||
|
||||
let fun_signature =
|
||||
FunSignature { args: vec![], ret: self.primitive.none, vars: VarMap::new() };
|
||||
let mut store = ConcreteTypeStore::new();
|
||||
@ -710,12 +534,13 @@ impl Nac3 {
|
||||
let top_level = Arc::new(composer.make_top_level_context());
|
||||
|
||||
{
|
||||
let rpc_codegen = rpc_codegen_callback();
|
||||
let defs = top_level.definitions.read();
|
||||
for (class_data, id, is_async) in &rpc_ids {
|
||||
for (class_data, id) in &rpc_ids {
|
||||
let mut def = defs[id.0].write();
|
||||
match &mut *def {
|
||||
TopLevelDef::Function { codegen_callback, .. } => {
|
||||
*codegen_callback = Some(rpc_codegen_callback(*is_async));
|
||||
*codegen_callback = Some(rpc_codegen.clone());
|
||||
}
|
||||
TopLevelDef::Class { methods, .. } => {
|
||||
let (class_def, method_name) = class_data.as_ref().unwrap();
|
||||
@ -726,7 +551,7 @@ impl Nac3 {
|
||||
if let TopLevelDef::Function { codegen_callback, .. } =
|
||||
&mut *defs[id.0].write()
|
||||
{
|
||||
*codegen_callback = Some(rpc_codegen_callback(*is_async));
|
||||
*codegen_callback = Some(rpc_codegen.clone());
|
||||
store_fun
|
||||
.call1(
|
||||
py,
|
||||
@ -741,14 +566,6 @@ impl Nac3 {
|
||||
}
|
||||
}
|
||||
}
|
||||
TopLevelDef::Variable { .. } => {
|
||||
return Err(CompileError::new_err(String::from(
|
||||
"Unsupported @rpc annotation on global variable",
|
||||
)))
|
||||
}
|
||||
TopLevelDef::Module { .. } => {
|
||||
unreachable!("Type module cannot be decorated with @rpc")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -769,12 +586,33 @@ impl Nac3 {
|
||||
let task = CodeGenTask {
|
||||
subst: Vec::default(),
|
||||
symbol_name: "__modinit__".to_string(),
|
||||
body: instance.body,
|
||||
signature,
|
||||
resolver: resolver.clone(),
|
||||
store,
|
||||
unifier_index: instance.unifier_id,
|
||||
calls: instance.calls,
|
||||
id: 0,
|
||||
};
|
||||
|
||||
let mut store = ConcreteTypeStore::new();
|
||||
let mut cache = HashMap::new();
|
||||
let signature = store.from_signature(
|
||||
&mut composer.unifier,
|
||||
&self.primitive,
|
||||
&fun_signature,
|
||||
&mut cache,
|
||||
);
|
||||
let signature = store.add_cty(signature);
|
||||
let attributes_writeback_task = CodeGenTask {
|
||||
subst: Vec::default(),
|
||||
symbol_name: "attributes_writeback".to_string(),
|
||||
body: Arc::new(Vec::default()),
|
||||
signature,
|
||||
resolver,
|
||||
store,
|
||||
unifier_index: instance.unifier_id,
|
||||
calls: instance.calls,
|
||||
calls: Arc::new(HashMap::default()),
|
||||
id: 0,
|
||||
};
|
||||
|
||||
@ -787,47 +625,25 @@ impl Nac3 {
|
||||
let buffer = buffer.as_slice().into();
|
||||
membuffer.lock().push(buffer);
|
||||
})));
|
||||
let size_t = if self.isa == Isa::Host { 64 } else { 32 };
|
||||
let num_threads = if is_multithreaded() { 4 } else { 1 };
|
||||
let thread_names: Vec<String> = (0..num_threads).map(|_| "main".to_string()).collect();
|
||||
let threads: Vec<_> = thread_names
|
||||
.iter()
|
||||
.map(|s| {
|
||||
Box::new(ArtiqCodeGenerator::with_target_machine(
|
||||
s.to_string(),
|
||||
&context,
|
||||
&self.get_llvm_target_machine(),
|
||||
self.time_fns,
|
||||
))
|
||||
})
|
||||
.map(|s| Box::new(ArtiqCodeGenerator::new(s.to_string(), size_t, self.time_fns)))
|
||||
.collect();
|
||||
|
||||
let membuffer = membuffers.clone();
|
||||
let mut has_return = false;
|
||||
py.allow_threads(|| {
|
||||
let (registry, handles) =
|
||||
WorkerRegistry::create_workers(threads, top_level.clone(), &self.llvm_options, &f);
|
||||
registry.add_task(task);
|
||||
registry.wait_tasks_complete(handles);
|
||||
|
||||
let context = Context::create();
|
||||
let mut generator = ArtiqCodeGenerator::with_target_machine(
|
||||
"main".to_string(),
|
||||
&context,
|
||||
&self.get_llvm_target_machine(),
|
||||
self.time_fns,
|
||||
);
|
||||
let module = context.create_module("main");
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
module.add_basic_value_flag(
|
||||
"Debug Info Version",
|
||||
FlagBehavior::Warning,
|
||||
context.i32_type().const_int(3, false),
|
||||
);
|
||||
module.add_basic_value_flag(
|
||||
"Dwarf Version",
|
||||
FlagBehavior::Warning,
|
||||
context.i32_type().const_int(4, false),
|
||||
);
|
||||
let mut generator =
|
||||
ArtiqCodeGenerator::new("attributes_writeback".to_string(), size_t, self.time_fns);
|
||||
let context = inkwell::context::Context::create();
|
||||
let module = context.create_module("attributes_writeback");
|
||||
let builder = context.create_builder();
|
||||
let (_, module, _) = gen_func_impl(
|
||||
&context,
|
||||
@ -835,27 +651,9 @@ impl Nac3 {
|
||||
®istry,
|
||||
builder,
|
||||
module,
|
||||
task,
|
||||
attributes_writeback_task,
|
||||
|generator, ctx| {
|
||||
assert_eq!(instance.body.len(), 1, "toplevel module should have 1 statement");
|
||||
let StmtKind::Expr { value: ref expr, .. } = instance.body[0].node else {
|
||||
unreachable!("toplevel statement must be an expression")
|
||||
};
|
||||
let ExprKind::Call { .. } = expr.node else {
|
||||
unreachable!("toplevel expression must be a function call")
|
||||
};
|
||||
|
||||
let return_obj =
|
||||
generator.gen_expr(ctx, expr)?.map(|value| (expr.custom.unwrap(), value));
|
||||
has_return = return_obj.is_some();
|
||||
registry.wait_tasks_complete(handles);
|
||||
attributes_writeback(
|
||||
ctx,
|
||||
generator,
|
||||
inner_resolver.as_ref(),
|
||||
&host_attributes,
|
||||
return_obj,
|
||||
)
|
||||
attributes_writeback(ctx, generator, inner_resolver.as_ref(), &host_attributes)
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
@ -864,24 +662,37 @@ impl Nac3 {
|
||||
membuffer.lock().push(buffer);
|
||||
});
|
||||
|
||||
embedding_map.setattr("expects_return", has_return).unwrap();
|
||||
|
||||
// Link all modules into `main`.
|
||||
let context = inkwell::context::Context::create();
|
||||
let buffers = membuffers.lock();
|
||||
let main = context
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(
|
||||
buffers.last().unwrap(),
|
||||
"main",
|
||||
))
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(&buffers[0], "main"))
|
||||
.unwrap();
|
||||
for buffer in buffers.iter().rev().skip(1) {
|
||||
for buffer in buffers.iter().skip(1) {
|
||||
let other = context
|
||||
.create_module_from_ir(MemoryBuffer::create_from_memory_range(buffer, "main"))
|
||||
.unwrap();
|
||||
|
||||
main.link_in_module(other).map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
}
|
||||
main.link_in_module(irrt).map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
let builder = context.create_builder();
|
||||
let modinit_return = main
|
||||
.get_function("__modinit__")
|
||||
.unwrap()
|
||||
.get_last_basic_block()
|
||||
.unwrap()
|
||||
.get_terminator()
|
||||
.unwrap();
|
||||
builder.position_before(&modinit_return);
|
||||
builder
|
||||
.build_call(
|
||||
main.get_function("attributes_writeback").unwrap(),
|
||||
&[],
|
||||
"attributes_writeback",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
main.link_in_module(load_irrt(&context))
|
||||
.map_err(|err| CompileError::new_err(err.to_string()))?;
|
||||
|
||||
let mut function_iter = main.get_first_function();
|
||||
while let Some(func) = function_iter {
|
||||
@ -915,65 +726,58 @@ impl Nac3 {
|
||||
panic!("Failed to run optimization for module `main`: {}", err.to_string());
|
||||
}
|
||||
|
||||
Python::with_gil(|py| {
|
||||
let string_store = self.string_store.read();
|
||||
let mut string_store_vec = string_store.iter().collect::<Vec<_>>();
|
||||
string_store_vec.sort_by(|(_s1, key1), (_s2, key2)| key1.cmp(key2));
|
||||
for (s, key) in string_store_vec {
|
||||
let embed_key: i32 = helper.store_str.call1(py, (s,)).unwrap().extract(py).unwrap();
|
||||
assert_eq!(
|
||||
embed_key, *key,
|
||||
"string {s} is out of sync between embedding map (key={embed_key}) and \
|
||||
the internal string store (key={key})"
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
link_fn(&main)
|
||||
}
|
||||
|
||||
/// Returns the [`TargetTriple`] used for compiling to [isa].
|
||||
fn get_llvm_target_triple(isa: Isa) -> TargetTriple {
|
||||
match isa {
|
||||
Isa::Host => TargetMachine::get_default_triple(),
|
||||
Isa::RiscV32G | Isa::RiscV32IMA => TargetTriple::create("riscv32-unknown-linux"),
|
||||
Isa::CortexA9 => TargetTriple::create("armv7-unknown-linux-gnueabihf"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [`String`] representing the target CPU used for compiling to [isa].
|
||||
fn get_llvm_target_cpu(isa: Isa) -> String {
|
||||
match isa {
|
||||
Isa::Host => TargetMachine::get_host_cpu_name().to_string(),
|
||||
Isa::RiscV32G | Isa::RiscV32IMA => "generic-rv32".to_string(),
|
||||
Isa::CortexA9 => "cortex-a9".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the [`String`] representing the target features used for compiling to [isa].
|
||||
fn get_llvm_target_features(isa: Isa) -> String {
|
||||
match isa {
|
||||
Isa::Host => TargetMachine::get_host_cpu_features().to_string(),
|
||||
Isa::RiscV32G => "+a,+m,+f,+d".to_string(),
|
||||
Isa::RiscV32IMA => "+a,+m".to_string(),
|
||||
Isa::CortexA9 => "+dsp,+fp16,+neon,+vfp3,+long-calls".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an instance of [`CodeGenTargetMachineOptions`] representing the target machine
|
||||
/// options used for compiling to [isa].
|
||||
fn get_llvm_target_options(isa: Isa) -> CodeGenTargetMachineOptions {
|
||||
CodeGenTargetMachineOptions {
|
||||
triple: Nac3::get_llvm_target_triple(isa).as_str().to_string_lossy().into_owned(),
|
||||
cpu: Nac3::get_llvm_target_cpu(isa),
|
||||
features: Nac3::get_llvm_target_features(isa),
|
||||
reloc_mode: RelocMode::PIC,
|
||||
..CodeGenTargetMachineOptions::from_host()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns an instance of [`TargetMachine`] used in compiling and linking of a program to the
|
||||
/// target [ISA][isa].
|
||||
/// target [isa].
|
||||
fn get_llvm_target_machine(&self) -> TargetMachine {
|
||||
self.isa.create_llvm_target_machine(self.llvm_options.opt_level)
|
||||
Nac3::get_llvm_target_options(self.isa)
|
||||
.create_target_machine(self.llvm_options.opt_level)
|
||||
.expect("couldn't create target machine")
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the Name.id from a decorator, supports decorators with arguments.
fn decorator_id_string(decorator: &Located<ExprKind>) -> Option<String> {
    if let ExprKind::Name { id, .. } = decorator.node {
        // Bare decorator
        return Some(id.to_string());
    } else if let ExprKind::Call { func, .. } = &decorator.node {
        // Decorators that are calls (e.g. "@rpc()") have Call for the node,
        // need to extract the id from within.
        if let ExprKind::Name { id, .. } = func.node {
            return Some(id.to_string());
        }
    }
    None
}

/// Retrieves flags from a decorator, if any.
fn decorator_get_flags(decorator: &Located<ExprKind>) -> Vec<Constant> {
    let mut flags = vec![];
    if let ExprKind::Call { keywords, .. } = &decorator.node {
        for keyword in keywords {
            if keyword.node.arg != Some("flags".into()) {
                continue;
            }
            if let ExprKind::Set { elts } = &keyword.node.value.node {
                for elt in elts {
                    if let ExprKind::Constant { value, .. } = &elt.node {
                        flags.push(value.clone());
                    }
                }
            }
        }
    }
    flags
}

fn link_with_lld(elf_filename: String, obj_filename: String) -> PyResult<()> {
|
||||
let linker_args = vec![
|
||||
"-shared".to_string(),
|
||||
@ -1043,8 +847,7 @@ impl Nac3 {
|
||||
Isa::RiscV32IMA => &timeline::NOW_PINNING_TIME_FNS,
|
||||
Isa::CortexA9 | Isa::Host => &timeline::EXTERN_TIME_FNS,
|
||||
};
|
||||
let (primitive, _) =
|
||||
TopLevelComposer::make_primitives(isa.get_size_type(&Context::create()));
|
||||
let primitive: PrimitiveStore = TopLevelComposer::make_primitives(isa.get_size_type()).0;
|
||||
let builtins = vec![
|
||||
(
|
||||
"now_mu".into(),
|
||||
@ -1060,7 +863,6 @@ impl Nac3 {
|
||||
name: "t".into(),
|
||||
ty: primitive.int64,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitive.none,
|
||||
vars: VarMap::new(),
|
||||
@ -1080,7 +882,6 @@ impl Nac3 {
|
||||
name: "dt".into(),
|
||||
ty: primitive.int64,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
ret: primitive.none,
|
||||
vars: VarMap::new(),
|
||||
@ -1132,54 +933,11 @@ impl Nac3 {
|
||||
tuple: get_attr_id(builtins_mod, "tuple"),
|
||||
exception: get_attr_id(builtins_mod, "Exception"),
|
||||
option: get_id(artiq_builtins.get_item("Option").ok().flatten().unwrap()),
|
||||
module: get_attr_id(types_mod, "ModuleType"),
|
||||
};
|
||||
|
||||
let working_directory = tempfile::Builder::new().prefix("nac3-").tempdir().unwrap();
|
||||
fs::write(working_directory.path().join("kernel.ld"), include_bytes!("kernel.ld")).unwrap();
|
||||
|
||||
let mut string_store: HashMap<String, i32> = HashMap::default();
|
||||
|
||||
// Keep this list of exceptions in sync with `EXCEPTION_ID_LOOKUP` in `artiq::firmware::ksupport::eh_artiq`
|
||||
// The exceptions declared here must be defined in `artiq.coredevice.exceptions`
|
||||
// Verify synchronization by running the test cases in `artiq.test.coredevice.test_exceptions`
|
||||
let runtime_exception_names = [
|
||||
"RTIOUnderflow",
|
||||
"RTIOOverflow",
|
||||
"RTIODestinationUnreachable",
|
||||
"DMAError",
|
||||
"I2CError",
|
||||
"CacheError",
|
||||
"SPIError",
|
||||
"SubkernelError",
|
||||
"0:AssertionError",
|
||||
"0:AttributeError",
|
||||
"0:IndexError",
|
||||
"0:IOError",
|
||||
"0:KeyError",
|
||||
"0:NotImplementedError",
|
||||
"0:OverflowError",
|
||||
"0:RuntimeError",
|
||||
"0:TimeoutError",
|
||||
"0:TypeError",
|
||||
"0:ValueError",
|
||||
"0:ZeroDivisionError",
|
||||
"0:LinAlgError",
|
||||
"UnwrapNoneError",
|
||||
];
|
||||
|
||||
// Preallocate runtime exception names
|
||||
for (i, name) in runtime_exception_names.iter().enumerate() {
|
||||
let exn_name = if name.find(':').is_none() {
|
||||
format!("0:artiq.coredevice.exceptions.{name}")
|
||||
} else {
|
||||
(*name).to_string()
|
||||
};
|
||||
|
||||
let id = i32::try_from(i).unwrap();
|
||||
string_store.insert(exn_name, id);
|
||||
}
|
||||
|
||||
Ok(Nac3 {
|
||||
isa,
|
||||
time_fns,
|
||||
@ -1189,22 +947,17 @@ impl Nac3 {
|
||||
top_levels: Vec::default(),
|
||||
pyid_to_def: Arc::default(),
|
||||
working_directory,
|
||||
string_store: Arc::new(string_store.into()),
|
||||
string_store: Arc::default(),
|
||||
exception_ids: Arc::default(),
|
||||
deferred_eval_store: DeferredEvaluationStore::new(),
|
||||
llvm_options: CodeGenLLVMOptions {
|
||||
opt_level: OptimizationLevel::Default,
|
||||
target: isa.get_llvm_target_options(),
|
||||
target: Nac3::get_llvm_target_options(isa),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
fn analyze(
|
||||
&mut self,
|
||||
functions: &PySet,
|
||||
classes: &PySet,
|
||||
content_modules: &PySet,
|
||||
) -> PyResult<()> {
|
||||
fn analyze(&mut self, functions: &PySet, classes: &PySet) -> PyResult<()> {
|
||||
let (modules, class_ids) =
|
||||
Python::with_gil(|py| -> PyResult<(HashMap<u64, PyObject>, HashSet<u64>)> {
|
||||
let mut modules: HashMap<u64, PyObject> = HashMap::new();
|
||||
@ -1214,21 +967,13 @@ impl Nac3 {
|
||||
let getmodule_fn = PyModule::import(py, "inspect")?.getattr("getmodule")?;
|
||||
|
||||
for function in functions {
|
||||
let module: PyObject = getmodule_fn.call1((function,))?.extract()?;
|
||||
if !module.is_none(py) {
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
}
|
||||
let module = getmodule_fn.call1((function,))?.extract()?;
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
}
|
||||
for class in classes {
|
||||
let module: PyObject = getmodule_fn.call1((class,))?.extract()?;
|
||||
if !module.is_none(py) {
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
}
|
||||
class_ids.insert(id_fn.call1((class,))?.extract()?);
|
||||
}
|
||||
for module in content_modules {
|
||||
let module: PyObject = module.extract()?;
|
||||
let module = getmodule_fn.call1((class,))?.extract()?;
|
||||
modules.insert(id_fn.call1((&module,))?.extract()?, module);
|
||||
class_ids.insert(id_fn.call1((class,))?.extract()?);
|
||||
}
|
||||
Ok((modules, class_ids))
|
||||
})?;
|
||||
|
@ -1,32 +1,14 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering::Relaxed},
|
||||
Arc,
|
||||
},
|
||||
use inkwell::{
|
||||
types::{BasicType, BasicTypeEnum},
|
||||
values::BasicValueEnum,
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use itertools::Itertools;
|
||||
use parking_lot::RwLock;
|
||||
use pyo3::{
|
||||
types::{PyDict, PyTuple},
|
||||
PyAny, PyErr, PyObject, PyResult, Python,
|
||||
};
|
||||
|
||||
use super::PrimitivePythonId;
|
||||
use nac3core::{
|
||||
codegen::{
|
||||
types::{ndarray::NDArrayType, ProxyType},
|
||||
values::ndarray::make_contiguous_strides,
|
||||
classes::{NDArrayType, ProxyType},
|
||||
CodeGenContext, CodeGenerator,
|
||||
},
|
||||
inkwell::{
|
||||
module::Linkage,
|
||||
types::{BasicType, BasicTypeEnum},
|
||||
values::{BasicValue, BasicValueEnum},
|
||||
AddressSpace,
|
||||
},
|
||||
nac3parser::ast::{self, StrRef},
|
||||
symbol_resolver::{StaticValue, SymbolResolver, SymbolValue, ValueEnum},
|
||||
toplevel::{
|
||||
helper::PrimDef,
|
||||
@ -38,6 +20,21 @@ use nac3core::{
|
||||
typedef::{into_var_map, iter_type_vars, Type, TypeEnum, TypeVar, Unifier, VarMap},
|
||||
},
|
||||
};
|
||||
use nac3parser::ast::{self, StrRef};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use pyo3::{
|
||||
types::{PyDict, PyTuple},
|
||||
PyAny, PyObject, PyResult, Python,
|
||||
};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering::Relaxed},
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::PrimitivePythonId;
|
||||
|
||||
pub enum PrimitiveValue {
|
||||
I32(i32),
|
||||
@ -82,6 +79,7 @@ pub struct InnerResolver {
|
||||
pub id_to_primitive: RwLock<HashMap<u64, PrimitiveValue>>,
|
||||
pub field_to_val: RwLock<HashMap<ResolverField, Option<PyFieldHandle>>>,
|
||||
pub global_value_ids: Arc<RwLock<HashMap<u64, PyObject>>>,
|
||||
pub class_names: Mutex<HashMap<StrRef, Type>>,
|
||||
pub pyid_to_def: Arc<RwLock<HashMap<u64, DefinitionId>>>,
|
||||
pub pyid_to_type: Arc<RwLock<HashMap<u64, Type>>>,
|
||||
pub primitive_ids: PrimitivePythonId,
|
||||
@ -135,8 +133,6 @@ impl StaticValue for PythonValue {
|
||||
format!("{}_const", self.id).as_str(),
|
||||
);
|
||||
global.set_constant(true);
|
||||
// Set linkage of global to private to avoid name collisions
|
||||
global.set_linkage(Linkage::Private);
|
||||
global.set_initializer(&ctx.ctx.const_struct(
|
||||
&[ctx.ctx.i32_type().const_int(u64::from(id), false).into()],
|
||||
false,
|
||||
@ -167,7 +163,7 @@ impl StaticValue for PythonValue {
|
||||
PrimitiveValue::Bool(val) => {
|
||||
ctx.ctx.i8_type().const_int(u64::from(*val), false).into()
|
||||
}
|
||||
PrimitiveValue::Str(val) => ctx.gen_string(generator, val).into(),
|
||||
PrimitiveValue::Str(val) => ctx.ctx.const_string(val.as_bytes(), true).into(),
|
||||
});
|
||||
}
|
||||
if let Some(global) = ctx.module.get_global(&self.id.to_string()) {
|
||||
@ -355,7 +351,7 @@ impl InnerResolver {
|
||||
Ok(Ok((ndarray, false)))
|
||||
} else if ty_id == self.primitive_ids.tuple {
|
||||
// do not handle type var param and concrete check here
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: vec![], is_vararg_ctx: false }), false)))
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: vec![] }), false)))
|
||||
} else if ty_id == self.primitive_ids.option {
|
||||
Ok(Ok((primitives.option, false)))
|
||||
} else if ty_id == self.primitive_ids.none {
|
||||
@ -559,10 +555,7 @@ impl InnerResolver {
|
||||
Err(err) => return Ok(Err(err)),
|
||||
_ => return Ok(Err("tuple type needs at least 1 type parameters".to_string()))
|
||||
};
|
||||
Ok(Ok((
|
||||
unifier.add_ty(TypeEnum::TTuple { ty: args, is_vararg_ctx: false }),
|
||||
true,
|
||||
)))
|
||||
Ok(Ok((unifier.add_ty(TypeEnum::TTuple { ty: args }), true)))
|
||||
}
|
||||
TypeEnum::TObj { params, obj_id, .. } => {
|
||||
let subst = {
|
||||
@ -674,48 +667,6 @@ impl InnerResolver {
|
||||
})
|
||||
});
|
||||
|
||||
// check if obj is module
|
||||
if self.helper.id_fn.call1(py, (ty.clone(),))?.extract::<u64>(py)?
|
||||
== self.primitive_ids.module
|
||||
&& self.pyid_to_def.read().contains_key(&py_obj_id)
|
||||
{
|
||||
let def_id = self.pyid_to_def.read()[&py_obj_id];
|
||||
let def = defs[def_id.0].read();
|
||||
let TopLevelDef::Module { name: module_name, module_id, attributes, methods, .. } =
|
||||
&*def
|
||||
else {
|
||||
unreachable!("must be a module here");
|
||||
};
|
||||
// Construct the module return type
|
||||
let mut module_attributes = HashMap::new();
|
||||
for (name, _) in attributes {
|
||||
let attribute_obj = obj.getattr(name.to_string().as_str())?;
|
||||
let attribute_ty =
|
||||
self.get_obj_type(py, attribute_obj, unifier, defs, primitives)?;
|
||||
if let Ok(attribute_ty) = attribute_ty {
|
||||
module_attributes.insert(*name, (attribute_ty, false));
|
||||
} else {
|
||||
return Ok(Err(format!("Unable to resolve {module_name}.{name}")));
|
||||
}
|
||||
}
|
||||
|
||||
for name in methods.keys() {
|
||||
let method_obj = obj.getattr(name.to_string().as_str())?;
|
||||
let method_ty = self.get_obj_type(py, method_obj, unifier, defs, primitives)?;
|
||||
if let Ok(method_ty) = method_ty {
|
||||
module_attributes.insert(*name, (method_ty, true));
|
||||
} else {
|
||||
return Ok(Err(format!("Unable to resolve {module_name}.{name}")));
|
||||
}
|
||||
}
|
||||
|
||||
let module_ty =
|
||||
TypeEnum::TModule { module_id: *module_id, attributes: module_attributes };
|
||||
|
||||
let ty = unifier.add_ty(module_ty);
|
||||
return Ok(Ok(ty));
|
||||
}
|
||||
|
||||
if let Some(ty) = constructor_ty {
|
||||
self.pyid_to_type.write().insert(py_obj_id, ty);
|
||||
return Ok(Ok(ty));
|
||||
@ -846,9 +797,7 @@ impl InnerResolver {
|
||||
.map(|elem| self.get_obj_type(py, elem, unifier, defs, primitives))
|
||||
.collect();
|
||||
let types = types?;
|
||||
Ok(types.map(|types| {
|
||||
unifier.add_ty(TypeEnum::TTuple { ty: types, is_vararg_ctx: false })
|
||||
}))
|
||||
Ok(types.map(|types| unifier.add_ty(TypeEnum::TTuple { ty: types })))
|
||||
}
|
||||
// special handling for option type since its class member layout in python side
|
||||
// is special and cannot be mapped directly to a nac3 type as below
|
||||
@ -973,13 +922,10 @@ impl InnerResolver {
|
||||
|_| Ok(Ok(extracted_ty)),
|
||||
)
|
||||
} else if unifier.unioned(extracted_ty, primitives.bool) {
|
||||
if obj.extract::<bool>().is_ok()
|
||||
|| obj.call_method("__bool__", (), None)?.extract::<bool>().is_ok()
|
||||
{
|
||||
Ok(Ok(extracted_ty))
|
||||
} else {
|
||||
Ok(Err(format!("{obj} is not in the range of bool")))
|
||||
}
|
||||
obj.extract::<bool>().map_or_else(
|
||||
|_| Ok(Err(format!("{obj} is not in the range of bool"))),
|
||||
|_| Ok(Ok(extracted_ty)),
|
||||
)
|
||||
} else if unifier.unioned(extracted_ty, primitives.float) {
|
||||
obj.extract::<f64>().map_or_else(
|
||||
|_| Ok(Err(format!("{obj} is not in the range of float64"))),
|
||||
@ -1019,18 +965,14 @@ impl InnerResolver {
|
||||
let val: u64 = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::U64(val));
|
||||
Ok(Some(ctx.ctx.i64_type().const_int(val, false).into()))
|
||||
} else if ty_id == self.primitive_ids.bool {
|
||||
} else if ty_id == self.primitive_ids.bool || ty_id == self.primitive_ids.np_bool_ {
|
||||
let val: bool = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::Bool(val));
|
||||
Ok(Some(ctx.ctx.i8_type().const_int(u64::from(val), false).into()))
|
||||
} else if ty_id == self.primitive_ids.np_bool_ {
|
||||
let val: bool = obj.call_method("__bool__", (), None)?.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::Bool(val));
|
||||
Ok(Some(ctx.ctx.i8_type().const_int(u64::from(val), false).into()))
|
||||
} else if ty_id == self.primitive_ids.string || ty_id == self.primitive_ids.np_str_ {
|
||||
let val: String = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::Str(val.clone()));
|
||||
Ok(Some(ctx.gen_string(generator, val).into()))
|
||||
Ok(Some(ctx.ctx.const_string(val.as_bytes(), true).into()))
|
||||
} else if ty_id == self.primitive_ids.float || ty_id == self.primitive_ids.float64 {
|
||||
let val: f64 = obj.extract().unwrap();
|
||||
self.id_to_primitive.write().insert(id, PrimitiveValue::F64(val));
|
||||
@ -1049,15 +991,8 @@ impl InnerResolver {
|
||||
}
|
||||
_ => unreachable!("must be list"),
|
||||
};
|
||||
let size_t = ctx.get_size_type();
|
||||
let ty = if len == 0
|
||||
&& matches!(&*ctx.unifier.get_ty_immutable(elem_ty), TypeEnum::TVar { .. })
|
||||
{
|
||||
// The default type for zero-length lists of unknown element type is size_t
|
||||
size_t.into()
|
||||
} else {
|
||||
ctx.get_llvm_type(generator, elem_ty)
|
||||
};
|
||||
let ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
let size_t = generator.get_size_type(ctx.ctx);
|
||||
let arr_ty = ctx
|
||||
.ctx
|
||||
.struct_type(&[ty.ptr_type(AddressSpace::default()).into(), size_t.into()], false);
|
||||
@ -1134,19 +1069,18 @@ impl InnerResolver {
|
||||
} else {
|
||||
unreachable!("must be ndarray")
|
||||
};
|
||||
let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ndarray_ty);
|
||||
let (ndarray_dtype, ndarray_ndims) =
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, ndarray_ty);
|
||||
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_ndarray = NDArrayType::from_unifier_type(generator, ctx, ndarray_ty);
|
||||
let dtype = llvm_ndarray.element_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let ndarray_dtype_llvm_ty = ctx.get_llvm_type(generator, ndarray_dtype);
|
||||
let ndarray_llvm_ty = NDArrayType::new(generator, ctx.ctx, ndarray_dtype_llvm_ty);
|
||||
|
||||
{
|
||||
if self.global_value_ids.read().contains_key(&id) {
|
||||
let global = ctx.module.get_global(&id_str).unwrap_or_else(|| {
|
||||
ctx.module.add_global(
|
||||
llvm_ndarray.as_base_type().get_element_type().into_struct_type(),
|
||||
ndarray_llvm_ty.as_underlying_type(),
|
||||
Some(AddressSpace::default()),
|
||||
&id_str,
|
||||
)
|
||||
@ -1156,44 +1090,40 @@ impl InnerResolver {
|
||||
self.global_value_ids.write().insert(id, obj.into());
|
||||
}
|
||||
|
||||
let ndims = llvm_ndarray.ndims();
|
||||
let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndarray_ndims)
|
||||
else {
|
||||
unreachable!("Expected Literal for ndarray_ndims")
|
||||
};
|
||||
|
||||
let ndarray_ndims = if values.len() == 1 {
|
||||
values[0].clone()
|
||||
} else {
|
||||
todo!("Unpacking literal of more than one element unimplemented")
|
||||
};
|
||||
let Ok(ndarray_ndims) = u64::try_from(ndarray_ndims) else {
|
||||
unreachable!("Expected u64 value for ndarray_ndims")
|
||||
};
|
||||
|
||||
// Obtain the shape of the ndarray
|
||||
let shape_tuple: &PyTuple = obj.getattr("shape")?.downcast()?;
|
||||
assert_eq!(shape_tuple.len(), ndims as usize);
|
||||
|
||||
// The Rust type inferencer cannot figure this out
|
||||
let shape_values = shape_tuple
|
||||
assert_eq!(shape_tuple.len(), ndarray_ndims as usize);
|
||||
let shape_values: Result<Option<Vec<_>>, _> = shape_tuple
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, elem)| {
|
||||
let value = self
|
||||
.get_obj_value(py, elem, ctx, generator, ctx.primitives.usize())
|
||||
.map_err(|e| {
|
||||
super::CompileError::new_err(format!("Error getting element {i}: {e}"))
|
||||
})?
|
||||
.unwrap();
|
||||
let value = ctx
|
||||
.builder
|
||||
.build_int_z_extend(value.into_int_value(), llvm_usize, "")
|
||||
.unwrap();
|
||||
Ok(value)
|
||||
self.get_obj_value(py, elem, ctx, generator, ctx.primitives.usize()).map_err(
|
||||
|e| super::CompileError::new_err(format!("Error getting element {i}: {e}")),
|
||||
)
|
||||
})
|
||||
.collect::<Result<Vec<_>, PyErr>>()?;
|
||||
|
||||
// Also use this opportunity to get the constant values of `shape_values` for calculating strides.
|
||||
let shape_u64s = shape_values
|
||||
.iter()
|
||||
.map(|dim| {
|
||||
assert!(dim.is_const());
|
||||
dim.get_zero_extended_constant().unwrap()
|
||||
})
|
||||
.collect_vec();
|
||||
let shape_values = llvm_usize.const_array(&shape_values);
|
||||
.collect();
|
||||
let shape_values = shape_values?.unwrap();
|
||||
let shape_values = llvm_usize.const_array(
|
||||
&shape_values.into_iter().map(BasicValueEnum::into_int_value).collect_vec(),
|
||||
);
|
||||
|
||||
// create a global for ndarray.shape and initialize it using the shape
|
||||
let shape_global = ctx.module.add_global(
|
||||
llvm_usize.array_type(ndims as u32),
|
||||
llvm_usize.array_type(ndarray_ndims as u32),
|
||||
Some(AddressSpace::default()),
|
||||
&(id_str.clone() + ".shape"),
|
||||
);
|
||||
@ -1201,25 +1131,17 @@ impl InnerResolver {
|
||||
|
||||
// Obtain the (flattened) elements of the ndarray
|
||||
let sz: usize = obj.getattr("size")?.extract()?;
|
||||
let data: Vec<_> = (0..sz)
|
||||
let data: Result<Option<Vec<_>>, _> = (0..sz)
|
||||
.map(|i| {
|
||||
obj.getattr("flat")?.get_item(i).and_then(|elem| {
|
||||
let value = self
|
||||
.get_obj_value(py, elem, ctx, generator, ndarray_dtype)
|
||||
.map_err(|e| {
|
||||
super::CompileError::new_err(format!(
|
||||
"Error getting element {i}: {e}"
|
||||
))
|
||||
})?
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(value.get_type(), dtype);
|
||||
Ok(value)
|
||||
self.get_obj_value(py, elem, ctx, generator, ndarray_dtype).map_err(|e| {
|
||||
super::CompileError::new_err(format!("Error getting element {i}: {e}"))
|
||||
})
|
||||
})
|
||||
})
|
||||
.try_collect()?;
|
||||
let data = data.into_iter();
|
||||
let data = match dtype {
|
||||
.collect();
|
||||
let data = data?.unwrap().into_iter();
|
||||
let data = match ndarray_dtype_llvm_ty {
|
||||
BasicTypeEnum::ArrayType(ty) => {
|
||||
ty.const_array(&data.map(BasicValueEnum::into_array_value).collect_vec())
|
||||
}
|
||||
@ -1244,102 +1166,37 @@ impl InnerResolver {
|
||||
};
|
||||
|
||||
// create a global for ndarray.data and initialize it using the elements
|
||||
//
|
||||
// NOTE: NDArray's `data` is `u8*`. Here, `data_global` is an array of `dtype`.
|
||||
// We will have to cast it to an `u8*` later.
|
||||
let data_global = ctx.module.add_global(
|
||||
dtype.array_type(sz as u32),
|
||||
ndarray_dtype_llvm_ty.array_type(sz as u32),
|
||||
Some(AddressSpace::default()),
|
||||
&(id_str.clone() + ".data"),
|
||||
);
|
||||
data_global.set_initializer(&data);
|
||||
|
||||
// Get the constant itemsize.
|
||||
//
|
||||
// NOTE: dtype.size_of() may return a non-constant, where `TargetData::get_store_size`
|
||||
// will always return a constant size.
|
||||
let itemsize = ctx
|
||||
.registry
|
||||
.llvm_options
|
||||
.create_target_machine()
|
||||
.map(|tm| tm.get_target_data().get_store_size(&dtype))
|
||||
.unwrap();
|
||||
assert_ne!(itemsize, 0);
|
||||
|
||||
// Create the strides needed for ndarray.strides
|
||||
let strides = make_contiguous_strides(itemsize, ndims, &shape_u64s);
|
||||
let strides =
|
||||
strides.into_iter().map(|stride| llvm_usize.const_int(stride, false)).collect_vec();
|
||||
let strides = llvm_usize.const_array(&strides);
|
||||
|
||||
// create a global for ndarray.strides and initialize it
|
||||
let strides_global = ctx.module.add_global(
|
||||
llvm_usize.array_type(ndims as u32),
|
||||
Some(AddressSpace::default()),
|
||||
&format!("${id_str}.strides"),
|
||||
);
|
||||
strides_global.set_initializer(&strides);
|
||||
|
||||
// create a global for the ndarray object and initialize it
|
||||
let value = ndarray_llvm_ty.as_underlying_type().const_named_struct(&[
|
||||
llvm_usize.const_int(ndarray_ndims, false).into(),
|
||||
shape_global
|
||||
.as_pointer_value()
|
||||
.const_cast(llvm_usize.ptr_type(AddressSpace::default()))
|
||||
.into(),
|
||||
data_global
|
||||
.as_pointer_value()
|
||||
.const_cast(ndarray_dtype_llvm_ty.ptr_type(AddressSpace::default()))
|
||||
.into(),
|
||||
]);
|
||||
|
||||
// NOTE: data_global is an array of dtype, we want a `u8*`.
|
||||
let ndarray_data = data_global.as_pointer_value();
|
||||
let ndarray_data = ctx.builder.build_pointer_cast(ndarray_data, llvm_pi8, "").unwrap();
|
||||
|
||||
let ndarray_itemsize = llvm_usize.const_int(itemsize, false);
|
||||
|
||||
let ndarray_ndims = llvm_usize.const_int(ndims, false);
|
||||
|
||||
// calling as_pointer_value on shape and strides returns [i64 x ndims]*
|
||||
// convert into i64* to conform with expected layout of ndarray
|
||||
|
||||
let ndarray_shape = shape_global.as_pointer_value();
|
||||
let ndarray_shape = unsafe {
|
||||
ctx.builder
|
||||
.build_in_bounds_gep(
|
||||
ndarray_shape,
|
||||
&[llvm_usize.const_zero(), llvm_usize.const_zero()],
|
||||
"",
|
||||
)
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let ndarray_strides = strides_global.as_pointer_value();
|
||||
let ndarray_strides = unsafe {
|
||||
ctx.builder
|
||||
.build_in_bounds_gep(
|
||||
ndarray_strides,
|
||||
&[llvm_usize.const_zero(), llvm_usize.const_zero()],
|
||||
"",
|
||||
)
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let ndarray = llvm_ndarray
|
||||
.as_base_type()
|
||||
.get_element_type()
|
||||
.into_struct_type()
|
||||
.const_named_struct(&[
|
||||
ndarray_itemsize.into(),
|
||||
ndarray_ndims.into(),
|
||||
ndarray_shape.into(),
|
||||
ndarray_strides.into(),
|
||||
ndarray_data.into(),
|
||||
]);
|
||||
|
||||
let ndarray_global = ctx.module.add_global(
|
||||
llvm_ndarray.as_base_type().get_element_type().into_struct_type(),
|
||||
let ndarray = ctx.module.add_global(
|
||||
ndarray_llvm_ty.as_underlying_type(),
|
||||
Some(AddressSpace::default()),
|
||||
&id_str,
|
||||
);
|
||||
ndarray_global.set_initializer(&ndarray);
|
||||
ndarray.set_initializer(&value);
|
||||
|
||||
Ok(Some(ndarray_global.as_pointer_value().into()))
|
||||
Ok(Some(ndarray.as_pointer_value().into()))
|
||||
} else if ty_id == self.primitive_ids.tuple {
|
||||
let expected_ty_enum = ctx.unifier.get_ty_immutable(expected_ty);
|
||||
let TypeEnum::TTuple { ty, is_vararg_ctx: false } = expected_ty_enum.as_ref() else {
|
||||
unreachable!()
|
||||
};
|
||||
let TypeEnum::TTuple { ty } = expected_ty_enum.as_ref() else { unreachable!() };
|
||||
|
||||
let tup_tys = ty.iter();
|
||||
let elements: &PyTuple = obj.downcast()?;
|
||||
@ -1415,77 +1272,6 @@ impl InnerResolver {
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
} else if ty_id == self.primitive_ids.module {
|
||||
let id_str = id.to_string();
|
||||
|
||||
if let Some(global) = ctx.module.get_global(&id_str) {
|
||||
return Ok(Some(global.as_pointer_value().into()));
|
||||
}
|
||||
|
||||
let top_level_defs = ctx.top_level.definitions.read();
|
||||
let ty = self
|
||||
.get_obj_type(py, obj, &mut ctx.unifier, &top_level_defs, &ctx.primitives)?
|
||||
.unwrap();
|
||||
let ty = ctx
|
||||
.get_llvm_type(generator, ty)
|
||||
.into_pointer_type()
|
||||
.get_element_type()
|
||||
.into_struct_type();
|
||||
|
||||
{
|
||||
if self.global_value_ids.read().contains_key(&id) {
|
||||
let global = ctx.module.get_global(&id_str).unwrap_or_else(|| {
|
||||
ctx.module.add_global(ty, Some(AddressSpace::default()), &id_str)
|
||||
});
|
||||
return Ok(Some(global.as_pointer_value().into()));
|
||||
}
|
||||
self.global_value_ids.write().insert(id, obj.into());
|
||||
}
|
||||
|
||||
let fields = {
|
||||
let definition =
|
||||
top_level_defs.get(self.pyid_to_def.read().get(&id).unwrap().0).unwrap().read();
|
||||
let TopLevelDef::Module { attributes, .. } = &*definition else { unreachable!() };
|
||||
attributes
|
||||
.iter()
|
||||
.filter_map(|f| {
|
||||
let definition = top_level_defs.get(f.1 .0).unwrap().read();
|
||||
if let TopLevelDef::Variable { ty, .. } = &*definition {
|
||||
Some((f.0, *ty))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect_vec()
|
||||
};
|
||||
|
||||
let values: Result<Option<Vec<_>>, _> = fields
|
||||
.iter()
|
||||
.map(|(name, ty)| {
|
||||
self.get_obj_value(
|
||||
py,
|
||||
obj.getattr(name.to_string().as_str())?,
|
||||
ctx,
|
||||
generator,
|
||||
*ty,
|
||||
)
|
||||
.map_err(|e| {
|
||||
super::CompileError::new_err(format!("Error getting field {name}: {e}"))
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let values = values?;
|
||||
|
||||
if let Some(values) = values {
|
||||
let val = ty.const_named_struct(&values);
|
||||
let global = ctx.module.get_global(&id_str).unwrap_or_else(|| {
|
||||
ctx.module.add_global(ty, Some(AddressSpace::default()), &id_str)
|
||||
});
|
||||
global.set_initializer(&val);
|
||||
Ok(Some(global.as_pointer_value().into()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
} else {
|
||||
let id_str = id.to_string();
|
||||
|
||||
@ -1565,12 +1351,9 @@ impl InnerResolver {
|
||||
} else if ty_id == self.primitive_ids.uint64 {
|
||||
let val: u64 = obj.extract()?;
|
||||
Ok(SymbolValue::U64(val))
|
||||
} else if ty_id == self.primitive_ids.bool {
|
||||
} else if ty_id == self.primitive_ids.bool || ty_id == self.primitive_ids.np_bool_ {
|
||||
let val: bool = obj.extract()?;
|
||||
Ok(SymbolValue::Bool(val))
|
||||
} else if ty_id == self.primitive_ids.np_bool_ {
|
||||
let val: bool = obj.call_method("__bool__", (), None)?.extract()?;
|
||||
Ok(SymbolValue::Bool(val))
|
||||
} else if ty_id == self.primitive_ids.string || ty_id == self.primitive_ids.np_str_ {
|
||||
let val: String = obj.extract()?;
|
||||
Ok(SymbolValue::Str(val))
|
||||
@ -1668,50 +1451,8 @@ impl SymbolResolver for Resolver {
|
||||
fn get_symbol_value<'ctx>(
|
||||
&self,
|
||||
id: StrRef,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
generator: &mut dyn CodeGenerator,
|
||||
_: &mut CodeGenContext<'ctx, '_>,
|
||||
) -> Option<ValueEnum<'ctx>> {
|
||||
if let Some(def_id) = self.0.id_to_def.read().get(&id) {
|
||||
let top_levels = ctx.top_level.definitions.read();
|
||||
if matches!(&*top_levels[def_id.0].read(), TopLevelDef::Variable { .. }) {
|
||||
let module_val = &self.0.module;
|
||||
let ret = Python::with_gil(|py| -> PyResult<Result<BasicValueEnum, String>> {
|
||||
let module_val = module_val.as_ref(py);
|
||||
|
||||
let ty = self.0.get_obj_type(
|
||||
py,
|
||||
module_val,
|
||||
&mut ctx.unifier,
|
||||
&top_levels,
|
||||
&ctx.primitives,
|
||||
)?;
|
||||
if let Err(ty) = ty {
|
||||
return Ok(Err(ty));
|
||||
}
|
||||
let ty = ty.unwrap();
|
||||
let obj = self.0.get_obj_value(py, module_val, ctx, generator, ty)?.unwrap();
|
||||
let (idx, _) = ctx.get_attr_index(ty, id);
|
||||
let ret = unsafe {
|
||||
ctx.builder.build_gep(
|
||||
obj.into_pointer_value(),
|
||||
&[
|
||||
ctx.ctx.i32_type().const_zero(),
|
||||
ctx.ctx.i32_type().const_int(idx as u64, false),
|
||||
],
|
||||
id.to_string().as_str(),
|
||||
)
|
||||
}
|
||||
.unwrap();
|
||||
Ok(Ok(ret.as_basic_value_enum()))
|
||||
})
|
||||
.unwrap();
|
||||
if ret.is_err() {
|
||||
return None;
|
||||
}
|
||||
return Some(ret.unwrap().into());
|
||||
}
|
||||
}
|
||||
|
||||
let sym_value = {
|
||||
let id_to_val = self.0.id_to_pyval.read();
|
||||
id_to_val.get(&id).cloned()
|
||||
@ -1772,7 +1513,10 @@ impl SymbolResolver for Resolver {
|
||||
if let Some(id) = string_store.get(s) {
|
||||
*id
|
||||
} else {
|
||||
let id = i32::try_from(string_store.len()).unwrap();
|
||||
let id = Python::with_gil(|py| -> PyResult<i32> {
|
||||
self.0.helper.store_str.call1(py, (s,))?.extract(py)
|
||||
})
|
||||
.unwrap();
|
||||
string_store.insert(s.into(), id);
|
||||
id
|
||||
}
|
||||
|
@ -1,12 +1,9 @@
|
||||
use itertools::Either;
|
||||
|
||||
use nac3core::{
|
||||
codegen::CodeGenContext,
|
||||
inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue},
|
||||
AddressSpace, AtomicOrdering,
|
||||
},
|
||||
use inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue},
|
||||
AddressSpace, AtomicOrdering,
|
||||
};
|
||||
use itertools::Either;
|
||||
use nac3core::codegen::CodeGenContext;
|
||||
|
||||
/// Functions for manipulating the timeline.
|
||||
pub trait TimeFns {
|
||||
@ -34,7 +31,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
||||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
@ -83,7 +80,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
||||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
@ -112,7 +109,7 @@ impl TimeFns for NowPinningTimeFns64 {
|
||||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
@ -210,7 +207,7 @@ impl TimeFns for NowPinningTimeFns {
|
||||
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
@ -261,7 +258,7 @@ impl TimeFns for NowPinningTimeFns {
|
||||
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "time.lo").unwrap();
|
||||
let now_hiptr = ctx
|
||||
.builder
|
||||
.build_bit_cast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
|
@ -10,6 +10,7 @@ constant-optimization = ["fold"]
|
||||
fold = []
|
||||
|
||||
[dependencies]
|
||||
lazy_static = "1.5"
|
||||
parking_lot = "0.12"
|
||||
string-interner = "0.17"
|
||||
fxhash = "0.2"
|
||||
|
@ -5,12 +5,14 @@ pub use crate::location::Location;
|
||||
|
||||
use fxhash::FxBuildHasher;
|
||||
use parking_lot::{Mutex, MutexGuard};
|
||||
use std::{cell::RefCell, collections::HashMap, fmt, sync::LazyLock};
|
||||
use std::{cell::RefCell, collections::HashMap, fmt};
|
||||
use string_interner::{symbol::SymbolU32, DefaultBackend, StringInterner};
|
||||
|
||||
pub type Interner = StringInterner<DefaultBackend, FxBuildHasher>;
|
||||
static INTERNER: LazyLock<Mutex<Interner>> =
|
||||
LazyLock::new(|| Mutex::new(StringInterner::with_hasher(FxBuildHasher::default())));
|
||||
lazy_static! {
|
||||
static ref INTERNER: Mutex<Interner> =
|
||||
Mutex::new(StringInterner::with_hasher(FxBuildHasher::default()));
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static LOCAL_INTERNER: RefCell<HashMap<String, StrRef>> = RefCell::default();
|
||||
|
@ -1,4 +1,10 @@
|
||||
#![deny(future_incompatible, let_underscore, nonstandard_style, clippy::all)]
|
||||
#![deny(
|
||||
future_incompatible,
|
||||
let_underscore,
|
||||
nonstandard_style,
|
||||
rust_2024_compatibility,
|
||||
clippy::all
|
||||
)]
|
||||
#![warn(clippy::pedantic)]
|
||||
#![allow(
|
||||
clippy::missing_errors_doc,
|
||||
@ -8,6 +14,9 @@
|
||||
clippy::wildcard_imports
|
||||
)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
mod ast_gen;
|
||||
mod constant;
|
||||
#[cfg(feature = "fold")]
|
||||
|
@ -1,29 +1,26 @@
|
||||
[features]
|
||||
test = []
|
||||
|
||||
[package]
|
||||
name = "nac3core"
|
||||
version = "0.1.0"
|
||||
authors = ["M-Labs"]
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
default = ["derive"]
|
||||
derive = ["dep:nac3core_derive"]
|
||||
no-escape-analysis = []
|
||||
|
||||
[dependencies]
|
||||
itertools = "0.13"
|
||||
crossbeam = "0.8"
|
||||
indexmap = "2.6"
|
||||
indexmap = "2.2"
|
||||
parking_lot = "0.12"
|
||||
rayon = "1.10"
|
||||
nac3core_derive = { path = "nac3core_derive", optional = true }
|
||||
rayon = "1.8"
|
||||
nac3parser = { path = "../nac3parser" }
|
||||
strum = "0.26"
|
||||
strum_macros = "0.26"
|
||||
strum = "0.26.2"
|
||||
strum_macros = "0.26.4"
|
||||
|
||||
[dependencies.inkwell]
|
||||
version = "0.5"
|
||||
version = "0.4"
|
||||
default-features = false
|
||||
features = ["llvm14-0-prefer-dynamic", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]
|
||||
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]
|
||||
|
||||
[dev-dependencies]
|
||||
test-case = "1.2.0"
|
||||
|
@ -1,3 +1,4 @@
|
||||
use regex::Regex;
|
||||
use std::{
|
||||
env,
|
||||
fs::File,
|
||||
@ -6,58 +7,45 @@ use std::{
|
||||
process::{Command, Stdio},
|
||||
};
|
||||
|
||||
use regex::Regex;
|
||||
|
||||
fn main() {
|
||||
let out_dir = env::var("OUT_DIR").unwrap();
|
||||
let out_dir = Path::new(&out_dir);
|
||||
let irrt_dir = Path::new("irrt");
|
||||
|
||||
fn compile_irrt(irrt_dir: &Path, out_dir: &Path) {
|
||||
let irrt_cpp_path = irrt_dir.join("irrt.cpp");
|
||||
|
||||
/*
|
||||
* HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
|
||||
* Compiling for WASM32 and filtering the output with regex is the closest we can get.
|
||||
*/
|
||||
let mut flags: Vec<&str> = vec![
|
||||
let flags: &[&str] = &[
|
||||
"--target=wasm32",
|
||||
irrt_cpp_path.to_str().unwrap(),
|
||||
"-x",
|
||||
"c++",
|
||||
"-std=c++20",
|
||||
"-fno-discard-value-names",
|
||||
"-fno-exceptions",
|
||||
"-fno-rtti",
|
||||
match env::var("PROFILE").as_deref() {
|
||||
Ok("debug") => "-O0",
|
||||
Ok("release") => "-O3",
|
||||
flavor => panic!("Unknown or missing build flavor {flavor:?}"),
|
||||
},
|
||||
"-emit-llvm",
|
||||
"-S",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-o",
|
||||
"-",
|
||||
"-Werror=return-type",
|
||||
"-I",
|
||||
irrt_dir.to_str().unwrap(),
|
||||
irrt_cpp_path.to_str().unwrap(),
|
||||
"-o",
|
||||
"-",
|
||||
];
|
||||
|
||||
match env::var("PROFILE").as_deref() {
|
||||
Ok("debug") => {
|
||||
flags.push("-O0");
|
||||
flags.push("-DIRRT_DEBUG_ASSERT");
|
||||
}
|
||||
Ok("release") => {
|
||||
flags.push("-O3");
|
||||
}
|
||||
flavor => panic!("Unknown or missing build flavor {flavor:?}"),
|
||||
}
|
||||
println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
|
||||
|
||||
// Tell Cargo to rerun if any file under `irrt_dir` (recursive) changes
|
||||
println!("cargo:rerun-if-changed={}", irrt_dir.to_str().unwrap());
|
||||
|
||||
// Compile IRRT and capture the LLVM IR output
|
||||
let output = Command::new("clang-irrt")
|
||||
.args(flags)
|
||||
.output()
|
||||
.inspect(|o| {
|
||||
.map(|o| {
|
||||
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
|
||||
o
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
@ -65,17 +53,11 @@ fn main() {
|
||||
let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n");
|
||||
let mut filtered_output = String::with_capacity(output.len());
|
||||
|
||||
// Filter out irrelevant IR
|
||||
//
|
||||
// Regex:
|
||||
// - `(?ms:^define.*?\}$)` captures LLVM `define` blocks
|
||||
// - `(?m:^declare.*?$)` captures LLVM `declare` lines
|
||||
// - `(?m:^%.+?=\s*type\s*\{.+?\}$)` captures LLVM `type` declarations
|
||||
// - `(?m:^@.+?=.+$)` captures global constants
|
||||
let regex_filter = Regex::new(
|
||||
r"(?ms:^define.*?\}$)|(?m:^declare.*?$)|(?m:^%.+?=\s*type\s*\{.+?\}$)|(?m:^@.+?=.+$)",
|
||||
)
|
||||
.unwrap();
|
||||
// (?ms:^define.*?\}$) to capture `define` blocks
|
||||
// (?m:^declare.*?$) to capture `declare` blocks
|
||||
// (?m:^%.+?=\s*type\s*\{.+?\}$) to capture `type` declarations
|
||||
let regex_filter =
|
||||
Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)|(?m:^%.+?=\s*type\s*\{.+?\}$)").unwrap();
|
||||
for f in regex_filter.captures_iter(&output) {
|
||||
assert_eq!(f.len(), 1);
|
||||
filtered_output.push_str(&f[0]);
|
||||
@ -86,14 +68,10 @@ fn main() {
|
||||
.unwrap()
|
||||
.replace_all(&filtered_output, "");
|
||||
|
||||
// For debugging
|
||||
// Doing `DEBUG_DUMP_IRRT=1 cargo build -p nac3core` dumps the LLVM IR generated
|
||||
const DEBUG_DUMP_IRRT: &str = "DEBUG_DUMP_IRRT";
|
||||
println!("cargo:rerun-if-env-changed={DEBUG_DUMP_IRRT}");
|
||||
if env::var(DEBUG_DUMP_IRRT).is_ok() {
|
||||
println!("cargo:rerun-if-env-changed=DEBUG_DUMP_IRRT");
|
||||
if env::var("DEBUG_DUMP_IRRT").is_ok() {
|
||||
let mut file = File::create(out_dir.join("irrt.ll")).unwrap();
|
||||
file.write_all(output.as_bytes()).unwrap();
|
||||
|
||||
let mut file = File::create(out_dir.join("irrt-filtered.ll")).unwrap();
|
||||
file.write_all(filtered_output.as_bytes()).unwrap();
|
||||
}
|
||||
@ -107,3 +85,50 @@ fn main() {
|
||||
llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
|
||||
assert!(llvm_as.wait().unwrap().success());
|
||||
}
|
||||
|
||||
fn compile_irrt_test(irrt_dir: &Path, out_dir: &Path) {
|
||||
let irrt_test_cpp_path = irrt_dir.join("irrt_test.cpp");
|
||||
let exe_path = out_dir.join("irrt_test.out");
|
||||
|
||||
let flags: &[&str] = &[
|
||||
irrt_test_cpp_path.to_str().unwrap(),
|
||||
"-x",
|
||||
"c++",
|
||||
"-I",
|
||||
irrt_dir.to_str().unwrap(),
|
||||
"-g",
|
||||
"-fno-discard-value-names",
|
||||
"-O0",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-Werror=return-type",
|
||||
"-lm", // for `tgamma()`, `lgamma()`
|
||||
"-o",
|
||||
exe_path.to_str().unwrap(),
|
||||
];
|
||||
|
||||
Command::new("clang-irrt-test")
|
||||
.args(flags)
|
||||
.output()
|
||||
.map(|o| {
|
||||
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
|
||||
o
|
||||
})
|
||||
.unwrap();
|
||||
println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let out_dir = env::var("OUT_DIR").unwrap();
|
||||
let out_dir = Path::new(&out_dir);
|
||||
|
||||
let irrt_dir = Path::new("./irrt");
|
||||
|
||||
compile_irrt(irrt_dir, out_dir);
|
||||
|
||||
// https://github.com/rust-lang/cargo/issues/2549
|
||||
// `cargo test -F test` to also build `irrt_test.cpp`
|
||||
if cfg!(feature = "test") {
|
||||
compile_irrt_test(irrt_dir, out_dir);
|
||||
}
|
||||
}
|
||||
|
@ -1,15 +1,5 @@
#include "irrt/exception.hpp"
#include "irrt/list.hpp"
#include "irrt/math.hpp"
#include "irrt/range.hpp"
#include "irrt/slice.hpp"
#include "irrt/string.hpp"
#include "irrt/ndarray/basic.hpp"
#include "irrt/ndarray/def.hpp"
#include "irrt/ndarray/iter.hpp"
#include "irrt/ndarray/indexing.hpp"
#include "irrt/ndarray/array.hpp"
#include "irrt/ndarray/reshape.hpp"
#include "irrt/ndarray/broadcast.hpp"
#include "irrt/ndarray/transpose.hpp"
#include "irrt/ndarray/matmul.hpp"
#include "irrt_everything.hpp"

/*
This file will be read by `clang-irrt` to conveniently produce LLVM IR for `nac3core/codegen`.
*/

437
nac3core/irrt/irrt.hpp
Normal file
@ -0,0 +1,437 @@
|
||||
#ifndef IRRT_DONT_TYPEDEF_INTS
|
||||
typedef _BitInt(8) int8_t;
|
||||
typedef unsigned _BitInt(8) uint8_t;
|
||||
typedef _BitInt(32) int32_t;
|
||||
typedef unsigned _BitInt(32) uint32_t;
|
||||
typedef _BitInt(64) int64_t;
|
||||
typedef unsigned _BitInt(64) uint64_t;
|
||||
#endif
|
||||
|
||||
// NDArray indices are always `uint32_t`.
|
||||
typedef uint32_t NDIndex;
|
||||
// The type of an index or a value describing the length of a range/slice is
|
||||
// always `int32_t`.
|
||||
typedef int32_t SliceIndex;
|
||||
|
||||
template <typename T>
|
||||
static T max(T a, T b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static T min(T a, T b) {
|
||||
return a > b ? b : a;
|
||||
}
|
||||
|
||||
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
|
||||
// need to make sure `exp >= 0` before calling this function
|
||||
template <typename T>
|
||||
static T __nac3_int_exp_impl(T base, T exp) {
|
||||
T res = 1;
|
||||
/* repeated squaring method */
|
||||
do {
|
||||
if (exp & 1) {
|
||||
res *= base; /* for n odd */
|
||||
}
|
||||
exp >>= 1;
|
||||
base *= base;
|
||||
} while (exp);
|
||||
return res;
|
||||
}
|
||||
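// A minimal usage sketch of the repeated-squaring helper above; the `demo_*`
// name is illustrative only, and __builtin_trap() is used so the check stays
// freestanding (no libc assert needed).
[[maybe_unused]] static void demo_int_exp() {
    // 3^4: the base is squared as 3 -> 9 -> 81 and `res` picks up the factor 81 once.
    if (__nac3_int_exp_impl<int32_t>(3, 4) != 81) __builtin_trap();
    // 2^10 = 1024.
    if (__nac3_int_exp_impl<uint64_t>(2, 10) != 1024) __builtin_trap();
}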
|
||||
template <typename SizeT>
|
||||
static SizeT __nac3_ndarray_calc_size_impl(
|
||||
const SizeT *list_data,
|
||||
SizeT list_len,
|
||||
SizeT begin_idx,
|
||||
SizeT end_idx
|
||||
) {
|
||||
__builtin_assume(end_idx <= list_len);
|
||||
|
||||
SizeT num_elems = 1;
|
||||
for (SizeT i = begin_idx; i < end_idx; ++i) {
|
||||
SizeT val = list_data[i];
|
||||
__builtin_assume(val > 0);
|
||||
num_elems *= val;
|
||||
}
|
||||
return num_elems;
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
static void __nac3_ndarray_calc_nd_indices_impl(
|
||||
SizeT index,
|
||||
const SizeT *dims,
|
||||
SizeT num_dims,
|
||||
NDIndex *idxs
|
||||
) {
|
||||
SizeT stride = 1;
|
||||
for (SizeT dim = 0; dim < num_dims; dim++) {
|
||||
SizeT i = num_dims - dim - 1;
|
||||
__builtin_assume(dims[i] > 0);
|
||||
idxs[i] = (index / stride) % dims[i];
|
||||
stride *= dims[i];
|
||||
}
|
||||
}
|
||||
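// A small worked example for the index decomposition above (the `demo_*` name
// is illustrative only): for a C-contiguous shape {2, 3, 4}, flat index 17
// decomposes into the multi-index {1, 1, 1}, since 1*12 + 1*4 + 1 = 17.
[[maybe_unused]] static void demo_calc_nd_indices() {
    uint32_t dims[3] = {2, 3, 4};
    NDIndex idxs[3];
    __nac3_ndarray_calc_nd_indices_impl<uint32_t>(17, dims, 3, idxs);
    if (idxs[0] != 1 || idxs[1] != 1 || idxs[2] != 1) __builtin_trap();
}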
|
||||
template <typename SizeT>
|
||||
static SizeT __nac3_ndarray_flatten_index_impl(
|
||||
const SizeT *dims,
|
||||
SizeT num_dims,
|
||||
const NDIndex *indices,
|
||||
SizeT num_indices
|
||||
) {
|
||||
SizeT idx = 0;
|
||||
SizeT stride = 1;
|
||||
for (SizeT i = 0; i < num_dims; ++i) {
|
||||
SizeT ri = num_dims - i - 1;
|
||||
if (ri < num_indices) {
|
||||
idx += stride * indices[ri];
|
||||
}
|
||||
|
||||
__builtin_assume(dims[i] > 0);
|
||||
stride *= dims[ri];
|
||||
}
|
||||
return idx;
|
||||
}
|
||||
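// The inverse operation, as a worked example (the `demo_*` name is illustrative
// only): with shape {2, 3, 4}, the multi-index {1, 2, 3} flattens to
// 1*12 + 2*4 + 3 = 23.
[[maybe_unused]] static void demo_flatten_index() {
    uint32_t dims[3] = {2, 3, 4};
    NDIndex indices[3] = {1, 2, 3};
    if (__nac3_ndarray_flatten_index_impl<uint32_t>(dims, 3, indices, 3) != 23) __builtin_trap();
}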
|
||||
template <typename SizeT>
|
||||
static void __nac3_ndarray_calc_broadcast_impl(
|
||||
const SizeT *lhs_dims,
|
||||
SizeT lhs_ndims,
|
||||
const SizeT *rhs_dims,
|
||||
SizeT rhs_ndims,
|
||||
SizeT *out_dims
|
||||
) {
|
||||
SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;
|
||||
|
||||
for (SizeT i = 0; i < max_ndims; ++i) {
|
||||
const SizeT *lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
|
||||
const SizeT *rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;
|
||||
SizeT *out_dim = &out_dims[max_ndims - i - 1];
|
||||
|
||||
if (lhs_dim_sz == nullptr) {
|
||||
*out_dim = *rhs_dim_sz;
|
||||
} else if (rhs_dim_sz == nullptr) {
|
||||
*out_dim = *lhs_dim_sz;
|
||||
} else if (*lhs_dim_sz == 1) {
|
||||
*out_dim = *rhs_dim_sz;
|
||||
} else if (*rhs_dim_sz == 1) {
|
||||
*out_dim = *lhs_dim_sz;
|
||||
} else if (*lhs_dim_sz == *rhs_dim_sz) {
|
||||
*out_dim = *lhs_dim_sz;
|
||||
} else {
|
||||
__builtin_unreachable();
|
||||
}
|
||||
}
|
||||
}
|
||||
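// A worked example of the NumPy-style broadcasting rule implemented above (the
// `demo_*` name is illustrative only): shapes are aligned from the trailing
// dimension, a missing dimension is taken from the other operand, and size-1
// dimensions stretch, so {3, 1, 5} broadcast with {4, 5} gives {3, 4, 5}.
[[maybe_unused]] static void demo_calc_broadcast() {
    uint32_t lhs[3] = {3, 1, 5};
    uint32_t rhs[2] = {4, 5};
    uint32_t out[3];
    __nac3_ndarray_calc_broadcast_impl<uint32_t>(lhs, 3, rhs, 2, out);
    if (out[0] != 3 || out[1] != 4 || out[2] != 5) __builtin_trap();
}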
|
||||
template <typename SizeT>
|
||||
static void __nac3_ndarray_calc_broadcast_idx_impl(
|
||||
const SizeT *src_dims,
|
||||
SizeT src_ndims,
|
||||
const NDIndex *in_idx,
|
||||
NDIndex *out_idx
|
||||
) {
|
||||
for (SizeT i = 0; i < src_ndims; ++i) {
|
||||
SizeT src_i = src_ndims - i - 1;
|
||||
out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
|
||||
}
|
||||
}
|
||||
|
||||
template<typename SizeT>
|
||||
static void __nac3_ndarray_strides_from_shape_impl(
|
||||
SizeT ndims,
|
||||
SizeT *shape,
|
||||
SizeT *dst_strides
|
||||
) {
|
||||
SizeT stride_product = 1;
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
int dim_i = ndims - i - 1;
|
||||
dst_strides[dim_i] = stride_product;
|
||||
stride_product *= shape[dim_i];
|
||||
}
|
||||
}
|
||||
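// A worked example for the stride computation above (the `demo_*` name is
// illustrative only): for a C-contiguous shape {2, 3, 4} the strides, counted
// in elements rather than bytes, are {12, 4, 1}; each stride is the product of
// all later dimensions.
[[maybe_unused]] static void demo_strides_from_shape() {
    uint32_t shape[3] = {2, 3, 4};
    uint32_t strides[3];
    __nac3_ndarray_strides_from_shape_impl<uint32_t>(3, shape, strides);
    if (strides[0] != 12 || strides[1] != 4 || strides[2] != 1) __builtin_trap();
}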
|
||||
extern "C" {
|
||||
#define DEF_nac3_int_exp_(T) \
|
||||
T __nac3_int_exp_##T(T base, T exp) {\
|
||||
return __nac3_int_exp_impl(base, exp);\
|
||||
}
|
||||
|
||||
DEF_nac3_int_exp_(int32_t)
|
||||
DEF_nac3_int_exp_(int64_t)
|
||||
DEF_nac3_int_exp_(uint32_t)
|
||||
DEF_nac3_int_exp_(uint64_t)
|
||||
|
||||
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
|
||||
if (i < 0) {
|
||||
i = len + i;
|
||||
}
|
||||
if (i < 0) {
|
||||
return 0;
|
||||
} else if (i > len) {
|
||||
return len;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
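// A quick illustration of the Python-style index clamping above (the `demo_*`
// name is illustrative only): with len == 5, index -2 wraps to 3, 7 clamps to
// the length 5, and -10 clamps to 0.
[[maybe_unused]] static void demo_slice_index_bound() {
    if (__nac3_slice_index_bound(-2, 5) != 3) __builtin_trap();
    if (__nac3_slice_index_bound(7, 5) != 5) __builtin_trap();
    if (__nac3_slice_index_bound(-10, 5) != 0) __builtin_trap();
}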
|
||||
SliceIndex __nac3_range_slice_len(
|
||||
const SliceIndex start,
|
||||
const SliceIndex end,
|
||||
const SliceIndex step
|
||||
) {
|
||||
SliceIndex diff = end - start;
|
||||
if (diff > 0 && step > 0) {
|
||||
return ((diff - 1) / step) + 1;
|
||||
} else if (diff < 0 && step < 0) {
|
||||
return ((diff + 1) / step) + 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
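// A worked example for the slice-length formula above (the `demo_*` name is
// illustrative only): range(0, 10, 3) covers {0, 3, 6, 9} so its length is 4,
// range(10, 0, -2) covers {10, 8, 6, 4, 2} so its length is 5, and a direction
// mismatch between the span and the step yields 0.
[[maybe_unused]] static void demo_range_slice_len() {
    if (__nac3_range_slice_len(0, 10, 3) != 4) __builtin_trap();
    if (__nac3_range_slice_len(10, 0, -2) != 5) __builtin_trap();
    if (__nac3_range_slice_len(0, 10, -1) != 0) __builtin_trap();
}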
|
||||
// Handle list assignment and dropping part of the list when
|
||||
// both dest_step and src_step are +1.
|
||||
// - All the index must *not* be out-of-bound or negative,
|
||||
// - The end index is *inclusive*,
|
||||
// - The length of src and dest slice size should already
|
||||
// be checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
|
||||
SliceIndex __nac3_list_slice_assign_var_size(
|
||||
SliceIndex dest_start,
|
||||
SliceIndex dest_end,
|
||||
SliceIndex dest_step,
|
||||
uint8_t *dest_arr,
|
||||
SliceIndex dest_arr_len,
|
||||
SliceIndex src_start,
|
||||
SliceIndex src_end,
|
||||
SliceIndex src_step,
|
||||
uint8_t *src_arr,
|
||||
SliceIndex src_arr_len,
|
||||
const SliceIndex size
|
||||
) {
|
||||
/* if dest_arr_len == 0, do nothing since we do not support extending list */
|
||||
if (dest_arr_len == 0) return dest_arr_len;
|
||||
/* if both step is 1, memmove directly, handle the dropping of the list, and shrink size */
|
||||
if (src_step == dest_step && dest_step == 1) {
|
||||
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
|
||||
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
|
||||
if (src_len > 0) {
|
||||
__builtin_memmove(
|
||||
dest_arr + dest_start * size,
|
||||
src_arr + src_start * size,
|
||||
src_len * size
|
||||
);
|
||||
}
|
||||
if (dest_len > 0) {
|
||||
/* dropping */
|
||||
__builtin_memmove(
|
||||
dest_arr + (dest_start + src_len) * size,
|
||||
dest_arr + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size
|
||||
);
|
||||
}
|
||||
/* shrink size */
|
||||
return dest_arr_len - (dest_len - src_len);
|
||||
}
|
||||
/* if two range overlaps, need alloca */
|
||||
uint8_t need_alloca =
|
||||
(dest_arr == src_arr)
|
||||
&& !(
|
||||
max(dest_start, dest_end) < min(src_start, src_end)
|
||||
|| max(src_start, src_end) < min(dest_start, dest_end)
|
||||
);
|
||||
if (need_alloca) {
|
||||
uint8_t *tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
|
||||
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
|
||||
src_arr = tmp;
|
||||
}
|
||||
SliceIndex src_ind = src_start;
|
||||
SliceIndex dest_ind = dest_start;
|
||||
for (;
|
||||
(src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
|
||||
src_ind += src_step, dest_ind += dest_step
|
||||
) {
|
||||
/* for constant optimization */
|
||||
if (size == 1) {
|
||||
__builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
|
||||
} else if (size == 4) {
|
||||
__builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
|
||||
} else if (size == 8) {
|
||||
__builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
|
||||
} else {
|
||||
/* memcpy for var size, cannot overlap after previous alloca */
|
||||
__builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
|
||||
}
|
||||
}
|
||||
/* only dest_step == 1 can we shrink the dest list. */
|
||||
/* size should be ensured prior to calling this function */
|
||||
if (dest_step == 1 && dest_end >= dest_start) {
|
||||
__builtin_memmove(
|
||||
dest_arr + dest_ind * size,
|
||||
dest_arr + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size
|
||||
);
|
||||
return dest_arr_len - (dest_end - dest_ind) - 1;
|
||||
}
|
||||
return dest_arr_len;
|
||||
}
|
||||
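// A worked example for the slice assignment above (the `demo_*` name is
// illustrative only). With int32 elements (size == 4), assigning the
// two-element source [9, 9] into the three-element destination slice dest[1:4]
// (step 1, inclusive end index 3) overwrites dest[1..2], drops the leftover
// slot, and shrinks the list from [1, 2, 3, 4, 5] to [1, 9, 9, 5], returning
// the new length 4.
[[maybe_unused]] static void demo_list_slice_assign() {
    int32_t dest[5] = {1, 2, 3, 4, 5};
    int32_t src[2] = {9, 9};
    SliceIndex new_len = __nac3_list_slice_assign_var_size(
        /* dest */ 1, 3, 1, reinterpret_cast<uint8_t *>(dest), 5,
        /* src  */ 0, 1, 1, reinterpret_cast<uint8_t *>(src), 2,
        /* size */ 4);
    if (new_len != 4) __builtin_trap();
    if (dest[0] != 1 || dest[1] != 9 || dest[2] != 9 || dest[3] != 5) __builtin_trap();
}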
|
||||
int32_t __nac3_isinf(double x) {
|
||||
return __builtin_isinf(x);
|
||||
}
|
||||
|
||||
int32_t __nac3_isnan(double x) {
|
||||
return __builtin_isnan(x);
|
||||
}
|
||||
|
||||
double tgamma(double arg);
|
||||
|
||||
double __nac3_gamma(double z) {
|
||||
// Handling for denormals
|
||||
// | x | Python gamma(x) | C tgamma(x) |
|
||||
// --- | ----------------- | --------------- | ----------- |
|
||||
// (1) | nan | nan | nan |
|
||||
// (2) | -inf | -inf | inf |
|
||||
// (3) | inf | inf | inf |
|
||||
// (4) | 0.0 | inf | inf |
|
||||
// (5) | {-1.0, -2.0, ...} | inf | nan |
|
||||
|
||||
// (1)-(3)
|
||||
if (__builtin_isinf(z) || __builtin_isnan(z)) {
|
||||
return z;
|
||||
}
|
||||
|
||||
double v = tgamma(z);
|
||||
|
||||
// (4)-(5)
|
||||
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
|
||||
}
|
||||
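// Two spot checks of the special cases documented above (the `demo_*` name is
// illustrative only; running this requires a libm that provides tgamma):
// negative integer poles such as -2.0 map to +inf rather than tgamma's nan,
// while -inf is passed through unchanged.
[[maybe_unused]] static void demo_gamma_edge_cases() {
    double pole = __nac3_gamma(-2.0);
    if (!(__builtin_isinf(pole) && pole > 0.0)) __builtin_trap();
    if (__nac3_gamma(-__builtin_inf()) != -__builtin_inf()) __builtin_trap();
}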
|
||||
double lgamma(double arg);
|
||||
|
||||
double __nac3_gammaln(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: gammaln(-inf) -> -inf
|
||||
// - libm : lgamma(-inf) -> inf
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return x;
|
||||
}
|
||||
|
||||
return lgamma(x);
|
||||
}
|
||||
|
||||
double j0(double x);
|
||||
|
||||
double __nac3_j0(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: j0(inf) -> nan
|
||||
// - libm : j0(inf) -> 0.0
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return __builtin_nan("");
|
||||
}
|
||||
|
||||
return j0(x);
|
||||
}
|
||||
|
||||
uint32_t __nac3_ndarray_calc_size(
|
||||
const uint32_t *list_data,
|
||||
uint32_t list_len,
|
||||
uint32_t begin_idx,
|
||||
uint32_t end_idx
|
||||
) {
|
||||
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_calc_size64(
|
||||
const uint64_t *list_data,
|
||||
uint64_t list_len,
|
||||
uint64_t begin_idx,
|
||||
uint64_t end_idx
|
||||
) {
|
||||
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_nd_indices(
|
||||
uint32_t index,
|
||||
const uint32_t* dims,
|
||||
uint32_t num_dims,
|
||||
NDIndex* idxs
|
||||
) {
|
||||
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_nd_indices64(
|
||||
uint64_t index,
|
||||
const uint64_t* dims,
|
||||
uint64_t num_dims,
|
||||
NDIndex* idxs
|
||||
) {
|
||||
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
|
||||
}
|
||||
|
||||
uint32_t __nac3_ndarray_flatten_index(
|
||||
const uint32_t* dims,
|
||||
uint32_t num_dims,
|
||||
const NDIndex* indices,
|
||||
uint32_t num_indices
|
||||
) {
|
||||
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_flatten_index64(
|
||||
const uint64_t* dims,
|
||||
uint64_t num_dims,
|
||||
const NDIndex* indices,
|
||||
uint64_t num_indices
|
||||
) {
|
||||
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_broadcast(
|
||||
const uint32_t *lhs_dims,
|
||||
uint32_t lhs_ndims,
|
||||
const uint32_t *rhs_dims,
|
||||
uint32_t rhs_ndims,
|
||||
uint32_t *out_dims
|
||||
) {
|
||||
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_broadcast64(
|
||||
const uint64_t *lhs_dims,
|
||||
uint64_t lhs_ndims,
|
||||
const uint64_t *rhs_dims,
|
||||
uint64_t rhs_ndims,
|
||||
uint64_t *out_dims
|
||||
) {
|
||||
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_broadcast_idx(
|
||||
const uint32_t *src_dims,
|
||||
uint32_t src_ndims,
|
||||
const NDIndex *in_idx,
|
||||
NDIndex *out_idx
|
||||
) {
|
||||
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_calc_broadcast_idx64(
|
||||
const uint64_t *src_dims,
|
||||
uint64_t src_ndims,
|
||||
const NDIndex *in_idx,
|
||||
NDIndex *out_idx
|
||||
) {
|
||||
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_strides_from_shape(uint32_t ndims, uint32_t* shape, uint32_t* dst_strides) {
|
||||
__nac3_ndarray_strides_from_shape_impl(ndims, shape, dst_strides);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_strides_from_shape64(uint64_t ndims, uint64_t* shape, uint64_t* dst_strides) {
|
||||
__nac3_ndarray_strides_from_shape_impl(ndims, shape, dst_strides);
|
||||
}
|
||||
}
|
@ -1,9 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
template<typename SizeT>
|
||||
struct CSlice {
|
||||
void* base;
|
||||
SizeT len;
|
||||
};
|
@ -1,25 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
// Set in nac3core/build.rs
|
||||
#ifdef IRRT_DEBUG_ASSERT
|
||||
#define IRRT_DEBUG_ASSERT_BOOL true
|
||||
#else
|
||||
#define IRRT_DEBUG_ASSERT_BOOL false
|
||||
#endif
|
||||
|
||||
#define raise_debug_assert(SizeT, msg, param1, param2, param3) \
|
||||
raise_exception(SizeT, EXN_ASSERTION_ERROR, "IRRT debug assert failed: " msg, param1, param2, param3)
|
||||
|
||||
#define debug_assert_eq(SizeT, lhs, rhs) \
|
||||
if constexpr (IRRT_DEBUG_ASSERT_BOOL) { \
|
||||
if ((lhs) != (rhs)) { \
|
||||
raise_debug_assert(SizeT, "LHS = {0}. RHS = {1}", lhs, rhs, NO_PARAM); \
|
||||
} \
|
||||
}
|
||||
|
||||
#define debug_assert(SizeT, expr) \
|
||||
if constexpr (IRRT_DEBUG_ASSERT_BOOL) { \
|
||||
if (!(expr)) { \
|
||||
raise_debug_assert(SizeT, "Got false.", NO_PARAM, NO_PARAM, NO_PARAM); \
|
||||
} \
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/cslice.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
/**
|
||||
* @brief The int type of ARTIQ exception IDs.
|
||||
*/
|
||||
using ExceptionId = int32_t;
|
||||
|
||||
/*
|
||||
* Set of exceptions C++ IRRT can use.
|
||||
* Must be synchronized with `setup_irrt_exceptions` in `nac3core/src/codegen/irrt/mod.rs`.
|
||||
*/
|
||||
extern "C" {
|
||||
ExceptionId EXN_INDEX_ERROR;
|
||||
ExceptionId EXN_VALUE_ERROR;
|
||||
ExceptionId EXN_ASSERTION_ERROR;
|
||||
ExceptionId EXN_TYPE_ERROR;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Extern function to `__nac3_raise`
|
||||
*
|
||||
* The parameter `err` could be `Exception<int32_t>` or `Exception<int64_t>`. The caller
|
||||
* must make sure to pass `Exception`s with the correct `SizeT` depending on the `size_t` of the runtime.
|
||||
*/
|
||||
extern "C" void __nac3_raise(void* err);
|
||||
|
||||
namespace {
|
||||
/**
|
||||
* @brief NAC3's Exception struct
|
||||
*/
|
||||
template<typename SizeT>
|
||||
struct Exception {
|
||||
ExceptionId id;
|
||||
CSlice<SizeT> filename;
|
||||
int32_t line;
|
||||
int32_t column;
|
||||
CSlice<SizeT> function;
|
||||
CSlice<SizeT> msg;
|
||||
int64_t params[3];
|
||||
};
|
||||
|
||||
constexpr int64_t NO_PARAM = 0;
|
||||
|
||||
template<typename SizeT>
|
||||
void _raise_exception_helper(ExceptionId id,
|
||||
const char* filename,
|
||||
int32_t line,
|
||||
const char* function,
|
||||
const char* msg,
|
||||
int64_t param0,
|
||||
int64_t param1,
|
||||
int64_t param2) {
|
||||
Exception<SizeT> e = {
|
||||
.id = id,
|
||||
.filename = {.base = reinterpret_cast<void*>(const_cast<char*>(filename)),
|
||||
.len = static_cast<SizeT>(__builtin_strlen(filename))},
|
||||
.line = line,
|
||||
.column = 0,
|
||||
.function = {.base = reinterpret_cast<void*>(const_cast<char*>(function)),
|
||||
.len = static_cast<SizeT>(__builtin_strlen(function))},
|
||||
.msg = {.base = reinterpret_cast<void*>(const_cast<char*>(msg)),
|
||||
.len = static_cast<SizeT>(__builtin_strlen(msg))},
|
||||
};
|
||||
e.params[0] = param0;
|
||||
e.params[1] = param1;
|
||||
e.params[2] = param2;
|
||||
__nac3_raise(reinterpret_cast<void*>(&e));
|
||||
__builtin_unreachable();
|
||||
}
|
||||
} // namespace
|
||||
|
||||
/**
|
||||
* @brief Raise an exception with location details (location in the IRRT source files).
|
||||
* @param SizeT The runtime `size_t` type.
|
||||
* @param id The ID of the exception to raise.
|
||||
* @param msg A global constant C-string of the error message.
|
||||
*
|
||||
* `param0` to `param2` are optional format arguments of `msg`. They should be set to
|
||||
* `NO_PARAM` to indicate they are unused.
|
||||
*/
|
||||
#define raise_exception(SizeT, id, msg, param0, param1, param2) \
|
||||
_raise_exception_helper<SizeT>(id, __FILE__, __LINE__, __FUNCTION__, msg, param0, param1, param2)
|
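// A hypothetical sketch of how an IRRT routine parameterized on the runtime
// size type could use the macro above; the `check_index` helper and its message
// are made up for illustration and are not taken from the codebase.
template<typename SizeT>
void check_index(SizeT i, SizeT len) {
    if (i >= len) {
        raise_exception(SizeT, EXN_INDEX_ERROR, "index {0} is out of bounds for a dimension of length {1}", i, len,
                        NO_PARAM);
    }
}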
@ -1,25 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#if __STDC_VERSION__ >= 202000
|
||||
using int8_t = _BitInt(8);
|
||||
using uint8_t = unsigned _BitInt(8);
|
||||
using int32_t = _BitInt(32);
|
||||
using uint32_t = unsigned _BitInt(32);
|
||||
using int64_t = _BitInt(64);
|
||||
using uint64_t = unsigned _BitInt(64);
|
||||
#else
|
||||
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-type"
|
||||
using int8_t = _ExtInt(8);
|
||||
using uint8_t = unsigned _ExtInt(8);
|
||||
using int32_t = _ExtInt(32);
|
||||
using uint32_t = unsigned _ExtInt(32);
|
||||
using int64_t = _ExtInt(64);
|
||||
using uint64_t = unsigned _ExtInt(64);
|
||||
#pragma clang diagnostic pop
|
||||
|
||||
#endif
|
||||
|
||||
// The type of an index or a value describing the length of a range/slice is always `int32_t`.
|
||||
using SliceIndex = int32_t;
|
@ -1,96 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/math_util.hpp"
|
||||
#include "irrt/slice.hpp"
|
||||
|
||||
namespace {
|
||||
/**
|
||||
* @brief A list in NAC3.
|
||||
*
|
||||
* The `items` field is opaque. You must rely on external contexts to
|
||||
* know how to interpret it.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
struct List {
|
||||
uint8_t* items;
|
||||
SizeT len;
|
||||
};
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
// Handle list assignment and dropping part of the list when
|
||||
// both dest_step and src_step are +1.
|
||||
// - All the index must *not* be out-of-bound or negative,
|
||||
// - The end index is *inclusive*,
|
||||
// - The length of src and dest slice size should already
|
||||
// be checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
|
||||
SliceIndex __nac3_list_slice_assign_var_size(SliceIndex dest_start,
|
||||
SliceIndex dest_end,
|
||||
SliceIndex dest_step,
|
||||
void* dest_arr,
|
||||
SliceIndex dest_arr_len,
|
||||
SliceIndex src_start,
|
||||
SliceIndex src_end,
|
||||
SliceIndex src_step,
|
||||
void* src_arr,
|
||||
SliceIndex src_arr_len,
|
||||
const SliceIndex size) {
|
||||
/* if dest_arr_len == 0, do nothing since we do not support extending list */
|
||||
if (dest_arr_len == 0)
|
||||
return dest_arr_len;
|
||||
/* if both step is 1, memmove directly, handle the dropping of the list, and shrink size */
|
||||
if (src_step == dest_step && dest_step == 1) {
|
||||
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
|
||||
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
|
||||
if (src_len > 0) {
|
||||
__builtin_memmove(static_cast<uint8_t*>(dest_arr) + dest_start * size,
|
||||
static_cast<uint8_t*>(src_arr) + src_start * size, src_len * size);
|
||||
}
|
||||
if (dest_len > 0) {
|
||||
/* dropping */
|
||||
__builtin_memmove(static_cast<uint8_t*>(dest_arr) + (dest_start + src_len) * size,
|
||||
static_cast<uint8_t*>(dest_arr) + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size);
|
||||
}
|
||||
/* shrink size */
|
||||
return dest_arr_len - (dest_len - src_len);
|
||||
}
|
||||
/* if two range overlaps, need alloca */
|
||||
uint8_t need_alloca = (dest_arr == src_arr)
|
||||
&& !(max(dest_start, dest_end) < min(src_start, src_end)
|
||||
|| max(src_start, src_end) < min(dest_start, dest_end));
|
||||
if (need_alloca) {
|
||||
void* tmp = __builtin_alloca(src_arr_len * size);
|
||||
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
|
||||
src_arr = tmp;
|
||||
}
|
||||
SliceIndex src_ind = src_start;
|
||||
SliceIndex dest_ind = dest_start;
|
||||
for (; (src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end); src_ind += src_step, dest_ind += dest_step) {
|
||||
/* for constant optimization */
|
||||
if (size == 1) {
|
||||
__builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind, static_cast<uint8_t*>(src_arr) + src_ind, 1);
|
||||
} else if (size == 4) {
|
||||
__builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * 4,
|
||||
static_cast<uint8_t*>(src_arr) + src_ind * 4, 4);
|
||||
} else if (size == 8) {
|
||||
__builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * 8,
|
||||
static_cast<uint8_t*>(src_arr) + src_ind * 8, 8);
|
||||
} else {
|
||||
/* memcpy for var size, cannot overlap after previous alloca */
|
||||
__builtin_memcpy(static_cast<uint8_t*>(dest_arr) + dest_ind * size,
|
||||
static_cast<uint8_t*>(src_arr) + src_ind * size, size);
|
||||
}
|
||||
}
|
||||
/* only dest_step == 1 can we shrink the dest list. */
|
||||
/* size should be ensured prior to calling this function */
|
||||
if (dest_step == 1 && dest_end >= dest_start) {
|
||||
__builtin_memmove(static_cast<uint8_t*>(dest_arr) + dest_ind * size,
|
||||
static_cast<uint8_t*>(dest_arr) + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size);
|
||||
return dest_arr_len - (dest_end - dest_ind) - 1;
|
||||
}
|
||||
return dest_arr_len;
|
||||
}
|
||||
} // extern "C"
|
@ -1,95 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
namespace {
|
||||
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
|
||||
// need to make sure `exp >= 0` before calling this function
|
||||
template<typename T>
|
||||
T __nac3_int_exp_impl(T base, T exp) {
|
||||
T res = 1;
|
||||
/* repeated squaring method */
|
||||
do {
|
||||
if (exp & 1) {
|
||||
res *= base; /* for n odd */
|
||||
}
|
||||
exp >>= 1;
|
||||
base *= base;
|
||||
} while (exp);
|
||||
return res;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
#define DEF_nac3_int_exp_(T) \
|
||||
T __nac3_int_exp_##T(T base, T exp) { \
|
||||
return __nac3_int_exp_impl(base, exp); \
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
|
||||
// Putting semicolons here to make clang-format not reformat this into
|
||||
// a stair shape.
|
||||
DEF_nac3_int_exp_(int32_t);
|
||||
DEF_nac3_int_exp_(int64_t);
|
||||
DEF_nac3_int_exp_(uint32_t);
|
||||
DEF_nac3_int_exp_(uint64_t);
|
||||
|
||||
int32_t __nac3_isinf(double x) {
|
||||
return __builtin_isinf(x);
|
||||
}
|
||||
|
||||
int32_t __nac3_isnan(double x) {
|
||||
return __builtin_isnan(x);
|
||||
}
|
||||
|
||||
double tgamma(double arg);
|
||||
|
||||
double __nac3_gamma(double z) {
|
||||
// Handling for denormals
|
||||
// | x | Python gamma(x) | C tgamma(x) |
|
||||
// --- | ----------------- | --------------- | ----------- |
|
||||
// (1) | nan | nan | nan |
|
||||
// (2) | -inf | -inf | inf |
|
||||
// (3) | inf | inf | inf |
|
||||
// (4) | 0.0 | inf | inf |
|
||||
// (5) | {-1.0, -2.0, ...} | inf | nan |
|
||||
|
||||
// (1)-(3)
|
||||
if (__builtin_isinf(z) || __builtin_isnan(z)) {
|
||||
return z;
|
||||
}
|
||||
|
||||
double v = tgamma(z);
|
||||
|
||||
// (4)-(5)
|
||||
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
|
||||
}
|
||||
|
||||
double lgamma(double arg);
|
||||
|
||||
double __nac3_gammaln(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: gammaln(-inf) -> -inf
|
||||
// - libm : lgamma(-inf) -> inf
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return x;
|
||||
}
|
||||
|
||||
return lgamma(x);
|
||||
}
|
||||
|
||||
double j0(double x);
|
||||
|
||||
double __nac3_j0(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: j0(inf) -> nan
|
||||
// - libm : j0(inf) -> 0.0
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return __builtin_nan("");
|
||||
}
|
||||
|
||||
return j0(x);
|
||||
}
|
||||
} // namespace
|
@ -1,13 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
namespace {
|
||||
template<typename T>
|
||||
const T& max(const T& a, const T& b) {
|
||||
return a > b ? a : b;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
const T& min(const T& a, const T& b) {
|
||||
return a > b ? b : a;
|
||||
}
|
||||
} // namespace
|
@ -1,132 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/list.hpp"
|
||||
#include "irrt/ndarray/basic.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
|
||||
namespace {
|
||||
namespace ndarray::array {
|
||||
/**
|
||||
* @brief In the context of `np.array(<list>)`, deduce the ndarray's shape produced by `<list>` and raise
* an exception if there is anything wrong with `<list>` (e.g., inconsistent dimensions, as in
* `np.array([[1.0, 2.0], [3.0]])`)
|
||||
*
|
||||
* If this function finds no issues with `<list>`, the deduced shape is written to `shape`. The caller has the
|
||||
* responsibility to allocate `[SizeT; ndims]` for `shape`. The caller must also initialize `shape` with `-1`s because
|
||||
* of implementation details.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void set_and_validate_list_shape_helper(SizeT axis, List<SizeT>* list, SizeT ndims, SizeT* shape) {
|
||||
if (shape[axis] == -1) {
|
||||
// Dimension is unspecified. Set it.
|
||||
shape[axis] = list->len;
|
||||
} else {
|
||||
// Dimension is specified. Check.
|
||||
if (shape[axis] != list->len) {
|
||||
// Mismatch, throw an error.
|
||||
// NOTE: NumPy's error message is more complex and needs more PARAMS to display.
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR,
|
||||
"The requested array has an inhomogenous shape "
|
||||
"after {0} dimension(s).",
|
||||
axis, shape[axis], list->len);
|
||||
}
|
||||
}
|
||||
|
||||
if (axis + 1 == ndims) {
|
||||
// `list` has type `list[ItemType]`
|
||||
// Do nothing
|
||||
} else {
|
||||
// `list` has type `list[list[...]]`
|
||||
List<SizeT>** lists = (List<SizeT>**)(list->items);
|
||||
for (SizeT i = 0; i < list->len; i++) {
|
||||
set_and_validate_list_shape_helper<SizeT>(axis + 1, lists[i], ndims, shape);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief See `set_and_validate_list_shape_helper`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void set_and_validate_list_shape(List<SizeT>* list, SizeT ndims, SizeT* shape) {
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
shape[axis] = -1; // Sentinel to say this dimension is unspecified.
|
||||
}
|
||||
set_and_validate_list_shape_helper<SizeT>(0, list, ndims, shape);
|
||||
}
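To make the shape deduction above concrete, here is a hedged standalone sketch of the same idea for the two-dimensional case. It uses `std::vector` instead of the IRRT `List` type and the names `NestedList` and `deduce_shape_2d` are illustrative only:
```
#include <cassert>
#include <vector>

// Walk one nesting level per axis; the first row fixes the dimension,
// later rows must match it, otherwise the list is inhomogeneous.
using NestedList = std::vector<std::vector<double>>;

static bool deduce_shape_2d(const NestedList& list, long shape[2]) {
    shape[0] = static_cast<long>(list.size());
    shape[1] = -1;  // sentinel: axis 1 not seen yet
    for (const auto& row : list) {
        if (shape[1] == -1)
            shape[1] = static_cast<long>(row.size());       // first row fixes the dimension
        else if (shape[1] != static_cast<long>(row.size())) // later rows must match
            return false;  // inhomogeneous, like np.array([[1.0, 2.0], [3.0]])
    }
    return true;
}

int main() {
    long shape[2];
    assert(deduce_shape_2d({{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}}, shape) && shape[0] == 2 && shape[1] == 3);
    assert(!deduce_shape_2d({{1.0, 2.0}, {3.0}}, shape));
    return 0;
}
```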
|
||||
|
||||
/**
|
||||
* @brief In the context of `np.array(<list>)`, copy the contents stored in `list` into `ndarray`.
|
||||
*
|
||||
* `list` is assumed to be "legal". (i.e., no inconsistent dimensions)
|
||||
*
|
||||
* # Notes on `ndarray`
|
||||
* The caller is responsible for allocating space for `ndarray`.
|
||||
* Here is what this function expects from `ndarray` when called:
|
||||
* - `ndarray->data` has to be allocated, contiguous, and may contain uninitialized values.
|
||||
* - `ndarray->itemsize` has to be initialized.
|
||||
* - `ndarray->ndims` has to be initialized.
|
||||
* - `ndarray->shape` has to be initialized.
|
||||
* - `ndarray->strides` is ignored, but note that `ndarray->data` is contiguous.
|
||||
* When this function call ends:
|
||||
* - `ndarray->data` is written with contents from `<list>`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void write_list_to_array_helper(SizeT axis, SizeT* index, List<SizeT>* list, NDArray<SizeT>* ndarray) {
|
||||
debug_assert_eq(SizeT, list->len, ndarray->shape[axis]);
|
||||
if (IRRT_DEBUG_ASSERT_BOOL) {
|
||||
if (!ndarray::basic::is_c_contiguous(ndarray)) {
|
||||
raise_debug_assert(SizeT, "ndarray is not C-contiguous", ndarray->strides[0], ndarray->strides[1],
|
||||
NO_PARAM);
|
||||
}
|
||||
}
|
||||
|
||||
if (axis + 1 == ndarray->ndims) {
|
||||
// `list` has type `list[scalar]`
|
||||
// `ndarray` is contiguous, so we can do this, and this is fast.
|
||||
uint8_t* dst = static_cast<uint8_t*>(ndarray->data) + (ndarray->itemsize * (*index));
|
||||
__builtin_memcpy(dst, list->items, ndarray->itemsize * list->len);
|
||||
*index += list->len;
|
||||
} else {
|
||||
// `list` has type `list[list[...]]`
|
||||
List<SizeT>** lists = (List<SizeT>**)(list->items);
|
||||
|
||||
for (SizeT i = 0; i < list->len; i++) {
|
||||
write_list_to_array_helper<SizeT>(axis + 1, index, lists[i], ndarray);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief See `write_list_to_array_helper`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void write_list_to_array(List<SizeT>* list, NDArray<SizeT>* ndarray) {
|
||||
SizeT index = 0;
|
||||
write_list_to_array_helper<SizeT>((SizeT)0, &index, list, ndarray);
|
||||
}
|
||||
} // namespace ndarray::array
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::array;
|
||||
|
||||
void __nac3_ndarray_array_set_and_validate_list_shape(List<int32_t>* list, int32_t ndims, int32_t* shape) {
|
||||
set_and_validate_list_shape(list, ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_array_set_and_validate_list_shape64(List<int64_t>* list, int64_t ndims, int64_t* shape) {
|
||||
set_and_validate_list_shape(list, ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_array_write_list_to_array(List<int32_t>* list, NDArray<int32_t>* ndarray) {
|
||||
write_list_to_array(list, ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_array_write_list_to_array64(List<int64_t>* list, NDArray<int64_t>* ndarray) {
|
||||
write_list_to_array(list, ndarray);
|
||||
}
|
||||
}
|
@ -1,340 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
|
||||
namespace {
|
||||
namespace ndarray::basic {
|
||||
/**
|
||||
* @brief Assert that `shape` does not contain negative dimensions.
|
||||
*
|
||||
* @param ndims Number of dimensions in `shape`
|
||||
* @param shape The shape to check on
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void assert_shape_no_negative(SizeT ndims, const SizeT* shape) {
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
if (shape[axis] < 0) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR,
|
||||
"negative dimensions are not allowed; axis {0} "
|
||||
"has dimension {1}",
|
||||
axis, shape[axis], NO_PARAM);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Assert that two shapes are the same in the context of writing output to an ndarray.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void assert_output_shape_same(SizeT ndarray_ndims,
|
||||
const SizeT* ndarray_shape,
|
||||
SizeT output_ndims,
|
||||
const SizeT* output_shape) {
|
||||
if (ndarray_ndims != output_ndims) {
|
||||
// There is no corresponding NumPy error message like this.
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "Cannot write output of ndims {0} to an ndarray with ndims {1}",
|
||||
output_ndims, ndarray_ndims, NO_PARAM);
|
||||
}
|
||||
|
||||
for (SizeT axis = 0; axis < ndarray_ndims; axis++) {
|
||||
if (ndarray_shape[axis] != output_shape[axis]) {
|
||||
// There is no corresponding NumPy error message like this.
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR,
|
||||
"Mismatched dimensions on axis {0}, output has "
|
||||
"dimension {1}, but destination ndarray has dimension {2}.",
|
||||
axis, output_shape[axis], ndarray_shape[axis]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the number of elements of an ndarray given its shape.
|
||||
*
|
||||
* @param ndims Number of dimensions in `shape`
|
||||
* @param shape The shape of the ndarray
|
||||
*/
|
||||
template<typename SizeT>
|
||||
SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
|
||||
SizeT size = 1;
|
||||
for (SizeT axis = 0; axis < ndims; axis++)
|
||||
size *= shape[axis];
|
||||
return size;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Compute the array indices of the `nth` (0-based) element of an ndarray given only its shape.
|
||||
*
|
||||
* @param ndims Number of elements in `shape` and `indices`
|
||||
* @param shape The shape of the ndarray
|
||||
* @param indices The returned indices indexing the ndarray with shape `shape`.
|
||||
* @param nth The index of the element of interest.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void set_indices_by_nth(SizeT ndims, const SizeT* shape, SizeT* indices, SizeT nth) {
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
SizeT axis = ndims - i - 1;
|
||||
SizeT dim = shape[axis];
|
||||
|
||||
indices[axis] = nth % dim;
|
||||
nth /= dim;
|
||||
}
|
||||
}
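The index calculation above is the usual flat-to-multi-dimensional conversion for C order. A small standalone check of that behaviour (the `indices_by_nth` helper below is only an illustration, not the IRRT function itself):
```
#include <cassert>
#include <cstdint>

// Peel off the last axis first, exactly like converting a flat C-order
// index into per-axis indices.
static void indices_by_nth(int ndims, const int64_t* shape, int64_t* indices, int64_t nth) {
    for (int i = 0; i < ndims; i++) {
        int axis = ndims - i - 1;
        indices[axis] = nth % shape[axis];
        nth /= shape[axis];
    }
}

int main() {
    const int64_t shape[2] = {2, 3};
    int64_t idx[2];
    indices_by_nth(2, shape, idx, 4);       // 5th element of a 2x3 array
    assert(idx[0] == 1 && idx[1] == 1);     // row-major order: [0,0],[0,1],[0,2],[1,0],[1,1],...
    return 0;
}
```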
|
||||
|
||||
/**
|
||||
* @brief Return the number of elements of an `ndarray`
|
||||
*
|
||||
* This function corresponds to `<an_ndarray>.size`
|
||||
*/
|
||||
template<typename SizeT>
|
||||
SizeT size(const NDArray<SizeT>* ndarray) {
|
||||
return calc_size_from_shape(ndarray->ndims, ndarray->shape);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the number of bytes consumed by the content of an `ndarray`.
|
||||
*
|
||||
* This function corresponds to `<an_ndarray>.nbytes`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
SizeT nbytes(const NDArray<SizeT>* ndarray) {
|
||||
return size(ndarray) * ndarray->itemsize;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get the `len()` of an ndarray, asserting that `ndarray` is a sized object.
*
* This function corresponds to `<an_ndarray>.__len__`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
SizeT len(const NDArray<SizeT>* ndarray) {
|
||||
if (ndarray->ndims != 0) {
|
||||
return ndarray->shape[0];
|
||||
}
|
||||
|
||||
// numpy prohibits `__len__` on unsized objects
|
||||
raise_exception(SizeT, EXN_TYPE_ERROR, "len() of unsized object", NO_PARAM, NO_PARAM, NO_PARAM);
|
||||
__builtin_unreachable();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return a boolean indicating if `ndarray` is (C-)contiguous.
|
||||
*
|
||||
* You may want to see ndarray's rules for C-contiguity:
|
||||
* https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45
|
||||
*/
|
||||
template<typename SizeT>
|
||||
bool is_c_contiguous(const NDArray<SizeT>* ndarray) {
|
||||
// References:
|
||||
// - tinynumpy's implementation:
|
||||
// https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L102
|
||||
// - ndarray's flags["C_CONTIGUOUS"]:
|
||||
// https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flags.html#numpy.ndarray.flags
|
||||
// - ndarray's rules for C-contiguity:
|
||||
// https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45
|
||||
|
||||
// From
|
||||
// https://github.com/numpy/numpy/blob/df256d0d2f3bc6833699529824781c58f9c6e697/numpy/core/src/multiarray/flagsobject.c#L95C1-L99C45:
|
||||
//
|
||||
// The traditional rule is that for an array to be flagged as C contiguous,
|
||||
// the following must hold:
|
||||
//
|
||||
// strides[-1] == itemsize
|
||||
// strides[i] == shape[i+1] * strides[i + 1]
|
||||
// [...]
|
||||
// According to these rules, a 0- or 1-dimensional array is either both
|
||||
// C- and F-contiguous, or neither; and an array with 2+ dimensions
|
||||
// can be C- or F- contiguous, or neither, but not both. Though there
|
||||
// there are exceptions for arrays with zero or one item, in the first
|
||||
// case the check is relaxed up to and including the first dimension
|
||||
// with shape[i] == 0. In the second case `strides == itemsize` will
|
||||
// can be true for all dimensions and both flags are set.
|
||||
|
||||
if (ndarray->ndims == 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (ndarray->strides[ndarray->ndims - 1] != ndarray->itemsize) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (SizeT i = 1; i < ndarray->ndims; i++) {
|
||||
SizeT axis_i = ndarray->ndims - i - 1;
|
||||
if (ndarray->strides[axis_i] != ndarray->shape[axis_i + 1] * ndarray->strides[axis_i + 1]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
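To see the two contiguity rules in action, here is a hedged standalone sketch that applies them to a contiguous 2x3 array of doubles and to a transposed view of a 3x2 array. Like the function above, it skips the zero/one-item relaxations mentioned in the quoted NumPy comment; the `c_contiguous` name is illustrative:
```
#include <cassert>
#include <cstdint>

// strides[-1] == itemsize and strides[i] == shape[i+1] * strides[i+1].
static bool c_contiguous(int ndims, const int64_t* shape, const int64_t* strides, int64_t itemsize) {
    if (ndims == 0)
        return true;
    if (strides[ndims - 1] != itemsize)
        return false;
    for (int i = ndims - 2; i >= 0; i--)
        if (strides[i] != shape[i + 1] * strides[i + 1])
            return false;
    return true;
}

int main() {
    const int64_t shape[2] = {2, 3};
    const int64_t contiguous[2] = {24, 8};  // freshly allocated 2x3 array of doubles
    const int64_t transposed[2] = {8, 16};  // transpose of a contiguous 3x2 array of doubles
    assert(c_contiguous(2, shape, contiguous, 8));
    assert(!c_contiguous(2, shape, transposed, 8));
    return 0;
}
```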
|
||||
|
||||
/**
|
||||
* @brief Return the pointer to the element indexed by `indices` along the ndarray's axes.
|
||||
*
|
||||
* This function does no bound check.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void* get_pelement_by_indices(const NDArray<SizeT>* ndarray, const SizeT* indices) {
|
||||
void* element = ndarray->data;
|
||||
for (SizeT dim_i = 0; dim_i < ndarray->ndims; dim_i++)
|
||||
element = static_cast<uint8_t*>(element) + indices[dim_i] * ndarray->strides[dim_i];
|
||||
return element;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Return the pointer to the nth (0-based) element of `ndarray` in flattened view.
|
||||
*
|
||||
* This function does no bound check.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void* get_nth_pelement(const NDArray<SizeT>* ndarray, SizeT nth) {
|
||||
void* element = ndarray->data;
|
||||
for (SizeT i = 0; i < ndarray->ndims; i++) {
|
||||
SizeT axis = ndarray->ndims - i - 1;
|
||||
SizeT dim = ndarray->shape[axis];
|
||||
element = static_cast<uint8_t*>(element) + ndarray->strides[axis] * (nth % dim);
|
||||
nth /= dim;
|
||||
}
|
||||
return element;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Update the strides of an ndarray given an ndarray `shape` to be contiguous.
|
||||
*
|
||||
* You might want to read https://ajcr.net/stride-guide-part-1/.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void set_strides_by_shape(NDArray<SizeT>* ndarray) {
|
||||
SizeT stride_product = 1;
|
||||
for (SizeT i = 0; i < ndarray->ndims; i++) {
|
||||
SizeT axis = ndarray->ndims - i - 1;
|
||||
ndarray->strides[axis] = stride_product * ndarray->itemsize;
|
||||
stride_product *= ndarray->shape[axis];
|
||||
}
|
||||
}
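A quick standalone check of the stride computation above for a 2x3x4 array of doubles (the `strides_from_shape` helper is only an illustration of the same loop):
```
#include <cassert>
#include <cstdint>

// The last axis moves by one item; each earlier axis moves by the full
// extent of everything after it.
static void strides_from_shape(int ndims, const int64_t* shape, int64_t* strides, int64_t itemsize) {
    int64_t product = 1;
    for (int i = ndims - 1; i >= 0; i--) {
        strides[i] = product * itemsize;
        product *= shape[i];
    }
}

int main() {
    const int64_t shape[3] = {2, 3, 4};
    int64_t strides[3];
    strides_from_shape(3, shape, strides, 8);  // itemsize of a double
    assert(strides[0] == 96 && strides[1] == 32 && strides[2] == 8);
    return 0;
}
```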
|
||||
|
||||
/**
|
||||
* @brief Set an element in `ndarray`.
|
||||
*
|
||||
* @param pelement Pointer to the element in `ndarray` to be set.
|
||||
* @param pvalue Pointer to the value `pelement` will be set to.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void set_pelement_value(NDArray<SizeT>* ndarray, void* pelement, const void* pvalue) {
|
||||
__builtin_memcpy(pelement, pvalue, ndarray->itemsize);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Copy data from one ndarray to another of the exact same size and itemsize.
|
||||
*
|
||||
* Both ndarrays will be viewed in their flatten views when copying the elements.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void copy_data(const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
// TODO: Make this faster with memcpy when we see a contiguous segment.
|
||||
// TODO: Handle overlapping.
|
||||
|
||||
debug_assert_eq(SizeT, src_ndarray->itemsize, dst_ndarray->itemsize);
|
||||
|
||||
for (SizeT i = 0; i < size(src_ndarray); i++) {
|
||||
auto src_element = ndarray::basic::get_nth_pelement(src_ndarray, i);
|
||||
auto dst_element = ndarray::basic::get_nth_pelement(dst_ndarray, i);
|
||||
ndarray::basic::set_pelement_value(dst_ndarray, dst_element, src_element);
|
||||
}
|
||||
}
|
||||
} // namespace ndarray::basic
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::basic;
|
||||
|
||||
void __nac3_ndarray_util_assert_shape_no_negative(int32_t ndims, int32_t* shape) {
|
||||
assert_shape_no_negative(ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_util_assert_shape_no_negative64(int64_t ndims, int64_t* shape) {
|
||||
assert_shape_no_negative(ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_util_assert_output_shape_same(int32_t ndarray_ndims,
|
||||
const int32_t* ndarray_shape,
|
||||
int32_t output_ndims,
|
||||
const int32_t* output_shape) {
|
||||
assert_output_shape_same(ndarray_ndims, ndarray_shape, output_ndims, output_shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_util_assert_output_shape_same64(int64_t ndarray_ndims,
|
||||
const int64_t* ndarray_shape,
|
||||
int64_t output_ndims,
|
||||
const int64_t* output_shape) {
|
||||
assert_output_shape_same(ndarray_ndims, ndarray_shape, output_ndims, output_shape);
|
||||
}
|
||||
|
||||
uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
|
||||
return size(ndarray);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
|
||||
return size(ndarray);
|
||||
}
|
||||
|
||||
uint32_t __nac3_ndarray_nbytes(NDArray<int32_t>* ndarray) {
|
||||
return nbytes(ndarray);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_nbytes64(NDArray<int64_t>* ndarray) {
|
||||
return nbytes(ndarray);
|
||||
}
|
||||
|
||||
int32_t __nac3_ndarray_len(NDArray<int32_t>* ndarray) {
|
||||
return len(ndarray);
|
||||
}
|
||||
|
||||
int64_t __nac3_ndarray_len64(NDArray<int64_t>* ndarray) {
|
||||
return len(ndarray);
|
||||
}
|
||||
|
||||
bool __nac3_ndarray_is_c_contiguous(NDArray<int32_t>* ndarray) {
|
||||
return is_c_contiguous(ndarray);
|
||||
}
|
||||
|
||||
bool __nac3_ndarray_is_c_contiguous64(NDArray<int64_t>* ndarray) {
|
||||
return is_c_contiguous(ndarray);
|
||||
}
|
||||
|
||||
void* __nac3_ndarray_get_nth_pelement(const NDArray<int32_t>* ndarray, int32_t nth) {
|
||||
return get_nth_pelement(ndarray, nth);
|
||||
}
|
||||
|
||||
void* __nac3_ndarray_get_nth_pelement64(const NDArray<int64_t>* ndarray, int64_t nth) {
|
||||
return get_nth_pelement(ndarray, nth);
|
||||
}
|
||||
|
||||
void* __nac3_ndarray_get_pelement_by_indices(const NDArray<int32_t>* ndarray, int32_t* indices) {
|
||||
return get_pelement_by_indices(ndarray, indices);
|
||||
}
|
||||
|
||||
void* __nac3_ndarray_get_pelement_by_indices64(const NDArray<int64_t>* ndarray, int64_t* indices) {
|
||||
return get_pelement_by_indices(ndarray, indices);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_set_strides_by_shape(NDArray<int32_t>* ndarray) {
|
||||
set_strides_by_shape(ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_set_strides_by_shape64(NDArray<int64_t>* ndarray) {
|
||||
set_strides_by_shape(ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_copy_data(NDArray<int32_t>* src_ndarray, NDArray<int32_t>* dst_ndarray) {
|
||||
copy_data(src_ndarray, dst_ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_copy_data64(NDArray<int64_t>* src_ndarray, NDArray<int64_t>* dst_ndarray) {
|
||||
copy_data(src_ndarray, dst_ndarray);
|
||||
}
|
||||
}
|
@ -1,165 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
#include "irrt/slice.hpp"
|
||||
|
||||
namespace {
|
||||
template<typename SizeT>
|
||||
struct ShapeEntry {
|
||||
SizeT ndims;
|
||||
SizeT* shape;
|
||||
};
|
||||
} // namespace
|
||||
|
||||
namespace {
|
||||
namespace ndarray::broadcast {
|
||||
/**
|
||||
* @brief Return true if `src_shape` can broadcast to `dst_shape`.
|
||||
*
|
||||
* See https://numpy.org/doc/stable/user/basics.broadcasting.html
|
||||
*/
|
||||
template<typename SizeT>
|
||||
bool can_broadcast_shape_to(SizeT target_ndims, const SizeT* target_shape, SizeT src_ndims, const SizeT* src_shape) {
|
||||
if (src_ndims > target_ndims) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (SizeT i = 0; i < src_ndims; i++) {
|
||||
SizeT target_dim = target_shape[target_ndims - i - 1];
|
||||
SizeT src_dim = src_shape[src_ndims - i - 1];
|
||||
if (!(src_dim == 1 || target_dim == src_dim)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Performs `np.broadcast_shapes(<shapes>)`
|
||||
*
|
||||
* @param num_shapes Number of entries in `shapes`
|
||||
* @param shapes The list of shape to do `np.broadcast_shapes` on.
|
||||
* @param dst_ndims The length of `dst_shape`.
* `dst_ndims` must be `max([shape.ndims for shape in shapes])`; the caller has to calculate and provide it,
* since they should already know it in order to allocate `dst_shape` in the first place.
* @param dst_shape The resulting shape. Must be pre-allocated by the caller. This function calculates the result
* of `np.broadcast_shapes` and writes it here.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void broadcast_shapes(SizeT num_shapes, const ShapeEntry<SizeT>* shapes, SizeT dst_ndims, SizeT* dst_shape) {
|
||||
for (SizeT dst_axis = 0; dst_axis < dst_ndims; dst_axis++) {
|
||||
dst_shape[dst_axis] = 1;
|
||||
}
|
||||
|
||||
#ifdef IRRT_DEBUG_ASSERT
|
||||
SizeT max_ndims_found = 0;
|
||||
#endif
|
||||
|
||||
for (SizeT i = 0; i < num_shapes; i++) {
|
||||
ShapeEntry<SizeT> entry = shapes[i];
|
||||
|
||||
// Check pre-condition: `dst_ndims` must be `max([shape.ndims for shape in shapes])`
|
||||
debug_assert(SizeT, entry.ndims <= dst_ndims);
|
||||
|
||||
#ifdef IRRT_DEBUG_ASSERT
|
||||
max_ndims_found = max(max_ndims_found, entry.ndims);
|
||||
#endif
|
||||
|
||||
for (SizeT j = 0; j < entry.ndims; j++) {
|
||||
SizeT entry_axis = entry.ndims - j - 1;
|
||||
SizeT dst_axis = dst_ndims - j - 1;
|
||||
|
||||
SizeT entry_dim = entry.shape[entry_axis];
|
||||
SizeT dst_dim = dst_shape[dst_axis];
|
||||
|
||||
if (dst_dim == 1) {
|
||||
dst_shape[dst_axis] = entry_dim;
|
||||
} else if (entry_dim == 1 || entry_dim == dst_dim) {
|
||||
// Do nothing
|
||||
} else {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR,
|
||||
"shape mismatch: objects cannot be broadcast "
|
||||
"to a single shape.",
|
||||
NO_PARAM, NO_PARAM, NO_PARAM);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef IRRT_DEBUG_ASSERT
|
||||
// Check pre-condition: `dst_ndims` must be `max([shape.ndims for shape in shapes])`
|
||||
debug_assert_eq(SizeT, max_ndims_found, dst_ndims);
|
||||
#endif
|
||||
}
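The right-aligned merging above is easiest to see on two shapes. A hedged standalone sketch of the same rule (returning an empty vector on a mismatch instead of raising `EXN_VALUE_ERROR`; the `broadcast_two` name and use of `std::vector` are illustrative only):
```
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<int64_t> broadcast_two(const std::vector<int64_t>& a, const std::vector<int64_t>& b) {
    size_t ndims = a.size() > b.size() ? a.size() : b.size();
    std::vector<int64_t> out(ndims, 1);
    for (const auto* shape : {&a, &b}) {
        for (size_t j = 0; j < shape->size(); j++) {
            int64_t dim = (*shape)[shape->size() - j - 1];
            int64_t& dst = out[ndims - j - 1];
            if (dst == 1)
                dst = dim;                     // take the non-broadcast dimension
            else if (dim != 1 && dim != dst)
                return {};                     // shape mismatch
        }
    }
    return out;
}

int main() {
    assert(broadcast_two({1, 97, 4}, {99, 1, 4}) == (std::vector<int64_t>{99, 97, 4}));
    assert(broadcast_two({3, 4}, {5, 6}).empty());
    return 0;
}
```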
|
||||
|
||||
/**
|
||||
* @brief Perform `np.broadcast_to(<ndarray>, <target_shape>)` and appropriate assertions.
|
||||
*
|
||||
* This function attempts to broadcast `src_ndarray` to a new shape defined by `dst_ndarray.shape`,
|
||||
* and return the result by modifying `dst_ndarray`.
|
||||
*
|
||||
* # Notes on `dst_ndarray`
|
||||
* The caller is responsible for allocating space for the resulting ndarray.
|
||||
* Here is what this function expects from `dst_ndarray` when called:
|
||||
* - `dst_ndarray->data` does not have to be initialized.
|
||||
* - `dst_ndarray->itemsize` does not have to be initialized.
|
||||
* - `dst_ndarray->ndims` must be initialized, determining the length of `dst_ndarray->shape`
|
||||
* - `dst_ndarray->shape` must be allocated, and must contain the desired target broadcast shape.
|
||||
* - `dst_ndarray->strides` must be allocated, though it can contain uninitialized values.
|
||||
* When this function call ends:
|
||||
* - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`)
|
||||
* - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize`
|
||||
* - `dst_ndarray->ndims` is unchanged.
|
||||
* - `dst_ndarray->shape` is unchanged.
|
||||
* - `dst_ndarray->strides` is updated accordingly by how ndarray broadcast_to works.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void broadcast_to(const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
if (!ndarray::broadcast::can_broadcast_shape_to(dst_ndarray->ndims, dst_ndarray->shape, src_ndarray->ndims,
|
||||
src_ndarray->shape)) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "operands could not be broadcast together", NO_PARAM, NO_PARAM,
|
||||
NO_PARAM);
|
||||
}
|
||||
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
for (SizeT i = 0; i < dst_ndarray->ndims; i++) {
|
||||
SizeT src_axis = src_ndarray->ndims - i - 1;
|
||||
SizeT dst_axis = dst_ndarray->ndims - i - 1;
|
||||
if (src_axis < 0 || (src_ndarray->shape[src_axis] == 1 && dst_ndarray->shape[dst_axis] != 1)) {
|
||||
// Broadcast this axis: stride 0 makes every step along it stay in place
|
||||
dst_ndarray->strides[dst_axis] = 0;
|
||||
} else {
|
||||
dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis];
|
||||
}
|
||||
}
|
||||
}
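The key trick above is that a stretched axis gets stride 0, so the view revisits the same bytes. A hedged standalone sketch of just that stride rule, applied to broadcasting a 3x1 array of doubles to 3x4 (the `broadcast_strides` helper is illustrative only):
```
#include <cassert>
#include <cstdint>

// A source axis of extent 1 that is stretched gets stride 0, everything else
// keeps its stride; missing leading axes also get stride 0.
static void broadcast_strides(int src_ndims, const int64_t* src_shape, const int64_t* src_strides,
                              int dst_ndims, const int64_t* dst_shape, int64_t* dst_strides) {
    for (int i = 0; i < dst_ndims; i++) {
        int src_axis = src_ndims - i - 1;
        int dst_axis = dst_ndims - i - 1;
        if (src_axis < 0 || (src_shape[src_axis] == 1 && dst_shape[dst_axis] != 1))
            dst_strides[dst_axis] = 0;          // every step along this axis revisits the same data
        else
            dst_strides[dst_axis] = src_strides[src_axis];
    }
}

int main() {
    const int64_t src_shape[2] = {3, 1}, src_strides[2] = {8, 8};  // contiguous 3x1 of doubles
    const int64_t dst_shape[2] = {3, 4};
    int64_t dst_strides[2];
    broadcast_strides(2, src_shape, src_strides, 2, dst_shape, dst_strides);
    assert(dst_strides[0] == 8 && dst_strides[1] == 0);
    return 0;
}
```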
|
||||
} // namespace ndarray::broadcast
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::broadcast;
|
||||
|
||||
void __nac3_ndarray_broadcast_to(NDArray<int32_t>* src_ndarray, NDArray<int32_t>* dst_ndarray) {
|
||||
broadcast_to(src_ndarray, dst_ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_broadcast_to64(NDArray<int64_t>* src_ndarray, NDArray<int64_t>* dst_ndarray) {
|
||||
broadcast_to(src_ndarray, dst_ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_broadcast_shapes(int32_t num_shapes,
|
||||
const ShapeEntry<int32_t>* shapes,
|
||||
int32_t dst_ndims,
|
||||
int32_t* dst_shape) {
|
||||
broadcast_shapes(num_shapes, shapes, dst_ndims, dst_shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_broadcast_shapes64(int64_t num_shapes,
|
||||
const ShapeEntry<int64_t>* shapes,
|
||||
int64_t dst_ndims,
|
||||
int64_t* dst_shape) {
|
||||
broadcast_shapes(num_shapes, shapes, dst_ndims, dst_shape);
|
||||
}
|
||||
}
|
@ -1,51 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
namespace {
|
||||
/**
|
||||
* @brief The NDArray object
|
||||
*
|
||||
* Official numpy implementation:
|
||||
* https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst#pyarrayinterface
|
||||
*
|
||||
* Note that this implementation is based on `PyArrayInterface` rather of `PyArrayObject`. The
|
||||
* difference between `PyArrayInterface` and `PyArrayObject` (relevant to our implementation) is
|
||||
* that `PyArrayInterface` *has* `itemsize` and uses `void*` for its `data`, whereas `PyArrayObject`
|
||||
* does not require `itemsize` (probably using `strides[-1]` instead) and uses `char*` for its
|
||||
* `data`. There are also minor differences in the struct layout.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
struct NDArray {
|
||||
/**
|
||||
* @brief The number of bytes of a single element in `data`.
|
||||
*/
|
||||
SizeT itemsize;
|
||||
|
||||
/**
|
||||
* @brief The number of dimensions of this shape.
|
||||
*/
|
||||
SizeT ndims;
|
||||
|
||||
/**
|
||||
* @brief The NDArray shape, with length equal to `ndims`.
|
||||
*
|
||||
* Note that it may contain 0.
|
||||
*/
|
||||
SizeT* shape;
|
||||
|
||||
/**
|
||||
* @brief Array strides, with length equal to `ndims`
|
||||
*
|
||||
* The stride values are in units of bytes, not number of elements.
|
||||
*
|
||||
* Note that `strides` can have negative values or contain 0.
|
||||
*/
|
||||
SizeT* strides;
|
||||
|
||||
/**
|
||||
* @brief The underlying data this `ndarray` is pointing to.
|
||||
*/
|
||||
void* data;
|
||||
};
|
||||
} // namespace
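To make the field layout above concrete, here is a hedged standalone sketch that fills a minimal mirror of these fields for a C-contiguous 2x3 array of doubles and uses the byte strides to locate one element. `ExampleNDArray` is a local stand-in, not the IRRT type:
```
#include <cassert>
#include <cstdint>

// Minimal mirror of the NDArray fields above, with SizeT fixed to int64_t.
struct ExampleNDArray {
    int64_t itemsize;
    int64_t ndims;
    int64_t* shape;
    int64_t* strides;
    void* data;
};

int main() {
    double buffer[6] = {0.0};
    int64_t shape[2] = {2, 3};
    int64_t strides[2] = {3 * 8, 8};  // bytes, not element counts
    ExampleNDArray arr{/*itemsize=*/8, /*ndims=*/2, shape, strides, buffer};
    // Element [1, 2] lives 1*24 + 2*8 = 40 bytes past the start of the buffer.
    auto* p = static_cast<uint8_t*>(arr.data) + 1 * arr.strides[0] + 2 * arr.strides[1];
    assert(p == reinterpret_cast<uint8_t*>(&buffer[5]));
    return 0;
}
```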
|
@ -1,219 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/basic.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
#include "irrt/range.hpp"
|
||||
#include "irrt/slice.hpp"
|
||||
|
||||
namespace {
|
||||
typedef uint8_t NDIndexType;
|
||||
|
||||
/**
|
||||
* @brief A single element index
|
||||
*
|
||||
* `data` points to a `int32_t`.
|
||||
*/
|
||||
const NDIndexType ND_INDEX_TYPE_SINGLE_ELEMENT = 0;
|
||||
|
||||
/**
|
||||
* @brief A slice index
|
||||
*
|
||||
* `data` points to a `Slice<int32_t>`.
|
||||
*/
|
||||
const NDIndexType ND_INDEX_TYPE_SLICE = 1;
|
||||
|
||||
/**
|
||||
* @brief `np.newaxis` / `None`
|
||||
*
|
||||
* `data` is unused.
|
||||
*/
|
||||
const NDIndexType ND_INDEX_TYPE_NEWAXIS = 2;
|
||||
|
||||
/**
|
||||
* @brief `Ellipsis` / `...`
|
||||
*
|
||||
* `data` is unused.
|
||||
*/
|
||||
const NDIndexType ND_INDEX_TYPE_ELLIPSIS = 3;
|
||||
|
||||
/**
|
||||
* @brief An index used in ndarray indexing
|
||||
*
|
||||
* That is:
|
||||
* ```
|
||||
* my_ndarray[::-1, 3, ..., np.newaxis]
|
||||
* ^^^^ ^ ^^^ ^^^^^^^^^^ each of these is represented by an NDIndex.
|
||||
* ```
|
||||
*/
|
||||
struct NDIndex {
|
||||
/**
|
||||
* @brief Enum tag to specify the type of index.
|
||||
*
|
||||
* Please see the comment of each enum constant.
|
||||
*/
|
||||
NDIndexType type;
|
||||
|
||||
/**
|
||||
* @brief The accompanying data associated with `type`.
|
||||
*
|
||||
* Please see the comment of each enum constant.
|
||||
*/
|
||||
uint8_t* data;
|
||||
};
|
||||
} // namespace
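One useful consequence of these index kinds is how they change the number of output dimensions: a single-element index removes an axis, `np.newaxis` adds one, and slices and the ellipsis keep the count unchanged. A hedged standalone sketch of that bookkeeping, with tag values mirroring the `ND_INDEX_TYPE_*` constants (the helper name is illustrative only):
```
#include <cassert>
#include <cstdint>

static int32_t expected_ndims_after_indexing(int32_t src_ndims, int32_t num_indices, const uint8_t* types) {
    int32_t ndims = src_ndims;
    for (int32_t i = 0; i < num_indices; i++) {
        if (types[i] == 0)      // single element: removes an axis
            ndims--;
        else if (types[i] == 2) // np.newaxis: adds an axis
            ndims++;
    }
    return ndims;
}

int main() {
    // my_ndarray[::-1, 3, ..., np.newaxis] on a 3-dimensional array:
    const uint8_t types[4] = {1 /*slice*/, 0 /*single*/, 3 /*ellipsis*/, 2 /*newaxis*/};
    assert(expected_ndims_after_indexing(3, 4, types) == 3);
    return 0;
}
```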
|
||||
|
||||
namespace {
|
||||
namespace ndarray::indexing {
|
||||
/**
|
||||
* @brief Perform ndarray "basic indexing" (https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing)
|
||||
*
|
||||
* This function is very similar to performing `dst_ndarray = src_ndarray[indices]` in Python.
|
||||
*
|
||||
* This function also does proper assertions on `indices` to check for out of bounds access and more.
|
||||
*
|
||||
* # Notes on `dst_ndarray`
|
||||
* The caller is responsible for allocating space for the resulting ndarray.
|
||||
* Here is what this function expects from `dst_ndarray` when called:
|
||||
* - `dst_ndarray->data` does not have to be initialized.
|
||||
* - `dst_ndarray->itemsize` does not have to be initialized.
|
||||
* - `dst_ndarray->ndims` must be initialized, and it must be equal to the expected `ndims` of the `dst_ndarray` after
|
||||
* indexing `src_ndarray` with `indices`.
|
||||
* - `dst_ndarray->shape` must be allocated, though it can contain uninitialized values.
* - `dst_ndarray->strides` must be allocated, though it can contain uninitialized values.
|
||||
* When this function call ends:
|
||||
* - `dst_ndarray->data` is set to `src_ndarray->data`.
|
||||
* - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize`.
|
||||
* - `dst_ndarray->ndims` is unchanged.
|
||||
* - `dst_ndarray->shape` is updated according to how `src_ndarray` is indexed.
|
||||
* - `dst_ndarray->strides` is updated accordingly by how ndarray indexing works.
|
||||
*
|
||||
* @param indices indices to index `src_ndarray`, ordered in the same way you would write them in Python.
|
||||
* @param src_ndarray The NDArray to be indexed.
|
||||
* @param dst_ndarray The resulting NDArray after indexing. Further details in the comments above,
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void index(SizeT num_indices, const NDIndex* indices, const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
// Validate `indices`.
|
||||
|
||||
// Expected value of `dst_ndarray->ndims`.
|
||||
SizeT expected_dst_ndims = src_ndarray->ndims;
|
||||
// To check for "too many indices for array: array is ?-dimensional, but ? were indexed"
|
||||
SizeT num_indexed = 0;
|
||||
// There may be ellipsis `...` in `indices`. There can only be 0 or 1 ellipsis.
|
||||
SizeT num_ellipsis = 0;
|
||||
|
||||
for (SizeT i = 0; i < num_indices; i++) {
|
||||
if (indices[i].type == ND_INDEX_TYPE_SINGLE_ELEMENT) {
|
||||
expected_dst_ndims--;
|
||||
num_indexed++;
|
||||
} else if (indices[i].type == ND_INDEX_TYPE_SLICE) {
|
||||
num_indexed++;
|
||||
} else if (indices[i].type == ND_INDEX_TYPE_NEWAXIS) {
|
||||
expected_dst_ndims++;
|
||||
} else if (indices[i].type == ND_INDEX_TYPE_ELLIPSIS) {
|
||||
num_ellipsis++;
|
||||
if (num_ellipsis > 1) {
|
||||
raise_exception(SizeT, EXN_INDEX_ERROR, "an index can only have a single ellipsis ('...')", NO_PARAM,
|
||||
NO_PARAM, NO_PARAM);
|
||||
}
|
||||
} else {
|
||||
__builtin_unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
debug_assert_eq(SizeT, expected_dst_ndims, dst_ndarray->ndims);
|
||||
|
||||
if (src_ndarray->ndims - num_indexed < 0) {
|
||||
raise_exception(SizeT, EXN_INDEX_ERROR,
|
||||
"too many indices for array: array is {0}-dimensional, "
|
||||
"but {1} were indexed",
|
||||
src_ndarray->ndims, num_indices, NO_PARAM);
|
||||
}
|
||||
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
// Reference code:
|
||||
// https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652
|
||||
SizeT src_axis = 0;
|
||||
SizeT dst_axis = 0;
|
||||
|
||||
for (int32_t i = 0; i < num_indices; i++) {
|
||||
const NDIndex* index = &indices[i];
|
||||
if (index->type == ND_INDEX_TYPE_SINGLE_ELEMENT) {
|
||||
SizeT input = (SizeT) * ((int32_t*)index->data);
|
||||
|
||||
SizeT k = slice::resolve_index_in_length(src_ndarray->shape[src_axis], input);
|
||||
if (k == -1) {
|
||||
raise_exception(SizeT, EXN_INDEX_ERROR,
|
||||
"index {0} is out of bounds for axis {1} "
|
||||
"with size {2}",
|
||||
input, src_axis, src_ndarray->shape[src_axis]);
|
||||
}
|
||||
|
||||
dst_ndarray->data = static_cast<uint8_t*>(dst_ndarray->data) + k * src_ndarray->strides[src_axis];
|
||||
|
||||
src_axis++;
|
||||
} else if (index->type == ND_INDEX_TYPE_SLICE) {
|
||||
Slice<int32_t>* slice = (Slice<int32_t>*)index->data;
|
||||
|
||||
Range<int32_t> range = slice->indices_checked<SizeT>(src_ndarray->shape[src_axis]);
|
||||
|
||||
dst_ndarray->data =
|
||||
static_cast<uint8_t*>(dst_ndarray->data) + (SizeT)range.start * src_ndarray->strides[src_axis];
|
||||
dst_ndarray->strides[dst_axis] = ((SizeT)range.step) * src_ndarray->strides[src_axis];
|
||||
dst_ndarray->shape[dst_axis] = (SizeT)range.len<SizeT>();
|
||||
|
||||
dst_axis++;
|
||||
src_axis++;
|
||||
} else if (index->type == ND_INDEX_TYPE_NEWAXIS) {
|
||||
dst_ndarray->strides[dst_axis] = 0;
|
||||
dst_ndarray->shape[dst_axis] = 1;
|
||||
|
||||
dst_axis++;
|
||||
} else if (index->type == ND_INDEX_TYPE_ELLIPSIS) {
|
||||
// The number of ':' entries this '...' implies.
|
||||
SizeT ellipsis_size = src_ndarray->ndims - num_indexed;
|
||||
|
||||
for (SizeT j = 0; j < ellipsis_size; j++) {
|
||||
dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis];
|
||||
dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis];
|
||||
|
||||
dst_axis++;
|
||||
src_axis++;
|
||||
}
|
||||
} else {
|
||||
__builtin_unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
for (; dst_axis < dst_ndarray->ndims; dst_axis++, src_axis++) {
|
||||
dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis];
|
||||
dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis];
|
||||
}
|
||||
|
||||
debug_assert_eq(SizeT, src_ndarray->ndims, src_axis);
|
||||
debug_assert_eq(SizeT, dst_ndarray->ndims, dst_axis);
|
||||
}
|
||||
} // namespace ndarray::indexing
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::indexing;
|
||||
|
||||
void __nac3_ndarray_index(int32_t num_indices,
|
||||
NDIndex* indices,
|
||||
NDArray<int32_t>* src_ndarray,
|
||||
NDArray<int32_t>* dst_ndarray) {
|
||||
index(num_indices, indices, src_ndarray, dst_ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_index64(int64_t num_indices,
|
||||
NDIndex* indices,
|
||||
NDArray<int64_t>* src_ndarray,
|
||||
NDArray<int64_t>* dst_ndarray) {
|
||||
index(num_indices, indices, src_ndarray, dst_ndarray);
|
||||
}
|
||||
}
|
@ -1,146 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
|
||||
namespace {
|
||||
/**
|
||||
* @brief Helper struct to enumerate through an ndarray *efficiently*.
|
||||
*
|
||||
* Example usage (in pseudo-code):
|
||||
* ```
|
||||
* // Suppose my_ndarray has been initialized, with shape [2, 3] and dtype `double`
|
||||
* NDIter nditer;
|
||||
* nditer.initialize(my_ndarray);
|
||||
* while (nditer.has_element()) {
|
||||
* // This body is run 6 (= my_ndarray.size) times.
|
||||
*
|
||||
* // [0, 0] -> [0, 1] -> [0, 2] -> [1, 0] -> [1, 1] -> [1, 2] -> end
|
||||
* print(nditer.indices);
|
||||
*
|
||||
* // 0 -> 1 -> 2 -> 3 -> 4 -> 5
|
||||
* print(nditer.nth);
|
||||
*
|
||||
* // <1st element> -> <2nd element> -> ... -> <6th element> -> end
|
||||
* print(*((double *) nditer.element))
|
||||
*
|
||||
* nditer.next(); // Go to next element.
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* Interesting cases:
|
||||
* - If `my_ndarray.ndims` == 0, there is one iteration.
|
||||
* - If `my_ndarray.shape` contains zeroes, there are no iterations.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
struct NDIter {
|
||||
// Information about the ndarray being iterated over.
|
||||
SizeT ndims;
|
||||
SizeT* shape;
|
||||
SizeT* strides;
|
||||
|
||||
/**
|
||||
* @brief The current indices.
|
||||
*
|
||||
* Must be allocated by the caller.
|
||||
*/
|
||||
SizeT* indices;
|
||||
|
||||
/**
|
||||
* @brief The nth (0-based) index of the current indices.
|
||||
*
|
||||
* Initially this is 0.
|
||||
*/
|
||||
SizeT nth;
|
||||
|
||||
/**
|
||||
* @brief Pointer to the current element.
|
||||
*
|
||||
* Initially this points to first element of the ndarray.
|
||||
*/
|
||||
void* element;
|
||||
|
||||
/**
|
||||
* @brief Cache for the product of shape.
|
||||
*
|
||||
* Could be 0 if `shape` has 0s in it.
|
||||
*/
|
||||
SizeT size;
|
||||
|
||||
void initialize(SizeT ndims, SizeT* shape, SizeT* strides, void* element, SizeT* indices) {
|
||||
this->ndims = ndims;
|
||||
this->shape = shape;
|
||||
this->strides = strides;
|
||||
|
||||
this->indices = indices;
|
||||
this->element = element;
|
||||
|
||||
// Compute size
|
||||
this->size = 1;
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
this->size *= shape[i];
|
||||
}
|
||||
|
||||
// `indices` starts on all 0s.
|
||||
for (SizeT axis = 0; axis < ndims; axis++)
|
||||
indices[axis] = 0;
|
||||
nth = 0;
|
||||
}
|
||||
|
||||
void initialize_by_ndarray(NDArray<SizeT>* ndarray, SizeT* indices) {
|
||||
// NOTE: ndarray->data points to the first element, and `NDIter`'s `element` should
// also start at the first element.
|
||||
this->initialize(ndarray->ndims, ndarray->shape, ndarray->strides, ndarray->data, indices);
|
||||
}
|
||||
|
||||
// Is the current iteration valid?
|
||||
// If true, then `element`, `indices` and `nth` contain details about the current element.
|
||||
bool has_element() { return nth < size; }
|
||||
|
||||
// Go to the next element.
|
||||
void next() {
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
SizeT axis = ndims - i - 1;
|
||||
indices[axis]++;
|
||||
if (indices[axis] >= shape[axis]) {
|
||||
indices[axis] = 0;
|
||||
|
||||
// TODO: There is something called backstrides to speed up iteration.
|
||||
// See https://ajcr.net/stride-guide-part-1/, and
|
||||
// https://docs.scipy.org/doc/numpy-1.13.0/reference/c-api.types-and-structures.html#c.PyArrayIterObject.PyArrayIterObject.backstrides.
|
||||
element = static_cast<void*>(reinterpret_cast<uint8_t*>(element) - strides[axis] * (shape[axis] - 1));
|
||||
} else {
|
||||
element = static_cast<void*>(reinterpret_cast<uint8_t*>(element) + strides[axis]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
nth++;
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
void __nac3_nditer_initialize(NDIter<int32_t>* iter, NDArray<int32_t>* ndarray, int32_t* indices) {
|
||||
iter->initialize_by_ndarray(ndarray, indices);
|
||||
}
|
||||
|
||||
void __nac3_nditer_initialize64(NDIter<int64_t>* iter, NDArray<int64_t>* ndarray, int64_t* indices) {
|
||||
iter->initialize_by_ndarray(ndarray, indices);
|
||||
}
|
||||
|
||||
bool __nac3_nditer_has_element(NDIter<int32_t>* iter) {
|
||||
return iter->has_element();
|
||||
}
|
||||
|
||||
bool __nac3_nditer_has_element64(NDIter<int64_t>* iter) {
|
||||
return iter->has_element();
|
||||
}
|
||||
|
||||
void __nac3_nditer_next(NDIter<int32_t>* iter) {
|
||||
iter->next();
|
||||
}
|
||||
|
||||
void __nac3_nditer_next64(NDIter<int64_t>* iter) {
|
||||
iter->next();
|
||||
}
|
||||
}
|
@ -1,98 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/basic.hpp"
|
||||
#include "irrt/ndarray/broadcast.hpp"
|
||||
#include "irrt/ndarray/iter.hpp"
|
||||
|
||||
// NOTE: Everything would be much easier and elegant if einsum is implemented.
|
||||
|
||||
namespace {
|
||||
namespace ndarray::matmul {
|
||||
|
||||
/**
|
||||
* @brief Perform the broadcast in `np.einsum("...ij,...jk->...ik", a, b)`.
|
||||
*
|
||||
* Example:
|
||||
* Suppose `a_shape == [1, 97, 4, 2]`
|
||||
* and `b_shape == [99, 98, 1, 2, 5]`,
|
||||
*
|
||||
* ...then `new_a_shape == [99, 98, 97, 4, 2]`,
|
||||
* `new_b_shape == [99, 98, 97, 2, 5]`,
|
||||
* and `dst_shape == [99, 98, 97, 4, 5]`.
|
||||
* ^^^^^^^^^^ ^^^^
|
||||
* (broadcasted) (4x2 @ 2x5 => 4x5)
|
||||
*
|
||||
* @param a_ndims Length of `a_shape`.
|
||||
* @param a_shape Shape of `a`.
|
||||
* @param b_ndims Length of `b_shape`.
|
||||
* @param b_shape Shape of `b`.
|
||||
* @param final_ndims Should be equal to `max(a_ndims, b_ndims)`. This is the length of `new_a_shape`,
|
||||
* `new_b_shape`, and `dst_shape` - the number of dimensions after broadcasting.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void calculate_shapes(SizeT a_ndims,
|
||||
SizeT* a_shape,
|
||||
SizeT b_ndims,
|
||||
SizeT* b_shape,
|
||||
SizeT final_ndims,
|
||||
SizeT* new_a_shape,
|
||||
SizeT* new_b_shape,
|
||||
SizeT* dst_shape) {
|
||||
debug_assert(SizeT, a_ndims >= 2);
|
||||
debug_assert(SizeT, b_ndims >= 2);
|
||||
debug_assert_eq(SizeT, max(a_ndims, b_ndims), final_ndims);
|
||||
|
||||
// Check that a and b are compatible for matmul
|
||||
if (a_shape[a_ndims - 1] != b_shape[b_ndims - 2]) {
|
||||
// This is a custom error message. Different from NumPy.
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "Cannot multiply LHS (shape ?x{0}) with RHS (shape {1}x?})",
|
||||
a_shape[a_ndims - 1], b_shape[b_ndims - 2], NO_PARAM);
|
||||
}
|
||||
|
||||
const SizeT num_entries = 2;
|
||||
ShapeEntry<SizeT> entries[num_entries] = {{.ndims = a_ndims - 2, .shape = a_shape},
|
||||
{.ndims = b_ndims - 2, .shape = b_shape}};
|
||||
|
||||
// TODO: Optimize this
|
||||
ndarray::broadcast::broadcast_shapes<SizeT>(num_entries, entries, final_ndims - 2, new_a_shape);
|
||||
ndarray::broadcast::broadcast_shapes<SizeT>(num_entries, entries, final_ndims - 2, new_b_shape);
|
||||
ndarray::broadcast::broadcast_shapes<SizeT>(num_entries, entries, final_ndims - 2, dst_shape);
|
||||
|
||||
new_a_shape[final_ndims - 2] = a_shape[a_ndims - 2];
|
||||
new_a_shape[final_ndims - 1] = a_shape[a_ndims - 1];
|
||||
new_b_shape[final_ndims - 2] = b_shape[b_ndims - 2];
|
||||
new_b_shape[final_ndims - 1] = b_shape[b_ndims - 1];
|
||||
dst_shape[final_ndims - 2] = a_shape[a_ndims - 2];
|
||||
dst_shape[final_ndims - 1] = b_shape[b_ndims - 1];
|
||||
}
|
||||
} // namespace ndarray::matmul
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::matmul;
|
||||
|
||||
void __nac3_ndarray_matmul_calculate_shapes(int32_t a_ndims,
|
||||
int32_t* a_shape,
|
||||
int32_t b_ndims,
|
||||
int32_t* b_shape,
|
||||
int32_t final_ndims,
|
||||
int32_t* new_a_shape,
|
||||
int32_t* new_b_shape,
|
||||
int32_t* dst_shape) {
|
||||
calculate_shapes(a_ndims, a_shape, b_ndims, b_shape, final_ndims, new_a_shape, new_b_shape, dst_shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_matmul_calculate_shapes64(int64_t a_ndims,
|
||||
int64_t* a_shape,
|
||||
int64_t b_ndims,
|
||||
int64_t* b_shape,
|
||||
int64_t final_ndims,
|
||||
int64_t* new_a_shape,
|
||||
int64_t* new_b_shape,
|
||||
int64_t* dst_shape) {
|
||||
calculate_shapes(a_ndims, a_shape, b_ndims, b_shape, final_ndims, new_a_shape, new_b_shape, dst_shape);
|
||||
}
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
|
||||
namespace {
|
||||
namespace ndarray::reshape {
|
||||
/**
|
||||
* @brief Perform assertions on and resolve unknown dimensions in `new_shape` in `np.reshape(<ndarray>, new_shape)`
|
||||
*
|
||||
* If `new_shape` indeed contains unknown dimensions (specified with `-1`, just like numpy), `new_shape` will be
|
||||
* modified to contain the resolved dimension.
|
||||
*
|
||||
* To perform assertions on and resolve unknown dimensions in `new_shape`, we don't need the actual
|
||||
* `<ndarray>` object itself, but only the `.size` of the `<ndarray>`.
|
||||
*
|
||||
* @param size The `.size` of `<ndarray>`
|
||||
* @param new_ndims Number of elements in `new_shape`
|
||||
* @param new_shape Target shape to reshape to
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void resolve_and_check_new_shape(SizeT size, SizeT new_ndims, SizeT* new_shape) {
|
||||
// Is there a -1 in `new_shape`?
|
||||
bool neg1_exists = false;
|
||||
// Location of -1, only initialized if `neg1_exists` is true
|
||||
SizeT neg1_axis_i;
|
||||
// The computed ndarray size of `new_shape`
|
||||
SizeT new_size = 1;
|
||||
|
||||
for (SizeT axis_i = 0; axis_i < new_ndims; axis_i++) {
|
||||
SizeT dim = new_shape[axis_i];
|
||||
if (dim < 0) {
|
||||
if (dim == -1) {
|
||||
if (neg1_exists) {
|
||||
// Multiple `-1` found. Throw an error.
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "can only specify one unknown dimension", NO_PARAM,
|
||||
NO_PARAM, NO_PARAM);
|
||||
} else {
|
||||
neg1_exists = true;
|
||||
neg1_axis_i = axis_i;
|
||||
}
|
||||
} else {
|
||||
// TODO: What? In `np.reshape` any negative dimension is
// treated as if it were `-1`.
|
||||
//
|
||||
// Try running `np.zeros((3, 4)).reshape((-999, 2))`
|
||||
//
|
||||
// It is not documented by numpy.
|
||||
// Throw an error for now...
|
||||
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "Found non -1 negative dimension {0} on axis {1}", dim, axis_i,
|
||||
NO_PARAM);
|
||||
}
|
||||
} else {
|
||||
new_size *= dim;
|
||||
}
|
||||
}
|
||||
|
||||
bool can_reshape;
|
||||
if (neg1_exists) {
|
||||
// Let `x` be the unknown dimension
|
||||
// Solve `x * <new_size> = <size>`
|
||||
if (new_size == 0 && size == 0) {
|
||||
// `x` has infinitely many solutions
|
||||
can_reshape = false;
|
||||
} else if (new_size == 0 && size != 0) {
|
||||
// `x` has no solutions
|
||||
can_reshape = false;
|
||||
} else if (size % new_size != 0) {
|
||||
// `x` has no integer solutions
|
||||
can_reshape = false;
|
||||
} else {
|
||||
can_reshape = true;
|
||||
new_shape[neg1_axis_i] = size / new_size; // Resolve dimension
|
||||
}
|
||||
} else {
|
||||
can_reshape = (new_size == size);
|
||||
}
|
||||
|
||||
if (!can_reshape) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "cannot reshape array of size {0} into given shape", size, NO_PARAM,
|
||||
NO_PARAM);
|
||||
}
|
||||
}
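The resolution above boils down to solving `x * product(other dims) == size` for the single `-1` dimension. A hedged standalone sketch of that arithmetic (returning false instead of raising; the `resolve_reshape` helper is illustrative only):
```
#include <cassert>
#include <cstdint>

static bool resolve_reshape(int64_t size, int ndims, int64_t* shape) {
    int neg1_axis = -1;
    int64_t known = 1;
    for (int i = 0; i < ndims; i++) {
        if (shape[i] == -1) {
            if (neg1_axis != -1)
                return false;  // more than one unknown dimension
            neg1_axis = i;
        } else {
            known *= shape[i];
        }
    }
    if (neg1_axis == -1)
        return known == size;
    if (known == 0 || size % known != 0)
        return false;          // no integer solution for the unknown dimension
    shape[neg1_axis] = size / known;
    return true;
}

int main() {
    int64_t shape[2] = {-1, 4};
    assert(resolve_reshape(12, 2, shape) && shape[0] == 3);  // 12 elements reshaped to (-1, 4) -> (3, 4)
    int64_t bad[2] = {-1, 5};
    assert(!resolve_reshape(12, 2, bad));                    // 12 elements cannot fill (x, 5)
    return 0;
}
```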
|
||||
} // namespace ndarray::reshape
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
void __nac3_ndarray_reshape_resolve_and_check_new_shape(int32_t size, int32_t new_ndims, int32_t* new_shape) {
|
||||
ndarray::reshape::resolve_and_check_new_shape(size, new_ndims, new_shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_reshape_resolve_and_check_new_shape64(int64_t size, int64_t new_ndims, int64_t* new_shape) {
|
||||
ndarray::reshape::resolve_and_check_new_shape(size, new_ndims, new_shape);
|
||||
}
|
||||
}
|
@ -1,143 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/ndarray/def.hpp"
|
||||
#include "irrt/slice.hpp"
|
||||
|
||||
/*
|
||||
* Notes on `np.transpose(<array>, <axes>)`
|
||||
*
|
||||
* TODO: `axes`, if specified, can actually contain negative indices,
|
||||
* but it is not documented in numpy.
|
||||
*
|
||||
* Supporting it for now.
|
||||
*/
|
||||
|
||||
namespace {
|
||||
namespace ndarray::transpose {
|
||||
/**
|
||||
* @brief Do assertions on `<axes>` in `np.transpose(<array>, <axes>)`.
|
||||
*
|
||||
* Note that `np.transpose`'s `<axes>` argument is optional. If the argument
* is specified by the user, use this function to do assertions on it.
|
||||
*
|
||||
* @param ndims The number of dimensions of `<array>`
|
||||
* @param num_axes Number of elements in `<axes>` as specified by the user.
|
||||
* This should be equal to `ndims`. If not, a "ValueError: axes don't match array" is thrown.
|
||||
* @param axes The user specified `<axes>`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void assert_transpose_axes(SizeT ndims, SizeT num_axes, const SizeT* axes) {
|
||||
if (ndims != num_axes) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "axes don't match array", NO_PARAM, NO_PARAM, NO_PARAM);
|
||||
}
|
||||
|
||||
// TODO: Optimize this
|
||||
bool* axe_specified = (bool*)__builtin_alloca(sizeof(bool) * ndims);
|
||||
for (SizeT i = 0; i < ndims; i++)
|
||||
axe_specified[i] = false;
|
||||
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
SizeT axis = slice::resolve_index_in_length(ndims, axes[i]);
|
||||
if (axis == -1) {
|
||||
// TODO: numpy actually throws a `numpy.exceptions.AxisError`
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "axis {0} is out of bounds for array of dimension {1}", axis, ndims,
|
||||
NO_PARAM);
|
||||
}
|
||||
|
||||
if (axe_specified[axis]) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "repeated axis in transpose", NO_PARAM, NO_PARAM, NO_PARAM);
|
||||
}
|
||||
|
||||
axe_specified[axis] = true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Create a transpose view of `src_ndarray` and perform proper assertions.
|
||||
*
|
||||
* This function is very similar to doing `dst_ndarray = np.transpose(src_ndarray, <axes>)`.
|
||||
* If `<axes>` is supposed to be `None`, the caller can pass a `nullptr` for `<axes>`.
|
||||
*
|
||||
* The transpose view created is returned by modifying `dst_ndarray`.
|
||||
*
|
||||
* The caller is responsible for setting up `dst_ndarray` before calling this function.
|
||||
* Here is what this function expects from `dst_ndarray` when called:
|
||||
* - `dst_ndarray->data` does not have to be initialized.
|
||||
* - `dst_ndarray->itemsize` does not have to be initialized.
|
||||
* - `dst_ndarray->ndims` must be initialized, must be equal to `src_ndarray->ndims`.
|
||||
* - `dst_ndarray->shape` must be allocated, though it can contain uninitialized values.
* - `dst_ndarray->strides` must be allocated, though it can contain uninitialized values.
|
||||
* When this function call ends:
|
||||
* - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`)
|
||||
* - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize`
|
||||
* - `dst_ndarray->ndims` is unchanged
|
||||
* - `dst_ndarray->shape` is updated according to how `np.transpose` works
|
||||
* - `dst_ndarray->strides` is updated according to how `np.transpose` works
|
||||
*
|
||||
* @param src_ndarray The NDArray to build a transpose view on
|
||||
* @param dst_ndarray The resulting NDArray after transpose. Further details in the comments above,
|
||||
* @param num_axes Number of elements in axes. Unused if `axes` is nullptr.
|
||||
* @param axes Axes permutation. Set it to `nullptr` if `<axes>` is `None`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
void transpose(const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray, SizeT num_axes, const SizeT* axes) {
|
||||
debug_assert_eq(SizeT, src_ndarray->ndims, dst_ndarray->ndims);
|
||||
const auto ndims = src_ndarray->ndims;
|
||||
|
||||
if (axes != nullptr)
|
||||
assert_transpose_axes(ndims, num_axes, axes);
|
||||
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
// Check out https://ajcr.net/stride-guide-part-2/ to see how `np.transpose` works behind the scenes.
|
||||
if (axes == nullptr) {
|
||||
// `np.transpose(<array>, axes=None)`
|
||||
|
||||
/*
|
||||
* Minor note: `np.transpose(<array>, axes=None)` is equivalent to
|
||||
* `np.transpose(<array>, axes=[N-1, N-2, ..., 0])` - basically it
|
||||
* is reversing the order of strides and shape.
|
||||
*
|
||||
* This is a fast implementation to handle this special (but very common) case.
|
||||
*/
|
||||
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
dst_ndarray->shape[axis] = src_ndarray->shape[ndims - axis - 1];
|
||||
dst_ndarray->strides[axis] = src_ndarray->strides[ndims - axis - 1];
|
||||
}
|
||||
} else {
|
||||
// `np.transpose(<array>, <axes>)`
|
||||
|
||||
// Permute strides and shape according to `axes`, while resolving negative indices in `axes`
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
// `i` cannot be OUT_OF_BOUNDS because of assertions
|
||||
SizeT i = slice::resolve_index_in_length(ndims, axes[axis]);
|
||||
|
||||
dst_ndarray->shape[axis] = src_ndarray->shape[i];
|
||||
dst_ndarray->strides[axis] = src_ndarray->strides[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
} // namespace ndarray::transpose
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::transpose;
|
||||
void __nac3_ndarray_transpose(const NDArray<int32_t>* src_ndarray,
|
||||
NDArray<int32_t>* dst_ndarray,
|
||||
int32_t num_axes,
|
||||
const int32_t* axes) {
|
||||
transpose(src_ndarray, dst_ndarray, num_axes, axes);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_transpose64(const NDArray<int64_t>* src_ndarray,
|
||||
NDArray<int64_t>* dst_ndarray,
|
||||
int64_t num_axes,
|
||||
const int64_t* axes) {
|
||||
transpose(src_ndarray, dst_ndarray, num_axes, axes);
|
||||
}
|
||||
}
|
@ -1,47 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
|
||||
namespace {
|
||||
namespace range {
|
||||
template<typename T>
|
||||
T len(T start, T stop, T step) {
|
||||
// Reference:
|
||||
// https://github.com/python/cpython/blob/9dbd12375561a393eaec4b21ee4ac568a407cdb0/Objects/rangeobject.c#L933
|
||||
if (step > 0 && start < stop)
|
||||
return 1 + (stop - 1 - start) / step;
|
||||
else if (step < 0 && start > stop)
|
||||
return 1 + (start - 1 - stop) / (-step);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
} // namespace range
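A quick standalone check of the closed-form length above against Python's `len(range(start, stop, step))` (the `range_len` helper mirrors the same formula and is only an illustration):
```
#include <cassert>

static long range_len(long start, long stop, long step) {
    if (step > 0 && start < stop)
        return 1 + (stop - 1 - start) / step;
    if (step < 0 && start > stop)
        return 1 + (start - 1 - stop) / (-step);
    return 0;
}

int main() {
    assert(range_len(0, 10, 3) == 4);    // 0, 3, 6, 9
    assert(range_len(10, 0, -3) == 4);   // 10, 7, 4, 1
    assert(range_len(5, 5, 1) == 0);     // empty range
    return 0;
}
```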
|
||||
|
||||
/**
|
||||
* @brief A Python range.
|
||||
*/
|
||||
template<typename T>
|
||||
struct Range {
|
||||
T start;
|
||||
T stop;
|
||||
T step;
|
||||
|
||||
/**
|
||||
* @brief Calculate the `len()` of this range.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
T len() {
|
||||
debug_assert(SizeT, step != 0);
|
||||
return range::len(start, stop, step);
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
using namespace range;
|
||||
|
||||
SliceIndex __nac3_range_slice_len(const SliceIndex start, const SliceIndex end, const SliceIndex step) {
|
||||
return len(start, end, step);
|
||||
}
|
||||
}
|
@ -1,156 +0,0 @@
|
||||
#pragma once
|
||||
|
||||
#include "irrt/debug.hpp"
|
||||
#include "irrt/exception.hpp"
|
||||
#include "irrt/int_types.hpp"
|
||||
#include "irrt/math_util.hpp"
|
||||
#include "irrt/range.hpp"
|
||||
|
||||
namespace {
|
||||
namespace slice {
|
||||
/**
|
||||
* @brief Resolve a possibly negative index in a list of a known length.
|
||||
*
|
||||
* Returns -1 if the resolved index is out of the list's bounds.
|
||||
*/
|
||||
template<typename T>
|
||||
T resolve_index_in_length(T length, T index) {
|
||||
T resolved = index < 0 ? length + index : index;
|
||||
if (0 <= resolved && resolved < length) {
|
||||
return resolved;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Resolve a slice as a range.
|
||||
*
|
||||
* This is equivalent to `range(*slice(start, stop, step).indices(length))` in Python.
|
||||
*/
|
||||
template<typename T>
|
||||
void indices(bool start_defined,
|
||||
T start,
|
||||
bool stop_defined,
|
||||
T stop,
|
||||
bool step_defined,
|
||||
T step,
|
||||
T length,
|
||||
T* range_start,
|
||||
T* range_stop,
|
||||
T* range_step) {
|
||||
// Reference: https://github.com/python/cpython/blob/main/Objects/sliceobject.c#L388
|
||||
*range_step = step_defined ? step : 1;
|
||||
bool step_is_negative = *range_step < 0;
|
||||
|
||||
T lower, upper;
|
||||
if (step_is_negative) {
|
||||
lower = -1;
|
||||
upper = length - 1;
|
||||
} else {
|
||||
lower = 0;
|
||||
upper = length;
|
||||
}
|
||||
|
||||
if (start_defined) {
|
||||
*range_start = start < 0 ? max(lower, start + length) : min(upper, start);
|
||||
} else {
|
||||
*range_start = step_is_negative ? upper : lower;
|
||||
}
|
||||
|
||||
if (stop_defined) {
|
||||
*range_stop = stop < 0 ? max(lower, stop + length) : min(upper, stop);
|
||||
} else {
|
||||
*range_stop = step_is_negative ? lower : upper;
|
||||
}
|
||||
}
|
||||
} // namespace slice
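To check that the clamping above matches CPython's `slice.indices()`, here is a hedged standalone sketch of the same logic with two small tests; the `slice_indices` helper below is only an illustration, not the IRRT function:
```
#include <cassert>

static void slice_indices(bool has_start, long start, bool has_stop, long stop,
                          bool has_step, long step, long length,
                          long* r_start, long* r_stop, long* r_step) {
    *r_step = has_step ? step : 1;
    bool neg = *r_step < 0;
    long lower = neg ? -1 : 0;
    long upper = neg ? length - 1 : length;
    if (has_start)
        *r_start = start < 0 ? (start + length > lower ? start + length : lower)   // max(lower, start + length)
                             : (start < upper ? start : upper);                    // min(upper, start)
    else
        *r_start = neg ? upper : lower;
    if (has_stop)
        *r_stop = stop < 0 ? (stop + length > lower ? stop + length : lower)
                           : (stop < upper ? stop : upper);
    else
        *r_stop = neg ? lower : upper;
}

int main() {
    long start, stop, step;
    // slice(None, None, -1).indices(5) == (4, -1, -1), i.e. a full reversal.
    slice_indices(false, 0, false, 0, true, -1, 5, &start, &stop, &step);
    assert(start == 4 && stop == -1 && step == -1);
    // slice(-2, None, None).indices(5) == (3, 5, 1).
    slice_indices(true, -2, false, 0, false, 0, 5, &start, &stop, &step);
    assert(start == 3 && stop == 5 && step == 1);
    return 0;
}
```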
|
||||
|
||||
/**
|
||||
* @brief A Python-like slice with **unresolved** indices.
|
||||
*/
|
||||
template<typename T>
|
||||
struct Slice {
|
||||
bool start_defined;
|
||||
T start;
|
||||
|
||||
bool stop_defined;
|
||||
T stop;
|
||||
|
||||
bool step_defined;
|
||||
T step;
|
||||
|
||||
Slice() { this->reset(); }
|
||||
|
||||
void reset() {
|
||||
this->start_defined = false;
|
||||
this->stop_defined = false;
|
||||
this->step_defined = false;
|
||||
}
|
||||
|
||||
void set_start(T start) {
|
||||
this->start_defined = true;
|
||||
this->start = start;
|
||||
}
|
||||
|
||||
void set_stop(T stop) {
|
||||
this->stop_defined = true;
|
||||
this->stop = stop;
|
||||
}
|
||||
|
||||
void set_step(T step) {
|
||||
this->step_defined = true;
|
||||
this->step = step;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Resolve this slice as a range.
|
||||
*
|
||||
* In Python, this would be `range(*slice(start, stop, step).indices(length))`.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
Range<T> indices(T length) {
|
||||
// Reference:
|
||||
// https://github.com/python/cpython/blob/main/Objects/sliceobject.c#L388
|
||||
debug_assert(SizeT, length >= 0);
|
||||
|
||||
Range<T> result;
|
||||
slice::indices(start_defined, start, stop_defined, stop, step_defined, step, length, &result.start,
|
||||
&result.stop, &result.step);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Like `.indices()` but with assertions.
|
||||
*/
|
||||
template<typename SizeT>
|
||||
Range<T> indices_checked(T length) {
|
||||
// TODO: Switch to `SizeT length`
|
||||
|
||||
if (length < 0) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "length should not be negative, got {0}", length, NO_PARAM,
|
||||
NO_PARAM);
|
||||
}
|
||||
|
||||
if (this->step_defined && this->step == 0) {
|
||||
raise_exception(SizeT, EXN_VALUE_ERROR, "slice step cannot be zero", NO_PARAM, NO_PARAM, NO_PARAM);
|
||||
}
|
||||
|
||||
return this->indices<SizeT>(length);
|
||||
}
|
||||
};
|
||||
} // namespace
|
||||
|
||||
extern "C" {
|
||||
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
|
||||
if (i < 0) {
|
||||
i = len + i;
|
||||
}
|
||||
if (i < 0) {
|
||||
return 0;
|
||||
} else if (i > len) {
|
||||
return len;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
}
|
@@ -1,23 +0,0 @@
#pragma once

#include "irrt/int_types.hpp"

namespace {
template<typename SizeT>
bool __nac3_str_eq_impl(const char* str1, SizeT len1, const char* str2, SizeT len2) {
    if (len1 != len2) {
        return 0;
    }
    return __builtin_memcmp(str1, str2, static_cast<SizeT>(len1)) == 0;
}
} // namespace

extern "C" {
bool nac3_str_eq(const char* str1, uint32_t len1, const char* str2, uint32_t len2) {
    return __nac3_str_eq_impl<uint32_t>(str1, len1, str2, len2);
}

bool nac3_str_eq64(const char* str1, uint64_t len1, const char* str2, uint64_t len2) {
    return __nac3_str_eq_impl<uint64_t>(str1, len1, str2, len2);
}
}
216
nac3core/irrt/irrt_basic.hpp
Normal file
@@ -0,0 +1,216 @@
#pragma once

#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"

/*
    This header contains IRRT implementations
    that do not deserve to be categorized (e.g., into numpy, etc.)

    Check out other *.hpp files before including them here!!
*/

// The type of an index or a value describing the length of a range/slice is
// always `int32_t`.

namespace {
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
T __nac3_int_exp_impl(T base, T exp) {
    T res = 1;
    /* repeated squaring method */
    do {
        if (exp & 1) {
            res *= base; /* for n odd */
        }
        exp >>= 1;
        base *= base;
    } while (exp);
    return res;
}
}

extern "C" {
#define DEF_nac3_int_exp_(T) \
    T __nac3_int_exp_##T(T base, T exp) {\
        return __nac3_int_exp_impl(base, exp);\
    }

DEF_nac3_int_exp_(int32_t)
DEF_nac3_int_exp_(int64_t)
DEF_nac3_int_exp_(uint32_t)
DEF_nac3_int_exp_(uint64_t)

SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
    if (i < 0) {
        i = len + i;
    }
    if (i < 0) {
        return 0;
    } else if (i > len) {
        return len;
    }
    return i;
}

SliceIndex __nac3_range_slice_len(
    const SliceIndex start,
    const SliceIndex end,
    const SliceIndex step
) {
    SliceIndex diff = end - start;
    if (diff > 0 && step > 0) {
        return ((diff - 1) / step) + 1;
    } else if (diff < 0 && step < 0) {
        return ((diff + 1) / step) + 1;
    } else {
        return 0;
    }
}

// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - All the index must *not* be out-of-bound or negative,
// - The end index is *inclusive*,
// - The length of src and dest slice size should already
//   be checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
    SliceIndex dest_start,
    SliceIndex dest_end,
    SliceIndex dest_step,
    uint8_t *dest_arr,
    SliceIndex dest_arr_len,
    SliceIndex src_start,
    SliceIndex src_end,
    SliceIndex src_step,
    uint8_t *src_arr,
    SliceIndex src_arr_len,
    const SliceIndex size
) {
    /* if dest_arr_len == 0, do nothing since we do not support extending list */
    if (dest_arr_len == 0) return dest_arr_len;
    /* if both step is 1, memmove directly, handle the dropping of the list, and shrink size */
    if (src_step == dest_step && dest_step == 1) {
        const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
        const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
        if (src_len > 0) {
            __builtin_memmove(
                dest_arr + dest_start * size,
                src_arr + src_start * size,
                src_len * size
            );
        }
        if (dest_len > 0) {
            /* dropping */
            __builtin_memmove(
                dest_arr + (dest_start + src_len) * size,
                dest_arr + (dest_end + 1) * size,
                (dest_arr_len - dest_end - 1) * size
            );
        }
        /* shrink size */
        return dest_arr_len - (dest_len - src_len);
    }
    /* if two range overlaps, need alloca */
    uint8_t need_alloca =
        (dest_arr == src_arr)
        && !(
            max(dest_start, dest_end) < min(src_start, src_end)
            || max(src_start, src_end) < min(dest_start, dest_end)
        );
    if (need_alloca) {
        uint8_t *tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
        __builtin_memcpy(tmp, src_arr, src_arr_len * size);
        src_arr = tmp;
    }
    SliceIndex src_ind = src_start;
    SliceIndex dest_ind = dest_start;
    for (;
         (src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
         src_ind += src_step, dest_ind += dest_step
    ) {
        /* for constant optimization */
        if (size == 1) {
            __builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
        } else if (size == 4) {
            __builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
        } else if (size == 8) {
            __builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
        } else {
            /* memcpy for var size, cannot overlap after previous alloca */
            __builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
        }
    }
    /* only dest_step == 1 can we shrink the dest list. */
    /* size should be ensured prior to calling this function */
    if (dest_step == 1 && dest_end >= dest_start) {
        __builtin_memmove(
            dest_arr + dest_ind * size,
            dest_arr + (dest_end + 1) * size,
            (dest_arr_len - dest_end - 1) * size
        );
        return dest_arr_len - (dest_end - dest_ind) - 1;
    }
    return dest_arr_len;
}

int32_t __nac3_isinf(double x) {
    return __builtin_isinf(x);
}

int32_t __nac3_isnan(double x) {
    return __builtin_isnan(x);
}

double tgamma(double arg);

double __nac3_gamma(double z) {
    // Handling for denormals
    //     | x                 | Python gamma(x) | C tgamma(x) |
    // --- | ----------------- | --------------- | ----------- |
    // (1) | nan               | nan             | nan         |
    // (2) | -inf              | -inf            | inf         |
    // (3) | inf               | inf             | inf         |
    // (4) | 0.0               | inf             | inf         |
    // (5) | {-1.0, -2.0, ...} | inf             | nan         |

    // (1)-(3)
    if (__builtin_isinf(z) || __builtin_isnan(z)) {
        return z;
    }

    double v = tgamma(z);

    // (4)-(5)
    return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}

double lgamma(double arg);

double __nac3_gammaln(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: gammaln(-inf) -> -inf
    // - libm : lgamma(-inf) -> inf

    if (__builtin_isinf(x)) {
        return x;
    }

    return lgamma(x);
}

double j0(double x);

double __nac3_j0(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: j0(inf) -> nan
    // - libm : j0(inf) -> 0.0

    if (__builtin_isinf(x)) {
        return __builtin_nan("");
    }

    return j0(x);
}
}
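The two helpers above mirror Python semantics: `__nac3_int_exp_impl` is integer `**` done by repeated squaring (for `exp >= 0`), and `__nac3_range_slice_len` is `len(range(start, end, step))` in closed form. A minimal standalone sketch of that correspondence; it is not part of this commit, and `int_exp`, `range_len`, and the `main`-based checks are hypothetical names used only for illustration:

#include <cassert>
#include <cstdint>

// Same repeated-squaring scheme as __nac3_int_exp_impl, for exp >= 0.
static int64_t int_exp(int64_t base, int64_t exp) {
    int64_t res = 1;
    do {
        if (exp & 1)
            res *= base;
        exp >>= 1;
        base *= base;
    } while (exp);
    return res;
}

// Same closed form as __nac3_range_slice_len.
static int32_t range_len(int32_t start, int32_t end, int32_t step) {
    int32_t diff = end - start;
    if (diff > 0 && step > 0)
        return (diff - 1) / step + 1;
    if (diff < 0 && step < 0)
        return (diff + 1) / step + 1;
    return 0;
}

int main() {
    assert(int_exp(3, 5) == 243);      // 3 ** 5
    assert(range_len(0, 10, 3) == 4);  // len(range(0, 10, 3)) == 4
    assert(range_len(10, 0, -3) == 4); // len(range(10, 0, -3)) == 4
    assert(range_len(5, 5, 1) == 0);   // empty range
    return 0;
}

Compiled with any hosted C++ compiler, the program exits cleanly when the closed forms agree with the Python expressions in the comments.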
31
nac3core/irrt/irrt_error_context.hpp
Normal file
@@ -0,0 +1,31 @@
#pragma once

#include "irrt_printer.hpp"

namespace {
#define MAX_ERROR_NAME_LEN 32

// TODO: right now just to report some messages for now
struct ErrorContext {
    Printer error;
    // TODO: add error_class_name??

    void initialize(char* string_base_ptr, uint32_t max_length) {
        error.initialize(string_base_ptr, max_length);
    }

    bool has_error() {
        return error.length > 0;
    }
};
}

extern "C" {
void __nac3_error_context_init(ErrorContext* ctx, char* string_base_ptr, uint32_t max_length) {
    ctx->initialize(string_base_ptr, max_length);
}

uint8_t __nac3_error_context_has_error(ErrorContext* ctx) {
    return (uint8_t) ctx->has_error();
}
}
16
nac3core/irrt/irrt_everything.hpp
Normal file
@@ -0,0 +1,16 @@
#pragma once

#include "irrt_basic.hpp"
#include "irrt_error_context.hpp"
#include "irrt_numpy_ndarray.hpp"
#include "irrt_printer.hpp"
#include "irrt_slice.hpp"
#include "irrt_typedefs.hpp"
#include "irrt_utils.hpp"

/*
    All IRRT implementations.

    We don't have any pre-compiled objects, so we are writing all implementations in headers and
    concatenate them with `#include` into one massive source file that contains all the IRRT stuff.
*/
520
nac3core/irrt/irrt_numpy_ndarray.hpp
Normal file
@@ -0,0 +1,520 @@
#pragma once

#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"
#include "irrt_slice.hpp"

/*
    NDArray-related implementations.
*/

namespace {
namespace ndarray_util {
template <typename SizeT>
static void set_indices_by_nth(SizeT ndims, const SizeT* shape, SizeT* indices, SizeT nth) {
    for (int32_t i = 0; i < ndims; i++) {
        int32_t dim_i = ndims - i - 1;
        int32_t dim = shape[dim_i];

        indices[dim_i] = nth % dim;
        nth /= dim;
    }
}

// Compute the strides of an ndarray given an ndarray `shape`
// and assuming that the ndarray is *fully C-contiguous*.
//
// You might want to read up on https://ajcr.net/stride-guide-part-1/.
template <typename SizeT>
static void set_strides_by_shape(SizeT itemsize, SizeT ndims, SizeT* dst_strides, const SizeT* shape) {
    SizeT stride_product = 1;
    for (SizeT i = 0; i < ndims; i++) {
        int dim_i = ndims - i - 1;
        dst_strides[dim_i] = stride_product * itemsize;
        stride_product *= shape[dim_i];
    }
}

// Compute the size/# of elements of an ndarray given its shape
template <typename SizeT>
static SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
    SizeT size = 1;
    for (SizeT dim_i = 0; dim_i < ndims; dim_i++) size *= shape[dim_i];
    return size;
}

template <typename SizeT>
static bool can_broadcast_shape_to(
    const SizeT target_ndims,
    const SizeT *target_shape,
    const SizeT src_ndims,
    const SizeT *src_shape
) {
    /*
    // See https://numpy.org/doc/stable/user/basics.broadcasting.html

    This function handles this example:
    ```
    Image  (3d array): 256 x 256 x 3
    Scale  (1d array):             3
    Result (3d array): 256 x 256 x 3
    ```

    Other interesting examples to consider:
    - `can_broadcast_shape_to([3], [1, 1, 1, 1, 3]) == true`
    - `can_broadcast_shape_to([3], [3, 1]) == false`
    - `can_broadcast_shape_to([256, 256, 3], [256, 1, 3]) == true`

    In cases when the shapes contain zero(es):
    - `can_broadcast_shape_to([0], [1]) == true`
    - `can_broadcast_shape_to([0], [2]) == false`
    - `can_broadcast_shape_to([0, 4, 0, 0], [1]) == true`
    - `can_broadcast_shape_to([0, 4, 0, 0], [1, 1, 1, 1]) == true`
    - `can_broadcast_shape_to([0, 4, 0, 0], [1, 4, 1, 1]) == true`
    - `can_broadcast_shape_to([4, 3], [0, 3]) == false`
    - `can_broadcast_shape_to([4, 3], [0, 0]) == false`
    */

    // This is essentially doing the following in Python:
    // `for target_dim, src_dim in itertools.zip_longest(target_shape[::-1], src_shape[::-1], fillvalue=1)`
    for (SizeT i = 0; i < max(target_ndims, src_ndims); i++) {
        SizeT target_dim_i = target_ndims - i - 1;
        SizeT src_dim_i = src_ndims - i - 1;

        bool target_dim_exists = target_dim_i >= 0;
        bool src_dim_exists = src_dim_i >= 0;

        SizeT target_dim = target_dim_exists ? target_shape[target_dim_i] : 1;
        SizeT src_dim = src_dim_exists ? src_shape[src_dim_i] : 1;

        bool ok = src_dim == 1 || target_dim == src_dim;
        if (!ok) return false;
    }

    return true;
}
}

typedef uint8_t NDSliceType;
extern "C" {
const NDSliceType INPUT_SLICE_TYPE_INDEX = 0;
const NDSliceType INPUT_SLICE_TYPE_SLICE = 1;
}

struct NDSlice {
    // A poor-man's enum variant type
    NDSliceType type;

    /*
        if type == INPUT_SLICE_TYPE_INDEX => `slice` points to a single `SizeT`
        if type == INPUT_SLICE_TYPE_SLICE => `slice` points to a single `UserRange<SizeT>`

        `SizeT` is controlled by the caller: `NDSlice` only cares about where that
        slice is (the pointer), `NDSlice` does not care/know about the actual `sizeof()`
        of the slice value.
    */
    uint8_t* slice;
};

namespace ndarray_util {
template<typename SizeT>
SizeT deduce_ndims_after_slicing(SizeT ndims, SizeT num_slices, const NDSlice *slices) {
    irrt_assert(num_slices <= ndims);

    SizeT final_ndims = ndims;
    for (SizeT i = 0; i < num_slices; i++) {
        if (slices[i].type == INPUT_SLICE_TYPE_INDEX) {
            final_ndims--; // An index demotes the rank by 1
        }
    }
    return final_ndims;
}
}

// template <typename SizeT>
// struct NDArrayIndicesIter {
//     SizeT ndims;
//     const SizeT *shape;
//     SizeT *indices;
//
//     void set_indices_zero() {
//         __builtin_memset(indices, 0, sizeof(SizeT) * ndims);
//     }
//
//     void next() {
//         for (SizeT i = 0; i < ndims; i++) {
//             SizeT dim_i = ndims - i - 1;
//
//             indices[dim_i]++;
//             if (indices[dim_i] < shape[dim_i]) {
//                 break;
//             } else {
//                 indices[dim_i] = 0;
//             }
//         }
//     }
// };

// The NDArray object. `SizeT` is the *signed* size type of this ndarray.
//
// NOTE: The order of fields is IMPORTANT. DON'T TOUCH IT
//
// Some resources you might find helpful:
// - The official numpy implementations:
//   - https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst
// - On strides (about reshaping, slicing, C-contiguousness, etc)
//   - https://ajcr.net/stride-guide-part-1/.
//   - https://ajcr.net/stride-guide-part-2/.
//   - https://ajcr.net/stride-guide-part-3/.
template <typename SizeT>
struct NDArray {
    // The underlying data this `ndarray` is pointing to.
    //
    // NOTE: Formally this should be of type `void *`, but clang
    // translates `void *` to `i8 *` when run with `-S -emit-llvm`,
    // so we will put `uint8_t *` here for clarity.
    //
    // This pointer should point to the first element of the ndarray directly
    uint8_t *data;

    // The number of bytes of a single element in `data`.
    //
    // The `SizeT` is treated as `unsigned`.
    SizeT itemsize;

    // The number of dimensions of this shape.
    //
    // The `SizeT` is treated as `unsigned`.
    SizeT ndims;

    // Array shape, with length equal to `ndims`.
    //
    // The `SizeT` is treated as `unsigned`.
    //
    // NOTE: `shape` can contain 0.
    // (those appear when the user makes an out of bounds slice into an ndarray, e.g., `np.zeros((3, 3))[400:].shape == (0, 3)`)
    SizeT *shape;

    // Array strides (stride value is in number of bytes, NOT number of elements), with length equal to `ndims`.
    //
    // The `SizeT` is treated as `signed`.
    //
    // NOTE: `strides` can have negative numbers.
    // (those appear when there is a slice with a negative step, e.g., `my_array[::-1]`)
    SizeT *strides;

    // Calculate the size/# of elements of an `ndarray`.
    // This function corresponds to `np.size(<ndarray>)` or `ndarray.size`
    SizeT size() {
        return ndarray_util::calc_size_from_shape(ndims, shape);
    }

    // Calculate the number of bytes of its content of an `ndarray` *in its view*.
    // This function corresponds to `ndarray.nbytes`
    SizeT nbytes() {
        return this->size() * itemsize;
    }

    void set_pelement_value(uint8_t* pelement, const uint8_t* pvalue) {
        __builtin_memcpy(pelement, pvalue, itemsize);
    }

    uint8_t* get_pelement_by_indices(const SizeT *indices) {
        uint8_t* element = data;
        for (SizeT dim_i = 0; dim_i < ndims; dim_i++)
            element += indices[dim_i] * strides[dim_i];
        return element;
    }

    uint8_t* get_nth_pelement(SizeT nth) {
        irrt_assert(0 <= nth);
        irrt_assert(nth < this->size());

        SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * this->ndims);
        ndarray_util::set_indices_by_nth(this->ndims, this->shape, indices, nth);
        return get_pelement_by_indices(indices);
    }

    // Get pointer to the first element of this ndarray, assuming
    // `this->size() > 0`, i.e., not "degenerate" due to zeroes in `this->shape`)
    //
    // This is particularly useful for when the ndarray is just containing a single scalar.
    uint8_t* get_first_pelement() {
        irrt_assert(this->size() > 0);
        return this->data; // ...It is simply `this->data`
    }

    // Is the given `indices` valid/in-bounds?
    bool in_bounds(const SizeT *indices) {
        for (SizeT dim_i = 0; dim_i < ndims; dim_i++) {
            bool dim_ok = indices[dim_i] < shape[dim_i];
            if (!dim_ok) return false;
        }
        return true;
    }

    // Fill the ndarray with a value
    void fill_generic(const uint8_t* pvalue) {
        const SizeT size = this->size();
        for (SizeT i = 0; i < size; i++) {
            uint8_t* pelement = get_nth_pelement(i);
            set_pelement_value(pelement, pvalue);
        }
    }

    // Set the strides of the ndarray with `ndarray_util::set_strides_by_shape`
    void set_strides_by_shape() {
        ndarray_util::set_strides_by_shape(itemsize, ndims, strides, shape);
    }

    // https://numpy.org/doc/stable/reference/generated/numpy.eye.html
    void set_to_eye(SizeT k, const uint8_t* zero_pvalue, const uint8_t* one_pvalue) {
        __builtin_assume(ndims == 2);

        // TODO: Better implementation

        fill_generic(zero_pvalue);
        for (SizeT i = 0; i < min(shape[0], shape[1]); i++) {
            SizeT row = i;
            SizeT col = i + k;
            SizeT indices[2] = { row, col };

            if (!in_bounds(indices)) continue;

            uint8_t* pelement = get_pelement_by_indices(indices);
            set_pelement_value(pelement, one_pvalue);
        }
    }

    // To support numpy "basic indexing" https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing
    // "Advanced indexing" https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing is not supported
    //
    // This function supports:
    // - "scalar indexing",
    // - "slicing and strides",
    // - and "dimensional indexing tools" (TODO, but this is really easy to implement).
    //
    // Things assumed by this function:
    // - `dst_ndarray` is allocated by the caller
    // - `dst_ndarray.ndims` has the correct value (according to `ndarray_util::deduce_ndims_after_slicing`).
    // - ... and `dst_ndarray.shape` and `dst_ndarray.strides` have been allocated by the caller as well
    //
    // Other notes:
    // - `dst_ndarray->data` does not have to be set, it will be derived.
    // - `dst_ndarray->itemsize` does not have to be set, it will be set to `this->itemsize`
    // - `dst_ndarray->shape` and `dst_ndarray.strides` can contain empty values
    void subscript(SizeT num_ndslices, NDSlice* ndslices, NDArray<SizeT>* dst_ndarray) {
        // REFERENCE CODE (check out `_index_helper` in `__getitem__`):
        // https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652

        irrt_assert(dst_ndarray->ndims == ndarray_util::deduce_ndims_after_slicing(this->ndims, num_ndslices, ndslices));

        dst_ndarray->data = this->data;
        dst_ndarray->itemsize = this->itemsize;

        SizeT this_axis = 0;
        SizeT dst_axis = 0;

        for (SizeT i = 0; i < num_ndslices; i++) {
            NDSlice *ndslice = &ndslices[i];
            if (ndslice->type == INPUT_SLICE_TYPE_INDEX) {
                // Handle when the ndslice is just a single (possibly negative) integer
                // e.g., `my_array[::2, -5, ::-1]`
                //                      ^^------ like this
                SizeT index_user = *((SizeT*) ndslice->slice);
                SizeT index = resolve_index_in_length(this->shape[this_axis], index_user);
                dst_ndarray->data += index * this->strides[this_axis]; // Add offset

                // Next
                this_axis++;
            } else if (ndslice->type == INPUT_SLICE_TYPE_SLICE) {
                // Handle when the ndslice is a slice (represented by UserSlice in IRRT)
                // e.g., `my_array[::2, -5, ::-1]`
                //                 ^^^------^^^^----- like these
                UserSlice* user_slice = (UserSlice*) ndslice->slice;
                Slice slice = user_slice->indices(this->shape[this_axis]); // To resolve negative indices and other funny stuff written by the user

                // NOTE: There is no need to write special code to handle negative steps/strides.
                // This simple implementation meticulously handles both positive and negative steps/strides.
                // Check out the tinynumpy and IRRT's test cases if you are not convinced.
                dst_ndarray->data += (SizeT) slice.start * this->strides[this_axis]; // Add offset (NOTE: no need to `* itemsize`, strides count in # of bytes)
                dst_ndarray->strides[dst_axis] = ((SizeT) slice.step) * this->strides[this_axis]; // Determine stride
                dst_ndarray->shape[dst_axis] = (SizeT) slice.len(); // Determine shape dimension

                // Next
                dst_axis++;
                this_axis++;
            } else {
                __builtin_unreachable();
            }
        }

        /*
        Reference python code:
        ```python
        dst_ndarray.shape.extend(this.shape[this_axis:])
        dst_ndarray.strides.extend(this.strides[this_axis:])
        ```
        */

        for (; dst_axis < dst_ndarray->ndims; dst_axis++, this_axis++) {
            dst_ndarray->shape[dst_axis] = this->shape[this_axis];
            dst_ndarray->strides[dst_axis] = this->strides[this_axis];
        }
    }

    // Similar to `np.broadcast_to(<ndarray>, <target_shape>)`
    // Assumptions:
    // - `this` has to be fully initialized.
    // - `dst_ndarray->ndims` has to be set.
    // - `dst_ndarray->shape` has to be set, this determines the shape `this` broadcasts to.
    //
    // Other notes:
    // - `dst_ndarray->data` does not have to be set, it will be set to `this->data`.
    // - `dst_ndarray->itemsize` does not have to be set, it will be set to `this->itemsize`.
    // - `dst_ndarray->strides` does not have to be set, it will be overwritten.
    //
    // Cautions:
    // ```
    // xs = np.zeros((4,))
    // ys = np.zero((4, 1))
    // ys[:] = xs # ok
    //
    // xs = np.zeros((1, 4))
    // ys = np.zero((4,))
    // ys[:] = xs # allowed
    // # However `np.broadcast_to(xs, (4,))` would fail, as per numpy's broadcasting rule.
    // # and apparently numpy will "deprecate" this? SEE https://github.com/numpy/numpy/issues/21744
    // # This implementation will NOT support this assignment.
    // ```
    void broadcast_to(NDArray<SizeT>* dst_ndarray) {
        dst_ndarray->data = this->data;
        dst_ndarray->itemsize = this->itemsize;

        irrt_assert(
            ndarray_util::can_broadcast_shape_to(
                dst_ndarray->ndims,
                dst_ndarray->shape,
                this->ndims,
                this->shape
            )
        );

        SizeT stride_product = 1;
        for (SizeT i = 0; i < max(this->ndims, dst_ndarray->ndims); i++) {
            SizeT this_dim_i = this->ndims - i - 1;
            SizeT dst_dim_i = dst_ndarray->ndims - i - 1;

            bool this_dim_exists = this_dim_i >= 0;
            bool dst_dim_exists = dst_dim_i >= 0;

            // TODO: Explain how this works
            bool c1 = this_dim_exists && this->shape[this_dim_i] == 1;
            bool c2 = dst_dim_exists && dst_ndarray->shape[dst_dim_i] != 1;
            if (!this_dim_exists || (c1 && c2)) {
                dst_ndarray->strides[dst_dim_i] = 0; // Freeze it in-place
            } else {
                dst_ndarray->strides[dst_dim_i] = stride_product * this->itemsize;
                stride_product *= this->shape[this_dim_i]; // NOTE: this_dim_exist must be true here.
            }
        }
    }

    // Simulates `this_ndarray[:] = src_ndarray`, with automatic broadcasting.
    // Caution on https://github.com/numpy/numpy/issues/21744
    // Also see `NDArray::broadcast_to`
    void assign_with(NDArray<SizeT>* src_ndarray) {
        irrt_assert(
            ndarray_util::can_broadcast_shape_to(
                this->ndims,
                this->shape,
                src_ndarray->ndims,
                src_ndarray->shape
            )
        );

        // Broadcast the `src_ndarray` to make the reading process *much* easier
        SizeT* broadcasted_src_ndarray_strides = (SizeT*) __builtin_alloca(sizeof(SizeT) * this->ndims); // Remember to allocate strides beforehand
        NDArray<SizeT> broadcasted_src_ndarray = {
            .ndims = this->ndims,
            .shape = this->shape,
            .strides = broadcasted_src_ndarray_strides
        };
        src_ndarray->broadcast_to(&broadcasted_src_ndarray);

        const SizeT size = this->size();
        for (SizeT i = 0; i < size; i++) {
            uint8_t* src_pelement = broadcasted_src_ndarray.get_nth_pelement(i);
            uint8_t* this_pelement = this->get_nth_pelement(i);
            this->set_pelement_value(this_pelement, src_pelement);
        }
    }

    // TODO: DOCUMENT ME
    bool is_unsized() {
        return this->ndims == 0;
    }

    // Simulate `len(<ndarray>)`
    // See (it doesn't help): https://numpy.org/doc/stable/reference/generated/numpy.ndarray.__len__.html#numpy.ndarray.__len__
    SliceIndex len() {
        // If you do `len(np.asarray(42))` (note that its `.shape` is just `()` - an empty tuple),
        // numpy throws a `TypeError: len() of unsized object`
        irrt_assert(!this->is_unsized());

        // Apparently `len(<ndarray>)` is defined to be the first dimension
        // REFERENCE: https://stackoverflow.com/questions/43081809/len-of-a-numpy-array-in-python
        return (SliceIndex) this->shape[0];
    }
};
}

extern "C" {
uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
    return ndarray->size();
}

uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
    return ndarray->size();
}

void __nac3_ndarray_set_strides_by_shape(NDArray<int32_t>* ndarray) {
    ndarray->set_strides_by_shape();
}

void __nac3_ndarray_set_strides_by_shape64(NDArray<int64_t>* ndarray) {
    ndarray->set_strides_by_shape();
}

void __nac3_ndarray_fill_generic(NDArray<int32_t>* ndarray, uint8_t* pvalue) {
    ndarray->fill_generic(pvalue);
}

void __nac3_ndarray_fill_generic64(NDArray<int64_t>* ndarray, uint8_t* pvalue) {
    ndarray->fill_generic(pvalue);
}

int32_t __nac3_ndarray_deduce_ndims_after_slicing(int32_t ndims, int32_t num_slices, const NDSlice* slices) {
    return ndarray_util::deduce_ndims_after_slicing(ndims, num_slices, slices);
}

int64_t __nac3_ndarray_deduce_ndims_after_slicing64(int64_t ndims, int64_t num_slices, const NDSlice* slices) {
    return ndarray_util::deduce_ndims_after_slicing(ndims, num_slices, slices);
}

void __nac3_ndarray_subscript(NDArray<int32_t>* ndarray, int32_t num_slices, NDSlice* slices, NDArray<int32_t> *dst_ndarray) {
    ndarray->subscript(num_slices, slices, dst_ndarray);
}

void __nac3_ndarray_subscript64(NDArray<int64_t>* ndarray, int32_t num_slices, NDSlice* slices, NDArray<int64_t> *dst_ndarray) {
    ndarray->subscript(num_slices, slices, dst_ndarray);
}

SliceIndex __nac3_ndarray_len(NDArray<int32_t>* ndarray) {
    return ndarray->len();
}

SliceIndex __nac3_ndarray_len64(NDArray<int64_t>* ndarray) {
    return ndarray->len();
}
}
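The stride bookkeeping above is easier to follow with concrete numbers. Below is a minimal standalone sketch, not part of this commit, that reproduces what `ndarray_util::set_strides_by_shape` computes for a C-contiguous float64 array of shape (2, 3, 4), and notes what broadcasting a (1, 4) array to (2, 3, 4) does to the strides (the same case exercised by the test file further down):

#include <cassert>
#include <cstdint>

int main() {
    // C-contiguous strides for shape (2, 3, 4) with itemsize 8 (float64),
    // computed right-to-left exactly like ndarray_util::set_strides_by_shape.
    int32_t shape[3] = {2, 3, 4};
    int32_t strides[3];
    int32_t itemsize = 8;
    int32_t stride_product = 1;
    for (int32_t i = 0; i < 3; i++) {
        int32_t dim_i = 3 - i - 1;
        strides[dim_i] = stride_product * itemsize;
        stride_product *= shape[dim_i];
    }
    assert(strides[0] == 96 && strides[1] == 32 && strides[2] == 8);

    // Broadcasting a (1, 4) float64 array to (2, 3, 4) (cf. NDArray::broadcast_to):
    // dimensions that are missing, or that have extent 1 against a larger target
    // extent, get stride 0, so every broadcast "copy" aliases the same bytes.
    // The resulting strides are (0, 0, 8), matching
    // np.broadcast_to(np.zeros((1, 4)), (2, 3, 4)).strides.
    return 0;
}

The stride-0 trick is the design choice that lets broadcasting stay a pure view operation: no element is ever materialised more than once.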
82
nac3core/irrt/irrt_printer.hpp
Normal file
@@ -0,0 +1,82 @@
#pragma once

#include "irrt_typedefs.hpp"

// TODO: obviously implementing printf from scratch is bad,
// is there a header only, no-cstdlib library for this?

namespace {
struct Printer {
    char* string_base_ptr;
    uint32_t max_length;
    uint32_t length; // NOTE: this could be incremented past max_length, which indicates

    void initialize(char *string_base_ptr, uint32_t max_length) {
        this->string_base_ptr = string_base_ptr;
        this->max_length = max_length;
        this->length = 0;
    }

    void put_space() {
        put_char(' ');
    }

    void put_char(char ch) {
        push_char(ch);
    }

    void put_string(const char* string) {
        // TODO: optimize?
        while (*string != '\0') {
            push_char(*string);
            string++; // Move to next char
        }
    }

    template<typename T>
    void put_int(T value) {
        // NOTE: Try not to use recursion to print the digits

        // value == 0 is a special case
        if (value == 0) {
            push_char('0');
        } else {
            // Add a '-' if the value is negative
            if (value < 0) {
                push_char('-');
                value = -value; // Negate then continue to print the digits
            }

            // TODO: Recursion is a bad idea on embedded systems?
            uint32_t num_digits = int_log_floor(value, 10) + 1;
            put_int_helper(num_digits, value);
        }
    }

    // TODO: implement put_float() and more would be useful
private:
    void push_char(char ch) {
        if (length < max_length) {
            string_base_ptr[length] = ch;
        }

        // NOTE: this could increment past max_length,
        // to indicate the true length of the message even if it gets cut off
        length++;
    }

    template <typename T>
    void put_int_helper(uint32_t num_digits, T value) {
        // Print the digits recursively
        __builtin_assume(0 <= value);

        if (num_digits > 0) {
            put_int_helper(num_digits - 1, value / 10);

            uint32_t digit = value % 10;
            char digit_char = '0' + (char) digit;
            put_char(digit_char);
        }
    }
};
}
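The `Printer` above is deliberately freestanding (no cstdlib), and its one subtle convention is that `length` keeps counting past `max_length` so a caller can tell that a message was truncated while still knowing its true size. A minimal standalone sketch of that convention, for illustration only and not part of this commit (`TinyPrinter` is a hypothetical stand-in, not the IRRT type):

#include <cassert>
#include <cstdint>
#include <cstring>

// Same truncation convention as Printer::push_char: write while there is
// room in the buffer, but keep counting so the true message length is known.
struct TinyPrinter {
    char* buf;
    uint32_t cap;
    uint32_t len;

    void put_char(char ch) {
        if (len < cap)
            buf[len] = ch;
        len++;
    }

    void put_string(const char* s) {
        while (*s)
            put_char(*s++);
    }
};

int main() {
    char buf[8];
    TinyPrinter p{buf, 8, 0};
    p.put_string("error: index out of range");
    assert(p.len == 25);                           // true length of the message
    assert(p.len > 8);                             // ...which signals it was cut off
    assert(std::memcmp(buf, "error: i", 8) == 0);  // only the first 8 bytes were stored
    return 0;
}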
83
nac3core/irrt/irrt_slice.hpp
Normal file
@@ -0,0 +1,83 @@
#pragma once

#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"

namespace {
struct Slice {
    SliceIndex start;
    SliceIndex stop;
    SliceIndex step;

    // The length/The number of elements of the slice if it were a range,
    // i.e., the value of `len(range(this->start, this->stop, this->end))`
    SliceIndex len() {
        SliceIndex diff = stop - start;
        if (diff > 0 && step > 0) {
            return ((diff - 1) / step) + 1;
        } else if (diff < 0 && step < 0) {
            return ((diff + 1) / step) + 1;
        } else {
            return 0;
        }
    }
};

SliceIndex resolve_index_in_length(SliceIndex length, SliceIndex index) {
    irrt_assert(length >= 0);
    if (index < 0) {
        // Remember that index is negative, so do a plus here
        return max<SliceIndex>(length + index, 0);
    } else {
        return min<SliceIndex>(length, index);
    }
}

// A user-written Python-like slice.
//
// i.e., this slice is a triple of either an int or nothing. (e.g., `my_array[:10:2]`, `start` is None)
//
// You can "resolve" a `UserSlice` by using `UserSlice::indices(<length>)`
//
// NOTE: using a bitfield for the `*_defined` is better, at the
// cost of a more annoying implementation in nac3core inkwell
struct UserSlice {
    // Did the user specify `start`? If 0, `start` is undefined (and contains an empty value)
    uint8_t start_defined;
    SliceIndex start;

    // Similar to `start_defined`
    uint8_t stop_defined;
    SliceIndex stop;

    // Similar to `start_defined`
    uint8_t step_defined;
    SliceIndex step;

    // Like Python's `slice(start, stop, step).indices(length)`
    Slice indices(SliceIndex length) {
        // NOTE: This function implements Python's `slice.indices` *FAITHFULLY*.
        // SEE: https://github.com/python/cpython/blob/f62161837e68c1c77961435f1b954412dd5c2b65/Objects/sliceobject.c#L546
        irrt_assert(length >= 0);
        irrt_assert(!step_defined || step != 0); // step_defined -> step != 0; step cannot be zero if specified by user

        Slice result;
        result.step = step_defined ? step : 1;
        bool step_is_negative = result.step < 0;

        if (start_defined) {
            result.start = resolve_index_in_length(length, start);
        } else {
            result.start = step_is_negative ? length - 1 : 0;
        }

        if (stop_defined) {
            result.stop = resolve_index_in_length(length, stop);
        } else {
            result.stop = step_is_negative ? -1 : length;
        }

        return result;
    }
};
}
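`UserSlice::indices` is meant to line up with CPython's `slice(start, stop, step).indices(length)`. A minimal standalone sketch, not part of this commit, that re-derives a few of the cases the test file below also exercises (`resolve`, `clamp_index`, and `ResolvedSlice` are hypothetical names used only here):

#include <cassert>
#include <cstdint>

// Same clamping as resolve_index_in_length / UserSlice::indices, written out
// standalone so it can be compared against Python's slice(...).indices(length).
struct ResolvedSlice { int32_t start, stop, step; };

static int32_t clamp_index(int32_t length, int32_t index) {
    if (index < 0)
        return length + index > 0 ? length + index : 0;
    return index < length ? index : length;
}

static ResolvedSlice resolve(bool has_start, int32_t start,
                             bool has_stop, int32_t stop,
                             bool has_step, int32_t step, int32_t length) {
    ResolvedSlice r;
    r.step = has_step ? step : 1;
    bool neg = r.step < 0;
    r.start = has_start ? clamp_index(length, start) : (neg ? length - 1 : 0);
    r.stop = has_stop ? clamp_index(length, stop) : (neg ? -1 : length);
    return r;
}

int main() {
    ResolvedSlice a = resolve(true, 5, false, 0, false, 0, 100);    // xs[5:]
    assert(a.start == 5 && a.stop == 100 && a.step == 1);
    ResolvedSlice b = resolve(true, -10, true, -5, false, 0, 100);  // xs[-10:-5]
    assert(b.start == 90 && b.stop == 95 && b.step == 1);
    ResolvedSlice c = resolve(false, 0, false, 0, true, -5, 100);   // xs[::-5]
    assert(c.start == 99 && c.stop == -1 && c.step == -5);
    return 0;
}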
707
nac3core/irrt/irrt_test.cpp
Normal file
@@ -0,0 +1,707 @@
|
||||
// This file will be compiled like a real C++ program,
|
||||
// and we do have the luxury to use the standard libraries.
|
||||
// That is if the nix flakes do not have issues... especially on msys2...
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
// Set `IRRT_DONT_TYPEDEF_INTS` because `cstdint` defines them
|
||||
#define IRRT_DONT_TYPEDEF_INTS
|
||||
#include "irrt_everything.hpp"
|
||||
|
||||
void test_fail() {
|
||||
printf("[!] Test failed\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
void __begin_test(const char* function_name, const char* file, int line) {
|
||||
printf("######### Running %s @ %s:%d\n", function_name, file, line);
|
||||
}
|
||||
|
||||
#define BEGIN_TEST() __begin_test(__FUNCTION__, __FILE__, __LINE__)
|
||||
|
||||
template <typename T>
|
||||
void debug_print_array(const char* format, int len, T* as) {
|
||||
printf("[");
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (i != 0) printf(", ");
|
||||
printf(format, as[i]);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void assert_arrays_match(const char* label, const char* format, int len, T* expected, T* got) {
|
||||
if (!arrays_match(len, expected, got)) {
|
||||
printf(">>>>>>> %s\n", label);
|
||||
printf(" Expecting = ");
|
||||
debug_print_array(format, len, expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
debug_print_array(format, len, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void assert_values_match(const char* label, const char* format, T expected, T got) {
|
||||
if (expected != got) {
|
||||
printf(">>>>>>> %s\n", label);
|
||||
printf(" Expecting = ");
|
||||
printf(format, expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
printf(format, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
void print_repeated(const char *str, int count) {
|
||||
for (int i = 0; i < count; i++) {
|
||||
printf("%s", str);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename SizeT, typename ElementT>
|
||||
void __print_ndarray_aux(const char *format, bool first, bool last, SizeT* cursor, SizeT depth, NDArray<SizeT>* ndarray) {
|
||||
// A really lazy recursive implementation
|
||||
|
||||
// Add left padding unless its the first entry (since there would be "[[[" before it)
|
||||
if (!first) {
|
||||
print_repeated(" ", depth);
|
||||
}
|
||||
|
||||
const SizeT dim = ndarray->shape[depth];
|
||||
if (depth + 1 == ndarray->ndims) {
|
||||
// Recursed down to last dimension, print the values in a nice list
|
||||
printf("[");
|
||||
|
||||
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndarray->ndims);
|
||||
for (SizeT i = 0; i < dim; i++) {
|
||||
ndarray_util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, *cursor);
|
||||
ElementT* pelement = (ElementT*) ndarray->get_pelement_by_indices(indices);
|
||||
ElementT element = *pelement;
|
||||
|
||||
if (i != 0) printf(", "); // List delimiter
|
||||
printf(format, element);
|
||||
printf("(@");
|
||||
debug_print_array("%d", ndarray->ndims, indices);
|
||||
printf(")");
|
||||
|
||||
(*cursor)++;
|
||||
}
|
||||
printf("]");
|
||||
} else {
|
||||
printf("[");
|
||||
for (SizeT i = 0; i < ndarray->shape[depth]; i++) {
|
||||
__print_ndarray_aux<SizeT, ElementT>(
|
||||
format,
|
||||
i == 0, // first?
|
||||
i + 1 == dim, // last?
|
||||
cursor,
|
||||
depth + 1,
|
||||
ndarray
|
||||
);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
// Add newline unless its the last entry (since there will be "]]]" after it)
|
||||
if (!last) {
|
||||
print_repeated("\n", depth);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename SizeT, typename ElementT>
|
||||
void print_ndarray(const char *format, NDArray<SizeT>* ndarray) {
|
||||
if (ndarray->ndims == 0) {
|
||||
printf("<empty ndarray>");
|
||||
} else {
|
||||
SizeT cursor = 0;
|
||||
__print_ndarray_aux<SizeT, ElementT>(format, true, true, &cursor, 0, ndarray);
|
||||
}
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_normal() {
|
||||
// Test shapes with normal values
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 3, 5, 7 };
|
||||
assert_values_match("size", "%d", 210, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_has_zero() {
|
||||
// Test shapes with 0 in them
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 0, 5, 7 };
|
||||
assert_values_match("size", "%d", 0, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_set_strides_by_shape() {
|
||||
// Test `set_strides_by_shape()`
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 99, 3, 5, 7 };
|
||||
int32_t strides[4] = { 0 };
|
||||
ndarray_util::set_strides_by_shape((int32_t) sizeof(int32_t), 4, strides, shape);
|
||||
|
||||
int32_t expected_strides[4] = {
|
||||
105 * sizeof(int32_t),
|
||||
35 * sizeof(int32_t),
|
||||
7 * sizeof(int32_t),
|
||||
1 * sizeof(int32_t)
|
||||
};
|
||||
assert_arrays_match("strides", "%u", 4u, expected_strides, strides);
|
||||
}
|
||||
|
||||
// void test_ndarray_indices_iter_normal() {
|
||||
// // Test NDArrayIndicesIter normal behavior
|
||||
// BEGIN_TEST();
|
||||
//
|
||||
// int32_t shape[3] = { 1, 2, 3 };
|
||||
// int32_t indices[3] = { 0, 0, 0 };
|
||||
// auto iter = NDArrayIndicesIter<int32_t> {
|
||||
// .ndims = 3,
|
||||
// .shape = shape,
|
||||
// .indices = indices
|
||||
// };
|
||||
//
|
||||
// assert_arrays_match("indices #0", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #1", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #2", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 2 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #3", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 0 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #4", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 1 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #5", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 2 });
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #6", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 }); // Loops back
|
||||
// iter.next();
|
||||
// assert_arrays_match("indices #7", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
|
||||
// }
|
||||
|
||||
void test_ndarray_fill_generic() {
|
||||
// Test ndarray fill_generic
|
||||
BEGIN_TEST();
|
||||
|
||||
// Choose a type that's neither int32_t nor uint64_t (candidates of SizeT) to spice it up
|
||||
// Also make all the octets non-zero, to see if `memcpy` in `fill_generic` is working perfectly.
|
||||
uint16_t fill_value = 0xFACE;
|
||||
|
||||
uint16_t in_data[6] = { 100, 101, 102, 103, 104, 105 }; // Fill `data` with values that != `999`
|
||||
int32_t in_itemsize = sizeof(uint16_t);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 2, 3 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides,
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
ndarray.fill_generic((uint8_t*) &fill_value); // `fill_generic` here
|
||||
|
||||
uint16_t expected_data[6] = { fill_value, fill_value, fill_value, fill_value, fill_value, fill_value };
|
||||
assert_arrays_match("data", "0x%hX", 6, expected_data, in_data);
|
||||
}
|
||||
|
||||
void test_ndarray_set_to_eye() {
|
||||
// Test `set_to_eye` behavior (helper function to implement `np.eye()`)
|
||||
BEGIN_TEST();
|
||||
|
||||
double in_data[9] = { 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0 };
|
||||
int32_t in_itemsize = sizeof(double);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 3, 3 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides,
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
|
||||
double zero = 0.0;
|
||||
double one = 1.0;
|
||||
ndarray.set_to_eye(1, (uint8_t*) &zero, (uint8_t*) &one);
|
||||
|
||||
assert_values_match("in_data[0]", "%f", 0.0, in_data[0]);
|
||||
assert_values_match("in_data[1]", "%f", 1.0, in_data[1]);
|
||||
assert_values_match("in_data[2]", "%f", 0.0, in_data[2]);
|
||||
assert_values_match("in_data[3]", "%f", 0.0, in_data[3]);
|
||||
assert_values_match("in_data[4]", "%f", 0.0, in_data[4]);
|
||||
assert_values_match("in_data[5]", "%f", 1.0, in_data[5]);
|
||||
assert_values_match("in_data[6]", "%f", 0.0, in_data[6]);
|
||||
assert_values_match("in_data[7]", "%f", 0.0, in_data[7]);
|
||||
assert_values_match("in_data[8]", "%f", 0.0, in_data[8]);
|
||||
}
|
||||
|
||||
void test_slice_1() {
|
||||
// Test `subscript(5, None, None).indices(100) == subscript(5, 100, 1)`
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice = {
|
||||
.start_defined = 1,
|
||||
.start = 5,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 0,
|
||||
};
|
||||
|
||||
auto slice = user_slice.indices(100);
|
||||
assert_values_match("start", "%d", 5, slice.start);
|
||||
assert_values_match("stop", "%d", 100, slice.stop);
|
||||
assert_values_match("step", "%d", 1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_2() {
|
||||
// Test `subscript(400, 999, None).indices(100) == subscript(100, 100, 1)`
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice = {
|
||||
.start_defined = 1,
|
||||
.start = 400,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 0,
|
||||
};
|
||||
|
||||
auto slice = user_slice.indices(100);
|
||||
assert_values_match("start", "%d", 100, slice.start);
|
||||
assert_values_match("stop", "%d", 100, slice.stop);
|
||||
assert_values_match("step", "%d", 1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_3() {
|
||||
// Test `subscript(-10, -5, None).indices(100) == subscript(90, 95, 1)`
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice = {
|
||||
.start_defined = 1,
|
||||
.start = -10,
|
||||
.stop_defined = 1,
|
||||
.stop = -5,
|
||||
.step_defined = 0,
|
||||
};
|
||||
|
||||
auto slice = user_slice.indices(100);
|
||||
assert_values_match("start", "%d", 90, slice.start);
|
||||
assert_values_match("stop", "%d", 95, slice.stop);
|
||||
assert_values_match("step", "%d", 1, slice.step);
|
||||
}
|
||||
|
||||
void test_slice_4() {
|
||||
// Test `subscript(None, None, -5).indices(100) == (99, -1, -5)`
|
||||
BEGIN_TEST();
|
||||
|
||||
UserSlice user_slice = {
|
||||
.start_defined = 0,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 1,
|
||||
.step = -5
|
||||
};
|
||||
|
||||
auto slice = user_slice.indices(100);
|
||||
assert_values_match("start", "%d", 99, slice.start);
|
||||
assert_values_match("stop", "%d", -1, slice.stop);
|
||||
assert_values_match("step", "%d", -5, slice.step);
|
||||
}
|
||||
|
||||
void test_ndslice_1() {
|
||||
/*
|
||||
Reference Python code:
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4));
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[-2:, 1::2]
|
||||
# array([[ 5., 7.],
|
||||
# [ 9., 11.]])
|
||||
|
||||
assert dst_ndarray.shape == (2, 2)
|
||||
assert dst_ndarray.strides == (32, 16)
|
||||
assert dst_ndarray[0, 0] == 5.0
|
||||
assert dst_ndarray[0, 1] == 7.0
|
||||
assert dst_ndarray[1, 0] == 9.0
|
||||
assert dst_ndarray[1, 1] == 11.0
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
double in_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
|
||||
int32_t in_itemsize = sizeof(double);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 3, 4 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
|
||||
// Destination ndarray
|
||||
// As documented, ndims and shape & strides must be allocated and determined by the caller.
|
||||
const int32_t dst_ndims = 2;
|
||||
int32_t dst_shape[dst_ndims] = {999, 999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999, 999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
// Create the slice in `ndarray[-2::, 1::2]`
|
||||
UserSlice user_slice_1 = {
|
||||
.start_defined = 1,
|
||||
.start = -2,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 0
|
||||
};
|
||||
|
||||
UserSlice user_slice_2 = {
|
||||
.start_defined = 1,
|
||||
.start = 1,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 1,
|
||||
.step = 2
|
||||
};
|
||||
|
||||
const int32_t num_ndslices = 2;
|
||||
NDSlice ndslices[num_ndslices] = {
|
||||
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_1 },
|
||||
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_2 }
|
||||
};
|
||||
|
||||
ndarray.subscript(num_ndslices, ndslices, &dst_ndarray);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = { 2, 2 };
|
||||
int32_t expected_strides[dst_ndims] = { 32, 16 };
|
||||
assert_arrays_match("shape", "%d", dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match("strides", "%d", dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
assert_values_match("dst_ndarray[0, 0]", "%f", 5.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0, 0 })));
|
||||
assert_values_match("dst_ndarray[0, 1]", "%f", 7.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0, 1 })));
|
||||
assert_values_match("dst_ndarray[1, 0]", "%f", 9.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1, 0 })));
|
||||
assert_values_match("dst_ndarray[1, 1]", "%f", 11.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1, 1 })));
|
||||
}
|
||||
|
||||
void test_ndslice_2() {
|
||||
/*
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4))
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[2, ::-2]
|
||||
# array([11., 9.])
|
||||
|
||||
assert dst_ndarray.shape == (2,)
|
||||
assert dst_ndarray.strides == (-16,)
|
||||
assert dst_ndarray[0] == 11.0
|
||||
assert dst_ndarray[1] == 9.0
|
||||
|
||||
dst_ndarray[1, 0] == 99 # If you write to `dst_ndarray`
|
||||
assert ndarray[1, 3] == 99 # `ndarray` also updates!!
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
double in_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
|
||||
int32_t in_itemsize = sizeof(double);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 3, 4 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
|
||||
// Destination ndarray
|
||||
// As documented, ndims and shape & strides must be allocated and determined by the caller.
|
||||
const int32_t dst_ndims = 1;
|
||||
int32_t dst_shape[dst_ndims] = {999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
// Create the slice in `ndarray[2, ::-2]`
|
||||
int32_t user_slice_1 = 2;
|
||||
UserSlice user_slice_2 = {
|
||||
.start_defined = 0,
|
||||
.stop_defined = 0,
|
||||
.step_defined = 1,
|
||||
.step = -2
|
||||
};
|
||||
|
||||
const int32_t num_ndslices = 2;
|
||||
NDSlice ndslices[num_ndslices] = {
|
||||
{ .type = INPUT_SLICE_TYPE_INDEX, .slice = (uint8_t*) &user_slice_1 },
|
||||
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_2 }
|
||||
};
|
||||
|
||||
ndarray.subscript(num_ndslices, ndslices, &dst_ndarray);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = { 2 };
|
||||
int32_t expected_strides[dst_ndims] = { -16 };
|
||||
assert_arrays_match("shape", "%d", dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match("strides", "%d", dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
// [5.0, 3.0]
|
||||
assert_values_match("dst_ndarray[0]", "%f", 11.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0 })));
|
||||
assert_values_match("dst_ndarray[1]", "%f", 9.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1 })));
|
||||
}
|
||||
|
||||
void test_ndslice_3() {
|
||||
BEGIN_TEST();
|
||||
|
||||
double in_data[12] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
|
||||
const int32_t in_itemsize = sizeof(double);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 3, 4 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
|
||||
const int32_t dst_ndims = 2;
|
||||
int32_t dst_shape[dst_ndims] = {999, 999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999, 999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
// Create the slice in `ndarray[2:3]`
|
||||
UserSlice user_slice_1 = {
|
||||
.start_defined = 1,
|
||||
.start = 2,
|
||||
.stop_defined = 1,
|
||||
.stop = 3,
|
||||
.step_defined = 0,
|
||||
};
|
||||
|
||||
const int32_t num_ndslices = 1;
|
||||
NDSlice ndslices[num_ndslices] = {
|
||||
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_1 },
|
||||
};
|
||||
|
||||
ndarray.subscript(num_ndslices, ndslices, &dst_ndarray);
|
||||
}
|
||||
|
||||
void test_can_broadcast_shape() {
    BEGIN_TEST();

    assert_values_match(
        "can_broadcast_shape_to([3], [1, 1, 1, 1, 3]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 5, (int32_t[]) { 1, 1, 1, 1, 3 })
    );
    assert_values_match(
        "can_broadcast_shape_to([3], [3, 1]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 2, (int32_t[]) { 3, 1 }));
    assert_values_match(
        "can_broadcast_shape_to([3], [3]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 1, (int32_t[]) { 3 }));
    assert_values_match(
        "can_broadcast_shape_to([1], [3]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 1 }, 1, (int32_t[]) { 3 }));
    assert_values_match(
        "can_broadcast_shape_to([1], [1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 1 }, 1, (int32_t[]) { 1 }));
    assert_values_match(
        "can_broadcast_shape_to([256, 256, 3], [256, 1, 3]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 3, (int32_t[]) { 256, 1, 3 })
    );
    assert_values_match(
        "can_broadcast_shape_to([256, 256, 3], [3]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 3 })
    );
    assert_values_match(
        "can_broadcast_shape_to([256, 256, 3], [2]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 2 })
    );
    assert_values_match(
        "can_broadcast_shape_to([256, 256, 3], [1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 1 })
    );

    // In cases when the shapes contain zero(es)
    assert_values_match(
        "can_broadcast_shape_to([0], [1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 0 }, 1, (int32_t[]) { 1 })
    );
    assert_values_match(
        "can_broadcast_shape_to([0], [2]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 0 }, 1, (int32_t[]) { 2 })
    );
    assert_values_match(
        "can_broadcast_shape_to([0, 4, 0, 0], [1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 1, (int32_t[]) { 1 })
    );
    assert_values_match(
        "can_broadcast_shape_to([0, 4, 0, 0], [1, 1, 1, 1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 4, (int32_t[]) { 1, 1, 1, 1 })
    );
    assert_values_match(
        "can_broadcast_shape_to([0, 4, 0, 0], [1, 4, 1, 1]) == true",
        "%d",
        true,
        ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 4, (int32_t[]) { 1, 4, 1, 1 })
    );
    assert_values_match(
        "can_broadcast_shape_to([4, 3], [0, 3]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(2, (int32_t[]) { 4, 3 }, 2, (int32_t[]) { 0, 3 })
    );
    assert_values_match(
        "can_broadcast_shape_to([4, 3], [0, 0]) == false",
        "%d",
        false,
        ndarray_util::can_broadcast_shape_to(2, (int32_t[]) { 4, 3 }, 2, (int32_t[]) { 0, 0 })
    );
}

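// A minimal sketch of the broadcasting rule the assertions above exercise, kept here for
// reference only -- the real `ndarray_util::can_broadcast_shape_to` lives in the IRRT
// sources and may be implemented differently. Align both shapes at their last dimension;
// every overlapping pair must either match or have a source dimension of 1, and any extra
// leading source dimensions must all be 1.
static bool can_broadcast_shape_to_sketch(int32_t target_ndims, const int32_t* target_shape,
                                          int32_t src_ndims, const int32_t* src_shape) {
    int32_t max_ndims = target_ndims > src_ndims ? target_ndims : src_ndims;
    for (int32_t i = 0; i < max_ndims; i++) {
        // Walk both shapes from the trailing dimension backwards.
        bool has_target = i < target_ndims;
        bool has_src = i < src_ndims;
        int32_t src_dim = has_src ? src_shape[src_ndims - i - 1] : 1;
        if (!has_target) {
            if (src_dim != 1) return false;  // extra leading source dimensions must be 1
            continue;
        }
        int32_t target_dim = target_shape[target_ndims - i - 1];
        if (has_src && src_dim != 1 && src_dim != target_dim) return false;
    }
    return true;
}
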
void test_ndarray_broadcast_1() {
    /*
    ```python
    array = np.array([[19.9, 29.9, 39.9, 49.9]], dtype=np.float64)
    >>> [[19.9 29.9 39.9 49.9]]

    array = np.broadcast_to(array, (2, 3, 4))
    >>> [[[19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]]
    >>>  [[19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]]]

    assert array.strides == (0, 0, 8)
    # and then pick some values in `array` and check them...
    ```
    */
    BEGIN_TEST();

    double in_data[4] = { 19.9, 29.9, 39.9, 49.9 };
    const int32_t in_ndims = 2;
    int32_t in_shape[in_ndims] = {1, 4};
    int32_t in_strides[in_ndims] = {};
    NDArray<int32_t> ndarray = {
        .data = (uint8_t*) in_data,
        .itemsize = sizeof(double),
        .ndims = in_ndims,
        .shape = in_shape,
        .strides = in_strides
    };
    ndarray.set_strides_by_shape();

    const int32_t dst_ndims = 3;
    int32_t dst_shape[dst_ndims] = {2, 3, 4};
    int32_t dst_strides[dst_ndims] = {};
    NDArray<int32_t> dst_ndarray = {
        .ndims = dst_ndims,
        .shape = dst_shape,
        .strides = dst_strides
    };

    ndarray.broadcast_to(&dst_ndarray);

    assert_arrays_match("dst_ndarray->strides", "%d", dst_ndims, (int32_t[]) { 0, 0, 8 }, dst_ndarray.strides);

    assert_values_match("dst_ndarray[0, 0, 0]", "%f", 19.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 0})));
    assert_values_match("dst_ndarray[0, 0, 1]", "%f", 29.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 1})));
    assert_values_match("dst_ndarray[0, 0, 2]", "%f", 39.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 2})));
    assert_values_match("dst_ndarray[0, 0, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 3})));
    assert_values_match("dst_ndarray[0, 1, 0]", "%f", 19.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 0})));
    assert_values_match("dst_ndarray[0, 1, 1]", "%f", 29.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 1})));
    assert_values_match("dst_ndarray[0, 1, 2]", "%f", 39.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 2})));
    assert_values_match("dst_ndarray[0, 1, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 3})));
    assert_values_match("dst_ndarray[1, 2, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {1, 2, 3})));
}

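// A hedged sketch of how broadcast strides like the `(0, 0, 8)` asserted above can be
// derived; the actual `NDArray::broadcast_to` member in the IRRT sources may differ.
// Axes that are stretched (or newly prepended) get a stride of 0 bytes, so every index
// along them maps back to the same source element.
static void broadcast_strides_sketch(int32_t src_ndims, const int32_t* src_shape, const int32_t* src_strides,
                                     int32_t dst_ndims, const int32_t* dst_shape, int32_t* dst_strides) {
    (void) dst_shape;  // only needed if the caller also validates broadcastability
    for (int32_t i = 0; i < dst_ndims; i++) {
        int32_t src_axis = src_ndims - dst_ndims + i;  // right-aligned source axis
        if (src_axis < 0 || src_shape[src_axis] == 1) {
            dst_strides[i] = 0;  // stretched axis: never advance in memory
        } else {
            dst_strides[i] = src_strides[src_axis];
        }
    }
}
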
void test_printer() {
    const uint32_t buffer_len = 256;
    char buffer[buffer_len];
    Printer printer = {
        .string_base_ptr = buffer,
        .max_length = buffer_len,
        .length = 0
    };

    // Note: this test currently only constructs a `Printer`; it does not make any assertions yet.
}

int main() {
    test_calc_size_from_shape_normal();
    test_calc_size_from_shape_has_zero();
    test_set_strides_by_shape();
    // test_ndarray_indices_iter_normal();
    test_ndarray_fill_generic();
    test_ndarray_set_to_eye();
    test_slice_1();
    test_slice_2();
    test_slice_3();
    test_slice_4();
    test_ndslice_1();
    test_ndslice_2();
    test_ndslice_3();
    test_can_broadcast_shape();
    test_ndarray_broadcast_1();
    test_printer();
    return 0;
}
14 nac3core/irrt/irrt_typedefs.hpp Normal file
@ -0,0 +1,14 @@
#pragma once

// This is made toggleable since `irrt_test.cpp` itself would include
// headers that define the `int_t` family.
#ifndef IRRT_DONT_TYPEDEF_INTS
typedef _BitInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t;
typedef _BitInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t;
typedef _BitInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t;
#endif

typedef int32_t SliceIndex;
74 nac3core/irrt/irrt_utils.hpp Normal file
@ -0,0 +1,74 @@
#pragma once

#include "irrt_typedefs.hpp"

namespace {
    template <typename T>
    T max(T a, T b) {
        return a > b ? a : b;
    }

    template <typename T>
    T min(T a, T b) {
        return a > b ? b : a;
    }

    template <typename T>
    bool arrays_match(int len, T *as, T *bs) {
        for (int i = 0; i < len; i++) {
            if (as[i] != bs[i]) return false;
        }
        return true;
    }

    // Returns the largest integer `result` such that `base` raised to `result` does not
    // exceed `value`, i.e. floor(log_base(value)) for positive `value` and `base > 1`.
    template<typename T>
    uint32_t int_log_floor(T value, T base) {
        uint32_t result = 0;
        while (value >= base) {
            result++;
            value /= base;
        }
        return result;
    }

    bool string_is_empty(const char *str) {
        return str[0] == '\0';
    }

    // Copies `src_str` into `dst_str`, which holds at most `dst_max_size` bytes.
    // This function guarantees that `dst_str` will be null-terminated.
    // Returns false if `src_str` could not be fully copied over to `dst_str`.
    bool string_copy(uint32_t dst_max_size, char* dst_str, const char* src_str) {
        for (uint32_t i = 0; i < dst_max_size; i++) {
            bool is_last = i + 1 == dst_max_size;
            if (is_last && src_str[i] != '\0') {
                dst_str[i] = '\0';
                return false;
            }

            if (src_str[i] == '\0') {
                dst_str[i] = '\0';
                return true;
            }

            dst_str[i] = src_str[i];
        }

        __builtin_unreachable();
    }

    void irrt_panic() {
        // Crash the program for now.
        // TODO: Don't crash the program
        // ... or at least produce a good message when testing IRRT

        uint8_t* death = nullptr;
        *death = 0; // TODO: address 0 on hardware might be writable?
    }

    // TODO: Make this a macro and allow it to be toggled on/off (e.g., debug vs release)
    void irrt_assert(bool condition) {
        if (!condition) irrt_panic();
    }
}
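// Illustrative usage sketch (not part of the header above): `string_copy` truncates and
// still null-terminates when the destination is too small, signalling this via its return
// value. The buffer size and strings here are made up for the example.
static void string_copy_usage_sketch() {
    char buf[4];
    bool copied_all = string_copy(sizeof(buf), buf, "hi");     // buf == "hi", returns true
    bool truncated = !string_copy(sizeof(buf), buf, "hello");  // buf == "hel", returns false
    irrt_assert(copied_all && truncated);
}
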
@ -1,21 +0,0 @@
|
||||
[package]
|
||||
name = "nac3core_derive"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[[test]]
|
||||
name = "structfields_tests"
|
||||
path = "tests/structfields_test.rs"
|
||||
|
||||
[dev-dependencies]
|
||||
nac3core = { path = ".." }
|
||||
trybuild = { version = "1.0", features = ["diff"] }
|
||||
|
||||
[dependencies]
|
||||
proc-macro2 = "1.0"
|
||||
proc-macro-error = "1.0"
|
||||
syn = "2.0"
|
||||
quote = "1.0"
|
@ -1,320 +0,0 @@
|
||||
use proc_macro::TokenStream;
|
||||
use proc_macro_error::{abort, proc_macro_error};
|
||||
use quote::quote;
|
||||
use syn::{
|
||||
parse_macro_input, spanned::Spanned, Data, DataStruct, Expr, ExprField, ExprMethodCall,
|
||||
ExprPath, GenericArgument, Ident, LitStr, Path, PathArguments, Type, TypePath,
|
||||
};
|
||||
|
||||
/// Extracts all generic arguments of a [`Type`] into a [`Vec`].
|
||||
///
|
||||
/// Returns [`Some`] of a possibly-empty [`Vec`] if the path of `ty` matches with
|
||||
/// `expected_ty_name`, otherwise returns [`None`].
|
||||
fn extract_generic_args(expected_ty_name: &'static str, ty: &Type) -> Option<Vec<GenericArgument>> {
|
||||
let Type::Path(TypePath { qself: None, path, .. }) = ty else {
|
||||
return None;
|
||||
};
|
||||
|
||||
let segments = &path.segments;
|
||||
if segments.len() != 1 {
|
||||
return None;
|
||||
};
|
||||
|
||||
let segment = segments.iter().next().unwrap();
|
||||
if segment.ident != expected_ty_name {
|
||||
return None;
|
||||
}
|
||||
|
||||
let PathArguments::AngleBracketed(path_args) = &segment.arguments else {
|
||||
return Some(Vec::new());
|
||||
};
|
||||
let args = &path_args.args;
|
||||
|
||||
Some(args.iter().cloned().collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
/// Maps a `path` matching one of the `target_idents` into the `replacement` [`Ident`].
|
||||
fn map_path_to_ident(path: &Path, target_idents: &[&str], replacement: &str) -> Option<Ident> {
|
||||
path.require_ident()
|
||||
.ok()
|
||||
.filter(|ident| target_idents.iter().any(|target| ident == target))
|
||||
.map(|ident| Ident::new(replacement, ident.span()))
|
||||
}
|
||||
|
||||
/// Extracts the left-hand side of a dot-expression.
|
||||
fn extract_dot_operand(expr: &Expr) -> Option<&Expr> {
|
||||
match expr {
|
||||
Expr::MethodCall(ExprMethodCall { receiver: operand, .. })
|
||||
| Expr::Field(ExprField { base: operand, .. }) => Some(operand),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Replaces the top-level receiver of a dot-expression with an [`Ident`], returning `Some(&mut expr)` if the
|
||||
/// replacement is performed.
|
||||
///
|
||||
/// The top-level receiver is the left-most receiver expression, e.g. the top-level receiver of `a.b.c.foo()` is `a`.
|
||||
fn replace_top_level_receiver(expr: &mut Expr, ident: Ident) -> Option<&mut Expr> {
|
||||
if let Expr::MethodCall(ExprMethodCall { receiver: operand, .. })
|
||||
| Expr::Field(ExprField { base: operand, .. }) = expr
|
||||
{
|
||||
return if extract_dot_operand(operand).is_some() {
|
||||
if replace_top_level_receiver(operand, ident).is_some() {
|
||||
Some(expr)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
*operand = Box::new(Expr::Path(ExprPath {
|
||||
attrs: Vec::default(),
|
||||
qself: None,
|
||||
path: ident.into(),
|
||||
}));
|
||||
|
||||
Some(expr)
|
||||
};
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Iterates all operands to the left-hand side of the `.` of an [expression][`Expr`], i.e. the container operand of all
|
||||
/// [`Expr::Field`] and the receiver operand of all [`Expr::MethodCall`].
|
||||
///
|
||||
/// The iterator will return the operand expressions in reverse order of appearance. For example, `a.b.c.func()` will
|
||||
/// return `vec![c, b, a]`.
|
||||
fn iter_dot_operands(expr: &Expr) -> impl Iterator<Item = &Expr> {
|
||||
let mut o = extract_dot_operand(expr);
|
||||
|
||||
std::iter::from_fn(move || {
|
||||
let this = o;
|
||||
o = o.as_ref().and_then(|o| extract_dot_operand(o));
|
||||
|
||||
this
|
||||
})
|
||||
}
|
||||
|
||||
/// Normalizes a value expression for use when creating an instance of this structure, returning a
|
||||
/// [`proc_macro2::TokenStream`] of tokens representing the normalized expression.
|
||||
fn normalize_value_expr(expr: &Expr) -> proc_macro2::TokenStream {
|
||||
match &expr {
|
||||
Expr::Path(ExprPath { qself: None, path, .. }) => {
|
||||
if let Some(ident) = map_path_to_ident(path, &["usize", "size_t"], "llvm_usize") {
|
||||
quote! { #ident }
|
||||
} else {
|
||||
abort!(
|
||||
path,
|
||||
format!(
|
||||
"Expected one of `size_t`, `usize`, or an implicit call expression in #[value_type(...)], found {}",
|
||||
quote!(#expr).to_string(),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
Expr::Call(_) => {
|
||||
quote! { ctx.#expr }
|
||||
}
|
||||
|
||||
Expr::MethodCall(_) => {
|
||||
let base_receiver = iter_dot_operands(expr).last();
|
||||
|
||||
match base_receiver {
|
||||
// `usize.{...}`, `size_t.{...}` -> Rewrite the identifiers to `llvm_usize`
|
||||
Some(Expr::Path(ExprPath { qself: None, path, .. }))
|
||||
if map_path_to_ident(path, &["usize", "size_t"], "llvm_usize").is_some() =>
|
||||
{
|
||||
let ident =
|
||||
map_path_to_ident(path, &["usize", "size_t"], "llvm_usize").unwrap();
|
||||
|
||||
let mut expr = expr.clone();
|
||||
let expr = replace_top_level_receiver(&mut expr, ident).unwrap();
|
||||
|
||||
quote!(#expr)
|
||||
}
|
||||
|
||||
// `ctx.{...}`, `context.{...}` -> Rewrite the identifiers to `ctx`
|
||||
Some(Expr::Path(ExprPath { qself: None, path, .. }))
|
||||
if map_path_to_ident(path, &["ctx", "context"], "ctx").is_some() =>
|
||||
{
|
||||
let ident = map_path_to_ident(path, &["ctx", "context"], "ctx").unwrap();
|
||||
|
||||
let mut expr = expr.clone();
|
||||
let expr = replace_top_level_receiver(&mut expr, ident).unwrap();
|
||||
|
||||
quote!(#expr)
|
||||
}
|
||||
|
||||
// No reserved identifier prefix -> Prepend `ctx.` to the entire expression
|
||||
_ => quote! { ctx.#expr },
|
||||
}
|
||||
}
|
||||
|
||||
_ => {
|
||||
abort!(
|
||||
expr,
|
||||
format!(
|
||||
"Expected one of `size_t`, `usize`, or an implicit call expression in #[value_type(...)], found {}",
|
||||
quote!(#expr).to_string(),
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Derives an implementation of `codegen::types::structure::StructFields`.
|
||||
///
|
||||
/// The benefit of using `#[derive(StructFields)]` is that all index- or order-dependent logic required by
|
||||
/// `impl StructFields` is automatically generated by this implementation, including the field index as required by
|
||||
/// `StructField::new` and the fields as returned by `StructFields::to_vec`.
|
||||
///
|
||||
/// # Prerequisites
|
||||
///
|
||||
/// In order to derive from [`StructFields`], you must implement (or derive) [`Eq`] and [`Copy`] as required by
|
||||
/// `StructFields`.
|
||||
///
|
||||
/// Moreover, `#[derive(StructFields)]` can only be used for `struct`s with named fields, and may only contain fields
|
||||
/// with either `StructField` or [`PhantomData`] types.
|
||||
///
|
||||
/// # Attributes for [`StructFields`]
|
||||
///
|
||||
/// Each `StructField` field must be declared with the `#[value_type(...)]` attribute. The argument of `value_type`
|
||||
/// accepts one of the following:
|
||||
///
|
||||
/// - An expression returning an instance of `inkwell::types::BasicType` (with or without the receiver `ctx`/`context`).
|
||||
/// For example, `context.i8_type()`, `ctx.i8_type()`, and `i8_type()` all refer to `i8`.
|
||||
/// - The reserved identifiers `usize` and `size_t` referring to an `inkwell::types::IntType` of the platform-dependent
|
||||
/// integer size. `usize` and `size_t` can also be used as the receiver to other method calls, e.g.
|
||||
/// `usize.array_type(3)`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// The following is an example of an LLVM slice implemented using `#[derive(StructFields)]`.
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// use nac3core::{
|
||||
/// codegen::types::structure::StructField,
|
||||
/// inkwell::{
|
||||
/// values::{IntValue, PointerValue},
|
||||
/// AddressSpace,
|
||||
/// },
|
||||
/// };
|
||||
/// use nac3core_derive::StructFields;
|
||||
///
|
||||
/// // All classes that implement StructFields must also implement Eq and Copy
|
||||
/// #[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
/// pub struct SliceValue<'ctx> {
|
||||
/// // Declares ptr have a value type of i8*
|
||||
/// //
|
||||
/// // Can also be written as `ctx.i8_type().ptr_type(...)` or `context.i8_type().ptr_type(...)`
|
||||
/// #[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
/// ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
///
|
||||
/// // Declares len have a value type of usize, depending on the target compilation platform
|
||||
/// #[value_type(usize)]
|
||||
/// len: StructField<'ctx, IntValue<'ctx>>,
|
||||
/// }
|
||||
/// ```
|
||||
#[proc_macro_derive(StructFields, attributes(value_type))]
|
||||
#[proc_macro_error]
|
||||
pub fn derive(input: TokenStream) -> TokenStream {
|
||||
let input = parse_macro_input!(input as syn::DeriveInput);
|
||||
let ident = &input.ident;
|
||||
|
||||
let Data::Struct(DataStruct { fields, .. }) = &input.data else {
|
||||
abort!(input, "Only structs with named fields are supported");
|
||||
};
|
||||
if let Err(err_span) =
|
||||
fields
|
||||
.iter()
|
||||
.try_for_each(|field| if field.ident.is_some() { Ok(()) } else { Err(field.span()) })
|
||||
{
|
||||
abort!(err_span, "Only structs with named fields are supported");
|
||||
};
|
||||
|
||||
// Check if struct<'ctx>
|
||||
if input.generics.params.len() != 1 {
|
||||
abort!(input.generics, "Expected exactly 1 generic parameter")
|
||||
}
|
||||
|
||||
let phantom_info = fields
|
||||
.iter()
|
||||
.filter(|field| extract_generic_args("PhantomData", &field.ty).is_some())
|
||||
.map(|field| field.ident.as_ref().unwrap())
|
||||
.cloned()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let field_info = fields
|
||||
.iter()
|
||||
.filter(|field| extract_generic_args("PhantomData", &field.ty).is_none())
|
||||
.map(|field| {
|
||||
let ident = field.ident.as_ref().unwrap();
|
||||
let ty = &field.ty;
|
||||
|
||||
let Some(_) = extract_generic_args("StructField", ty) else {
|
||||
abort!(field, "Only StructField and PhantomData are allowed")
|
||||
};
|
||||
|
||||
let attrs = &field.attrs;
|
||||
let Some(value_type_attr) =
|
||||
attrs.iter().find(|attr| attr.path().is_ident("value_type"))
|
||||
else {
|
||||
abort!(field, "Expected #[value_type(...)] attribute for field");
|
||||
};
|
||||
|
||||
let Ok(value_type_expr) = value_type_attr.parse_args::<Expr>() else {
|
||||
abort!(value_type_attr, "Expected expression in #[value_type(...)]");
|
||||
};
|
||||
|
||||
let value_expr_toks = normalize_value_expr(&value_type_expr);
|
||||
|
||||
(ident.clone(), value_expr_toks)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// `<*>::new` impl of `StructField` and `PhantomData` for `StructFields::new`
|
||||
let phantoms_create = phantom_info
|
||||
.iter()
|
||||
.map(|id| quote! { #id: ::std::marker::PhantomData })
|
||||
.collect::<Vec<_>>();
|
||||
let fields_create = field_info
|
||||
.iter()
|
||||
.map(|(id, ty)| {
|
||||
let id_lit = LitStr::new(&id.to_string(), id.span());
|
||||
quote! {
|
||||
#id: ::nac3core::codegen::types::structure::StructField::create(
|
||||
&mut counter,
|
||||
#id_lit,
|
||||
#ty,
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// `.into()` impl of `StructField` for `StructFields::to_vec`
|
||||
let fields_into =
|
||||
field_info.iter().map(|(id, _)| quote! { self.#id.into() }).collect::<Vec<_>>();
|
||||
|
||||
let impl_block = quote! {
|
||||
impl<'ctx> ::nac3core::codegen::types::structure::StructFields<'ctx> for #ident<'ctx> {
|
||||
fn new(ctx: impl ::nac3core::inkwell::context::AsContextRef<'ctx>, llvm_usize: ::nac3core::inkwell::types::IntType<'ctx>) -> Self {
|
||||
let ctx = unsafe { ::nac3core::inkwell::context::ContextRef::new(ctx.as_ctx_ref()) };
|
||||
|
||||
let mut counter = ::nac3core::codegen::types::structure::FieldIndexCounter::default();
|
||||
|
||||
#ident {
|
||||
#(#fields_create),*
|
||||
#(#phantoms_create),*
|
||||
}
|
||||
}
|
||||
|
||||
fn to_vec(&self) -> ::std::vec::Vec<(&'static str, ::nac3core::inkwell::types::BasicTypeEnum<'ctx>)> {
|
||||
vec![
|
||||
#(#fields_into),*
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
impl_block.into()
|
||||
}
|
@ -1,9 +0,0 @@
|
||||
use nac3core_derive::StructFields;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct EmptyValue<'ctx> {
|
||||
_phantom: PhantomData<&'ctx ()>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,20 +0,0 @@
|
||||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct NDArrayValue<'ctx> {
|
||||
#[value_type(usize)]
|
||||
ndims: StructField<'ctx, IntValue<'ctx>>,
|
||||
#[value_type(usize.ptr_type(AddressSpace::default()))]
|
||||
shape: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
data: StructField<'ctx, PointerValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,18 +0,0 @@
|
||||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,18 +0,0 @@
|
||||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(context.i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,18 +0,0 @@
|
||||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(ctx.i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(usize)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,18 +0,0 @@
|
||||
use nac3core::{
|
||||
codegen::types::structure::StructField,
|
||||
inkwell::{
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
},
|
||||
};
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct SliceValue<'ctx> {
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
ptr: StructField<'ctx, PointerValue<'ctx>>,
|
||||
#[value_type(size_t)]
|
||||
len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
fn main() {}
|
@ -1,10 +0,0 @@
|
||||
#[test]
|
||||
fn test_parse_empty() {
|
||||
let t = trybuild::TestCases::new();
|
||||
t.pass("tests/structfields_empty.rs");
|
||||
t.pass("tests/structfields_slice.rs");
|
||||
t.pass("tests/structfields_slice_ctx.rs");
|
||||
t.pass("tests/structfields_slice_context.rs");
|
||||
t.pass("tests/structfields_slice_sizet.rs");
|
||||
t.pass("tests/structfields_ndarray.rs");
|
||||
}
|
File diff suppressed because it is too large
1930
nac3core/src/codegen/classes.rs
Normal file
File diff suppressed because it is too large
@ -1,9 +1,3 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use indexmap::IndexMap;
|
||||
|
||||
use nac3parser::ast::StrRef;
|
||||
|
||||
use crate::{
|
||||
symbol_resolver::SymbolValue,
|
||||
toplevel::DefinitionId,
|
||||
@ -15,6 +9,10 @@ use crate::{
|
||||
},
|
||||
};
|
||||
|
||||
use indexmap::IndexMap;
|
||||
use nac3parser::ast::StrRef;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub struct ConcreteTypeStore {
|
||||
store: Vec<ConcreteTypeEnum>,
|
||||
}
|
||||
@ -27,7 +25,6 @@ pub struct ConcreteFuncArg {
|
||||
pub name: StrRef,
|
||||
pub ty: ConcreteType,
|
||||
pub default_value: Option<SymbolValue>,
|
||||
pub is_vararg: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -49,17 +46,12 @@ pub enum ConcreteTypeEnum {
|
||||
TPrimitive(Primitive),
|
||||
TTuple {
|
||||
ty: Vec<ConcreteType>,
|
||||
is_vararg_ctx: bool,
|
||||
},
|
||||
TObj {
|
||||
obj_id: DefinitionId,
|
||||
fields: HashMap<StrRef, (ConcreteType, bool)>,
|
||||
params: IndexMap<TypeVarId, ConcreteType>,
|
||||
},
|
||||
TModule {
|
||||
module_id: DefinitionId,
|
||||
methods: HashMap<StrRef, (ConcreteType, bool)>,
|
||||
},
|
||||
TVirtual {
|
||||
ty: ConcreteType,
|
||||
},
|
||||
@ -110,16 +102,8 @@ impl ConcreteTypeStore {
|
||||
.iter()
|
||||
.map(|arg| ConcreteFuncArg {
|
||||
name: arg.name,
|
||||
ty: if arg.is_vararg {
|
||||
let tuple_ty = unifier
|
||||
.add_ty(TypeEnum::TTuple { ty: vec![arg.ty], is_vararg_ctx: true });
|
||||
|
||||
self.from_unifier_type(unifier, primitives, tuple_ty, cache)
|
||||
} else {
|
||||
self.from_unifier_type(unifier, primitives, arg.ty, cache)
|
||||
},
|
||||
ty: self.from_unifier_type(unifier, primitives, arg.ty, cache),
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: arg.is_vararg,
|
||||
})
|
||||
.collect(),
|
||||
ret: self.from_unifier_type(unifier, primitives, signature.ret, cache),
|
||||
@ -174,12 +158,11 @@ impl ConcreteTypeStore {
|
||||
cache.insert(ty, None);
|
||||
let ty_enum = unifier.get_ty(ty);
|
||||
let result = match &*ty_enum {
|
||||
TypeEnum::TTuple { ty, is_vararg_ctx } => ConcreteTypeEnum::TTuple {
|
||||
TypeEnum::TTuple { ty } => ConcreteTypeEnum::TTuple {
|
||||
ty: ty
|
||||
.iter()
|
||||
.map(|t| self.from_unifier_type(unifier, primitives, *t, cache))
|
||||
.collect(),
|
||||
is_vararg_ctx: *is_vararg_ctx,
|
||||
},
|
||||
TypeEnum::TObj { obj_id, fields, params } => ConcreteTypeEnum::TObj {
|
||||
obj_id: *obj_id,
|
||||
@ -209,19 +192,6 @@ impl ConcreteTypeStore {
|
||||
})
|
||||
.collect(),
|
||||
},
|
||||
TypeEnum::TModule { module_id, attributes } => ConcreteTypeEnum::TModule {
|
||||
module_id: *module_id,
|
||||
methods: attributes
|
||||
.iter()
|
||||
.filter_map(|(name, ty)| match &*unifier.get_ty(ty.0) {
|
||||
TypeEnum::TFunc(..) | TypeEnum::TObj { .. } => None,
|
||||
_ => Some((
|
||||
*name,
|
||||
(self.from_unifier_type(unifier, primitives, ty.0, cache), ty.1),
|
||||
)),
|
||||
})
|
||||
.collect(),
|
||||
},
|
||||
TypeEnum::TVirtual { ty } => ConcreteTypeEnum::TVirtual {
|
||||
ty: self.from_unifier_type(unifier, primitives, *ty, cache),
|
||||
},
|
||||
@ -278,12 +248,11 @@ impl ConcreteTypeStore {
|
||||
*cache.get_mut(&cty).unwrap() = Some(ty);
|
||||
return ty;
|
||||
}
|
||||
ConcreteTypeEnum::TTuple { ty, is_vararg_ctx } => TypeEnum::TTuple {
|
||||
ConcreteTypeEnum::TTuple { ty } => TypeEnum::TTuple {
|
||||
ty: ty
|
||||
.iter()
|
||||
.map(|cty| self.to_unifier_type(unifier, primitives, *cty, cache))
|
||||
.collect(),
|
||||
is_vararg_ctx: *is_vararg_ctx,
|
||||
},
|
||||
ConcreteTypeEnum::TVirtual { ty } => {
|
||||
TypeEnum::TVirtual { ty: self.to_unifier_type(unifier, primitives, *ty, cache) }
|
||||
@ -301,15 +270,6 @@ impl ConcreteTypeStore {
|
||||
TypeVar { id, ty }
|
||||
})),
|
||||
},
|
||||
ConcreteTypeEnum::TModule { module_id, methods } => TypeEnum::TModule {
|
||||
module_id: *module_id,
|
||||
attributes: methods
|
||||
.iter()
|
||||
.map(|(name, cty)| {
|
||||
(*name, (self.to_unifier_type(unifier, primitives, cty.0, cache), cty.1))
|
||||
})
|
||||
.collect::<HashMap<_, _>>(),
|
||||
},
|
||||
ConcreteTypeEnum::TFunc { args, ret, vars } => TypeEnum::TFunc(FunSignature {
|
||||
args: args
|
||||
.iter()
|
||||
@ -317,7 +277,6 @@ impl ConcreteTypeStore {
|
||||
name: arg.name,
|
||||
ty: self.to_unifier_type(unifier, primitives, arg.ty, cache),
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: false,
|
||||
})
|
||||
.collect(),
|
||||
ret: self.to_unifier_type(unifier, primitives, *ret, cache),
|
||||
|
File diff suppressed because it is too large
@ -1,10 +1,8 @@
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
|
||||
};
|
||||
use inkwell::attributes::{Attribute, AttributeLoc};
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue};
|
||||
use itertools::Either;
|
||||
|
||||
use super::CodeGenContext;
|
||||
use crate::codegen::CodeGenContext;
|
||||
|
||||
/// Macro to generate extern function
|
||||
/// Both function return type and function parameter type are `FloatValue`
|
||||
@ -15,11 +13,11 @@ use super::CodeGenContext;
|
||||
/// * `$extern_fn:literal`: Name of underlying extern function
|
||||
///
|
||||
/// Optional Arguments:
|
||||
/// * `$(,$attributes:literal)*)`: Attributes linked with the extern function.
|
||||
/// The default attributes are "mustprogress", "nofree", "nounwind", "willreturn", and "writeonly".
|
||||
/// These will be used unless other attributes are specified
|
||||
/// * `$(,$attributes:literal)*)`: Attributes linked with the extern function
|
||||
/// The default attributes are "mustprogress", "nofree", "nounwind", "willreturn", and "writeonly"
|
||||
/// These will be used unless other attributes are specified
|
||||
/// * `$(,$args:ident)*`: Operands of the extern function
|
||||
/// The data type of these operands will be set to `FloatValue`
|
||||
/// The data type of these operands will be set to `FloatValue`
|
||||
///
|
||||
macro_rules! generate_extern_fn {
|
||||
("unary", $fn_name:ident, $extern_fn:literal) => {
|
||||
@ -132,62 +130,3 @@ pub fn call_ldexp<'ctx>(
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Macro to generate `np_linalg` and `sp_linalg` functions
|
||||
/// The function takes as input `NDArray` and returns ()
|
||||
///
|
||||
/// Arguments:
|
||||
/// * `$fn_name:ident`: The identifier of the rust function to be generated
|
||||
/// * `$extern_fn:literal`: Name of underlying extern function
|
||||
/// * (2/3/4): Number of `NDArray` that function takes as input
|
||||
///
|
||||
/// Note:
|
||||
/// The operands and the resulting `NDArray` are both passed as inputs to the function.
/// It is the responsibility of the caller to ensure that the output `NDArray` is properly allocated on the stack.
/// The function changes the content of the output `NDArray` in-place.
|
||||
macro_rules! generate_linalg_extern_fn {
|
||||
($fn_name:ident, $extern_fn:literal, 2) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal, 3) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2, mat3);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal, 4) => {
|
||||
generate_linalg_extern_fn!($fn_name, $extern_fn, mat1, mat2, mat3, mat4);
|
||||
};
|
||||
($fn_name:ident, $extern_fn:literal $(,$input_matrix:ident)*) => {
|
||||
#[doc = concat!("Invokes the linalg `", stringify!($extern_fn), " function." )]
|
||||
pub fn $fn_name<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>
|
||||
$(,$input_matrix: BasicValueEnum<'ctx>)*,
|
||||
name: Option<&str>,
|
||||
){
|
||||
const FN_NAME: &str = $extern_fn;
|
||||
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let fn_type = ctx.ctx.void_type().fn_type(&[$($input_matrix.get_type().into()),*], false);
|
||||
|
||||
let func = ctx.module.add_function(FN_NAME, fn_type, None);
|
||||
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
|
||||
func.add_attribute(
|
||||
AttributeLoc::Function,
|
||||
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0),
|
||||
);
|
||||
}
|
||||
func
|
||||
});
|
||||
|
||||
ctx.builder.build_call(extern_fn, &[$($input_matrix.into(),)*], name.unwrap_or_default()).unwrap();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
generate_linalg_extern_fn!(call_np_linalg_cholesky, "np_linalg_cholesky", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_qr, "np_linalg_qr", 3);
|
||||
generate_linalg_extern_fn!(call_np_linalg_svd, "np_linalg_svd", 4);
|
||||
generate_linalg_extern_fn!(call_np_linalg_inv, "np_linalg_inv", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_pinv, "np_linalg_pinv", 2);
|
||||
generate_linalg_extern_fn!(call_np_linalg_matrix_power, "np_linalg_matrix_power", 3);
|
||||
generate_linalg_extern_fn!(call_np_linalg_det, "np_linalg_det", 2);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_lu, "sp_linalg_lu", 3);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_schur, "sp_linalg_schur", 3);
|
||||
generate_linalg_extern_fn!(call_sp_linalg_hessenberg, "sp_linalg_hessenberg", 3);
|
||||
|
@ -1,27 +1,20 @@
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
targets::TargetMachine,
|
||||
types::{BasicTypeEnum, IntType},
|
||||
values::{BasicValueEnum, IntValue, PointerValue},
|
||||
};
|
||||
|
||||
use nac3parser::ast::{Expr, Stmt, StrRef};
|
||||
|
||||
use super::{bool_to_i1, bool_to_i8, expr::*, stmt::*, values::ArraySliceValue, CodeGenContext};
|
||||
use crate::{
|
||||
codegen::{bool_to_i1, bool_to_i8, classes::ArraySliceValue, expr::*, stmt::*, CodeGenContext},
|
||||
symbol_resolver::ValueEnum,
|
||||
toplevel::{DefinitionId, TopLevelDef},
|
||||
typecheck::typedef::{FunSignature, Type},
|
||||
};
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicTypeEnum, IntType},
|
||||
values::{BasicValueEnum, IntValue, PointerValue},
|
||||
};
|
||||
use nac3parser::ast::{Expr, Stmt, StrRef};
|
||||
|
||||
pub trait CodeGenerator {
|
||||
/// Return the module name for the code generator.
|
||||
fn get_name(&self) -> &str;
|
||||
|
||||
/// Return an instance of [`IntType`] corresponding to the type of `size_t` for this instance.
|
||||
///
|
||||
/// Prefer using [`CodeGenContext::get_size_type`] if [`CodeGenContext`] is available, as it is
|
||||
/// equivalent to this function in a more concise syntax.
|
||||
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx>;
|
||||
|
||||
/// Generate function call and returns the function return value.
|
||||
@ -64,7 +57,6 @@ pub trait CodeGenerator {
|
||||
/// - fun: Function signature, definition ID and the substitution key.
|
||||
/// - params: Function parameters. Note that this does not include the object even if the
|
||||
/// function is a class method.
|
||||
///
|
||||
/// Note that this function should check if the function is generated in another thread (due to
|
||||
/// possible race condition), see the default implementation for an example.
|
||||
fn gen_func_instance<'ctx>(
|
||||
@ -131,45 +123,11 @@ pub trait CodeGenerator {
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_assign(self, ctx, target, value, value_ty)
|
||||
}
|
||||
|
||||
/// Generate code for an assignment expression where LHS is a `"target_list"`.
|
||||
///
|
||||
/// See <https://docs.python.org/3/reference/simple_stmts.html#assignment-statements>.
|
||||
fn gen_assign_target_list<'ctx>(
|
||||
&mut self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
targets: &Vec<Expr<Option<Type>>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_assign_target_list(self, ctx, targets, value, value_ty)
|
||||
}
|
||||
|
||||
/// Generate code for an item assignment.
|
||||
///
|
||||
/// i.e., `target[key] = value`
|
||||
fn gen_setitem<'ctx>(
|
||||
&mut self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
key: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String>
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
gen_setitem(self, ctx, target, key, value, value_ty)
|
||||
gen_assign(self, ctx, target, value)
|
||||
}
|
||||
|
||||
/// Generate code for a while expression.
|
||||
@ -274,27 +232,19 @@ pub struct DefaultCodeGenerator {
|
||||
|
||||
impl DefaultCodeGenerator {
|
||||
#[must_use]
|
||||
pub fn new(name: String, size_t: IntType<'_>) -> DefaultCodeGenerator {
|
||||
assert!(matches!(size_t.get_bit_width(), 32 | 64));
|
||||
DefaultCodeGenerator { name, size_t: size_t.get_bit_width() }
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn with_target_machine(
|
||||
name: String,
|
||||
ctx: &Context,
|
||||
target_machine: &TargetMachine,
|
||||
) -> DefaultCodeGenerator {
|
||||
let llvm_usize = ctx.ptr_sized_int_type(&target_machine.get_target_data(), None);
|
||||
Self::new(name, llvm_usize)
|
||||
pub fn new(name: String, size_t: u32) -> DefaultCodeGenerator {
|
||||
assert!(matches!(size_t, 32 | 64));
|
||||
DefaultCodeGenerator { name, size_t }
|
||||
}
|
||||
}
|
||||
|
||||
impl CodeGenerator for DefaultCodeGenerator {
|
||||
/// Returns the name for this [`CodeGenerator`].
|
||||
fn get_name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
/// Returns an LLVM integer type representing `size_t`.
|
||||
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> {
|
||||
// it should be unsigned, but we don't really need unsigned and this could save us from
|
||||
// having to do a bit cast...
|
||||
|
87
nac3core/src/codegen/irrt/classes.rs
Normal file
@ -0,0 +1,87 @@
|
||||
// TODO: Use derppening's abstraction
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicType, BasicTypeEnum, IntType},
|
||||
values::BasicValueEnum,
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use crate::codegen::structure::{
|
||||
CustomStructType, CustomType, Field, FieldCreator, IntType2, Object, PointerType2,
|
||||
PointingArrayType,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NpArrayType<'ctx> {
|
||||
pub size_type: IntType<'ctx>,
|
||||
pub elem_type: BasicTypeEnum<'ctx>,
|
||||
}
|
||||
|
||||
pub struct NpArrayFields<'ctx> {
|
||||
pub data: Field<'ctx, PointerType2<'ctx>>,
|
||||
pub itemsize: Field<'ctx, IntType2<'ctx>>,
|
||||
pub ndims: Field<'ctx, IntType2<'ctx>>,
|
||||
pub shape: Field<'ctx, PointingArrayType<'ctx, IntType2<'ctx>>>,
|
||||
pub strides: Field<'ctx, PointingArrayType<'ctx, IntType2<'ctx>>>,
|
||||
}
|
||||
|
||||
pub type NpArrayValue<'ctx> = Object<'ctx, NpArrayType<'ctx>>;
|
||||
|
||||
// impl<'ctx> CustomType<'ctx> for NpArrayType<'ctx> {
|
||||
// type Value = NpArrayValue<'ctx>;
|
||||
//
|
||||
// fn llvm_basic_type_enum(
|
||||
// &self,
|
||||
// ctx: &'ctx inkwell::context::Context,
|
||||
// ) -> inkwell::types::BasicTypeEnum<'ctx> {
|
||||
// self.llvm_struct_type(ctx).as_basic_type_enum()
|
||||
// }
|
||||
//
|
||||
// fn llvm_field_load(
|
||||
// &self,
|
||||
// ctx: &crate::codegen::CodeGenContext<'ctx, '_>,
|
||||
// field: crate::codegen::structure::FieldInfo,
|
||||
// struct_ptr: inkwell::values::PointerValue<'ctx>,
|
||||
// ) -> Self::Value {
|
||||
// let ok = field.llvm_load(ctx, struct_ptr);
|
||||
// todo!()
|
||||
// }
|
||||
//
|
||||
// fn llvm_field_store(
|
||||
// &self,
|
||||
// ctx: &crate::codegen::CodeGenContext<'ctx, '_>,
|
||||
// field: crate::codegen::structure::FieldInfo,
|
||||
// struct_ptr: inkwell::values::PointerValue<'ctx>,
|
||||
// value: &Self::Value,
|
||||
// ) {
|
||||
// todo!()
|
||||
// }
|
||||
// }
|
||||
|
||||
impl<'ctx> CustomStructType<'ctx> for NpArrayType<'ctx> {
|
||||
type Fields = NpArrayFields<'ctx>;
|
||||
|
||||
fn llvm_struct_name() -> &'static str {
|
||||
"NDArray"
|
||||
}
|
||||
|
||||
fn add_fields_to(&self, creator: &mut FieldCreator<'ctx>) -> Self::Fields {
|
||||
let pi8 = creator.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
NpArrayFields {
|
||||
data: creator.add_field("data", PointerType2(pi8)),
|
||||
itemsize: creator.add_field("itemsize", IntType2(self.size_type)),
|
||||
ndims: creator.add_field("ndims", IntType2(self.size_type)),
|
||||
shape: creator.add_field("shape", PointingArrayType::new(IntType2(self.size_type))),
|
||||
strides: creator.add_field("strides", PointingArrayType::new(IntType2(self.size_type))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> NpArrayType<'ctx> {
|
||||
pub fn new_opaque_elem(ctx: &'ctx Context, size_type: IntType<'ctx>) -> Self {
|
||||
NpArrayType { elem_type: ctx.i8_type().into(), size_type }
|
||||
}
|
||||
}
|
@ -1,174 +0,0 @@
|
||||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValueEnum, CallSiteValue, IntValue},
|
||||
AddressSpace, IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use super::calculate_len_for_slice_range;
|
||||
use crate::codegen::{
|
||||
macros::codegen_unreachable,
|
||||
values::{ArrayLikeValue, ListValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// This function handles 'end' **inclusively**.
|
||||
/// Order of tuples `assign_idx` and `value_idx` is ('start', 'end', 'step').
|
||||
/// Negative index should be handled before entering this function
|
||||
pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ty: BasicTypeEnum<'ctx>,
|
||||
dest_arr: ListValue<'ctx>,
|
||||
dest_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
src_arr: ListValue<'ctx>,
|
||||
src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_pi8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
|
||||
assert_eq!(dest_idx.0.get_type(), llvm_i32);
|
||||
assert_eq!(dest_idx.1.get_type(), llvm_i32);
|
||||
assert_eq!(dest_idx.2.get_type(), llvm_i32);
|
||||
assert_eq!(src_idx.0.get_type(), llvm_i32);
|
||||
assert_eq!(src_idx.1.get_type(), llvm_i32);
|
||||
assert_eq!(src_idx.2.get_type(), llvm_i32);
|
||||
|
||||
let (fun_symbol, elem_ptr_type) = ("__nac3_list_slice_assign_var_size", llvm_pi8);
|
||||
let slice_assign_fun = {
|
||||
let ty_vec = vec![
|
||||
llvm_i32.into(), // dest start idx
|
||||
llvm_i32.into(), // dest end idx
|
||||
llvm_i32.into(), // dest step
|
||||
elem_ptr_type.into(), // dest arr ptr
|
||||
llvm_i32.into(), // dest arr len
|
||||
llvm_i32.into(), // src start idx
|
||||
llvm_i32.into(), // src end idx
|
||||
llvm_i32.into(), // src step
|
||||
elem_ptr_type.into(), // src arr ptr
|
||||
llvm_i32.into(), // src arr len
|
||||
llvm_i32.into(), // size
|
||||
];
|
||||
ctx.module.get_function(fun_symbol).unwrap_or_else(|| {
|
||||
let fn_t = llvm_i32.fn_type(ty_vec.as_slice(), false);
|
||||
ctx.module.add_function(fun_symbol, fn_t, None)
|
||||
})
|
||||
};
|
||||
|
||||
let zero = llvm_i32.const_zero();
|
||||
let one = llvm_i32.const_int(1, false);
|
||||
let dest_arr_ptr = dest_arr.data().base_ptr(ctx, generator);
|
||||
let dest_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(dest_arr_ptr, elem_ptr_type, "dest_arr_ptr_cast").unwrap();
|
||||
let dest_len = dest_arr.load_size(ctx, Some("dest.len"));
|
||||
let dest_len =
|
||||
ctx.builder.build_int_truncate_or_bit_cast(dest_len, llvm_i32, "srclen32").unwrap();
|
||||
let src_arr_ptr = src_arr.data().base_ptr(ctx, generator);
|
||||
let src_arr_ptr =
|
||||
ctx.builder.build_pointer_cast(src_arr_ptr, elem_ptr_type, "src_arr_ptr_cast").unwrap();
|
||||
let src_len = src_arr.load_size(ctx, Some("src.len"));
|
||||
let src_len =
|
||||
ctx.builder.build_int_truncate_or_bit_cast(src_len, llvm_i32, "srclen32").unwrap();
|
||||
|
||||
// index in bound and positive should be done
|
||||
// assert if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest), and
|
||||
// throw exception if not satisfied
|
||||
let src_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, src_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(src_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(src_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let dest_end = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, dest_idx.2, zero, "is_neg").unwrap(),
|
||||
ctx.builder.build_int_sub(dest_idx.1, one, "e_min_one").unwrap(),
|
||||
ctx.builder.build_int_add(dest_idx.1, one, "e_add_one").unwrap(),
|
||||
"final_e",
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let src_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, src_idx.0, src_end, src_idx.2);
|
||||
let dest_slice_len =
|
||||
calculate_len_for_slice_range(generator, ctx, dest_idx.0, dest_end, dest_idx.2);
|
||||
let src_eq_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, src_slice_len, dest_slice_len, "slice_src_eq_dest")
|
||||
.unwrap();
|
||||
let src_slt_dest = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::SLT, src_slice_len, dest_slice_len, "slice_src_slt_dest")
|
||||
.unwrap();
|
||||
let dest_step_eq_one = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::EQ,
|
||||
dest_idx.2,
|
||||
dest_idx.2.get_type().const_int(1, false),
|
||||
"slice_dest_step_eq_one",
|
||||
)
|
||||
.unwrap();
|
||||
let cond_1 = ctx.builder.build_and(dest_step_eq_one, src_slt_dest, "slice_cond_1").unwrap();
|
||||
let cond = ctx.builder.build_or(src_eq_dest, cond_1, "slice_cond").unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
cond,
|
||||
"0:ValueError",
|
||||
"attempt to assign sequence of size {0} to slice of size {1} with step size {2}",
|
||||
[Some(src_slice_len), Some(dest_slice_len), Some(dest_idx.2)],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
let new_len = {
|
||||
let args = vec![
|
||||
dest_idx.0.into(), // dest start idx
|
||||
dest_idx.1.into(), // dest end idx
|
||||
dest_idx.2.into(), // dest step
|
||||
dest_arr_ptr.into(), // dest arr ptr
|
||||
dest_len.into(), // dest arr len
|
||||
src_idx.0.into(), // src start idx
|
||||
src_idx.1.into(), // src end idx
|
||||
src_idx.2.into(), // src step
|
||||
src_arr_ptr.into(), // src arr ptr
|
||||
src_len.into(), // src arr len
|
||||
{
|
||||
let s = match ty {
|
||||
BasicTypeEnum::FloatType(t) => t.size_of(),
|
||||
BasicTypeEnum::IntType(t) => t.size_of(),
|
||||
BasicTypeEnum::PointerType(t) => t.size_of(),
|
||||
BasicTypeEnum::StructType(t) => t.size_of().unwrap(),
|
||||
_ => codegen_unreachable!(ctx),
|
||||
};
|
||||
ctx.builder.build_int_truncate_or_bit_cast(s, llvm_i32, "size").unwrap()
|
||||
}
|
||||
.into(),
|
||||
];
|
||||
ctx.builder
|
||||
.build_call(slice_assign_fun, args.as_slice(), "slice_assign")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
// update length
|
||||
let need_update =
|
||||
ctx.builder.build_int_compare(IntPredicate::NE, new_len, dest_len, "need_update").unwrap();
|
||||
let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
|
||||
let update_bb = ctx.ctx.append_basic_block(current, "update");
|
||||
let cont_bb = ctx.ctx.append_basic_block(current, "cont");
|
||||
ctx.builder.build_conditional_branch(need_update, update_bb, cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(update_bb);
|
||||
let new_len =
|
||||
ctx.builder.build_int_z_extend_or_bit_cast(new_len, llvm_usize, "new_len").unwrap();
|
||||
dest_arr.store_size(ctx, new_len);
|
||||
ctx.builder.build_unconditional_branch(cont_bb).unwrap();
|
||||
ctx.builder.position_at_end(cont_bb);
|
||||
}
|
@ -1,168 +0,0 @@
|
||||
use inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
|
||||
IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use crate::codegen::{
|
||||
macros::codegen_unreachable,
|
||||
{CodeGenContext, CodeGenerator},
|
||||
};
|
||||
|
||||
// repeated squaring method adapted from GNU Scientific Library:
|
||||
// https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
|
||||
pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
base: IntValue<'ctx>,
|
||||
exp: IntValue<'ctx>,
|
||||
signed: bool,
|
||||
) -> IntValue<'ctx> {
|
||||
let symbol = match (base.get_type().get_bit_width(), exp.get_type().get_bit_width(), signed) {
|
||||
(32, 32, true) => "__nac3_int_exp_int32_t",
|
||||
(64, 64, true) => "__nac3_int_exp_int64_t",
|
||||
(32, 32, false) => "__nac3_int_exp_uint32_t",
|
||||
(64, 64, false) => "__nac3_int_exp_uint64_t",
|
||||
_ => codegen_unreachable!(ctx),
|
||||
};
|
||||
let base_type = base.get_type();
|
||||
let pow_fun = ctx.module.get_function(symbol).unwrap_or_else(|| {
|
||||
let fn_type = base_type.fn_type(&[base_type.into(), base_type.into()], false);
|
||||
ctx.module.add_function(symbol, fn_type, None)
|
||||
});
|
||||
// throw exception when exp < 0
|
||||
let ge_zero = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::SGE,
|
||||
exp,
|
||||
exp.get_type().const_zero(),
|
||||
"assert_int_pow_ge_0",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ge_zero,
|
||||
"0:ValueError",
|
||||
"integer power must be positive or zero",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
ctx.builder
|
||||
.build_call(pow_fun, &[base.into(), exp.into()], "call_int_pow")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `isinf` in IR. Returns an `i1` representing the result.
|
||||
pub fn call_isinf<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
v: FloatValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
assert_eq!(v.get_type(), llvm_f64);
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_isinf").unwrap_or_else(|| {
|
||||
let fn_type = llvm_i32.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_isinf", fn_type, None)
|
||||
});
|
||||
|
||||
let ret = ctx
|
||||
.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "isinf")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
generator.bool_to_i1(ctx, ret)
|
||||
}
|
||||
|
||||
/// Generates a call to `isnan` in IR. Returns an `i1` representing the result.
|
||||
pub fn call_isnan<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
v: FloatValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
assert_eq!(v.get_type(), llvm_f64);
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_isnan").unwrap_or_else(|| {
|
||||
let fn_type = llvm_i32.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_isnan", fn_type, None)
|
||||
});
|
||||
|
||||
let ret = ctx
|
||||
.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "isnan")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
generator.bool_to_i1(ctx, ret)
|
||||
}
|
||||
|
||||
/// Generates a call to `gamma` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_gamma<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
assert_eq!(v.get_type(), llvm_f64);
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_gamma").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_gamma", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "gamma")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `gammaln` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_gammaln<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
assert_eq!(v.get_type(), llvm_f64);
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_gammaln").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_gammaln", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "gammaln")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `j0` in IR. Returns an `f64` representing the result.
|
||||
pub fn call_j0<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> FloatValue<'ctx> {
|
||||
let llvm_f64 = ctx.ctx.f64_type();
|
||||
|
||||
assert_eq!(v.get_type(), llvm_f64);
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function("__nac3_j0").unwrap_or_else(|| {
|
||||
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
|
||||
ctx.module.add_function("__nac3_j0", fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[v.into()], "j0")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_float_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
File diff suppressed because it is too large
@ -1,72 +0,0 @@
|
||||
use inkwell::{types::BasicTypeEnum, values::IntValue};
|
||||
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function,
|
||||
irrt::get_usize_dependent_function_name,
|
||||
values::{ndarray::NDArrayValue, ListValue, ProxyValue, TypedArrayLikeAccessor},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_array_set_and_validate_list_shape`.
|
||||
///
|
||||
/// Deduces the target shape of the `ndarray` from the provided `list`, raising an exception if
|
||||
/// there is any issue with the resultant `shape`.
|
||||
///
|
||||
/// `shape` must be pre-allocated by the caller of this function to `[usize; ndims]`, and must be
|
||||
/// initialized to all `-1`s.
|
||||
pub fn call_nac3_ndarray_array_set_and_validate_list_shape<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
list: ListValue<'ctx>,
|
||||
ndims: IntValue<'ctx>,
|
||||
shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
assert_eq!(list.get_type().element_type().unwrap(), ctx.ctx.i8_type().into());
|
||||
assert_eq!(ndims.get_type(), llvm_usize);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name =
|
||||
get_usize_dependent_function_name(ctx, "__nac3_ndarray_array_set_and_validate_list_shape");
|
||||
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[list.as_base_value().into(), ndims.into(), shape.base_ptr(ctx, generator).into()],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_array_write_list_to_array`.
|
||||
///
|
||||
/// Copies the contents stored in `list` into `ndarray`.
|
||||
///
|
||||
/// The `ndarray` must fulfill the following preconditions:
|
||||
///
|
||||
/// - `ndarray.itemsize`: Must be initialized.
|
||||
/// - `ndarray.ndims`: Must be initialized.
|
||||
/// - `ndarray.shape`: Must be initialized.
|
||||
/// - `ndarray.data`: Must be allocated and contiguous.
|
||||
pub fn call_nac3_ndarray_array_write_list_to_array<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
list: ListValue<'ctx>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) {
|
||||
assert_eq!(list.get_type().element_type().unwrap(), ctx.ctx.i8_type().into());
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_array_write_list_to_array");
|
||||
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[list.as_base_value().into(), ndarray.as_base_value().into()],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
@ -1,295 +0,0 @@
|
||||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValueEnum, IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use crate::codegen::{
|
||||
expr::{create_and_call_function, infer_and_call_function},
|
||||
irrt::get_usize_dependent_function_name,
|
||||
types::ProxyType,
|
||||
values::{ndarray::NDArrayValue, ProxyValue, TypedArrayLikeAccessor},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_util_assert_shape_no_negative`.
|
||||
///
|
||||
/// Asserts that `shape` does not contain negative dimensions.
|
||||
pub fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name =
|
||||
get_usize_dependent_function_name(ctx, "__nac3_ndarray_util_assert_shape_no_negative");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_usize.into()),
|
||||
&[
|
||||
(llvm_usize.into(), shape.size(ctx, generator).into()),
|
||||
(llvm_pusize.into(), shape.base_ptr(ctx, generator).into()),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_util_assert_shape_output_shape_same`.
|
||||
///
|
||||
/// Asserts that `ndarray_shape` and `output_shape` are the same in the context of writing output to
|
||||
/// an `ndarray`.
|
||||
pub fn call_nac3_ndarray_util_assert_output_shape_same<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
output_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(ndarray_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(output_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name =
|
||||
get_usize_dependent_function_name(ctx, "__nac3_ndarray_util_assert_output_shape_same");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_usize.into()),
|
||||
&[
|
||||
(llvm_usize.into(), ndarray_shape.size(ctx, generator).into()),
|
||||
(llvm_pusize.into(), ndarray_shape.base_ptr(ctx, generator).into()),
|
||||
(llvm_usize.into(), output_shape.size(ctx, generator).into()),
|
||||
(llvm_pusize.into(), output_shape.base_ptr(ctx, generator).into()),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_size`.
|
||||
///
|
||||
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the number of elements of an
|
||||
/// `ndarray`, corresponding to the value of `ndarray.size`.
|
||||
pub fn call_nac3_ndarray_size<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_size");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_usize.into()),
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into())],
|
||||
Some("size"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_nbytes`.
|
||||
///
|
||||
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the number of bytes consumed by the
|
||||
/// data of the `ndarray`, corresponding to the value of `ndarray.nbytes`.
|
||||
pub fn call_nac3_ndarray_nbytes<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_nbytes");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_usize.into()),
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into())],
|
||||
Some("nbytes"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_len`.
|
||||
///
|
||||
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the size of the topmost dimension of
|
||||
/// the `ndarray`, corresponding to the value of `ndarray.__len__`.
|
||||
pub fn call_nac3_ndarray_len<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_len");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_usize.into()),
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into())],
|
||||
Some("len"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_is_c_contiguous`.
|
||||
///
|
||||
/// Returns an `i1` value indicating whether the `ndarray` is C-contiguous.
|
||||
pub fn call_nac3_ndarray_is_c_contiguous<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_i1 = ctx.ctx.bool_type();
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_is_c_contiguous");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_i1.into()),
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into())],
|
||||
Some("is_c_contiguous"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_get_nth_pelement`.
|
||||
///
|
||||
/// Returns a [`PointerValue`] to the `index`-th flattened element of the `ndarray`.
|
||||
pub fn call_nac3_ndarray_get_nth_pelement<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
index: IntValue<'ctx>,
|
||||
) -> PointerValue<'ctx> {
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
assert_eq!(index.get_type(), llvm_usize);
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_get_nth_pelement");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_pi8.into()),
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into()), (llvm_usize.into(), index.into())],
|
||||
Some("pelement"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_get_pelement_by_indices`.
|
||||
///
|
||||
/// `indices` must have the same number of elements as the number of dimensions in `ndarray`.
|
||||
///
|
||||
/// Returns a [`PointerValue`] to the element indexed by `indices`.
|
||||
pub fn call_nac3_ndarray_get_pelement_by_indices<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) -> PointerValue<'ctx> {
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(indices.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_get_pelement_by_indices");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(llvm_pi8.into()),
|
||||
&[
|
||||
(llvm_ndarray.into(), ndarray.as_base_value().into()),
|
||||
(llvm_pusize.into(), indices.base_ptr(ctx, generator).into()),
|
||||
],
|
||||
Some("pelement"),
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_set_strides_by_shape`.
|
||||
///
|
||||
/// Sets `ndarray.strides` assuming that `ndarray.shape` is C-contiguous.
|
||||
pub fn call_nac3_ndarray_set_strides_by_shape<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) {
|
||||
let llvm_ndarray = ndarray.get_type().as_base_type();
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_set_strides_by_shape");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[(llvm_ndarray.into(), ndarray.as_base_value().into())],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_copy_data`.
|
||||
///
|
||||
/// Copies all elements from `src_ndarray` to `dst_ndarray` using their flattened views. The number
|
||||
/// of elements in `src_ndarray` must be greater than or equal to the number of elements in
|
||||
/// `dst_ndarray`.
|
||||
pub fn call_nac3_ndarray_copy_data<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
src_ndarray: NDArrayValue<'ctx>,
|
||||
dst_ndarray: NDArrayValue<'ctx>,
|
||||
) {
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_copy_data");
|
||||
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[src_ndarray.as_base_value().into(), dst_ndarray.as_base_value().into()],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
@ -1,81 +0,0 @@
|
||||
use inkwell::values::IntValue;
|
||||
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function,
|
||||
irrt::get_usize_dependent_function_name,
|
||||
types::{ndarray::ShapeEntryType, ProxyType},
|
||||
values::{
|
||||
ndarray::NDArrayValue, ArrayLikeValue, ArraySliceValue, ProxyValue, TypedArrayLikeAccessor,
|
||||
TypedArrayLikeMutator,
|
||||
},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_broadcast_to`.
|
||||
///
|
||||
/// Attempts to broadcast `src_ndarray` to the new shape defined by `dst_ndarray`.
|
||||
///
|
||||
/// `dst_ndarray` must meet the following preconditions:
|
||||
///
|
||||
/// - `dst_ndarray.ndims` must be initialized and must match the length of `dst_ndarray.shape`.
|
||||
/// - `dst_ndarray.shape` must be initialized and contains the target broadcast shape.
|
||||
/// - `dst_ndarray.strides` must be allocated and may contain uninitialized values.
|
||||
pub fn call_nac3_ndarray_broadcast_to<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
src_ndarray: NDArrayValue<'ctx>,
|
||||
dst_ndarray: NDArrayValue<'ctx>,
|
||||
) {
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_broadcast_to");
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[src_ndarray.as_base_value().into(), dst_ndarray.as_base_value().into()],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_broadcast_shapes`.
|
||||
///
|
||||
/// Attempts to calculate the resultant shape from broadcasting all shapes in `shape_entries`,
|
||||
/// writing the result to `dst_shape`.
|
||||
pub fn call_nac3_ndarray_broadcast_shapes<'ctx, G, Shape>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
num_shape_entries: IntValue<'ctx>,
|
||||
shape_entries: ArraySliceValue<'ctx>,
|
||||
dst_ndims: IntValue<'ctx>,
|
||||
dst_shape: &Shape,
|
||||
) where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Shape: TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>
|
||||
+ TypedArrayLikeMutator<'ctx, G, IntValue<'ctx>>,
|
||||
{
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
|
||||
assert_eq!(num_shape_entries.get_type(), llvm_usize);
|
||||
assert!(ShapeEntryType::is_type(
|
||||
generator,
|
||||
ctx.ctx,
|
||||
shape_entries.base_ptr(ctx, generator).get_type()
|
||||
)
|
||||
.is_ok());
|
||||
assert_eq!(dst_ndims.get_type(), llvm_usize);
|
||||
assert_eq!(dst_shape.element_type(ctx, generator), llvm_usize.into());
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_broadcast_shapes");
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[
|
||||
num_shape_entries.into(),
|
||||
shape_entries.base_ptr(ctx, generator).into(),
|
||||
dst_ndims.into(),
|
||||
dst_shape.base_ptr(ctx, generator).into(),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
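The IRRT routine enforces standard NumPy broadcasting between the input shapes. As a rough, host-side illustration of the shape rule it encodes (this is not the `__nac3_ndarray_broadcast_shapes` implementation, only a sketch of the arithmetic):

```rust
/// Illustrative only: combine two shapes under NumPy broadcasting rules,
/// returning `None` when the shapes are incompatible.
fn broadcast_dims(a: &[u64], b: &[u64]) -> Option<Vec<u64>> {
    let ndims = a.len().max(b.len());
    let mut out = vec![0u64; ndims];
    for i in 0..ndims {
        // Walk from the trailing dimension; a missing dimension behaves like 1.
        let da = a.len().checked_sub(i + 1).map_or(1, |j| a[j]);
        let db = b.len().checked_sub(i + 1).map_or(1, |j| b[j]);
        out[ndims - 1 - i] = match (da, db) {
            (x, y) if x == y => x,
            (1, y) => y,
            (x, 1) => x,
            _ => return None, // incompatible dimensions
        };
    }
    Some(out)
}
```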
|
@ -1,34 +0,0 @@
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function,
|
||||
irrt::get_usize_dependent_function_name,
|
||||
values::{ndarray::NDArrayValue, ArrayLikeValue, ArraySliceValue, ProxyValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_index`.
|
||||
///
|
||||
/// Performs [basic indexing](https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing)
|
||||
/// on `src_ndarray` using `indices`, writing the result to `dst_ndarray`, corresponding to the
|
||||
/// operation `dst_ndarray = src_ndarray[indices]`.
|
||||
pub fn call_nac3_ndarray_index<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
indices: ArraySliceValue<'ctx>,
|
||||
src_ndarray: NDArrayValue<'ctx>,
|
||||
dst_ndarray: NDArrayValue<'ctx>,
|
||||
) {
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_index");
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[
|
||||
indices.size(ctx, generator).into(),
|
||||
indices.base_ptr(ctx, generator).into(),
|
||||
src_ndarray.as_base_value().into(),
|
||||
dst_ndarray.as_base_value().into(),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
@ -1,81 +0,0 @@
|
||||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValueEnum, IntValue},
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use crate::codegen::{
|
||||
expr::{create_and_call_function, infer_and_call_function},
|
||||
irrt::get_usize_dependent_function_name,
|
||||
types::ProxyType,
|
||||
values::{
|
||||
ndarray::{NDArrayValue, NDIterValue},
|
||||
ProxyValue, TypedArrayLikeAccessor,
|
||||
},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_nditer_initialize`.
|
||||
///
|
||||
/// Initializes the `iter` object.
|
||||
pub fn call_nac3_nditer_initialize<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
iter: NDIterValue<'ctx>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(indices.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_nditer_initialize");
|
||||
|
||||
create_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[
|
||||
(iter.get_type().as_base_type().into(), iter.as_base_value().into()),
|
||||
(ndarray.get_type().as_base_type().into(), ndarray.as_base_value().into()),
|
||||
(llvm_pusize.into(), indices.base_ptr(ctx, generator).into()),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_nditer_has_element`.
|
||||
///
|
||||
/// Returns an `i1` value indicating whether there are elements left to traverse for the `iter`
|
||||
/// object.
|
||||
pub fn call_nac3_nditer_has_element<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
iter: NDIterValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_nditer_has_element");
|
||||
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
Some(ctx.ctx.bool_type().into()),
|
||||
&[iter.as_base_value().into()],
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_nditer_next`.
|
||||
///
|
||||
/// Moves `iter` to point to the next element.
|
||||
pub fn call_nac3_nditer_next<'ctx>(ctx: &CodeGenContext<'ctx, '_>, iter: NDIterValue<'ctx>) {
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_nditer_next");
|
||||
|
||||
infer_and_call_function(ctx, &name, None, &[iter.as_base_value().into()], None, None);
|
||||
}
|
@ -1,65 +0,0 @@
|
||||
use inkwell::{types::BasicTypeEnum, values::IntValue};
|
||||
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function, irrt::get_usize_dependent_function_name,
|
||||
values::TypedArrayLikeAccessor, CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_matmul_calculate_shapes`.
|
||||
///
|
||||
/// Calculates the broadcasted shapes for `a`, `b`, and the `ndarray` holding the final values of
|
||||
/// `a @ b`.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn call_nac3_ndarray_matmul_calculate_shapes<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
a_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
b_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
final_ndims: IntValue<'ctx>,
|
||||
new_a_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
new_b_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
dst_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(a_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(b_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(new_a_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(new_b_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
assert_eq!(
|
||||
BasicTypeEnum::try_from(dst_shape.element_type(ctx, generator)).unwrap(),
|
||||
llvm_usize.into()
|
||||
);
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_matmul_calculate_shapes");
|
||||
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[
|
||||
a_shape.size(ctx, generator).into(),
|
||||
a_shape.base_ptr(ctx, generator).into(),
|
||||
b_shape.size(ctx, generator).into(),
|
||||
b_shape.base_ptr(ctx, generator).into(),
|
||||
final_ndims.into(),
|
||||
new_a_shape.base_ptr(ctx, generator).into(),
|
||||
new_b_shape.base_ptr(ctx, generator).into(),
|
||||
dst_shape.base_ptr(ctx, generator).into(),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
pub use array::*;
|
||||
pub use basic::*;
|
||||
pub use broadcast::*;
|
||||
pub use indexing::*;
|
||||
pub use iter::*;
|
||||
pub use matmul::*;
|
||||
pub use reshape::*;
|
||||
pub use transpose::*;
|
||||
|
||||
mod array;
|
||||
mod basic;
|
||||
mod broadcast;
|
||||
mod indexing;
|
||||
mod iter;
|
||||
mod matmul;
|
||||
mod reshape;
|
||||
mod transpose;
|
@ -1,39 +0,0 @@
|
||||
use inkwell::values::IntValue;
|
||||
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function,
|
||||
irrt::get_usize_dependent_function_name,
|
||||
values::{ArrayLikeValue, ArraySliceValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_reshape_resolve_and_check_new_shape`.
|
||||
///
|
||||
/// Resolves unknown dimensions in `new_shape` for `numpy.reshape(<ndarray>, new_shape)`, raising an
|
||||
/// error if more than one dimension is unknown (`-1`).
|
||||
pub fn call_nac3_ndarray_reshape_resolve_and_check_new_shape<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
size: IntValue<'ctx>,
|
||||
new_ndims: IntValue<'ctx>,
|
||||
new_shape: ArraySliceValue<'ctx>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
|
||||
assert_eq!(size.get_type(), llvm_usize);
|
||||
assert_eq!(new_ndims.get_type(), llvm_usize);
|
||||
assert_eq!(new_shape.element_type(ctx, generator), llvm_usize.into());
|
||||
|
||||
let name = get_usize_dependent_function_name(
|
||||
ctx,
|
||||
"__nac3_ndarray_reshape_resolve_and_check_new_shape",
|
||||
);
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[size.into(), new_ndims.into(), new_shape.base_ptr(ctx, generator).into()],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
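Conceptually, resolving the single unknown (`-1`) dimension divides the total element count by the product of the known dimensions. A simplified, host-side sketch of that rule (illustrative only, not the IRRT source):

```rust
/// Illustrative only: resolve a single `-1` entry in `new_shape` so the
/// total number of elements matches `size`. Errors if this is impossible.
fn resolve_reshape(size: u64, new_shape: &mut [i64]) -> Result<(), String> {
    let unknowns = new_shape.iter().filter(|&&d| d == -1).count();
    if unknowns > 1 {
        return Err("only one dimension may be -1".into());
    }
    let known: u64 = new_shape.iter().filter(|&&d| d != -1).map(|&d| d as u64).product();
    if unknowns == 1 {
        if known == 0 || size % known != 0 {
            return Err("cannot resolve the unknown dimension".into());
        }
        let resolved = (size / known) as i64;
        for d in new_shape.iter_mut() {
            if *d == -1 {
                *d = resolved;
            }
        }
    } else if known != size {
        return Err("shape does not match the number of elements".into());
    }
    Ok(())
}
```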
|
@ -1,48 +0,0 @@
|
||||
use inkwell::{values::IntValue, AddressSpace};
|
||||
|
||||
use crate::codegen::{
|
||||
expr::infer_and_call_function,
|
||||
irrt::get_usize_dependent_function_name,
|
||||
values::{ndarray::NDArrayValue, ProxyValue, TypedArrayLikeAccessor},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_transpose`.
|
||||
///
|
||||
/// Creates a transpose view of `src_ndarray` and writes the result to `dst_ndarray`.
|
||||
///
|
||||
/// `dst_ndarray` must fulfill the following preconditions:
|
||||
///
|
||||
/// - `dst_ndarray.ndims` must be initialized and must be equal to `src_ndarray.ndims`.
|
||||
/// - `dst_ndarray.shape` must be allocated and may contain uninitialized values.
|
||||
/// - `dst_ndarray.strides` must be allocated and may contain uninitialized values.
|
||||
pub fn call_nac3_ndarray_transpose<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
src_ndarray: NDArrayValue<'ctx>,
|
||||
dst_ndarray: NDArrayValue<'ctx>,
|
||||
axes: Option<&impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>>,
|
||||
) {
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
|
||||
assert!(axes.is_none_or(|axes| axes.size(ctx, generator).get_type() == llvm_usize));
|
||||
assert!(axes.is_none_or(|axes| axes.element_type(ctx, generator) == llvm_usize.into()));
|
||||
|
||||
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_transpose");
|
||||
infer_and_call_function(
|
||||
ctx,
|
||||
&name,
|
||||
None,
|
||||
&[
|
||||
src_ndarray.as_base_value().into(),
|
||||
dst_ndarray.as_base_value().into(),
|
||||
axes.map_or(llvm_usize.const_zero(), |axes| axes.size(ctx, generator)).into(),
|
||||
axes.map_or(llvm_usize.ptr_type(AddressSpace::default()).const_null(), |axes| {
|
||||
axes.base_ptr(ctx, generator)
|
||||
})
|
||||
.into(),
|
||||
],
|
||||
None,
|
||||
None,
|
||||
);
|
||||
}
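Transposition here is a view operation: the shape and strides are permuted (by `axes`, or reversed when `axes` is absent). A simplified host-side illustration of that permutation (not the IRRT implementation):

```rust
/// Illustrative only: permute `shape`/`strides` to describe a transposed view.
/// `axes = None` reverses the axis order, matching `ndarray.T`.
fn transpose_view(
    shape: &[u64],
    strides: &[i64],
    axes: Option<&[usize]>,
) -> (Vec<u64>, Vec<i64>) {
    let ndims = shape.len();
    let order: Vec<usize> = match axes {
        Some(a) => a.to_vec(),
        None => (0..ndims).rev().collect(),
    };
    let new_shape = order.iter().map(|&i| shape[i]).collect();
    let new_strides = order.iter().map(|&i| strides[i]).collect();
    (new_shape, new_strides)
}
```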
|
@ -1,56 +0,0 @@
|
||||
use inkwell::{
|
||||
values::{BasicValueEnum, CallSiteValue, IntValue},
|
||||
IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
|
||||
use crate::codegen::{CodeGenContext, CodeGenerator};
|
||||
|
||||
/// Invokes the `__nac3_range_slice_len` in IRRT.
|
||||
///
|
||||
/// - `start`: The `i32` start value for the slice.
|
||||
/// - `end`: The `i32` end value for the slice.
|
||||
/// - `step`: The `i32` step value for the slice.
|
||||
///
|
||||
/// Returns an `i32` value of the length of the slice.
|
||||
pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
start: IntValue<'ctx>,
|
||||
end: IntValue<'ctx>,
|
||||
step: IntValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
const SYMBOL: &str = "__nac3_range_slice_len";
|
||||
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
|
||||
assert_eq!(start.get_type(), llvm_i32);
|
||||
assert_eq!(end.get_type(), llvm_i32);
|
||||
assert_eq!(step.get_type(), llvm_i32);
|
||||
|
||||
let len_func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
|
||||
let fn_t = llvm_i32.fn_type(&[llvm_i32.into(), llvm_i32.into(), llvm_i32.into()], false);
|
||||
ctx.module.add_function(SYMBOL, fn_t, None)
|
||||
});
|
||||
|
||||
// assert step != 0, throw exception if not
|
||||
let not_zero = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::NE, step, step.get_type().const_zero(), "range_step_ne")
|
||||
.unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
not_zero,
|
||||
"0:ValueError",
|
||||
"step must not be zero",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
ctx.builder
|
||||
.build_call(len_func, &[start.into(), end.into(), step.into()], "calc_len")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
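The underlying `__nac3_range_slice_len` computes `len(range(start, end, step))` with Python semantics. As a reference for the arithmetic only (an illustration, not the IRRT source), the scalar rule is:

```rust
/// Illustrative only: length of `range(start, end, step)` with `step != 0`,
/// matching CPython's semantics.
fn range_len(start: i32, end: i32, step: i32) -> i32 {
    assert_ne!(step, 0);
    let diff = if step > 0 { end - start } else { start - end };
    if diff <= 0 {
        0
    } else {
        // Ceiling division by |step|.
        (diff - 1) / step.abs() + 1
    }
}
```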
|
@ -1,39 +0,0 @@
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, IntValue};
|
||||
use itertools::Either;
|
||||
|
||||
use nac3parser::ast::Expr;
|
||||
|
||||
use crate::{
|
||||
codegen::{CodeGenContext, CodeGenerator},
|
||||
typecheck::typedef::Type,
|
||||
};
|
||||
|
||||
/// This function allows out-of-range indices, since Python
|
||||
/// allows out-of-range indices in slices (`a = [1,2,3]; a[1:10] == [2,3]`).
|
||||
pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>(
|
||||
i: &Expr<Option<Type>>,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
generator: &mut G,
|
||||
length: IntValue<'ctx>,
|
||||
) -> Result<Option<IntValue<'ctx>>, String> {
|
||||
const SYMBOL: &str = "__nac3_slice_index_bound";
|
||||
let func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
|
||||
let i32_t = ctx.ctx.i32_type();
|
||||
let fn_t = i32_t.fn_type(&[i32_t.into(), i32_t.into()], false);
|
||||
ctx.module.add_function(SYMBOL, fn_t, None)
|
||||
});
|
||||
|
||||
let i = if let Some(v) = generator.gen_expr(ctx, i)? {
|
||||
v.to_basic_value_enum(ctx, generator, i.custom.unwrap())?
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
Ok(Some(
|
||||
ctx.builder
|
||||
.build_call(func, &[i.into(), length.into()], "bounded_ind")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap(),
|
||||
))
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, IntValue, PointerValue};
|
||||
use itertools::Either;
|
||||
|
||||
use super::get_usize_dependent_function_name;
|
||||
use crate::codegen::CodeGenContext;
|
||||
|
||||
/// Generates a call to string equality comparison. Returns an `i1` representing whether the strings are equal.
|
||||
pub fn call_string_eq<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
str1_ptr: PointerValue<'ctx>,
|
||||
str1_len: IntValue<'ctx>,
|
||||
str2_ptr: PointerValue<'ctx>,
|
||||
str2_len: IntValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let llvm_i1 = ctx.ctx.bool_type();
|
||||
|
||||
let func_name = get_usize_dependent_function_name(ctx, "nac3_str_eq");
|
||||
|
||||
let func = ctx.module.get_function(&func_name).unwrap_or_else(|| {
|
||||
ctx.module.add_function(
|
||||
&func_name,
|
||||
llvm_i1.fn_type(
|
||||
&[
|
||||
str1_ptr.get_type().into(),
|
||||
str1_len.get_type().into(),
|
||||
str2_ptr.get_type().into(),
|
||||
str2_len.get_type().into(),
|
||||
],
|
||||
false,
|
||||
),
|
||||
None,
|
||||
)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
func,
|
||||
&[str1_ptr.into(), str1_len.into(), str2_ptr.into(), str2_len.into()],
|
||||
"str_eq_call",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
26
nac3core/src/codegen/irrt/test.rs
Normal file
@ -0,0 +1,26 @@
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{path::Path, process::Command};
|
||||
|
||||
#[test]
|
||||
fn run_irrt_test() {
|
||||
assert!(
|
||||
cfg!(feature = "test"),
|
||||
"Please do `cargo test -F test` to compile `irrt_test.out` and run test"
|
||||
);
|
||||
|
||||
let irrt_test_out_path = Path::new(concat!(env!("OUT_DIR"), "/irrt_test.out"));
|
||||
let output = Command::new(irrt_test_out_path.to_str().unwrap()).output().unwrap();
|
||||
|
||||
if !output.status.success() {
|
||||
eprintln!("irrt_test failed with status {}:", output.status);
|
||||
eprintln!("====== stdout ======");
|
||||
eprintln!("{}", String::from_utf8(output.stdout).unwrap());
|
||||
eprintln!("====== stderr ======");
|
||||
eprintln!("{}", String::from_utf8(output.stderr).unwrap());
|
||||
eprintln!("====================");
|
||||
|
||||
panic!("irrt_test failed");
|
||||
}
|
||||
}
|
||||
}
|
@ -1,45 +1,86 @@
|
||||
use inkwell::{
|
||||
intrinsics::Intrinsic,
|
||||
types::AnyTypeEnum::IntType,
|
||||
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
};
|
||||
use crate::codegen::CodeGenContext;
|
||||
use inkwell::context::Context;
|
||||
use inkwell::intrinsics::Intrinsic;
|
||||
use inkwell::types::AnyTypeEnum::IntType;
|
||||
use inkwell::types::FloatType;
|
||||
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue, PointerValue};
|
||||
use inkwell::AddressSpace;
|
||||
use itertools::Either;
|
||||
|
||||
use super::CodeGenContext;
|
||||
/// Returns the string representation for the floating-point type `ft` when used in intrinsic
|
||||
/// functions.
|
||||
fn get_float_intrinsic_repr(ctx: &Context, ft: FloatType) -> &'static str {
|
||||
// Standard LLVM floating-point types
|
||||
if ft == ctx.f16_type() {
|
||||
return "f16";
|
||||
}
|
||||
if ft == ctx.f32_type() {
|
||||
return "f32";
|
||||
}
|
||||
if ft == ctx.f64_type() {
|
||||
return "f64";
|
||||
}
|
||||
if ft == ctx.f128_type() {
|
||||
return "f128";
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.va_start`](https://llvm.org/docs/LangRef.html#llvm-va-start-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_va_start<'ctx>(ctx: &CodeGenContext<'ctx, '_>, arglist: PointerValue<'ctx>) {
|
||||
const FN_NAME: &str = "llvm.va_start";
|
||||
// Non-standard floating-point types
|
||||
if ft == ctx.x86_f80_type() {
|
||||
return "f80";
|
||||
}
|
||||
if ft == ctx.ppc_f128_type() {
|
||||
return "ppcf128";
|
||||
}
|
||||
|
||||
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_p0i8.into()], false);
|
||||
|
||||
ctx.module.add_function(FN_NAME, fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder.build_call(intrinsic_fn, &[arglist.into()], "").unwrap();
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.va_end`](https://llvm.org/docs/LangRef.html#llvm-va-end-intrinsic)
|
||||
/// Invokes the [`llvm.lifetime.start`](https://releases.llvm.org/14.0.0/docs/LangRef.html#llvm-lifetime-start-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_va_end<'ctx>(ctx: &CodeGenContext<'ctx, '_>, arglist: PointerValue<'ctx>) {
|
||||
const FN_NAME: &str = "llvm.va_end";
|
||||
|
||||
pub fn call_lifetime_start<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
size: IntValue<'ctx>,
|
||||
ptr: PointerValue<'ctx>,
|
||||
) {
|
||||
const FN_NAME: &str = "llvm.lifetime.start";
|
||||
// NOTE: inkwell temporary workaround, see [`call_stackrestore`] for details
|
||||
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_p0i8.into()], false);
|
||||
let llvm_i64 = ctx.ctx.i64_type();
|
||||
let llvm_p0i8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_i64.into(), llvm_p0i8.into()], false);
|
||||
|
||||
ctx.module.add_function(FN_NAME, fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder.build_call(intrinsic_fn, &[arglist.into()], "").unwrap();
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[size.into(), ptr.into()], "")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.lifetime.end`](https://releases.llvm.org/14.0.0/docs/LangRef.html#llvm-lifetime-end-intrinsic)
|
||||
/// intrinsic.
|
||||
pub fn call_lifetime_end<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
size: IntValue<'ctx>,
|
||||
ptr: PointerValue<'ctx>,
|
||||
) {
|
||||
const FN_NAME: &str = "llvm.lifetime.end";
|
||||
// NOTE: inkwell temporary workaround, see [`call_stackrestore`] for details
|
||||
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i64 = ctx.ctx.i64_type();
|
||||
let llvm_p0i8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
let fn_type = llvm_void.fn_type(&[llvm_i64.into(), llvm_p0i8.into()], false);
|
||||
|
||||
ctx.module.add_function(FN_NAME, fn_type, None)
|
||||
});
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[size.into(), ptr.into()], "")
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.stacksave`](https://llvm.org/docs/LangRef.html#llvm-stacksave-intrinsic)
|
||||
@ -156,7 +197,7 @@ pub fn call_memcpy_generic<'ctx>(
|
||||
dest
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bit_cast(dest, llvm_p0i8, "")
|
||||
.build_bitcast(dest, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
@ -164,7 +205,7 @@ pub fn call_memcpy_generic<'ctx>(
|
||||
src
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bit_cast(src, llvm_p0i8, "")
|
||||
.build_bitcast(src, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
@ -172,58 +213,14 @@ pub fn call_memcpy_generic<'ctx>(
|
||||
call_memcpy(ctx, dest, src, len, is_volatile);
|
||||
}
|
||||
|
||||
/// Invokes the `llvm.memcpy` intrinsic.
|
||||
///
|
||||
/// Unlike [`call_memcpy`], this function accepts any type of pointer value. If `dest` or `src` is
|
||||
/// not a pointer to an integer, the pointer(s) will be cast to `i8*` before invoking `memcpy`.
|
||||
/// Moreover, `len` now refers to the number of elements to copy (rather than number of bytes to
|
||||
/// copy).
|
||||
pub fn call_memcpy_generic_array<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dest: PointerValue<'ctx>,
|
||||
src: PointerValue<'ctx>,
|
||||
len: IntValue<'ctx>,
|
||||
is_volatile: IntValue<'ctx>,
|
||||
) {
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let llvm_sizeof_expr_t = llvm_i8.size_of().get_type();
|
||||
|
||||
let dest_elem_t = dest.get_type().get_element_type();
|
||||
let src_elem_t = src.get_type().get_element_type();
|
||||
|
||||
let dest = if matches!(dest_elem_t, IntType(t) if t.get_bit_width() == 8) {
|
||||
dest
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bit_cast(dest, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
let src = if matches!(src_elem_t, IntType(t) if t.get_bit_width() == 8) {
|
||||
src
|
||||
} else {
|
||||
ctx.builder
|
||||
.build_bit_cast(src, llvm_p0i8, "")
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let len = ctx.builder.build_int_z_extend_or_bit_cast(len, llvm_sizeof_expr_t, "").unwrap();
|
||||
let len = ctx.builder.build_int_mul(len, src_elem_t.size_of().unwrap(), "").unwrap();
|
||||
|
||||
call_memcpy(ctx, dest, src, len, is_volatile);
|
||||
}
|
||||
|
||||
/// Macro to find an LLVM intrinsic and generate the corresponding build call (i.e. the body of an LLVM intrinsic wrapper function)
|
||||
///
|
||||
/// Arguments:
|
||||
/// * `$ctx:ident`: Reference to the current Code Generation Context
|
||||
/// * `$name:ident`: Optional name to be assigned to the llvm build call (Option<&str>)
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function
|
||||
/// * `$map_fn:ident`: Mapping function to be applied on `BasicValue` (`BasicValue` -> Function Return Type).
|
||||
/// Use `BasicValueEnum::into_int_value` for Integer return type and
|
||||
/// `BasicValueEnum::into_float_value` for Float return type
|
||||
/// * `$map_fn:ident`: Mapping function to be applied on `BasicValue` (`BasicValue` -> Function Return Type)
|
||||
/// Use `BasicValueEnum::into_int_value` for Integer return type and `BasicValueEnum::into_float_value` for Float return type
|
||||
/// * `$llvm_ty:ident`: Type of first operand
|
||||
/// * `,($val:ident)*`: Comma separated list of operands
|
||||
macro_rules! generate_llvm_intrinsic_fn_body {
|
||||
@ -239,8 +236,8 @@ macro_rules! generate_llvm_intrinsic_fn_body {
|
||||
/// Arguments:
|
||||
/// * `float/int`: Indicates the return and argument type of the function
|
||||
/// * `$fn_name:ident`: The identifier of the rust function to be generated
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function.
|
||||
/// Omit "llvm." prefix from the function name i.e. use "ceil" instead of "llvm.ceil"
|
||||
/// * `$llvm_name:literal`: Name of underlying llvm intrinsic function
|
||||
/// Omit "llvm." prefix from the function name i.e. use "ceil" instead of "llvm.ceil"
|
||||
/// * `$val:ident`: The operand for unary operations
|
||||
/// * `$val1:ident`, `$val2:ident`: The operands for binary operations
|
||||
macro_rules! generate_llvm_intrinsic_fn {
|
||||
@ -357,25 +354,3 @@ pub fn call_float_powi<'ctx>(
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Invokes the [`llvm.ctpop`](https://llvm.org/docs/LangRef.html#llvm-ctpop-intrinsic) intrinsic.
|
||||
pub fn call_int_ctpop<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
src: IntValue<'ctx>,
|
||||
name: Option<&str>,
|
||||
) -> IntValue<'ctx> {
|
||||
const FN_NAME: &str = "llvm.ctpop";
|
||||
|
||||
let llvm_src_t = src.get_type();
|
||||
|
||||
let intrinsic_fn = Intrinsic::find(FN_NAME)
|
||||
.and_then(|intrinsic| intrinsic.get_declaration(&ctx.module, &[llvm_src_t.into()]))
|
||||
.unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_call(intrinsic_fn, &[src.into()], name.unwrap_or_default())
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap()
|
||||
}
|
||||
|
@ -1,13 +1,13 @@
|
||||
use std::{
|
||||
cell::OnceCell,
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
use crate::{
|
||||
codegen::classes::{ListType, NDArrayType, ProxyType, RangeType},
|
||||
symbol_resolver::{StaticValue, SymbolResolver},
|
||||
toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, TopLevelContext, TopLevelDef},
|
||||
typecheck::{
|
||||
type_inferencer::{CodeLocation, PrimitiveStore},
|
||||
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
|
||||
},
|
||||
thread,
|
||||
};
|
||||
|
||||
use classes::NpArrayType;
|
||||
use crossbeam::channel::{unbounded, Receiver, Sender};
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
@ -20,32 +20,22 @@ use inkwell::{
|
||||
module::Module,
|
||||
passes::PassBuilderOptions,
|
||||
targets::{CodeModel, RelocMode, Target, TargetMachine, TargetTriple},
|
||||
types::{AnyType, BasicType, BasicTypeEnum, IntType},
|
||||
types::{AnyType, BasicType, BasicTypeEnum},
|
||||
values::{BasicValueEnum, FunctionValue, IntValue, PhiValue, PointerValue},
|
||||
AddressSpace, IntPredicate, OptimizationLevel,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
use parking_lot::{Condvar, Mutex};
|
||||
|
||||
use nac3parser::ast::{Location, Stmt, StrRef};
|
||||
|
||||
use crate::{
|
||||
symbol_resolver::{StaticValue, SymbolResolver},
|
||||
toplevel::{
|
||||
helper::{extract_ndims, PrimDef},
|
||||
numpy::unpack_ndarray_var_tys,
|
||||
TopLevelContext, TopLevelDef,
|
||||
},
|
||||
typecheck::{
|
||||
type_inferencer::{CodeLocation, PrimitiveStore},
|
||||
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
|
||||
},
|
||||
use parking_lot::{Condvar, Mutex};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use concrete_type::{ConcreteType, ConcreteTypeEnum, ConcreteTypeStore};
|
||||
pub use generator::{CodeGenerator, DefaultCodeGenerator};
|
||||
use types::{ndarray::NDArrayType, ListType, ProxyType, RangeType, TupleType};
|
||||
use std::thread;
|
||||
|
||||
pub mod builtin_fns;
|
||||
pub mod classes;
|
||||
pub mod concrete_type;
|
||||
pub mod expr;
|
||||
pub mod extern_fns;
|
||||
@ -54,27 +44,13 @@ pub mod irrt;
|
||||
pub mod llvm_intrinsics;
|
||||
pub mod numpy;
|
||||
pub mod stmt;
|
||||
pub mod types;
|
||||
pub mod values;
|
||||
pub mod structure;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
mod macros {
|
||||
/// Codegen-variant of [`std::unreachable`] which accepts an instance of [`CodeGenContext`] as
|
||||
/// its first argument to provide Python source information to indicate the codegen location
|
||||
/// causing the assertion.
|
||||
macro_rules! codegen_unreachable {
|
||||
($ctx:expr $(,)?) => {
|
||||
std::unreachable!("unreachable code while processing {}", &$ctx.current_loc)
|
||||
};
|
||||
($ctx:expr, $($arg:tt)*) => {
|
||||
std::unreachable!("unreachable code while processing {}: {}", &$ctx.current_loc, std::format!("{}", std::format_args!($($arg)+)))
|
||||
};
|
||||
}
|
||||
|
||||
pub(crate) use codegen_unreachable;
|
||||
}
|
||||
use concrete_type::{ConcreteType, ConcreteTypeEnum, ConcreteTypeStore};
|
||||
pub use generator::{CodeGenerator, DefaultCodeGenerator};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct StaticValueStore {
|
||||
@ -94,16 +70,6 @@ pub struct CodeGenLLVMOptions {
|
||||
pub target: CodeGenTargetMachineOptions,
|
||||
}
|
||||
|
||||
impl CodeGenLLVMOptions {
|
||||
/// Creates a [`TargetMachine`] using the target options specified by this struct.
|
||||
///
|
||||
/// See [`Target::create_target_machine`].
|
||||
#[must_use]
|
||||
pub fn create_target_machine(&self) -> Option<TargetMachine> {
|
||||
self.target.create_target_machine(self.opt_level)
|
||||
}
|
||||
}
|
||||
|
||||
/// Additional options for code generation for the target machine.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct CodeGenTargetMachineOptions {
|
||||
@ -227,33 +193,14 @@ pub struct CodeGenContext<'ctx, 'a> {
|
||||
|
||||
/// The current source location.
|
||||
pub current_loc: Location,
|
||||
|
||||
/// The cached type of `size_t`.
|
||||
llvm_usize: OnceCell<IntType<'ctx>>,
|
||||
}
|
||||
|
||||
impl<'ctx> CodeGenContext<'ctx, '_> {
|
||||
impl<'ctx, 'a> CodeGenContext<'ctx, 'a> {
|
||||
/// Whether the [current basic block][Builder::get_insert_block] referenced by `builder`
|
||||
/// contains a [terminator statement][BasicBlock::get_terminator].
|
||||
pub fn is_terminated(&self) -> bool {
|
||||
self.builder.get_insert_block().and_then(BasicBlock::get_terminator).is_some()
|
||||
}
|
||||
|
||||
/// Returns a [`IntType`] representing `size_t` for the compilation target as specified by
|
||||
/// [`self.registry`][WorkerRegistry].
|
||||
pub fn get_size_type(&self) -> IntType<'ctx> {
|
||||
*self.llvm_usize.get_or_init(|| {
|
||||
self.ctx.ptr_sized_int_type(
|
||||
&self
|
||||
.registry
|
||||
.llvm_options
|
||||
.create_target_machine()
|
||||
.map(|tm| tm.get_target_data())
|
||||
.unwrap(),
|
||||
None,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type Fp = Box<dyn Fn(&Module) + Send + Sync>;
|
||||
@ -393,10 +340,6 @@ impl WorkerRegistry {
|
||||
let mut builder = context.create_builder();
|
||||
let mut module = context.create_module(generator.get_name());
|
||||
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
|
||||
module.add_basic_value_flag(
|
||||
"Debug Info Version",
|
||||
inkwell::module::FlagBehavior::Warning,
|
||||
@ -420,10 +363,6 @@ impl WorkerRegistry {
|
||||
errors.insert(e);
|
||||
// create a new empty module just to continue codegen and collect errors
|
||||
module = context.create_module(&format!("{}_recover", generator.get_name()));
|
||||
|
||||
let target_machine = self.llvm_options.create_target_machine().unwrap();
|
||||
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
|
||||
module.set_triple(&target_machine.get_triple());
|
||||
}
|
||||
}
|
||||
*self.task_count.lock() -= 1;
|
||||
@ -489,7 +428,7 @@ pub struct CodeGenTask {
|
||||
fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
ctx: &'ctx Context,
|
||||
module: &Module<'ctx>,
|
||||
generator: &G,
|
||||
generator: &mut G,
|
||||
unifier: &mut Unifier,
|
||||
top_level: &TopLevelContext,
|
||||
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
|
||||
@ -501,38 +440,6 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
type_cache.get(&unifier.get_representative(ty)).copied().unwrap_or_else(|| {
|
||||
let ty_enum = unifier.get_ty(ty);
|
||||
let result = match &*ty_enum {
|
||||
TModule {module_id, attributes} => {
|
||||
let top_level_defs = top_level.definitions.read();
|
||||
let definition = top_level_defs.get(module_id.0).unwrap();
|
||||
let TopLevelDef::Module { name, attributes: attribute_fields, .. } = &*definition.read() else {
|
||||
unreachable!()
|
||||
};
|
||||
let ty: BasicTypeEnum<'_> = if let Some(t) = module.get_struct_type(&name.to_string()) {
|
||||
t.ptr_type(AddressSpace::default()).into()
|
||||
} else {
|
||||
let struct_type = ctx.opaque_struct_type(&name.to_string());
|
||||
type_cache.insert(
|
||||
unifier.get_representative(ty),
|
||||
struct_type.ptr_type(AddressSpace::default()).into(),
|
||||
);
|
||||
let module_fields: Vec<BasicTypeEnum<'_>> = attribute_fields.iter()
|
||||
.map(|f| {
|
||||
get_llvm_type(
|
||||
ctx,
|
||||
module,
|
||||
generator,
|
||||
unifier,
|
||||
top_level,
|
||||
type_cache,
|
||||
attributes[&f.0].0,
|
||||
)
|
||||
})
|
||||
.collect_vec();
|
||||
struct_type.set_body(&module_fields, false);
|
||||
struct_type.ptr_type(AddressSpace::default()).into()
|
||||
};
|
||||
return ty;
|
||||
},
|
||||
TObj { obj_id, fields, .. } => {
|
||||
// check to avoid treating non-class primitives as classes
|
||||
if PrimDef::contains_id(*obj_id) {
|
||||
@ -562,17 +469,23 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
*params.iter().next().unwrap().1,
|
||||
);
|
||||
|
||||
ListType::new_with_generator(generator, ctx, element_type).as_base_type().into()
|
||||
ListType::new(generator, ctx, element_type).as_base_type().into()
|
||||
}
|
||||
|
||||
TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
let (dtype, ndims) = unpack_ndarray_var_tys(unifier, ty);
|
||||
let ndims = extract_ndims(unifier, ndims);
|
||||
let (dtype, _) = unpack_ndarray_var_tys(unifier, ty);
|
||||
let element_type = get_llvm_type(
|
||||
ctx, module, generator, unifier, top_level, type_cache, dtype,
|
||||
);
|
||||
|
||||
NDArrayType::new_with_generator(generator, ctx, element_type, ndims).as_base_type().into()
|
||||
let ndarray_ty = NpArrayType {
|
||||
size_type: generator.get_size_type(ctx),
|
||||
elem_type: element_type,
|
||||
};
|
||||
ndarray_ty
|
||||
.get_struct_type(ctx)
|
||||
.ptr_type(AddressSpace::default())
|
||||
.as_basic_type_enum()
|
||||
}
|
||||
|
||||
_ => unreachable!(
|
||||
@ -616,17 +529,15 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
};
|
||||
return ty;
|
||||
}
|
||||
TTuple { ty, is_vararg_ctx } => {
|
||||
TTuple { ty } => {
|
||||
// a struct with fields in the order present in the tuple
|
||||
assert!(!is_vararg_ctx, "Tuples in vararg context must be instantiated with the correct number of arguments before calling get_llvm_type");
|
||||
|
||||
let fields = ty
|
||||
.iter()
|
||||
.map(|ty| {
|
||||
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, *ty)
|
||||
})
|
||||
.collect_vec();
|
||||
TupleType::new_with_generator(generator, ctx, &fields).as_base_type().into()
|
||||
ctx.struct_type(&fields, false).into()
|
||||
}
|
||||
TVirtual { .. } => unimplemented!(),
|
||||
_ => unreachable!("{}", ty_enum.get_type_name()),
|
||||
@ -649,7 +560,7 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
ctx: &'ctx Context,
|
||||
module: &Module<'ctx>,
|
||||
generator: &G,
|
||||
generator: &mut G,
|
||||
unifier: &mut Unifier,
|
||||
top_level: &TopLevelContext,
|
||||
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
|
||||
@ -658,11 +569,11 @@ fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
|
||||
) -> BasicTypeEnum<'ctx> {
|
||||
// If the type is used in the definition of a function, return `i1` instead of `i8` for ABI
|
||||
// consistency.
|
||||
if unifier.unioned(ty, primitives.bool) {
|
||||
return if unifier.unioned(ty, primitives.bool) {
|
||||
ctx.bool_type().into()
|
||||
} else {
|
||||
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, ty)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Whether `sret` is needed for a return value with type `ty`.
|
||||
@ -687,40 +598,6 @@ fn need_sret(ty: BasicTypeEnum) -> bool {
|
||||
need_sret_impl(ty, true)
|
||||
}
|
||||
|
||||
/// Returns the [`BasicTypeEnum`] representing a `va_list` struct for variadic arguments.
|
||||
fn get_llvm_valist_type<'ctx>(ctx: &'ctx Context, triple: &TargetTriple) -> BasicTypeEnum<'ctx> {
|
||||
let triple = TargetMachine::normalize_triple(triple);
|
||||
let triple = triple.as_str().to_str().unwrap();
|
||||
let arch = triple.split('-').next().unwrap();
|
||||
|
||||
let llvm_pi8 = ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
|
||||
// Referenced from parseArch() in llvm/lib/Support/Triple.cpp
|
||||
match arch {
|
||||
"i386" | "i486" | "i586" | "i686" | "riscv32" => {
|
||||
ctx.i8_type().ptr_type(AddressSpace::default()).into()
|
||||
}
|
||||
"amd64" | "x86_64" | "x86_64h" => {
|
||||
let llvm_i32 = ctx.i32_type();
|
||||
|
||||
let va_list_tag = ctx.opaque_struct_type("struct.__va_list_tag");
|
||||
va_list_tag.set_body(
|
||||
&[llvm_i32.into(), llvm_i32.into(), llvm_pi8.into(), llvm_pi8.into()],
|
||||
false,
|
||||
);
|
||||
va_list_tag.into()
|
||||
}
|
||||
"armv7" => {
|
||||
let va_list = ctx.opaque_struct_type("struct.__va_list");
|
||||
va_list.set_body(&[llvm_pi8.into()], false);
|
||||
va_list.into()
|
||||
}
|
||||
triple => {
|
||||
todo!("Unsupported platform for varargs: {triple}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation for generating LLVM IR for a function.
|
||||
pub fn gen_func_impl<
|
||||
'ctx,
|
||||
@ -832,7 +709,6 @@ pub fn gen_func_impl<
|
||||
name: arg.name,
|
||||
ty: task.store.to_unifier_type(&mut unifier, &primitives, arg.ty, &mut cache),
|
||||
default_value: arg.default_value.clone(),
|
||||
is_vararg: arg.is_vararg,
|
||||
})
|
||||
.collect_vec(),
|
||||
task.store.to_unifier_type(&mut unifier, &primitives, *ret, &mut cache),
|
||||
@ -855,10 +731,7 @@ pub fn gen_func_impl<
|
||||
let has_sret = ret_type.map_or(false, |ty| need_sret(ty));
|
||||
let mut params = args
|
||||
.iter()
|
||||
.filter(|arg| !arg.is_vararg)
|
||||
.map(|arg| {
|
||||
debug_assert!(!arg.is_vararg);
|
||||
|
||||
get_llvm_abi_type(
|
||||
context,
|
||||
&module,
|
||||
@ -877,12 +750,9 @@ pub fn gen_func_impl<
|
||||
params.insert(0, ret_type.unwrap().ptr_type(AddressSpace::default()).into());
|
||||
}
|
||||
|
||||
debug_assert!(matches!(args.iter().filter(|arg| arg.is_vararg).count(), 0..=1));
|
||||
let vararg_arg = args.iter().find(|arg| arg.is_vararg);
|
||||
|
||||
let fn_type = match ret_type {
|
||||
Some(ret_type) if !has_sret => ret_type.fn_type(¶ms, vararg_arg.is_some()),
|
||||
_ => context.void_type().fn_type(¶ms, vararg_arg.is_some()),
|
||||
Some(ret_type) if !has_sret => ret_type.fn_type(¶ms, false),
|
||||
_ => context.void_type().fn_type(¶ms, false),
|
||||
};
|
||||
|
||||
let symbol = &task.symbol_name;
|
||||
@ -910,10 +780,9 @@ pub fn gen_func_impl<
|
||||
builder.position_at_end(init_bb);
|
||||
let body_bb = context.append_basic_block(fn_val, "body");
|
||||
|
||||
// Store non-vararg argument values into local variables
|
||||
let mut var_assignment = HashMap::new();
|
||||
let offset = u32::from(has_sret);
|
||||
for (n, arg) in args.iter().enumerate().filter(|(_, arg)| !arg.is_vararg) {
|
||||
for (n, arg) in args.iter().enumerate() {
|
||||
let param = fn_val.get_nth_param((n as u32) + offset).unwrap();
|
||||
let local_type = get_llvm_type(
|
||||
context,
|
||||
@ -946,8 +815,6 @@ pub fn gen_func_impl<
|
||||
var_assignment.insert(arg.name, (alloca, None, 0));
|
||||
}
|
||||
|
||||
// TODO: Save vararg parameters as list
|
||||
|
||||
let return_buffer = if has_sret {
|
||||
Some(fn_val.get_nth_param(0).unwrap().into_pointer_value())
|
||||
} else {
|
||||
@ -1039,20 +906,8 @@ pub fn gen_func_impl<
|
||||
need_sret: has_sret,
|
||||
current_loc: Location::default(),
|
||||
debug_info: (dibuilder, compile_unit, func_scope.as_debug_info_scope()),
|
||||
llvm_usize: OnceCell::default(),
|
||||
};
|
||||
|
||||
let target_llvm_usize = context.ptr_sized_int_type(
|
||||
®istry.llvm_options.create_target_machine().map(|tm| tm.get_target_data()).unwrap(),
|
||||
None,
|
||||
);
|
||||
let generator_llvm_usize = generator.get_size_type(context);
|
||||
assert_eq!(
|
||||
generator_llvm_usize,
|
||||
target_llvm_usize,
|
||||
"CodeGenerator (size_t = {generator_llvm_usize}) is not compatible with CodeGen Target (size_t = {target_llvm_usize})",
|
||||
);
|
||||
|
||||
let loc = code_gen_context.debug_info.0.create_debug_location(
|
||||
context,
|
||||
row as u32,
|
||||
@ -1182,112 +1037,3 @@ fn gen_in_range_check<'ctx>(
|
||||
|
||||
ctx.builder.build_int_compare(IntPredicate::SLT, lo, hi, "cmp").unwrap()
|
||||
}
|
||||
|
||||
/// Returns the internal name for the `va_count` argument, used to indicate the number of arguments
|
||||
/// passed to the variadic function.
|
||||
fn get_va_count_arg_name(arg_name: StrRef) -> StrRef {
|
||||
format!("__{}_va_count", &arg_name).into()
|
||||
}
|
||||
|
||||
/// Returns the alignment of the type.
|
||||
///
|
||||
/// This is necessary as `get_alignment` is not implemented as part of [`BasicType`].
|
||||
pub fn get_type_alignment<'ctx>(ty: impl Into<BasicTypeEnum<'ctx>>) -> IntValue<'ctx> {
|
||||
match ty.into() {
|
||||
BasicTypeEnum::ArrayType(ty) => ty.get_alignment(),
|
||||
BasicTypeEnum::FloatType(ty) => ty.get_alignment(),
|
||||
BasicTypeEnum::IntType(ty) => ty.get_alignment(),
|
||||
BasicTypeEnum::PointerType(ty) => ty.get_alignment(),
|
||||
BasicTypeEnum::StructType(ty) => ty.get_alignment(),
|
||||
BasicTypeEnum::VectorType(ty) => ty.get_alignment(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts an `alloca` instruction with allocation `size` given in bytes and the alignment of the
|
||||
/// given type.
|
||||
///
|
||||
/// The returned [`PointerValue`] will have a type of `i8*`, a size of at least `size`, and will be
|
||||
/// aligned with the alignment of `align_ty`.
|
||||
pub fn type_aligned_alloca<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
align_ty: impl Into<BasicTypeEnum<'ctx>>,
|
||||
size: IntValue<'ctx>,
|
||||
name: Option<&str>,
|
||||
) -> PointerValue<'ctx> {
|
||||
/// Round `val` up to the next multiple of `power_of_two`.
|
||||
fn round_up<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
val: IntValue<'ctx>,
|
||||
power_of_two: IntValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
debug_assert_eq!(
|
||||
val.get_type().get_bit_width(),
|
||||
power_of_two.get_type().get_bit_width(),
|
||||
"`val` ({}) and `power_of_two` ({}) must be the same type",
|
||||
val.get_type(),
|
||||
power_of_two.get_type(),
|
||||
);
|
||||
|
||||
let llvm_val_t = val.get_type();
|
||||
|
||||
let max_rem =
|
||||
ctx.builder.build_int_sub(power_of_two, llvm_val_t.const_int(1, false), "").unwrap();
|
||||
ctx.builder
|
||||
.build_and(
|
||||
ctx.builder.build_int_add(val, max_rem, "").unwrap(),
|
||||
ctx.builder.build_not(max_rem, "").unwrap(),
|
||||
"",
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
let llvm_i8 = ctx.ctx.i8_type();
|
||||
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let align_ty = align_ty.into();
|
||||
|
||||
let size = ctx.builder.build_int_truncate_or_bit_cast(size, llvm_usize, "").unwrap();
|
||||
|
||||
debug_assert_eq!(
|
||||
size.get_type().get_bit_width(),
|
||||
llvm_usize.get_bit_width(),
|
||||
"Expected size_t ({}) for parameter `size` of `aligned_alloca`, got {}",
|
||||
llvm_usize,
|
||||
size.get_type(),
|
||||
);
|
||||
|
||||
let alignment = get_type_alignment(align_ty);
|
||||
let alignment = ctx.builder.build_int_truncate_or_bit_cast(alignment, llvm_usize, "").unwrap();
|
||||
|
||||
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
|
||||
let alignment_bitcount = llvm_intrinsics::call_int_ctpop(ctx, alignment, None);
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::EQ,
|
||||
alignment_bitcount,
|
||||
alignment_bitcount.get_type().const_int(1, false),
|
||||
"",
|
||||
)
|
||||
.unwrap(),
|
||||
"0:AssertionError",
|
||||
"Expected power-of-two alignment for aligned_alloca, got {0}",
|
||||
[Some(alignment), None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
}
|
||||
|
||||
let buffer_size = round_up(ctx, size, alignment);
|
||||
let aligned_slices = ctx.builder.build_int_unsigned_div(buffer_size, alignment, "").unwrap();
|
||||
|
||||
// Just to be absolutely sure, alloca in [i8 x alignment] slices
|
||||
let buffer = ctx.builder.build_array_alloca(align_ty, aligned_slices, "").unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_bit_cast(buffer, llvm_pi8, name.unwrap_or_default())
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap()
|
||||
}
|
||||
|
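The bit trick inside `round_up` is easier to see on plain integers. Below is a minimal standalone sketch (ordinary Rust, no LLVM involved) of the same arithmetic; the names are illustrative, and `align` must be a power of two, matching the ctpop assertion that `type_aligned_alloca` emits at `OptimizationLevel::None`.

fn round_up(val: u64, align: u64) -> u64 {
    // `align` must be a power of two so that `align - 1` is an all-ones low-bit mask.
    debug_assert!(align.is_power_of_two());
    let max_rem = align - 1;
    // Adding `align - 1` and then clearing the low bits rounds up to the next multiple of `align`.
    (val + max_rem) & !max_rem
}

fn main() {
    assert_eq!(round_up(13, 8), 16); // 13 bytes at 8-byte alignment occupy 16 bytes
    assert_eq!(round_up(16, 8), 16); // already-aligned sizes are unchanged
    assert_eq!(round_up(13, 8) / 8, 2); // `aligned_slices`: two 8-byte slots get alloca'd
}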
File diff suppressed because it is too large
File diff suppressed because it is too large
318
nac3core/src/codegen/structure.rs
Normal file
@ -0,0 +1,318 @@
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use inkwell::{
|
||||
context::Context,
|
||||
types::{BasicType, BasicTypeEnum, IntType, PointerType, StructType},
|
||||
values::{BasicValue, BasicValueEnum, IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use super::CodeGenContext;
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct FieldInfo {
|
||||
gep_index: u32,
|
||||
name: &'static str,
|
||||
}
|
||||
|
||||
impl FieldInfo {
|
||||
pub fn llvm_gep<'ctx>(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> PointerValue<'ctx> {
|
||||
let index_type = ctx.ctx.i32_type(); // TODO: I think I'm not supposed to *just* use i32 for GEP like that
|
||||
unsafe {
|
||||
ctx.builder
|
||||
.build_in_bounds_gep(
|
||||
struct_ptr,
|
||||
&[index_type.const_zero(), index_type.const_int(self.gep_index as u64, false)],
|
||||
self.name,
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn llvm_load<'ctx>(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> BasicValueEnum<'ctx> {
|
||||
// We will use `self.name` as the LLVM label for debugging purposes
|
||||
ctx.builder.build_load(self.llvm_gep(ctx, struct_ptr), self.name).unwrap()
|
||||
}
|
||||
|
||||
pub fn llvm_store<'ctx>(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
value: BasicValueEnum<'ctx>,
|
||||
) {
|
||||
ctx.builder.build_store(self.llvm_gep(ctx, struct_ptr), value).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Object<'ctx, T> {
|
||||
pub ty: T,
|
||||
pub ptr: PointerValue<'ctx>,
|
||||
}
|
||||
|
||||
pub struct Field<'ctx, T: CustomType<'ctx>> {
|
||||
pub info: FieldInfo,
|
||||
pub ty: T,
|
||||
_phantom: PhantomData<&'ctx ()>,
|
||||
}
|
||||
|
||||
pub struct FieldCreator<'ctx> {
|
||||
pub ctx: &'ctx Context,
|
||||
struct_name: &'ctx str,
|
||||
gep_index_counter: u32,
|
||||
fields: Vec<(FieldInfo, BasicTypeEnum<'ctx>)>,
|
||||
}
|
||||
|
||||
impl<'ctx> FieldCreator<'ctx> {
|
||||
pub fn new(ctx: &'ctx Context, struct_name: &'ctx str) -> Self {
|
||||
FieldCreator { ctx, struct_name, gep_index_counter: 0, fields: Vec::new() }
|
||||
}
|
||||
|
||||
fn next_gep_index(&mut self) -> u32 {
|
||||
let index = self.gep_index_counter;
|
||||
self.gep_index_counter += 1;
|
||||
index
|
||||
}
|
||||
|
||||
fn get_struct_field_types(&self) -> Vec<BasicTypeEnum<'ctx>> {
|
||||
self.fields.iter().map(|x| x.1.clone()).collect()
|
||||
}
|
||||
|
||||
pub fn add_field<T: CustomType<'ctx>>(&mut self, name: &'static str, ty: T) -> Field<'ctx, T> {
|
||||
let gep_index = self.next_gep_index();
|
||||
|
||||
let field_type = ty.llvm_basic_type_enum(self.ctx);
|
||||
let field_info = FieldInfo { gep_index, name };
|
||||
let field = Field { info: field_info, ty, _phantom: PhantomData };
|
||||
|
||||
self.fields.push((field_info.clone(), field_type));
|
||||
|
||||
field
|
||||
}
|
||||
|
||||
fn num_fields(&self) -> u32 {
|
||||
self.fields.len() as u32 // cast to u32 because that is what inkwell returns
|
||||
}
|
||||
}
|
||||
|
||||
pub trait CustomType<'ctx>: Clone {
|
||||
type Value;
|
||||
|
||||
fn llvm_basic_type_enum(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx>;
|
||||
|
||||
fn llvm_field_load(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> Self::Value;
|
||||
|
||||
fn llvm_field_store(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
value: &Self::Value,
|
||||
);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct IntType2<'ctx>(pub IntType<'ctx>);
|
||||
|
||||
impl<'ctx> CustomType<'ctx> for IntType2<'ctx> {
|
||||
type Value = IntValue<'ctx>;
|
||||
|
||||
fn llvm_basic_type_enum(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
|
||||
self.0.as_basic_type_enum()
|
||||
}
|
||||
|
||||
fn llvm_field_load(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> Self::Value {
|
||||
let int_value = field.llvm_load(ctx, struct_ptr).into_int_value();
|
||||
assert_eq!(int_value.get_type().get_bit_width(), self.0.get_bit_width());
|
||||
int_value
|
||||
}
|
||||
|
||||
fn llvm_field_store(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
int_value: &Self::Value,
|
||||
) {
|
||||
assert_eq!(int_value.get_type().get_bit_width(), self.0.get_bit_width());
|
||||
field.llvm_store(ctx, struct_ptr, int_value.as_basic_value_enum());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct PointerType2<'ctx>(pub PointerType<'ctx>);
|
||||
|
||||
impl<'ctx> CustomType<'ctx> for PointerType2<'ctx> {
|
||||
type Value = PointerValue<'ctx>;
|
||||
|
||||
fn llvm_basic_type_enum(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
|
||||
self.0.as_basic_type_enum()
|
||||
}
|
||||
|
||||
fn llvm_field_load(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> Self::Value {
|
||||
field.llvm_load(ctx, struct_ptr).into_pointer_value()
|
||||
}
|
||||
|
||||
fn llvm_field_store(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
pointer_value: &Self::Value,
|
||||
) {
|
||||
field.llvm_store(ctx, struct_ptr, pointer_value.as_basic_value_enum());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct PointingArrayType<'ctx, ElementType: CustomType<'ctx>> {
|
||||
pub element_type: ElementType,
|
||||
_phantom: PhantomData<&'ctx ()>,
|
||||
}
|
||||
|
||||
impl<'ctx, ElementType: CustomType<'ctx>> PointingArrayType<'ctx, ElementType> {
|
||||
pub fn new(element_type: ElementType) -> Self {
|
||||
PointingArrayType { element_type, _phantom: PhantomData }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx, Element: CustomType<'ctx>> CustomType<'ctx> for PointingArrayType<'ctx, Element> {
|
||||
type Value = Object<'ctx, Self>;
|
||||
|
||||
fn llvm_basic_type_enum(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
|
||||
// Element*
|
||||
self.element_type
|
||||
.llvm_basic_type_enum(ctx)
|
||||
.ptr_type(AddressSpace::default())
|
||||
.as_basic_type_enum()
|
||||
}
|
||||
|
||||
fn llvm_field_load(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
) -> Self::Value {
|
||||
// Remember that it is just a pointer
|
||||
Object { ty: self.clone(), ptr: field.llvm_load(ctx, struct_ptr).into_pointer_value() }
|
||||
}
|
||||
|
||||
fn llvm_field_store(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
field: FieldInfo,
|
||||
struct_ptr: PointerValue<'ctx>,
|
||||
value: &Self::Value,
|
||||
) {
|
||||
// Remember that it is just a pointer
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn check_basic_types_match<'ctx, A, B>(expected: A, got: B) -> Result<(), String>
|
||||
where
|
||||
A: BasicType<'ctx>,
|
||||
B: BasicType<'ctx>,
|
||||
{
|
||||
let expected = expected.as_basic_type_enum();
|
||||
let got = got.as_basic_type_enum();
|
||||
|
||||
// Put the type-specific checks here;
// otherwise there is only the generic fallback below, which reports on any kind of mismatch
|
||||
match (expected, got) {
|
||||
(BasicTypeEnum::IntType(expected), BasicTypeEnum::IntType(got)) => {
|
||||
if expected.get_bit_width() != got.get_bit_width() {
|
||||
return Err(format!(
|
||||
"Expected IntType ({expected}-bit(s)), got IntType ({got}-bit(s))"
|
||||
));
|
||||
}
|
||||
}
|
||||
(expected, got) => {
|
||||
if expected != got {
|
||||
return Err(format!("Expected {expected}, got {got}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub trait CustomStructType<'ctx> {
|
||||
type Fields;
|
||||
|
||||
fn llvm_struct_name() -> &'static str;
|
||||
|
||||
fn add_fields_to(&self, creator: &mut FieldCreator<'ctx>) -> Self::Fields;
|
||||
|
||||
fn fields(&self, ctx: &'ctx Context) -> Self::Fields {
|
||||
let mut creator = FieldCreator::new(ctx, Self::llvm_struct_name());
|
||||
let fields = self.add_fields_to(&mut creator);
|
||||
fields
|
||||
}
|
||||
|
||||
fn llvm_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
|
||||
let mut creator = FieldCreator::new(ctx, Self::llvm_struct_name());
|
||||
self.add_fields_to(&mut creator);
|
||||
|
||||
ctx.struct_type(&creator.get_struct_field_types(), false)
|
||||
}
|
||||
|
||||
fn check_struct_type(
|
||||
&self,
|
||||
ctx: &'ctx Context,
|
||||
scrutinee: StructType<'ctx>,
|
||||
) -> Result<(), String> {
|
||||
let mut creator = FieldCreator::new(ctx, Self::llvm_struct_name());
|
||||
self.add_fields_to(&mut creator);
|
||||
|
||||
// Check scrutinee's number of struct fields
|
||||
let expected_field_count = creator.num_fields();
|
||||
let got_field_count = scrutinee.count_fields();
|
||||
if got_field_count != expected_field_count {
|
||||
return Err(format!(
|
||||
"Expected {expected_count} field(s) in `{struct_name}` type, got {got_count}",
|
||||
struct_name = Self::llvm_struct_name(),
|
||||
expected_count = expected_field_count,
|
||||
got_count = got_field_count,
|
||||
));
|
||||
}
|
||||
|
||||
// Check the scrutinee's field types
|
||||
for (field_info, expected_field_ty) in creator.fields {
|
||||
let got_field_ty = scrutinee.get_field_type_at_index(field_info.gep_index).unwrap();
|
||||
|
||||
if let Err(field_err) = check_basic_types_match(expected_field_ty, got_field_ty) {
|
||||
return Err(format!(
|
||||
"Field GEP index {gep_index} does not match the expected type of ({struct_name}::{field_name}): {field_err}",
|
||||
gep_index = field_info.gep_index,
|
||||
struct_name = Self::llvm_struct_name(),
|
||||
field_name = field_info.name,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Done
|
||||
Ok(())
|
||||
}
|
||||
}
|
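As a usage illustration of `CustomStructType`, here is a minimal sketch that describes a hypothetical `{ i32 len, i8* data }` struct with `FieldCreator`. `SliceDescriptor`, `SliceFields` and the field names are invented for this example; `CustomStructType`, `Field`, `IntType2` and `PointerType2` are the items defined in this file, and the sketch assumes it lives in the same module (where `AddressSpace` is already imported).

/// Hypothetical descriptor for a `{ i32 len, i8* data }` struct.
struct SliceDescriptor;

struct SliceFields<'ctx> {
    pub len: Field<'ctx, IntType2<'ctx>>,
    pub data: Field<'ctx, PointerType2<'ctx>>,
}

impl<'ctx> CustomStructType<'ctx> for SliceDescriptor {
    type Fields = SliceFields<'ctx>;

    fn llvm_struct_name() -> &'static str {
        "Slice"
    }

    fn add_fields_to(&self, creator: &mut FieldCreator<'ctx>) -> Self::Fields {
        // GEP indices are handed out in declaration order: len -> 0, data -> 1.
        SliceFields {
            len: creator.add_field("len", IntType2(creator.ctx.i32_type())),
            data: creator.add_field(
                "data",
                PointerType2(creator.ctx.i8_type().ptr_type(AddressSpace::default())),
            ),
        }
    }
}

With this in place, `SliceDescriptor.llvm_struct_type(llvm_ctx)` yields the corresponding LLVM struct type, `SliceDescriptor.check_struct_type(llvm_ctx, some_struct_type)` validates an existing struct type against the layout, and `fields.len.ty.llvm_field_load(ctx, fields.len.info, struct_ptr)` loads the `len` field through a GEP.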
@ -1,41 +1,39 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use indexmap::IndexMap;
|
||||
use indoc::indoc;
|
||||
use inkwell::{
|
||||
targets::{InitializationConfig, Target},
|
||||
OptimizationLevel,
|
||||
};
|
||||
use nac3parser::{
|
||||
ast::{fold::Fold, FileName, StrRef},
|
||||
parser::parse_program,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
|
||||
use super::{
|
||||
concrete_type::ConcreteTypeStore,
|
||||
types::{ndarray::NDArrayType, ListType, ProxyType, RangeType},
|
||||
CodeGenContext, CodeGenLLVMOptions, CodeGenTargetMachineOptions, CodeGenTask, CodeGenerator,
|
||||
DefaultCodeGenerator, WithCall, WorkerRegistry,
|
||||
};
|
||||
use crate::{
|
||||
codegen::{
|
||||
classes::{ListType, NDArrayType, ProxyType, RangeType},
|
||||
concrete_type::ConcreteTypeStore,
|
||||
CodeGenContext, CodeGenLLVMOptions, CodeGenTargetMachineOptions, CodeGenTask,
|
||||
CodeGenerator, DefaultCodeGenerator, WithCall, WorkerRegistry,
|
||||
},
|
||||
symbol_resolver::{SymbolResolver, ValueEnum},
|
||||
toplevel::{
|
||||
composer::{ComposerConfig, TopLevelComposer},
|
||||
DefinitionId, FunInstance, TopLevelContext, TopLevelDef,
|
||||
},
|
||||
typecheck::{
|
||||
type_inferencer::{FunctionData, IdentifierInfo, Inferencer, PrimitiveStore},
|
||||
type_inferencer::{FunctionData, Inferencer, PrimitiveStore},
|
||||
typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap},
|
||||
},
|
||||
};
|
||||
use indexmap::IndexMap;
|
||||
use indoc::indoc;
|
||||
use inkwell::{
|
||||
targets::{InitializationConfig, Target},
|
||||
OptimizationLevel,
|
||||
};
|
||||
use nac3parser::ast::FileName;
|
||||
use nac3parser::{
|
||||
ast::{fold::Fold, StrRef},
|
||||
parser::parse_program,
|
||||
};
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
struct Resolver {
|
||||
id_to_type: HashMap<StrRef, Type>,
|
||||
id_to_def: RwLock<HashMap<StrRef, DefinitionId>>,
|
||||
class_names: HashMap<StrRef, Type>,
|
||||
}
|
||||
|
||||
impl Resolver {
|
||||
@ -66,7 +64,6 @@ impl SymbolResolver for Resolver {
|
||||
&self,
|
||||
_: StrRef,
|
||||
_: &mut CodeGenContext<'ctx, '_>,
|
||||
_: &mut dyn CodeGenerator,
|
||||
) -> Option<ValueEnum<'ctx>> {
|
||||
unimplemented!()
|
||||
}
|
||||
@ -97,32 +94,23 @@ fn test_primitives() {
|
||||
"};
|
||||
let statements = parse_program(source, FileName::default()).unwrap();
|
||||
|
||||
let context = inkwell::context::Context::create();
|
||||
let composer = TopLevelComposer::new(Vec::new(), Vec::new(), ComposerConfig::default(), 64).0;
|
||||
let composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 32).0;
|
||||
let mut unifier = composer.unifier.clone();
|
||||
let primitives = composer.primitives_ty;
|
||||
let top_level = Arc::new(composer.make_top_level_context());
|
||||
unifier.top_level = Some(top_level.clone());
|
||||
|
||||
let resolver =
|
||||
Arc::new(Resolver { id_to_type: HashMap::new(), id_to_def: RwLock::new(HashMap::new()) })
|
||||
as Arc<dyn SymbolResolver + Send + Sync>;
|
||||
let resolver = Arc::new(Resolver {
|
||||
id_to_type: HashMap::new(),
|
||||
id_to_def: RwLock::new(HashMap::new()),
|
||||
class_names: HashMap::default(),
|
||||
}) as Arc<dyn SymbolResolver + Send + Sync>;
|
||||
|
||||
let threads = vec![DefaultCodeGenerator::new("test".into(), context.i64_type()).into()];
|
||||
let threads = vec![DefaultCodeGenerator::new("test".into(), 32).into()];
|
||||
let signature = FunSignature {
|
||||
args: vec![
|
||||
FuncArg {
|
||||
name: "a".into(),
|
||||
ty: primitives.int32,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
FuncArg {
|
||||
name: "b".into(),
|
||||
ty: primitives.int32,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
},
|
||||
FuncArg { name: "a".into(), ty: primitives.int32, default_value: None },
|
||||
FuncArg { name: "b".into(), ty: primitives.int32, default_value: None },
|
||||
],
|
||||
ret: primitives.int32,
|
||||
vars: VarMap::new(),
|
||||
@ -140,8 +128,7 @@ fn test_primitives() {
|
||||
};
|
||||
let mut virtual_checks = Vec::new();
|
||||
let mut calls = HashMap::new();
|
||||
let mut identifiers: HashMap<_, _> =
|
||||
["a".into(), "b".into()].map(|id| (id, IdentifierInfo::default())).into();
|
||||
let mut identifiers: HashSet<_> = ["a".into(), "b".into()].into();
|
||||
let mut inferencer = Inferencer {
|
||||
top_level: &top_level,
|
||||
function_data: &mut function_data,
|
||||
@ -202,8 +189,6 @@ fn test_primitives() {
|
||||
let expected = indoc! {"
|
||||
; ModuleID = 'test'
|
||||
source_filename = \"test\"
|
||||
target datalayout = \"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128\"
|
||||
target triple = \"x86_64-unknown-linux-gnu\"
|
||||
|
||||
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
|
||||
define i32 @testing(i32 %0, i32 %1) local_unnamed_addr #0 !dbg !4 {
|
||||
@ -261,20 +246,14 @@ fn test_simple_call() {
|
||||
"};
|
||||
let statements_2 = parse_program(source_2, FileName::default()).unwrap();
|
||||
|
||||
let context = inkwell::context::Context::create();
|
||||
let composer = TopLevelComposer::new(Vec::new(), Vec::new(), ComposerConfig::default(), 64).0;
|
||||
let composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 32).0;
|
||||
let mut unifier = composer.unifier.clone();
|
||||
let primitives = composer.primitives_ty;
|
||||
let top_level = Arc::new(composer.make_top_level_context());
|
||||
unifier.top_level = Some(top_level.clone());
|
||||
|
||||
let signature = FunSignature {
|
||||
args: vec![FuncArg {
|
||||
name: "a".into(),
|
||||
ty: primitives.int32,
|
||||
default_value: None,
|
||||
is_vararg: false,
|
||||
}],
|
||||
args: vec![FuncArg { name: "a".into(), ty: primitives.int32, default_value: None }],
|
||||
ret: primitives.int32,
|
||||
vars: VarMap::new(),
|
||||
};
|
||||
@ -297,7 +276,11 @@ fn test_simple_call() {
|
||||
loc: None,
|
||||
})));
|
||||
|
||||
let resolver = Resolver { id_to_type: HashMap::new(), id_to_def: RwLock::new(HashMap::new()) };
|
||||
let resolver = Resolver {
|
||||
id_to_type: HashMap::new(),
|
||||
id_to_def: RwLock::new(HashMap::new()),
|
||||
class_names: HashMap::default(),
|
||||
};
|
||||
resolver.add_id_def("foo".into(), DefinitionId(foo_id));
|
||||
let resolver = Arc::new(resolver) as Arc<dyn SymbolResolver + Send + Sync>;
|
||||
|
||||
@ -309,7 +292,7 @@ fn test_simple_call() {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
let threads = vec![DefaultCodeGenerator::new("test".into(), context.i64_type()).into()];
|
||||
let threads = vec![DefaultCodeGenerator::new("test".into(), 32).into()];
|
||||
let mut function_data = FunctionData {
|
||||
resolver: resolver.clone(),
|
||||
bound_variables: Vec::new(),
|
||||
@ -317,8 +300,7 @@ fn test_simple_call() {
|
||||
};
|
||||
let mut virtual_checks = Vec::new();
|
||||
let mut calls = HashMap::new();
|
||||
let mut identifiers: HashMap<_, _> =
|
||||
["a".into(), "foo".into()].map(|id| (id, IdentifierInfo::default())).into();
|
||||
let mut identifiers: HashSet<_> = ["a".into(), "foo".into()].into();
|
||||
let mut inferencer = Inferencer {
|
||||
top_level: &top_level,
|
||||
function_data: &mut function_data,
|
||||
@ -386,8 +368,6 @@ fn test_simple_call() {
|
||||
let expected = indoc! {"
|
||||
; ModuleID = 'test'
|
||||
source_filename = \"test\"
|
||||
target datalayout = \"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128\"
|
||||
target triple = \"x86_64-unknown-linux-gnu\"
|
||||
|
||||
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
|
||||
define i32 @testing(i32 %0) local_unnamed_addr #0 !dbg !5 {
|
||||
@ -441,13 +421,13 @@ fn test_simple_call() {
|
||||
#[test]
|
||||
fn test_classes_list_type_new() {
|
||||
let ctx = inkwell::context::Context::create();
|
||||
let generator = DefaultCodeGenerator::new(String::new(), ctx.i64_type());
|
||||
let generator = DefaultCodeGenerator::new(String::new(), 64);
|
||||
|
||||
let llvm_i32 = ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(&ctx);
|
||||
|
||||
let llvm_list = ListType::new_with_generator(&generator, &ctx, llvm_i32.into());
|
||||
assert!(ListType::is_representable(llvm_list.as_base_type(), llvm_usize).is_ok());
|
||||
let llvm_list = ListType::new(&generator, &ctx, llvm_i32.into());
|
||||
assert!(ListType::is_type(llvm_list.as_base_type(), llvm_usize).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -455,17 +435,18 @@ fn test_classes_range_type_new() {
|
||||
let ctx = inkwell::context::Context::create();
|
||||
|
||||
let llvm_range = RangeType::new(&ctx);
|
||||
assert!(RangeType::is_representable(llvm_range.as_base_type()).is_ok());
|
||||
assert!(RangeType::is_type(llvm_range.as_base_type()).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_classes_ndarray_type_new() {
|
||||
let ctx = inkwell::context::Context::create();
|
||||
let generator = DefaultCodeGenerator::new(String::new(), ctx.i64_type());
|
||||
|
||||
let llvm_i32 = ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(&ctx);
|
||||
|
||||
let llvm_ndarray = NDArrayType::new_with_generator(&generator, &ctx, llvm_i32.into(), 2);
|
||||
assert!(NDArrayType::is_representable(llvm_ndarray.as_base_type(), llvm_usize).is_ok());
|
||||
}
|
||||
// #[test]
|
||||
// fn test_classes_ndarray_type_new() {
|
||||
// let ctx = inkwell::context::Context::create();
|
||||
// let generator = DefaultCodeGenerator::new(String::new(), 64);
|
||||
//
|
||||
// let llvm_i32 = ctx.i32_type();
|
||||
// let llvm_usize = generator.get_size_type(&ctx);
|
||||
//
|
||||
// let llvm_ndarray = NDArrayType::new(&generator, &ctx, llvm_i32.into());
|
||||
// assert!(NDArrayType::is_type(llvm_ndarray.as_base_type(), llvm_usize).is_ok());
|
||||
// }
|
||||
//
|
||||
|
@ -1,372 +0,0 @@
|
||||
use inkwell::{
|
||||
context::{AsContextRef, Context},
|
||||
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType},
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace, IntPredicate, OptimizationLevel,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
use super::ProxyType;
|
||||
use crate::{
|
||||
codegen::{
|
||||
types::structure::{
|
||||
check_struct_type_matches_fields, FieldIndexCounter, StructField, StructFields,
|
||||
},
|
||||
values::{ListValue, ProxyValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
},
|
||||
typecheck::typedef::{iter_type_vars, Type, TypeEnum},
|
||||
};
|
||||
|
||||
/// Proxy type for a `list` type in LLVM.
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub struct ListType<'ctx> {
|
||||
ty: PointerType<'ctx>,
|
||||
item: Option<BasicTypeEnum<'ctx>>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct ListStructFields<'ctx> {
|
||||
/// Array pointer to content.
|
||||
#[value_type(i8_type().ptr_type(AddressSpace::default()))]
|
||||
pub items: StructField<'ctx, PointerValue<'ctx>>,
|
||||
|
||||
/// Number of items in the array.
|
||||
#[value_type(usize)]
|
||||
pub len: StructField<'ctx, IntValue<'ctx>>,
|
||||
}
|
||||
|
||||
impl<'ctx> ListStructFields<'ctx> {
|
||||
#[must_use]
|
||||
pub fn new_typed(item: BasicTypeEnum<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
|
||||
let mut counter = FieldIndexCounter::default();
|
||||
|
||||
ListStructFields {
|
||||
items: StructField::create(
|
||||
&mut counter,
|
||||
"items",
|
||||
item.ptr_type(AddressSpace::default()),
|
||||
),
|
||||
len: StructField::create(&mut counter, "len", llvm_usize),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> ListType<'ctx> {
|
||||
/// Checks whether `llvm_ty` represents a `list` type, returning [Err] if it does not.
|
||||
pub fn is_representable(
|
||||
llvm_ty: PointerType<'ctx>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
) -> Result<(), String> {
|
||||
let ctx = llvm_ty.get_context();
|
||||
|
||||
let llvm_ty = llvm_ty.get_element_type();
|
||||
let AnyTypeEnum::StructType(llvm_ty) = llvm_ty else {
|
||||
return Err(format!("Expected struct type for `list` type, got {llvm_ty}"));
|
||||
};
|
||||
|
||||
let fields = ListStructFields::new(ctx, llvm_usize);
|
||||
|
||||
check_struct_type_matches_fields(
|
||||
fields,
|
||||
llvm_ty,
|
||||
"list",
|
||||
&[(fields.items.name(), &|ty| {
|
||||
if ty.is_pointer_type() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(format!("Expected T* for `list.items`, got {ty}"))
|
||||
}
|
||||
})],
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
|
||||
#[must_use]
|
||||
fn fields(item: BasicTypeEnum<'ctx>, llvm_usize: IntType<'ctx>) -> ListStructFields<'ctx> {
|
||||
ListStructFields::new_typed(item, llvm_usize)
|
||||
}
|
||||
|
||||
/// See [`ListType::fields`].
|
||||
// TODO: Move this into e.g. StructProxyType
|
||||
#[must_use]
|
||||
pub fn get_fields(&self, _ctx: &impl AsContextRef<'ctx>) -> ListStructFields<'ctx> {
|
||||
Self::fields(self.item.unwrap_or(self.llvm_usize.into()), self.llvm_usize)
|
||||
}
|
||||
|
||||
/// Creates an LLVM type corresponding to the expected structure of a `List`.
|
||||
#[must_use]
|
||||
fn llvm_type(
|
||||
ctx: &'ctx Context,
|
||||
element_type: Option<BasicTypeEnum<'ctx>>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
) -> PointerType<'ctx> {
|
||||
let element_type = element_type.map_or(llvm_usize.into(), |ty| ty.as_basic_type_enum());
|
||||
|
||||
let field_tys =
|
||||
Self::fields(element_type, llvm_usize).into_iter().map(|field| field.1).collect_vec();
|
||||
|
||||
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
|
||||
}
|
||||
|
||||
fn new_impl(
|
||||
ctx: &'ctx Context,
|
||||
element_type: Option<BasicTypeEnum<'ctx>>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
) -> Self {
|
||||
let llvm_list = Self::llvm_type(ctx, element_type, llvm_usize);
|
||||
|
||||
Self { ty: llvm_list, item: element_type, llvm_usize }
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ListType`].
|
||||
#[must_use]
|
||||
pub fn new(ctx: &CodeGenContext<'ctx, '_>, element_type: &impl BasicType<'ctx>) -> Self {
|
||||
Self::new_impl(ctx.ctx, Some(element_type.as_basic_type_enum()), ctx.get_size_type())
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ListType`].
|
||||
#[must_use]
|
||||
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
element_type: BasicTypeEnum<'ctx>,
|
||||
) -> Self {
|
||||
Self::new_impl(ctx, Some(element_type.as_basic_type_enum()), generator.get_size_type(ctx))
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ListType`] with an unknown element type.
|
||||
#[must_use]
|
||||
pub fn new_untyped(ctx: &CodeGenContext<'ctx, '_>) -> Self {
|
||||
Self::new_impl(ctx.ctx, None, ctx.get_size_type())
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ListType`] with an unknown element type.
|
||||
#[must_use]
|
||||
pub fn new_untyped_with_generator<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
) -> Self {
|
||||
Self::new_impl(ctx, None, generator.get_size_type(ctx))
|
||||
}
|
||||
|
||||
/// Creates an [`ListType`] from a [unifier type][Type].
|
||||
#[must_use]
|
||||
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ty: Type,
|
||||
) -> Self {
|
||||
// Check unifier type and extract `item_type`
|
||||
let elem_type = match &*ctx.unifier.get_ty_immutable(ty) {
|
||||
TypeEnum::TObj { obj_id, params, .. }
|
||||
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
iter_type_vars(params).next().unwrap().ty
|
||||
}
|
||||
|
||||
_ => panic!("Expected `list` type, but got {}", ctx.unifier.stringify(ty)),
|
||||
};
|
||||
|
||||
let llvm_usize = ctx.get_size_type();
|
||||
let llvm_elem_type = if let TypeEnum::TVar { .. } = &*ctx.unifier.get_ty_immutable(ty) {
|
||||
None
|
||||
} else {
|
||||
Some(ctx.get_llvm_type(generator, elem_type))
|
||||
};
|
||||
|
||||
Self::new_impl(ctx.ctx, llvm_elem_type, llvm_usize)
|
||||
}
|
||||
|
||||
/// Creates an [`ListType`] from a [`PointerType`].
|
||||
#[must_use]
|
||||
pub fn from_type(ptr_ty: PointerType<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
|
||||
debug_assert!(Self::is_representable(ptr_ty, llvm_usize).is_ok());
|
||||
|
||||
let ctx = ptr_ty.get_context();
|
||||
|
||||
// We are just searching for the index of a field - slot in an arbitrary element type.
|
||||
let item_field_idx =
|
||||
Self::fields(ctx.i8_type().into(), llvm_usize).index_of_field(|f| f.items);
|
||||
let item = unsafe {
|
||||
ptr_ty
|
||||
.get_element_type()
|
||||
.into_struct_type()
|
||||
.get_field_type_at_index_unchecked(item_field_idx)
|
||||
.into_pointer_type()
|
||||
.get_element_type()
|
||||
};
|
||||
let item = BasicTypeEnum::try_from(item).unwrap_or_else(|()| {
|
||||
panic!(
|
||||
"Expected BasicTypeEnum for list element type, got {}",
|
||||
ptr_ty.get_element_type().print_to_string()
|
||||
)
|
||||
});
|
||||
|
||||
ListType { ty: ptr_ty, item: Some(item), llvm_usize }
|
||||
}
|
||||
|
||||
/// Returns the type of the `size` field of this `list` type.
|
||||
#[must_use]
|
||||
pub fn size_type(&self) -> IntType<'ctx> {
|
||||
self.llvm_usize
|
||||
}
|
||||
|
||||
/// Returns the element type of this `list` type.
|
||||
#[must_use]
|
||||
pub fn element_type(&self) -> Option<BasicTypeEnum<'ctx>> {
|
||||
self.item
|
||||
}
|
||||
|
||||
/// Allocates an instance of [`ListValue`] as if by calling `alloca` on the base type.
|
||||
///
|
||||
/// See [`ProxyType::raw_alloca`].
|
||||
#[must_use]
|
||||
pub fn alloca(
|
||||
&self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(
|
||||
self.raw_alloca(ctx, name),
|
||||
self.llvm_usize,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
/// Allocates an instance of [`ListValue`] as if by calling `alloca` on the base type.
|
||||
///
|
||||
/// See [`ProxyType::raw_alloca_var`].
|
||||
#[must_use]
|
||||
pub fn alloca_var<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(
|
||||
self.raw_alloca_var(generator, ctx, name),
|
||||
self.llvm_usize,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
/// Allocates a [`ListValue`] on the stack using `item` of this [`ListType`] instance.
|
||||
///
|
||||
/// The returned list will contain:
|
||||
///
|
||||
/// - `data`: Allocated with `len` number of elements.
|
||||
/// - `len`: Initialized to the value of `len` passed to this function.
|
||||
#[must_use]
|
||||
pub fn construct<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
len: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
let len = ctx.builder.build_int_z_extend(len, self.llvm_usize, "").unwrap();
|
||||
|
||||
// Generate a runtime assertion if allocating a non-empty list with unknown element type
|
||||
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None && self.item.is_none() {
|
||||
let len_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, len, self.llvm_usize.const_zero(), "")
|
||||
.unwrap();
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
len_eqz,
|
||||
"0:AssertionError",
|
||||
"Cannot allocate a non-empty list with unknown element type",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
}
|
||||
|
||||
let plist = self.alloca_var(generator, ctx, name);
|
||||
plist.store_size(ctx, len);
|
||||
|
||||
let item = self.item.unwrap_or(self.llvm_usize.into());
|
||||
plist.create_data(ctx, item, None);
|
||||
|
||||
plist
|
||||
}
|
||||
|
||||
/// Convenience function for creating a list with zero elements.
|
||||
///
|
||||
/// This function is preferred over [`ListType::construct`] if the length is known to always be
|
||||
/// 0, as this function avoids injecting an IR assertion for checking if a non-empty untyped
|
||||
/// list is being allocated.
|
||||
///
|
||||
/// The returned list will contain:
|
||||
///
|
||||
/// - `data`: Initialized to `(T*) 0`.
|
||||
/// - `len`: Initialized to `0`.
|
||||
#[must_use]
|
||||
pub fn construct_empty<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
let plist = self.alloca_var(generator, ctx, name);
|
||||
|
||||
plist.store_size(ctx, self.llvm_usize.const_zero());
|
||||
plist.create_data(ctx, self.item.unwrap_or(self.llvm_usize.into()), None);
|
||||
|
||||
plist
|
||||
}
|
||||
|
||||
/// Converts an existing value into a [`ListValue`].
|
||||
#[must_use]
|
||||
pub fn map_value(
|
||||
&self,
|
||||
value: <<Self as ProxyType<'ctx>>::Value as ProxyValue<'ctx>>::Base,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(value, self.llvm_usize, name)
|
||||
}
|
||||
}
|
||||
|
||||
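For contrast between the two constructors above, here is a minimal sketch of a call site, assuming a `generator` and `ctx` from the surrounding codegen routine and a previously computed `i32` length value `n`; all bindings are illustrative only.

let llvm_i32 = ctx.ctx.i32_type();
let list_ty = ListType::new(ctx, &llvm_i32);

// Known-empty list: per the doc comment above, `len` is set to 0 and `data` to `(T*) 0`,
// and the "non-empty list with unknown element type" assertion is never emitted.
let empty = list_ty.construct_empty(generator, ctx, Some("empty_list"));

// General case: `construct` zero-extends `n` to `usize`, allocates `data` for `n` items
// of the element type, and stores `n` into `len`.
let filled = list_ty.construct(generator, ctx, n, Some("list"));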
impl<'ctx> ProxyType<'ctx> for ListType<'ctx> {
|
||||
type Base = PointerType<'ctx>;
|
||||
type Value = ListValue<'ctx>;
|
||||
|
||||
fn is_type<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: impl BasicType<'ctx>,
|
||||
) -> Result<(), String> {
|
||||
if let BasicTypeEnum::PointerType(ty) = llvm_ty.as_basic_type_enum() {
|
||||
<Self as ProxyType<'ctx>>::is_representable(generator, ctx, ty)
|
||||
} else {
|
||||
Err(format!("Expected pointer type, got {llvm_ty:?}"))
|
||||
}
|
||||
}
|
||||
|
||||
fn is_representable<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: Self::Base,
|
||||
) -> Result<(), String> {
|
||||
Self::is_representable(llvm_ty, generator.get_size_type(ctx))
|
||||
}
|
||||
|
||||
fn alloca_type(&self) -> impl BasicType<'ctx> {
|
||||
self.as_base_type().get_element_type().into_struct_type()
|
||||
}
|
||||
|
||||
fn as_base_type(&self) -> Self::Base {
|
||||
self.ty
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> From<ListType<'ctx>> for PointerType<'ctx> {
|
||||
fn from(value: ListType<'ctx>) -> Self {
|
||||
value.as_base_type()
|
||||
}
|
||||
}
|
@ -1,125 +0,0 @@
|
||||
//! This module contains abstractions over all intrinsic composite types of NAC3.
//!
//! # `raw_alloca` vs `alloca` vs `construct`
//!
//! There are three ways of creating a new object instance using the abstractions provided by this
//! module.
//!
//! - `raw_alloca`: Allocates the object on the stack, returning an instance of
//!   [`impl BasicValue`][inkwell::values::BasicValue]. This is similar to a `malloc` expression in
//!   C++ but the object is allocated on the stack.
//! - `alloca`: Similar to `raw_alloca`, but also wraps the allocated object with
//!   [`<Self as ProxyType<'ctx>>::Value`][ProxyValue], and returns the wrapped object. The returned
//!   object does not have any of its values or fields initialized. This is similar to a type-safe
//!   `malloc` expression in C++ but the object is allocated on the stack.
//! - `construct`: Similar to `alloca`, but performs some initialization on the value or fields of
//!   the returned object. This is similar to a `new` expression in C++ but the object is allocated
//!   on the stack.
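A minimal sketch of the three creation paths described above, using `ListType` from this module as the example proxy type; `generator`, `ctx` and the length value `len` are assumed to come from the surrounding codegen machinery, and the bindings are illustrative only.

let list_ty = ListType::new(ctx, &ctx.ctx.i32_type());

// 1. raw_alloca: a bare LLVM `alloca`, returned as a plain PointerValue with no wrapper.
let raw_ptr = list_ty.raw_alloca(ctx, None);

// 2. alloca: the same allocation, wrapped in the type-safe ListValue proxy.
//    No field of the underlying struct has been initialized yet.
let uninit_list = list_ty.alloca(ctx, None);

// 3. construct: allocates and additionally initializes the `len` and `data` fields.
let list = list_ty.construct(generator, ctx, len, None);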
use inkwell::{
|
||||
context::Context,
|
||||
types::BasicType,
|
||||
values::{IntValue, PointerValue},
|
||||
};
|
||||
|
||||
use super::{
|
||||
values::{ArraySliceValue, ProxyValue},
|
||||
{CodeGenContext, CodeGenerator},
|
||||
};
|
||||
pub use list::*;
|
||||
pub use range::*;
|
||||
pub use tuple::*;
|
||||
|
||||
mod list;
|
||||
pub mod ndarray;
|
||||
mod range;
|
||||
pub mod structure;
|
||||
mod tuple;
|
||||
pub mod utils;
|
||||
|
||||
/// An LLVM type that is used to represent a corresponding type in NAC3.
|
||||
pub trait ProxyType<'ctx>: Into<Self::Base> {
|
||||
/// The LLVM type that values of this type possess. This is usually an
/// [LLVM pointer type][PointerType] for any non-primitive type.
|
||||
type Base: BasicType<'ctx>;
|
||||
|
||||
/// The type of values represented by this type.
|
||||
type Value: ProxyValue<'ctx, Type = Self>;
|
||||
|
||||
fn is_type<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: impl BasicType<'ctx>,
|
||||
) -> Result<(), String>;
|
||||
|
||||
/// Checks whether `llvm_ty` can be represented by this [`ProxyType`].
|
||||
fn is_representable<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: Self::Base,
|
||||
) -> Result<(), String>;
|
||||
|
||||
/// Returns the type that should be used in `alloca` IR statements.
|
||||
fn alloca_type(&self) -> impl BasicType<'ctx>;
|
||||
|
||||
/// Creates a new value of this type by invoking `alloca` at the current builder location,
|
||||
/// returning a [`PointerValue`] instance representing the allocated value.
|
||||
fn raw_alloca(
|
||||
&self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> PointerValue<'ctx> {
|
||||
ctx.builder
|
||||
.build_alloca(self.alloca_type().as_basic_type_enum(), name.unwrap_or_default())
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Creates a new value of this type by invoking `alloca` at the beginning of the function,
|
||||
/// returning a [`PointerValue`] instance representing the allocated value.
|
||||
fn raw_alloca_var<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> PointerValue<'ctx> {
|
||||
generator.gen_var_alloc(ctx, self.alloca_type().as_basic_type_enum(), name).unwrap()
|
||||
}
|
||||
|
||||
/// Creates a new array value of this type by invoking `alloca` at the current builder location,
|
||||
/// returning an [`ArraySliceValue`] encapsulating the resulting array.
|
||||
fn array_alloca(
|
||||
&self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
size: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> ArraySliceValue<'ctx> {
|
||||
ArraySliceValue::from_ptr_val(
|
||||
ctx.builder
|
||||
.build_array_alloca(
|
||||
self.alloca_type().as_basic_type_enum(),
|
||||
size,
|
||||
name.unwrap_or_default(),
|
||||
)
|
||||
.unwrap(),
|
||||
size,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
/// Creates a new array value of this type by invoking `alloca` at the beginning of the
|
||||
/// function, returning an [`ArraySliceValue`] encapsulating the resulting array.
|
||||
fn array_alloca_var<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
size: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> ArraySliceValue<'ctx> {
|
||||
generator
|
||||
.gen_array_var_alloc(ctx, self.alloca_type().as_basic_type_enum(), size, name)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Returns the [base type][Self::Base] of this proxy.
|
||||
fn as_base_type(&self) -> Self::Base;
|
||||
}
|
@ -1,240 +0,0 @@
|
||||
use inkwell::{
|
||||
types::BasicTypeEnum,
|
||||
values::{BasicValueEnum, IntValue},
|
||||
AddressSpace,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
codegen::{
|
||||
irrt,
|
||||
stmt::gen_if_else_expr_callback,
|
||||
types::{ndarray::NDArrayType, ListType, ProxyType},
|
||||
values::{
|
||||
ndarray::NDArrayValue, ArrayLikeValue, ArraySliceValue, ListValue, ProxyValue,
|
||||
TypedArrayLikeAdapter, TypedArrayLikeMutator,
|
||||
},
|
||||
CodeGenContext, CodeGenerator,
|
||||
},
|
||||
toplevel::helper::{arraylike_flatten_element_type, arraylike_get_ndims},
|
||||
typecheck::typedef::{Type, TypeEnum},
|
||||
};
|
||||
|
||||
/// Get the expected `dtype` and `ndims` of the ndarray returned by `np_array(<list>)`.
|
||||
fn get_list_object_dtype_and_ndims<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
list_ty: Type,
|
||||
) -> (BasicTypeEnum<'ctx>, u64) {
|
||||
let dtype = arraylike_flatten_element_type(&mut ctx.unifier, list_ty);
|
||||
let ndims = arraylike_get_ndims(&mut ctx.unifier, list_ty);
|
||||
|
||||
(ctx.get_llvm_type(generator, dtype), ndims)
|
||||
}
|
||||
|
||||
impl<'ctx> NDArrayType<'ctx> {
|
||||
/// Implementation of `np_array(<list>, copy=True)`
|
||||
fn construct_numpy_array_from_list_copy_true_impl<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
(list_ty, list): (Type, ListValue<'ctx>),
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
let (dtype, ndims_int) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
|
||||
assert!(self.ndims >= ndims_int);
|
||||
assert_eq!(dtype, self.dtype);
|
||||
|
||||
let list_value = list.as_i8_list(ctx);
|
||||
|
||||
// Validate `list` has a consistent shape.
|
||||
// Raise an exception if `list` is something abnormal like `[[1, 2], [3]]`.
|
||||
// If `list` has a consistent shape, deduce the shape and write it to `shape`.
|
||||
let ndims = self.llvm_usize.const_int(ndims_int, false);
|
||||
let shape = ctx.builder.build_array_alloca(self.llvm_usize, ndims, "").unwrap();
|
||||
let shape = ArraySliceValue::from_ptr_val(shape, ndims, None);
|
||||
let shape = TypedArrayLikeAdapter::from(
|
||||
shape,
|
||||
|_, _, val| val.into_int_value(),
|
||||
|_, _, val| val.into(),
|
||||
);
|
||||
irrt::ndarray::call_nac3_ndarray_array_set_and_validate_list_shape(
|
||||
generator, ctx, list_value, ndims, &shape,
|
||||
);
|
||||
|
||||
let ndarray =
|
||||
Self::new(ctx, dtype, ndims_int).construct_uninitialized(generator, ctx, name);
|
||||
ndarray.copy_shape_from_array(generator, ctx, shape.base_ptr(ctx, generator));
|
||||
unsafe { ndarray.create_data(generator, ctx) };
|
||||
|
||||
// Copy all contents from the list.
|
||||
irrt::ndarray::call_nac3_ndarray_array_write_list_to_array(ctx, list_value, ndarray);
|
||||
|
||||
ndarray
|
||||
}
|
||||
|
||||
/// Implementation of `np_array(<list>, copy=None)`
|
||||
fn construct_numpy_array_from_list_copy_none_impl<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
(list_ty, list): (Type, ListValue<'ctx>),
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
// np_array without copying is only possible if `list` is not nested.
|
||||
//
|
||||
// If `list` is `list[T]`, we can create an ndarray with `data` set
|
||||
// to the array pointer of `list`.
|
||||
//
|
||||
// If `list` is `list[list[T]]` or worse, copy.
|
||||
|
||||
let (dtype, ndims) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
|
||||
if ndims == 1 {
|
||||
// `list` is not nested
|
||||
assert_eq!(ndims, 1);
|
||||
assert!(self.ndims >= ndims);
|
||||
assert_eq!(dtype, self.dtype);
|
||||
|
||||
let llvm_pi8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray = Self::new(ctx, dtype, 1).construct_uninitialized(generator, ctx, name);
|
||||
|
||||
// Set data
|
||||
let data = ctx
|
||||
.builder
|
||||
.build_pointer_cast(list.data().base_ptr(ctx, generator), llvm_pi8, "")
|
||||
.unwrap();
|
||||
ndarray.store_data(ctx, data);
|
||||
|
||||
// ndarray->shape[0] = list->len;
|
||||
let shape = ndarray.shape();
|
||||
let list_len = list.load_size(ctx, None);
|
||||
unsafe {
|
||||
shape.set_typed_unchecked(ctx, generator, &self.llvm_usize.const_zero(), list_len);
|
||||
}
|
||||
|
||||
// Set strides, the `data` is contiguous
|
||||
ndarray.set_strides_contiguous(ctx);
|
||||
|
||||
ndarray
|
||||
} else {
|
||||
// `list` is nested, copy
|
||||
self.construct_numpy_array_from_list_copy_true_impl(
|
||||
generator,
|
||||
ctx,
|
||||
(list_ty, list),
|
||||
name,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation of `np_array(<list>, copy=copy)`
|
||||
fn construct_numpy_array_list_impl<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
(list_ty, list): (Type, ListValue<'ctx>),
|
||||
copy: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
assert_eq!(copy.get_type(), ctx.ctx.bool_type());
|
||||
|
||||
let (dtype, ndims) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
|
||||
|
||||
let ndarray = gen_if_else_expr_callback(
|
||||
generator,
|
||||
ctx,
|
||||
|_generator, _ctx| Ok(copy),
|
||||
|generator, ctx| {
|
||||
let ndarray = self.construct_numpy_array_from_list_copy_true_impl(
|
||||
generator,
|
||||
ctx,
|
||||
(list_ty, list),
|
||||
name,
|
||||
);
|
||||
Ok(Some(ndarray.as_base_value()))
|
||||
},
|
||||
|generator, ctx| {
|
||||
let ndarray = self.construct_numpy_array_from_list_copy_none_impl(
|
||||
generator,
|
||||
ctx,
|
||||
(list_ty, list),
|
||||
name,
|
||||
);
|
||||
Ok(Some(ndarray.as_base_value()))
|
||||
},
|
||||
)
|
||||
.unwrap()
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
NDArrayType::new(ctx, dtype, ndims).map_value(ndarray, None)
|
||||
}
|
||||
|
||||
/// Implementation of `np_array(<ndarray>, copy=copy)`.
|
||||
pub fn construct_numpy_array_ndarray_impl<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
copy: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
assert_eq!(ndarray.get_type().dtype, self.dtype);
|
||||
assert!(self.ndims >= ndarray.get_type().ndims);
|
||||
assert_eq!(copy.get_type(), ctx.ctx.bool_type());
|
||||
|
||||
let ndarray_val = gen_if_else_expr_callback(
|
||||
generator,
|
||||
ctx,
|
||||
|_generator, _ctx| Ok(copy),
|
||||
|generator, ctx| {
|
||||
let ndarray = ndarray.make_copy(generator, ctx); // Force copy
|
||||
Ok(Some(ndarray.as_base_value()))
|
||||
},
|
||||
|_generator, _ctx| {
|
||||
// No need to copy. Return `ndarray` itself.
|
||||
Ok(Some(ndarray.as_base_value()))
|
||||
},
|
||||
)
|
||||
.unwrap()
|
||||
.map(BasicValueEnum::into_pointer_value)
|
||||
.unwrap();
|
||||
|
||||
ndarray.get_type().map_value(ndarray_val, name)
|
||||
}
|
||||
|
||||
/// Create a new ndarray like
|
||||
/// [`np.array()`](https://numpy.org/doc/stable/reference/generated/numpy.array.html).
|
||||
///
|
||||
/// Note that the returned [`NDArrayValue`] may have fewer dimensions than is specified by this
|
||||
/// instance. Use [`NDArrayValue::atleast_nd`] on the returned value if an `ndarray` instance
|
||||
/// with the exact number of dimensions is needed.
|
||||
pub fn construct_numpy_array<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
(object_ty, object): (Type, BasicValueEnum<'ctx>),
|
||||
copy: IntValue<'ctx>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
match &*ctx.unifier.get_ty_immutable(object_ty) {
|
||||
TypeEnum::TObj { obj_id, .. }
|
||||
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
let list = ListType::from_unifier_type(generator, ctx, object_ty)
|
||||
.map_value(object.into_pointer_value(), None);
|
||||
self.construct_numpy_array_list_impl(generator, ctx, (object_ty, list), copy, name)
|
||||
}
|
||||
|
||||
TypeEnum::TObj { obj_id, .. }
|
||||
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
let ndarray = NDArrayType::from_unifier_type(generator, ctx, object_ty)
|
||||
.map_value(object.into_pointer_value(), None);
|
||||
self.construct_numpy_array_ndarray_impl(generator, ctx, ndarray, copy, name)
|
||||
}
|
||||
|
||||
_ => panic!("Unrecognized object type: {}", ctx.unifier.stringify(object_ty)), // Typechecker ensures this
|
||||
}
|
||||
}
|
||||
}
|
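A hedged sketch of what a call site for `construct_numpy_array` could look like; `generator`, `ctx` and the `(obj_ty, obj_val)` pair (a unifier `Type` together with the matching `BasicValueEnum`) are assumed to come from expression codegen, and the bindings are illustrative only.

// Target a float64 ndarray with up to 2 dimensions.
let dtype = ctx.get_llvm_type(generator, ctx.primitives.float);
let ndarray_ty = NDArrayType::new(ctx, dtype, 2);

// `copy=True`, modelled as an i1 constant; the typechecker guarantees that `obj_ty`
// is a `list[...]` or an `ndarray`, which is all `construct_numpy_array` accepts.
let copy = ctx.ctx.bool_type().const_int(1, false);
let ndarray = ndarray_ty.construct_numpy_array(generator, ctx, (obj_ty, obj_val), copy, None);

// As the doc comment above notes, the result may carry fewer than 2 dimensions (e.g.
// for a flat list); promote it with `NDArrayValue::atleast_nd` if the caller needs the
// exact dimensionality.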
@ -1,188 +0,0 @@
|
||||
use inkwell::{
|
||||
context::{AsContextRef, Context},
|
||||
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType},
|
||||
values::{IntValue, PointerValue},
|
||||
AddressSpace,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
|
||||
use nac3core_derive::StructFields;
|
||||
|
||||
use crate::codegen::{
|
||||
types::{
|
||||
structure::{check_struct_type_matches_fields, StructField, StructFields},
|
||||
ProxyType,
|
||||
},
|
||||
values::{ndarray::ShapeEntryValue, ProxyValue},
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub struct ShapeEntryType<'ctx> {
|
||||
ty: PointerType<'ctx>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
|
||||
pub struct ShapeEntryStructFields<'ctx> {
|
||||
#[value_type(usize)]
|
||||
pub ndims: StructField<'ctx, IntValue<'ctx>>,
|
||||
#[value_type(usize.ptr_type(AddressSpace::default()))]
|
||||
pub shape: StructField<'ctx, PointerValue<'ctx>>,
|
||||
}
|
||||
|
||||
impl<'ctx> ShapeEntryType<'ctx> {
|
||||
/// Checks whether `llvm_ty` represents a [`ShapeEntryType`], returning [Err] if it does not.
|
||||
pub fn is_representable(
|
||||
llvm_ty: PointerType<'ctx>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
) -> Result<(), String> {
|
||||
let ctx = llvm_ty.get_context();
|
||||
|
||||
let llvm_ndarray_ty = llvm_ty.get_element_type();
|
||||
let AnyTypeEnum::StructType(llvm_ndarray_ty) = llvm_ndarray_ty else {
|
||||
return Err(format!(
|
||||
"Expected struct type for `ShapeEntry` type, got {llvm_ndarray_ty}"
|
||||
));
|
||||
};
|
||||
|
||||
check_struct_type_matches_fields(
|
||||
Self::fields(ctx, llvm_usize),
|
||||
llvm_ndarray_ty,
|
||||
"NDArray",
|
||||
&[],
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
|
||||
#[must_use]
|
||||
fn fields(
|
||||
ctx: impl AsContextRef<'ctx>,
|
||||
llvm_usize: IntType<'ctx>,
|
||||
) -> ShapeEntryStructFields<'ctx> {
|
||||
ShapeEntryStructFields::new(ctx, llvm_usize)
|
||||
}
|
||||
|
||||
/// See [`ShapeEntryStructFields::fields`].
|
||||
// TODO: Move this into e.g. StructProxyType
|
||||
#[must_use]
|
||||
pub fn get_fields(&self, ctx: impl AsContextRef<'ctx>) -> ShapeEntryStructFields<'ctx> {
|
||||
Self::fields(ctx, self.llvm_usize)
|
||||
}
|
||||
|
||||
/// Creates an LLVM type corresponding to the expected structure of a `ShapeEntry`.
|
||||
#[must_use]
|
||||
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
|
||||
let field_tys =
|
||||
Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();
|
||||
|
||||
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
|
||||
}
|
||||
|
||||
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
|
||||
let llvm_ty = Self::llvm_type(ctx, llvm_usize);
|
||||
|
||||
Self { ty: llvm_ty, llvm_usize }
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ShapeEntryType`].
|
||||
#[must_use]
|
||||
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
|
||||
Self::new_impl(ctx.ctx, ctx.get_size_type())
|
||||
}
|
||||
|
||||
/// Creates an instance of [`ShapeEntryType`].
|
||||
#[must_use]
|
||||
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
) -> Self {
|
||||
Self::new_impl(ctx, generator.get_size_type(ctx))
|
||||
}
|
||||
|
||||
/// Creates a [`ShapeEntryType`] from a [`PointerType`] representing an `ShapeEntry`.
|
||||
#[must_use]
|
||||
pub fn from_type(ptr_ty: PointerType<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
|
||||
debug_assert!(Self::is_representable(ptr_ty, llvm_usize).is_ok());
|
||||
|
||||
Self { ty: ptr_ty, llvm_usize }
|
||||
}
|
||||
|
||||
/// Allocates an instance of [`ShapeEntryValue`] as if by calling `alloca` on the base type.
|
||||
#[must_use]
|
||||
pub fn alloca(
|
||||
&self,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(
|
||||
self.raw_alloca(ctx, name),
|
||||
self.llvm_usize,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
/// Allocates an instance of [`ShapeEntryValue`] as if by calling `alloca` on the base type.
|
||||
#[must_use]
|
||||
pub fn alloca_var<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(
|
||||
self.raw_alloca_var(generator, ctx, name),
|
||||
self.llvm_usize,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
/// Converts an existing value into a [`ShapeEntryValue`].
|
||||
#[must_use]
|
||||
pub fn map_value(
|
||||
&self,
|
||||
value: <<Self as ProxyType<'ctx>>::Value as ProxyValue<'ctx>>::Base,
|
||||
name: Option<&'ctx str>,
|
||||
) -> <Self as ProxyType<'ctx>>::Value {
|
||||
<Self as ProxyType<'ctx>>::Value::from_pointer_value(value, self.llvm_usize, name)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> ProxyType<'ctx> for ShapeEntryType<'ctx> {
|
||||
type Base = PointerType<'ctx>;
|
||||
type Value = ShapeEntryValue<'ctx>;
|
||||
|
||||
fn is_type<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: impl BasicType<'ctx>,
|
||||
) -> Result<(), String> {
|
||||
if let BasicTypeEnum::PointerType(ty) = llvm_ty.as_basic_type_enum() {
|
||||
<Self as ProxyType<'ctx>>::is_representable(generator, ctx, ty)
|
||||
} else {
|
||||
Err(format!("Expected pointer type, got {llvm_ty:?}"))
|
||||
}
|
||||
}
|
||||
|
||||
fn is_representable<G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &'ctx Context,
|
||||
llvm_ty: Self::Base,
|
||||
) -> Result<(), String> {
|
||||
Self::is_representable(llvm_ty, generator.get_size_type(ctx))
|
||||
}
|
||||
|
||||
fn alloca_type(&self) -> impl BasicType<'ctx> {
|
||||
self.as_base_type().get_element_type().into_struct_type()
|
||||
}
|
||||
|
||||
fn as_base_type(&self) -> Self::Base {
|
||||
self.ty
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> From<ShapeEntryType<'ctx>> for PointerType<'ctx> {
|
||||
fn from(value: ShapeEntryType<'ctx>) -> Self {
|
||||
value.as_base_type()
|
||||
}
|
||||
}
|
@ -1,258 +0,0 @@
use inkwell::{
    context::Context,
    types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType},
    values::{IntValue, PointerValue},
    AddressSpace,
};
use itertools::Itertools;

use nac3core_derive::StructFields;

use crate::{
    codegen::{
        types::{
            structure::{
                check_struct_type_matches_fields, FieldIndexCounter, StructField, StructFields,
            },
            ProxyType,
        },
        values::{ndarray::ContiguousNDArrayValue, ProxyValue},
        CodeGenContext, CodeGenerator,
    },
    toplevel::numpy::unpack_ndarray_var_tys,
    typecheck::typedef::Type,
};

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct ContiguousNDArrayType<'ctx> {
    ty: PointerType<'ctx>,
    item: BasicTypeEnum<'ctx>,
    llvm_usize: IntType<'ctx>,
}

#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
pub struct ContiguousNDArrayStructFields<'ctx> {
    #[value_type(usize)]
    pub ndims: StructField<'ctx, IntValue<'ctx>>,
    #[value_type(usize.ptr_type(AddressSpace::default()))]
    pub shape: StructField<'ctx, PointerValue<'ctx>>,
    #[value_type(i8_type().ptr_type(AddressSpace::default()))]
    pub data: StructField<'ctx, PointerValue<'ctx>>,
}

impl<'ctx> ContiguousNDArrayStructFields<'ctx> {
    #[must_use]
    pub fn new_typed(item: BasicTypeEnum<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
        let mut counter = FieldIndexCounter::default();

        ContiguousNDArrayStructFields {
            ndims: StructField::create(&mut counter, "ndims", llvm_usize),
            shape: StructField::create(
                &mut counter,
                "shape",
                llvm_usize.ptr_type(AddressSpace::default()),
            ),
            data: StructField::create(&mut counter, "data", item.ptr_type(AddressSpace::default())),
        }
    }
}

impl<'ctx> ContiguousNDArrayType<'ctx> {
    /// Checks whether `llvm_ty` represents a `ContiguousNDArray` type, returning [Err] if it does not.
    pub fn is_representable(
        llvm_ty: PointerType<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> Result<(), String> {
        let ctx = llvm_ty.get_context();

        let llvm_ty = llvm_ty.get_element_type();
        let AnyTypeEnum::StructType(llvm_ty) = llvm_ty else {
            return Err(format!(
                "Expected struct type for `ContiguousNDArray` type, got {llvm_ty}"
            ));
        };

        let fields = ContiguousNDArrayStructFields::new(ctx, llvm_usize);

        check_struct_type_matches_fields(
            fields,
            llvm_ty,
            "ContiguousNDArray",
            &[(fields.data.name(), &|ty| {
                if ty.is_pointer_type() {
                    Ok(())
                } else {
                    Err(format!("Expected T* for `ContiguousNDArray.data`, got {ty}"))
                }
            })],
        )
    }

    /// Returns an instance of [`StructFields`] containing all field accessors for this type.
    #[must_use]
    fn fields(
        item: BasicTypeEnum<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> ContiguousNDArrayStructFields<'ctx> {
        ContiguousNDArrayStructFields::new_typed(item, llvm_usize)
    }

    /// See [`NDArrayType::fields`].
    // TODO: Move this into e.g. StructProxyType
    #[must_use]
    pub fn get_fields(&self) -> ContiguousNDArrayStructFields<'ctx> {
        Self::fields(self.item, self.llvm_usize)
    }

    /// Creates an LLVM type corresponding to the expected structure of a `ContiguousNDArray`.
    #[must_use]
    fn llvm_type(
        ctx: &'ctx Context,
        item: BasicTypeEnum<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> PointerType<'ctx> {
        let field_tys =
            Self::fields(item, llvm_usize).into_iter().map(|field| field.1).collect_vec();

        ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
    }

    fn new_impl(ctx: &'ctx Context, item: BasicTypeEnum<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
        let llvm_cndarray = Self::llvm_type(ctx, item, llvm_usize);

        Self { ty: llvm_cndarray, item, llvm_usize }
    }

    /// Creates an instance of [`ContiguousNDArrayType`].
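    ///
    /// # Example
    ///
    /// An illustrative sketch; it assumes a live [`CodeGenContext`] named `ctx` and builds the
    /// type for a contiguous `float64` ndarray:
    ///
    /// ```ignore
    /// let cndarray_ty = ContiguousNDArrayType::new(ctx, &ctx.ctx.f64_type());
    /// ```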
    #[must_use]
    pub fn new(ctx: &CodeGenContext<'ctx, '_>, item: &impl BasicType<'ctx>) -> Self {
        Self::new_impl(ctx.ctx, item.as_basic_type_enum(), ctx.get_size_type())
    }

    /// Creates an instance of [`ContiguousNDArrayType`].
    #[must_use]
    pub fn new_with_generator<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
        item: BasicTypeEnum<'ctx>,
    ) -> Self {
        Self::new_impl(ctx, item, generator.get_size_type(ctx))
    }

    /// Creates a [`ContiguousNDArrayType`] from a [unifier type][Type].
    #[must_use]
    pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        ty: Type,
    ) -> Self {
        let (dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);

        let llvm_dtype = ctx.get_llvm_type(generator, dtype);

        Self::new_impl(ctx.ctx, llvm_dtype, ctx.get_size_type())
    }

    /// Creates a [`ContiguousNDArrayType`] from a [`PointerType`] representing a `ContiguousNDArray`.
    #[must_use]
    pub fn from_type(
        ptr_ty: PointerType<'ctx>,
        item: BasicTypeEnum<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> Self {
        debug_assert!(Self::is_representable(ptr_ty, llvm_usize).is_ok());

        Self { ty: ptr_ty, item, llvm_usize }
    }

    /// Allocates an instance of [`ContiguousNDArrayValue`] as if by calling `alloca` on the base
    /// type.
    ///
    /// See [`ProxyType::raw_alloca`].
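    ///
    /// # Example
    ///
    /// An illustrative sketch; it assumes `ctx` is a live [`CodeGenContext`] positioned inside a
    /// function body:
    ///
    /// ```ignore
    /// let cndarray_ty = ContiguousNDArrayType::new(ctx, &ctx.ctx.f64_type());
    /// let cndarray = cndarray_ty.alloca(ctx, Some("cndarray"));
    /// ```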
    #[must_use]
    pub fn alloca(
        &self,
        ctx: &mut CodeGenContext<'ctx, '_>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(
            self.raw_alloca(ctx, name),
            self.item,
            self.llvm_usize,
            name,
        )
    }

    /// Allocates an instance of [`ContiguousNDArrayValue`] as if by calling `alloca` on the base
    /// type.
    ///
    /// See [`ProxyType::raw_alloca_var`].
    #[must_use]
    pub fn alloca_var<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(
            self.raw_alloca_var(generator, ctx, name),
            self.item,
            self.llvm_usize,
            name,
        )
    }

    /// Converts an existing value into a [`ContiguousNDArrayValue`].
    #[must_use]
    pub fn map_value(
        &self,
        value: <<Self as ProxyType<'ctx>>::Value as ProxyValue<'ctx>>::Base,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(
            value,
            self.item,
            self.llvm_usize,
            name,
        )
    }
}

impl<'ctx> ProxyType<'ctx> for ContiguousNDArrayType<'ctx> {
    type Base = PointerType<'ctx>;
    type Value = ContiguousNDArrayValue<'ctx>;

    fn is_type<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
        llvm_ty: impl BasicType<'ctx>,
    ) -> Result<(), String> {
        if let BasicTypeEnum::PointerType(ty) = llvm_ty.as_basic_type_enum() {
            <Self as ProxyType<'ctx>>::is_representable(generator, ctx, ty)
        } else {
            Err(format!("Expected pointer type, got {llvm_ty:?}"))
        }
    }

    fn is_representable<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
        llvm_ty: Self::Base,
    ) -> Result<(), String> {
        Self::is_representable(llvm_ty, generator.get_size_type(ctx))
    }

    fn alloca_type(&self) -> impl BasicType<'ctx> {
        self.as_base_type().get_element_type().into_struct_type()
    }

    fn as_base_type(&self) -> Self::Base {
        self.ty
    }
}

impl<'ctx> From<ContiguousNDArrayType<'ctx>> for PointerType<'ctx> {
    fn from(value: ContiguousNDArrayType<'ctx>) -> Self {
        value.as_base_type()
    }
}
@ -1,236 +0,0 @@
use inkwell::{
    values::{BasicValueEnum, IntValue},
    IntPredicate,
};

use super::NDArrayType;
use crate::{
    codegen::{
        irrt, types::ProxyType, values::TypedArrayLikeAccessor, CodeGenContext, CodeGenerator,
    },
    typecheck::typedef::Type,
};

/// Get the zero value in `np.zeros()` of a `dtype`.
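///
/// `int32`/`uint32` lower to `i32 0`, `int64`/`uint64` to `i64 0`, `float` to `double 0.0`,
/// `bool` to `i1 0`, and `str` to an empty string constant; any other `dtype` panics.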
fn ndarray_zero_value<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    dtype: Type,
) -> BasicValueEnum<'ctx> {
    if [ctx.primitives.int32, ctx.primitives.uint32]
        .iter()
        .any(|ty| ctx.unifier.unioned(dtype, *ty))
    {
        ctx.ctx.i32_type().const_zero().into()
    } else if [ctx.primitives.int64, ctx.primitives.uint64]
        .iter()
        .any(|ty| ctx.unifier.unioned(dtype, *ty))
    {
        ctx.ctx.i64_type().const_zero().into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.float) {
        ctx.ctx.f64_type().const_zero().into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.bool) {
        ctx.ctx.bool_type().const_zero().into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.str) {
        ctx.gen_string(generator, "").into()
    } else {
        panic!("unrecognized dtype: {}", ctx.unifier.stringify(dtype));
    }
}

/// Get the one value in `np.ones()` of a `dtype`.
fn ndarray_one_value<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    dtype: Type,
) -> BasicValueEnum<'ctx> {
    if [ctx.primitives.int32, ctx.primitives.uint32]
        .iter()
        .any(|ty| ctx.unifier.unioned(dtype, *ty))
    {
        let is_signed = ctx.unifier.unioned(dtype, ctx.primitives.int32);
        ctx.ctx.i32_type().const_int(1, is_signed).into()
    } else if [ctx.primitives.int64, ctx.primitives.uint64]
        .iter()
        .any(|ty| ctx.unifier.unioned(dtype, *ty))
    {
        let is_signed = ctx.unifier.unioned(dtype, ctx.primitives.int64);
        ctx.ctx.i64_type().const_int(1, is_signed).into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.float) {
        ctx.ctx.f64_type().const_float(1.0).into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.bool) {
        ctx.ctx.bool_type().const_int(1, false).into()
    } else if ctx.unifier.unioned(dtype, ctx.primitives.str) {
        ctx.gen_string(generator, "1").into()
    } else {
        panic!("unrecognized dtype: {}", ctx.unifier.stringify(dtype));
    }
}

impl<'ctx> NDArrayType<'ctx> {
    /// Create an ndarray like
    /// [`np.empty`](https://numpy.org/doc/stable/reference/generated/numpy.empty.html).
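    ///
    /// # Example
    ///
    /// An illustrative sketch; it assumes an [`NDArrayType`] named `ndarray_ty`, a code
    /// generator `generator`, and a pre-built [`TypedArrayLikeAccessor`] named `shape` holding
    /// the target shape as `usize` values:
    ///
    /// ```ignore
    /// let ndarray = ndarray_ty.construct_numpy_empty(generator, ctx, &shape, None);
    /// ```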
    pub fn construct_numpy_empty<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        let ndarray = self.construct_uninitialized(generator, ctx, name);

        // Validate `shape`
        irrt::ndarray::call_nac3_ndarray_util_assert_shape_no_negative(generator, ctx, shape);

        ndarray.copy_shape_from_array(generator, ctx, shape.base_ptr(ctx, generator));
        unsafe { ndarray.create_data(generator, ctx) };

        ndarray
    }

    /// Create an ndarray like
    /// [`np.full`](https://numpy.org/doc/stable/reference/generated/numpy.full.html).
    pub fn construct_numpy_full<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
        fill_value: BasicValueEnum<'ctx>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        let ndarray = self.construct_numpy_empty(generator, ctx, shape, name);
        ndarray.fill(generator, ctx, fill_value);
        ndarray
    }

    /// Create an ndarray like
    /// [`np.zeros`](https://numpy.org/doc/stable/reference/generated/numpy.zeros.html).
    pub fn construct_numpy_zeros<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        dtype: Type,
        shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        assert_eq!(
            ctx.get_llvm_type(generator, dtype),
            self.dtype,
            "Expected LLVM dtype={} but got {}",
            self.dtype.print_to_string(),
            ctx.get_llvm_type(generator, dtype).print_to_string(),
        );

        let fill_value = ndarray_zero_value(generator, ctx, dtype);
        self.construct_numpy_full(generator, ctx, shape, fill_value, name)
    }

    /// Create an ndarray like
    /// [`np.ones`](https://numpy.org/doc/stable/reference/generated/numpy.ones.html).
    pub fn construct_numpy_ones<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        dtype: Type,
        shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        assert_eq!(
            ctx.get_llvm_type(generator, dtype),
            self.dtype,
            "Expected LLVM dtype={} but got {}",
            self.dtype.print_to_string(),
            ctx.get_llvm_type(generator, dtype).print_to_string(),
        );

        let fill_value = ndarray_one_value(generator, ctx, dtype);
        self.construct_numpy_full(generator, ctx, shape, fill_value, name)
    }

    /// Create an ndarray like
    /// [`np.eye`](https://numpy.org/doc/stable/reference/generated/numpy.eye.html).
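    ///
    /// # Example
    ///
    /// An illustrative sketch; it assumes `ndarray_ty` was created with a `float64` dtype and
    /// builds a 3x3 identity-like matrix with no diagonal offset:
    ///
    /// ```ignore
    /// let usize_ty = ctx.get_size_type();
    /// let eye = ndarray_ty.construct_numpy_eye(
    ///     generator,
    ///     ctx,
    ///     ctx.primitives.float,
    ///     usize_ty.const_int(3, false),
    ///     usize_ty.const_int(3, false),
    ///     usize_ty.const_zero(),
    ///     None,
    /// );
    /// ```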
    #[allow(clippy::too_many_arguments)]
    pub fn construct_numpy_eye<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        dtype: Type,
        nrows: IntValue<'ctx>,
        ncols: IntValue<'ctx>,
        offset: IntValue<'ctx>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        assert_eq!(
            ctx.get_llvm_type(generator, dtype),
            self.dtype,
            "Expected LLVM dtype={} but got {}",
            self.dtype.print_to_string(),
            ctx.get_llvm_type(generator, dtype).print_to_string(),
        );
        assert_eq!(nrows.get_type(), self.llvm_usize);
        assert_eq!(ncols.get_type(), self.llvm_usize);
        assert_eq!(offset.get_type(), self.llvm_usize);

        let ndzero = ndarray_zero_value(generator, ctx, dtype);
        let ndone = ndarray_one_value(generator, ctx, dtype);

        let ndarray = self.construct_dyn_shape(generator, ctx, &[nrows, ncols], name);

        // Create data and make the matrix look like np.eye()
        unsafe {
            ndarray.create_data(generator, ctx);
        }
        ndarray
            .foreach(generator, ctx, |generator, ctx, _, nditer| {
                // NOTE: rows and cols can never be zero here, since this ndarray's `np.size` would be zero
                // and this loop would not execute.

                let indices = nditer.get_indices();

                let row_i = unsafe {
                    indices.get_typed_unchecked(ctx, generator, &self.llvm_usize.const_zero(), None)
                };
                let col_i = unsafe {
                    indices.get_typed_unchecked(
                        ctx,
                        generator,
                        &self.llvm_usize.const_int(1, false),
                        None,
                    )
                };

                let be_one = ctx
                    .builder
                    .build_int_compare(
                        IntPredicate::EQ,
                        ctx.builder.build_int_add(row_i, offset, "").unwrap(),
                        col_i,
                        "",
                    )
                    .unwrap();
                let value = ctx.builder.build_select(be_one, ndone, ndzero, "value").unwrap();

                let p = nditer.get_pointer(ctx);
                ctx.builder.build_store(p, value).unwrap();

                Ok(())
            })
            .unwrap();

        ndarray
    }

    /// Create an ndarray like
    /// [`np.identity`](https://numpy.org/doc/stable/reference/generated/numpy.identity.html).
    pub fn construct_numpy_identity<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        dtype: Type,
        size: IntValue<'ctx>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        let offset = self.llvm_usize.const_zero();
        self.construct_numpy_eye(generator, ctx, dtype, size, size, offset, name)
    }
}
@ -1,216 +0,0 @@
use inkwell::{
    context::{AsContextRef, Context},
    types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType},
    values::{IntValue, PointerValue},
    AddressSpace,
};
use itertools::Itertools;

use nac3core_derive::StructFields;

use crate::codegen::{
    types::{
        structure::{check_struct_type_matches_fields, StructField, StructFields},
        ProxyType,
    },
    values::{
        ndarray::{NDIndexValue, RustNDIndex},
        ArrayLikeIndexer, ArraySliceValue, ProxyValue,
    },
    CodeGenContext, CodeGenerator,
};

#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct NDIndexType<'ctx> {
    ty: PointerType<'ctx>,
    llvm_usize: IntType<'ctx>,
}

#[derive(PartialEq, Eq, Clone, Copy, StructFields)]
pub struct NDIndexStructFields<'ctx> {
    #[value_type(i8_type())]
    pub type_: StructField<'ctx, IntValue<'ctx>>,
    #[value_type(i8_type().ptr_type(AddressSpace::default()))]
    pub data: StructField<'ctx, PointerValue<'ctx>>,
}

impl<'ctx> NDIndexType<'ctx> {
    /// Checks whether `llvm_ty` represents a `ndindex` type, returning [Err] if it does not.
    pub fn is_representable(
        llvm_ty: PointerType<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> Result<(), String> {
        let ctx = llvm_ty.get_context();

        let llvm_ty = llvm_ty.get_element_type();
        let AnyTypeEnum::StructType(llvm_ty) = llvm_ty else {
            return Err(format!(
                "Expected struct type for `NDIndex` type, got {llvm_ty}"
            ));
        };

        let fields = NDIndexStructFields::new(ctx, llvm_usize);

        check_struct_type_matches_fields(fields, llvm_ty, "NDIndex", &[])
    }

    #[must_use]
    fn fields(
        ctx: impl AsContextRef<'ctx>,
        llvm_usize: IntType<'ctx>,
    ) -> NDIndexStructFields<'ctx> {
        NDIndexStructFields::new(ctx, llvm_usize)
    }

    #[must_use]
    pub fn get_fields(&self) -> NDIndexStructFields<'ctx> {
        Self::fields(self.ty.get_context(), self.llvm_usize)
    }

    #[must_use]
    fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
        let field_tys =
            Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();

        ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
    }

    fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
        let llvm_ndindex = Self::llvm_type(ctx, llvm_usize);

        Self { ty: llvm_ndindex, llvm_usize }
    }

    #[must_use]
    pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
        Self::new_impl(ctx.ctx, ctx.get_size_type())
    }

    #[must_use]
    pub fn new_with_generator<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
    ) -> Self {
        Self::new_impl(ctx, generator.get_size_type(ctx))
    }

    #[must_use]
    pub fn from_type(ptr_ty: PointerType<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
        debug_assert!(Self::is_representable(ptr_ty, llvm_usize).is_ok());

        Self { ty: ptr_ty, llvm_usize }
    }

    /// Allocates an instance of [`NDIndexValue`] as if by calling `alloca` on the base type.
    ///
    /// See [`ProxyType::raw_alloca`].
    #[must_use]
    pub fn alloca(
        &self,
        ctx: &mut CodeGenContext<'ctx, '_>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(
            self.raw_alloca(ctx, name),
            self.llvm_usize,
            name,
        )
    }

    /// Allocates an instance of [`NDIndexValue`] as if by calling `alloca` on the base type.
    ///
    /// See [`ProxyType::raw_alloca_var`].
    #[must_use]
    pub fn alloca_var<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(
            self.raw_alloca_var(generator, ctx, name),
            self.llvm_usize,
            name,
        )
    }

    /// Serialize a list of [`RustNDIndex`] as a newly allocated LLVM array of [`NDIndexValue`].
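    ///
    /// # Example
    ///
    /// An illustrative sketch; it assumes `indices` is a pre-built `Vec<RustNDIndex>` describing
    /// the subscript to apply:
    ///
    /// ```ignore
    /// let ndindex_ty = NDIndexType::new(ctx);
    /// let ndindices = ndindex_ty.construct_ndindices(generator, ctx, &indices);
    /// ```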
    #[must_use]
    pub fn construct_ndindices<G: CodeGenerator + ?Sized>(
        &self,
        generator: &mut G,
        ctx: &mut CodeGenContext<'ctx, '_>,
        in_ndindices: &[RustNDIndex<'ctx>],
    ) -> ArraySliceValue<'ctx> {
        // Allocate the LLVM ndindices.
        let num_ndindices = self.llvm_usize.const_int(in_ndindices.len() as u64, false);
        let ndindices = self.array_alloca_var(generator, ctx, num_ndindices, None);

        // Initialize all of them.
        for (i, in_ndindex) in in_ndindices.iter().enumerate() {
            let pndindex = unsafe {
                ndindices.ptr_offset_unchecked(
                    ctx,
                    generator,
                    &ctx.ctx.i64_type().const_int(u64::try_from(i).unwrap(), false),
                    None,
                )
            };

            in_ndindex.write_to_ndindex(
                generator,
                ctx,
                NDIndexValue::from_pointer_value(pndindex, self.llvm_usize, None),
            );
        }

        ndindices
    }

    #[must_use]
    pub fn map_value(
        &self,
        value: <<Self as ProxyType<'ctx>>::Value as ProxyValue<'ctx>>::Base,
        name: Option<&'ctx str>,
    ) -> <Self as ProxyType<'ctx>>::Value {
        <Self as ProxyType<'ctx>>::Value::from_pointer_value(value, self.llvm_usize, name)
    }
}

impl<'ctx> ProxyType<'ctx> for NDIndexType<'ctx> {
    type Base = PointerType<'ctx>;
    type Value = NDIndexValue<'ctx>;

    fn is_type<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
        llvm_ty: impl BasicType<'ctx>,
    ) -> Result<(), String> {
        if let BasicTypeEnum::PointerType(ty) = llvm_ty.as_basic_type_enum() {
            <Self as ProxyType<'ctx>>::is_representable(generator, ctx, ty)
        } else {
            Err(format!("Expected pointer type, got {llvm_ty:?}"))
        }
    }

    fn is_representable<G: CodeGenerator + ?Sized>(
        generator: &G,
        ctx: &'ctx Context,
        llvm_ty: Self::Base,
    ) -> Result<(), String> {
        Self::is_representable(llvm_ty, generator.get_size_type(ctx))
    }

    fn alloca_type(&self) -> impl BasicType<'ctx> {
        self.as_base_type().get_element_type().into_struct_type()
    }

    fn as_base_type(&self) -> Self::Base {
        self.ty
    }
}

impl<'ctx> From<NDIndexType<'ctx>> for PointerType<'ctx> {
    fn from(value: NDIndexType<'ctx>) -> Self {
        value.as_base_type()
    }
}
Some files were not shown because too many files have changed in this diff.