forked from M-Labs/nac3
Compare commits: master...ndstrides- (26 commits)

991103c6f0, d3a5c5a48a, 496171a4a5, d90604b713, 0946bd86ea, 29734ce3af,
b940b0a3a1, b12d7fcb2d, 628965e519, 61dd9762d8, cc8103152f, b8c0d5836f,
0cc7e41c6f, d92cccb85e, 3344a2bcd3, 51a099b602, 1f2bb80812, 709844b855,
73937730e0, 5faac4b9d4, c4d54b198b, 9ad7a78dbe, 1721ebac66, f033639415,
3116f11814, 5047379ac0
@@ -13,6 +13,7 @@
''
mkdir -p $out/bin
ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
ln -s ${pkgs.llvmPackages_14.clang}/bin/clang $out/bin/clang-irrt-test
ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
'';
nac3artiq = pkgs.python3Packages.toPythonModule (
@@ -23,6 +24,7 @@
cargoLock = {
lockFile = ./Cargo.lock;
};
cargoTestFlags = [ "--features" "test" ];
passthru.cargoLock = cargoLock;
nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
buildInputs = [ pkgs.python3 llvm-nac3 ];
@@ -161,7 +163,10 @@
clippy
pre-commit
rustfmt
rust-analyzer
];
# https://nixos.wiki/wiki/Rust#Shell.nix_example
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
devShells.x86_64-linux.msys2 = pkgs.mkShell {
name = "nac3-dev-shell-msys2";
@@ -1,3 +1,6 @@
[features]
test = []

[package]
name = "nac3core"
version = "0.1.0"
@@ -3,20 +3,34 @@ use std::{
env,
fs::File,
io::Write,
path::Path,
path::{Path, PathBuf},
process::{Command, Stdio},
};

fn main() {
const FILE: &str = "src/codegen/irrt/irrt.cpp";
const CMD_IRRT_CLANG: &str = "clang-irrt";
const CMD_IRRT_CLANG_TEST: &str = "clang-irrt-test";
const CMD_IRRT_LLVM_AS: &str = "llvm-as-irrt";

fn get_out_dir() -> PathBuf {
PathBuf::from(env::var("OUT_DIR").unwrap())
}

fn get_irrt_dir() -> &'static Path {
Path::new("irrt")
}

/// Compile `irrt.cpp` for use in `src/codegen`
fn compile_irrt_cpp() {
let out_dir = get_out_dir();
let irrt_dir = get_irrt_dir();

/*
 * HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
 * Compiling for WASM32 and filtering the output with regex is the closest we can get.
 */
let irrt_cpp_path = irrt_dir.join("irrt.cpp");
let flags: &[&str] = &[
"--target=wasm32",
FILE,
"-x",
"c++",
"-fno-discard-value-names",
@@ -31,15 +45,19 @@ fn main() {
"-S",
"-Wall",
"-Wextra",
"-Werror=return-type",
"-I",
irrt_dir.to_str().unwrap(),
"-o",
"-",
irrt_cpp_path.to_str().unwrap(),
];

println!("cargo:rerun-if-changed={FILE}");
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir);
// Tell Cargo to rerun if any file under `irrt_dir` (recursive) changes
println!("cargo:rerun-if-changed={}", irrt_dir.to_str().unwrap());

let output = Command::new("clang-irrt")
// Compile IRRT and capture the LLVM IR output
let output = Command::new(CMD_IRRT_CLANG)
.args(flags)
.output()
.map(|o| {
@@ -52,7 +70,17 @@ fn main() {
let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n");
let mut filtered_output = String::with_capacity(output.len());

let regex_filter = Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap();
// Filter out irrelevant IR
//
// Regex:
// - `(?ms:^define.*?\}$)` captures LLVM `define` blocks
// - `(?m:^declare.*?$)` captures LLVM `declare` lines
// - `(?m:^%.+?=\s*type\s*\{.+?\}$)` captures LLVM `type` declarations
// - `(?m:^@.+?=.+$)` captures global constants
let regex_filter = Regex::new(
r"(?ms:^define.*?\}$)|(?m:^declare.*?$)|(?m:^%.+?=\s*type\s*\{.+?\}$)|(?m:^@.+?=.+$)",
)
.unwrap();
for f in regex_filter.captures_iter(&output) {
assert_eq!(f.len(), 1);
filtered_output.push_str(&f[0]);
@@ -63,20 +91,73 @@ fn main() {
.unwrap()
.replace_all(&filtered_output, "");

println!("cargo:rerun-if-env-changed=DEBUG_DUMP_IRRT");
if env::var("DEBUG_DUMP_IRRT").is_ok() {
let mut file = File::create(out_path.join("irrt.ll")).unwrap();
// For debugging
// Doing `DEBUG_DUMP_IRRT=1 cargo build -p nac3core` dumps the LLVM IR generated
const DEBUG_DUMP_IRRT: &str = "DEBUG_DUMP_IRRT";
println!("cargo:rerun-if-env-changed={DEBUG_DUMP_IRRT}");
if env::var(DEBUG_DUMP_IRRT).is_ok() {
let mut file = File::create(out_dir.join("irrt.ll")).unwrap();
file.write_all(output.as_bytes()).unwrap();
let mut file = File::create(out_path.join("irrt-filtered.ll")).unwrap();

let mut file = File::create(out_dir.join("irrt-filtered.ll")).unwrap();
file.write_all(filtered_output.as_bytes()).unwrap();
}

let mut llvm_as = Command::new("llvm-as-irrt")
// Assemble the emitted and filtered IR to .bc
// That .bc will be integrated into nac3core's codegen
let mut llvm_as = Command::new(CMD_IRRT_LLVM_AS)
.stdin(Stdio::piped())
.arg("-o")
.arg(out_path.join("irrt.bc"))
.arg(out_dir.join("irrt.bc"))
.spawn()
.unwrap();
llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
assert!(llvm_as.wait().unwrap().success());
}

/// Compile `irrt_test.cpp` for testing
fn compile_irrt_test_cpp() {
let out_dir = get_out_dir();
let irrt_dir = get_irrt_dir();

let exe_path = out_dir.join("irrt_test.out"); // Output path of the compiled test executable
let irrt_test_cpp_path = irrt_dir.join("irrt_test.cpp");
let flags: &[&str] = &[
irrt_test_cpp_path.to_str().unwrap(),
"-x",
"c++",
"-I",
irrt_dir.to_str().unwrap(),
"-g",
"-fno-discard-value-names",
"-O0",
"-Wall",
"-Wextra",
"-Werror=return-type",
"-lm", // for `tgamma()`, `lgamma()`
"-I",
irrt_dir.to_str().unwrap(),
"-o",
exe_path.to_str().unwrap(),
];

Command::new(CMD_IRRT_CLANG_TEST)
.args(flags)
.output()
.map(|o| {
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
o
})
.unwrap();
println!("cargo:rerun-if-changed={}", irrt_dir.to_str().unwrap());
}

fn main() {
compile_irrt_cpp();

// https://github.com/rust-lang/cargo/issues/2549
// `cargo test -F test` to also build `irrt_test.cpp`
if cfg!(feature = "test") {
compile_irrt_test_cpp();
}
}
nac3core/irrt/irrt.cpp (new file, 9 lines)
@@ -0,0 +1,9 @@
#define IRRT_DEFINE_TYPEDEF_INTS
#include <irrt_everything.hpp>

/*
All IRRT implementations.

We don't have any pre-compiled objects, so we write all implementations in headers and
concatenate them with `#include` into one massive source file that contains all the IRRT code.
*/
@@ -1,9 +1,7 @@
using int8_t = _BitInt(8);
using uint8_t = unsigned _BitInt(8);
using int32_t = _BitInt(32);
using uint32_t = unsigned _BitInt(32);
using int64_t = _BitInt(64);
using uint64_t = unsigned _BitInt(64);
#pragma once

#include <irrt/utils.hpp>
#include <irrt/int_defs.hpp>

// NDArray indices are always `uint32_t`.
using NDIndex = uint32_t;
@@ -11,16 +9,6 @@ using NDIndex = uint32_t;
using SliceIndex = int32_t;

namespace {
template <typename T>
const T& max(const T& a, const T& b) {
    return a > b ? a : b;
}

template <typename T>
const T& min(const T& a, const T& b) {
    return a > b ? b : a;
}

// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
nac3core/irrt/irrt/error_context.hpp (new file, 85 lines)
@@ -0,0 +1,85 @@
#pragma once

#include <irrt/int_defs.hpp>
#include <irrt/utils.hpp>

namespace {
// nac3core's "str" struct type definition
template <typename SizeT>
struct Str {
    const char* content;
    SizeT length;
};

// A limited set of errors IRRT could use.
typedef uint32_t ErrorId;
struct ErrorIds {
    ErrorId index_error;
    ErrorId value_error;
    ErrorId assertion_error;
    ErrorId runtime_error;
    ErrorId type_error;
};

struct ErrorContext {
    // Context
    const ErrorIds* error_ids;

    // Error thrown by IRRT
    ErrorId error_id;
    const char* message_template; // MUST BE `&'static`
    int64_t param1;
    int64_t param2;
    int64_t param3;

    void initialize(const ErrorIds* error_ids) {
        this->error_ids = error_ids;
        clear_error();
    }

    void clear_error() {
        // Point the message_template to an empty str. Don't set it to nullptr as a sentinel
        this->message_template = "";
    }

    void set_error(ErrorId error_id, const char* message, int64_t param1 = 0, int64_t param2 = 0, int64_t param3 = 0) {
        this->error_id = error_id;
        this->message_template = message;
        this->param1 = param1;
        this->param2 = param2;
        this->param3 = param3;
    }

    bool has_error() {
        return !cstr_utils::is_empty(message_template);
    }

    template <typename SizeT>
    void get_error_str(Str<SizeT> *dst_str) {
        dst_str->content = message_template;
        dst_str->length = (SizeT) cstr_utils::length(message_template);
    }
};
}

extern "C" {
void __nac3_error_context_initialize(ErrorContext* errctx, const ErrorIds* error_ids) {
    errctx->initialize(error_ids);
}

bool __nac3_error_context_has_no_error(ErrorContext* errctx) {
    return !errctx->has_error();
}

void __nac3_error_context_get_error_str(ErrorContext* errctx, Str<int32_t> *dst_str) {
    errctx->get_error_str<int32_t>(dst_str);
}

void __nac3_error_context_get_error_str64(ErrorContext* errctx, Str<int64_t> *dst_str) {
    errctx->get_error_str<int64_t>(dst_str);
}

void __nac3_error_dummy_raise(ErrorContext* errctx) {
    errctx->set_error(errctx->error_ids->runtime_error, "THROWN FROM __nac3_error_dummy_raise!!!!!!");
}
}
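For orientation, here is a minimal caller-side sketch (not part of this diff; the driver function and error-ID values are hypothetical, and it assumes the file is compiled like `irrt_test.cpp`, which supplies the integer typedefs via `<cstdint>`) showing how the exported `__nac3_error_context_*` functions fit together:

// Hypothetical host-side driver, for illustration only.
#include <cstdint>
#include <irrt/error_context.hpp>

void demo_error_context_roundtrip() {
    // Error IDs are assigned by the caller (nac3core); these values are placeholders.
    ErrorIds ids = { 0, 1, 2, 3, 4 };  // index, value, assertion, runtime, type

    ErrorContext errctx;
    __nac3_error_context_initialize(&errctx, &ids);

    __nac3_error_dummy_raise(&errctx);  // records a runtime_error

    if (!__nac3_error_context_has_no_error(&errctx)) {
        Str<int32_t> msg;
        __nac3_error_context_get_error_str(&errctx, &msg);
        // msg.content / msg.length now describe the failure; the caller decides how to raise it.
    }
}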
nac3core/irrt/irrt/int_defs.hpp (new file, 12 lines)
@@ -0,0 +1,12 @@
#pragma once

// This is made toggleable since `irrt_test.cpp` itself would include
// headers that define these typedefs
#ifdef IRRT_DEFINE_TYPEDEF_INTS
typedef _BitInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t;
typedef _BitInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t;
typedef _BitInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t;
#endif
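A hedged aside, not part of the diff: these `_BitInt` typedefs stand in for `<cstdint>` because `irrt.cpp` is compiled freestanding for the wasm32 hack in `build.rs`. A compile-time sanity check could look like this, assuming clang's usual `_BitInt` layout:

// Hypothetical sanity check, not present in the repository.
#define IRRT_DEFINE_TYPEDEF_INTS
#include <irrt/int_defs.hpp>

static_assert(sizeof(int8_t) == 1, "unexpected int8_t size");
static_assert(sizeof(int32_t) == 4, "unexpected int32_t size");
static_assert(sizeof(int64_t) == 8, "unexpected int64_t size");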
221
nac3core/irrt/irrt/numpy/ndarray_basic.hpp
Normal file
221
nac3core/irrt/irrt/numpy/ndarray_basic.hpp
Normal file
@ -0,0 +1,221 @@
|
||||
#pragma once
|
||||
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/error_context.hpp>
|
||||
#include <irrt/numpy/ndarray_def.hpp>
|
||||
|
||||
namespace { namespace ndarray { namespace basic {
|
||||
namespace util {
|
||||
// throw an error if there is an axis with negative dimension
|
||||
template <typename SizeT>
|
||||
void assert_shape_no_negative(ErrorContext* errctx, SizeT ndims, const SizeT* shape) {
|
||||
for (SizeT axis = 0; axis < ndims; axis++) {
|
||||
if (shape[axis] < 0) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"negative dimensions are not allowed; axis {0} has dimension {1}",
|
||||
axis, shape[axis]
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// compute the size/# of elements of an ndarray given its shape
|
||||
template <typename SizeT>
|
||||
SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
|
||||
SizeT size = 1;
|
||||
for (SizeT axis = 0; axis < ndims; axis++) size *= shape[axis];
|
||||
return size;
|
||||
}
|
||||
|
||||
// compute the strides of an ndarray given an ndarray `shape`
|
||||
// and assuming that the ndarray is *fully c-contagious*.
|
||||
//
|
||||
// you might want to read up on https://ajcr.net/stride-guide-part-1/.
|
||||
//
|
||||
// this function might be used in isolation without an ndarray. that's
|
||||
// why it separated out into its own util function.
|
||||
template <typename SizeT>
|
||||
void set_strides_by_shape(SizeT itemsize, SizeT ndims, SizeT* dst_strides, const SizeT* shape) {
|
||||
SizeT stride_product = 1;
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
int axis = ndims - i - 1;
|
||||
dst_strides[axis] = stride_product * itemsize;
|
||||
stride_product *= shape[axis];
|
||||
}
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
void set_indices_by_nth(SizeT ndims, const SizeT* shape, SizeT* indices, SizeT nth) {
|
||||
for (int32_t i = 0; i < ndims; i++) {
|
||||
int32_t axis = ndims - i - 1;
|
||||
int32_t dim = shape[axis];
|
||||
|
||||
indices[axis] = nth % dim;
|
||||
nth /= dim;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calculate the size/# of elements of an `ndarray`.
|
||||
// this function corresponds to `np.size(<ndarray>)` or `ndarray.size`
|
||||
template <typename SizeT>
|
||||
SizeT size(NDArray<SizeT>* ndarray) {
|
||||
return util::calc_size_from_shape(ndarray->ndims, ndarray->shape);
|
||||
}
|
||||
|
||||
// calculate the number of bytes of its content of an `ndarray` *in its view*.
|
||||
// this function corresponds to `ndarray.nbytes`
|
||||
template <typename SizeT>
|
||||
SizeT nbytes(NDArray<SizeT>* ndarray) {
|
||||
return size(ndarray) * ndarray->itemsize;
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
void set_strides_by_shape(NDArray<SizeT>* ndarray) {
|
||||
util::set_strides_by_shape(ndarray->itemsize, ndarray->ndims, ndarray->strides, ndarray->shape);
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
uint8_t* get_pelement_by_indices(NDArray<SizeT>* ndarray, const SizeT* indices) {
|
||||
uint8_t* element = ndarray->data;
|
||||
for (SizeT dim_i = 0; dim_i < ndarray->ndims; dim_i++)
|
||||
element += indices[dim_i] * ndarray->strides[dim_i];
|
||||
return element;
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
uint8_t* get_nth_pelement(NDArray<SizeT>* ndarray, SizeT nth) {
|
||||
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndarray->ndims);
|
||||
util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, nth);
|
||||
return get_pelement_by_indices(ndarray, indices);
|
||||
}
|
||||
|
||||
// get the pointer to the nth element of the ndarray as if it were flattened.
|
||||
template <typename SizeT>
|
||||
uint8_t* checked_get_nth_pelement(NDArray<SizeT>* ndarray, ErrorContext* errctx, SizeT nth) {
|
||||
SizeT arr_size = ndarray->size();
|
||||
if (!(0 <= nth && nth < arr_size)) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->index_error,
|
||||
"index {0} is out of bounds, valid range is {1} <= index < {2}",
|
||||
nth, 0, arr_size
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
return get_nth_pelement(ndarray, nth);
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
void set_pelement_value(NDArray<SizeT>* ndarray, uint8_t* pelement, const uint8_t* pvalue) {
|
||||
__builtin_memcpy(pelement, pvalue, ndarray->itemsize);
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
void len(ErrorContext* errctx, NDArray<SizeT>* ndarray, SliceIndex* dst_length) {
|
||||
// Error if the ndarray is "unsized" (i.e, ndims == 0)
|
||||
if (ndarray->ndims == 0) {
|
||||
// Error copied from python by doing `len(np.zeros(()))`
|
||||
errctx->set_error(
|
||||
errctx->error_ids->type_error,
|
||||
"len() of unsized object"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
*dst_length = (SliceIndex) ndarray->shape[0];
|
||||
}
|
||||
|
||||
// Copy data from one ndarray to another *OF THE EXACT SAME* ndims, shape, and itemsize.
|
||||
template <typename SizeT>
|
||||
void copy_data(const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
__builtin_assume(src_ndarray->ndims == dst_ndarray->ndims);
|
||||
__builtin_assume(src_ndarray->itemsize == dst_ndarray->itemsize);
|
||||
|
||||
for (SizeT i = 0; i < src_ndarray->size; i++) {
|
||||
auto src_element = ndarray::basic::get_nth_pelement(src_ndarray, i);
|
||||
auto dst_element = ndarray::basic::get_nth_pelement(dst_ndarray, i);
|
||||
ndarray::basic::set_pelement_value(dst_ndarray, dst_element, src_element);
|
||||
}
|
||||
}
|
||||
|
||||
// `copy_data()` with assertions to check ndims, shape, and itemsize between the two ndarrays.
|
||||
template <typename SizeT>
|
||||
void copy_data_checked(ErrorContext* errctx, const NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
// NOTE: Out of all error types, runtime error seems appropriate
|
||||
|
||||
// Check ndims
|
||||
if (src_ndarray->ndims != dst_ndarray->ndims) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->runtime_error,
|
||||
"IRRT copy_data_checked input arrays `ndims` are mismatched"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
// Check shape
|
||||
if (!arrays_match(src_ndarray->ndims, src_ndarray->shape, dst_ndarray->shape)) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->runtime_error,
|
||||
"IRRT copy_data_checked input arrays `shape` are mismatched"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
// Check itemsize
|
||||
if (src_ndarray->itemsize != dst_ndarray->itemsize) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->runtime_error,
|
||||
"IRRT copy_data_checked input arrays `itemsize` are mismatched"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
copy_data(src_ndarray, dst_ndarray);
|
||||
}
|
||||
} } }
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::basic;
|
||||
|
||||
uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
|
||||
return size(ndarray);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
|
||||
return size(ndarray);
|
||||
}
|
||||
|
||||
uint32_t __nac3_ndarray_nbytes(NDArray<int32_t>* ndarray) {
|
||||
return nbytes(ndarray);
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_nbytes64(NDArray<int64_t>* ndarray) {
|
||||
return nbytes(ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_len(ErrorContext* errctx, NDArray<int32_t>* ndarray, SliceIndex* dst_len) {
|
||||
return len(errctx, ndarray, dst_len);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_len64(ErrorContext* errctx, NDArray<int64_t>* ndarray, SliceIndex* dst_len) {
|
||||
return len(errctx, ndarray, dst_len);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_util_assert_shape_no_negative(ErrorContext* errctx, int32_t ndims, int32_t* shape) {
|
||||
util::assert_shape_no_negative(errctx, ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_util_assert_shape_no_negative64(ErrorContext* errctx, int64_t ndims, int64_t* shape) {
|
||||
util::assert_shape_no_negative(errctx, ndims, shape);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_set_strides_by_shape(NDArray<int32_t>* ndarray) {
|
||||
set_strides_by_shape(ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_set_strides_by_shape64(NDArray<int64_t>* ndarray) {
|
||||
set_strides_by_shape(ndarray);
|
||||
}
|
||||
}
|
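As an illustration of the `set_strides_by_shape` logic above, a standalone sketch (not part of the diff) computing C-contiguous strides for a float64 array of shape (3, 4):

// Standalone sketch of the stride computation used by set_strides_by_shape.
#include <cassert>
#include <cstdint>

int main() {
    int32_t itemsize = 8;              // sizeof(double)
    int32_t shape[2] = { 3, 4 };
    int32_t strides[2];

    int32_t stride_product = 1;
    for (int32_t i = 0; i < 2; i++) {  // walk axes from last to first
        int32_t axis = 2 - i - 1;
        strides[axis] = stride_product * itemsize;
        stride_product *= shape[axis];
    }

    assert(strides[0] == 32 && strides[1] == 8);  // row stride 4*8 bytes, element stride 8 bytes
    return 0;
}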
135
nac3core/irrt/irrt/numpy/ndarray_broadcast.hpp
Normal file
135
nac3core/irrt/irrt/numpy/ndarray_broadcast.hpp
Normal file
@ -0,0 +1,135 @@
|
||||
#include <irrt/numpy/ndarray_def.hpp>
|
||||
|
||||
namespace { namespace ndarray { namespace broadcast {
|
||||
namespace util {
|
||||
template <typename SizeT>
|
||||
void assert_broadcast_shape_to(
|
||||
ErrorContext* errctx,
|
||||
const SizeT target_ndims,
|
||||
const SizeT* target_shape,
|
||||
const SizeT src_ndims,
|
||||
const SizeT* src_shape
|
||||
) {
|
||||
/*
|
||||
// See https://numpy.org/doc/stable/user/basics.broadcasting.html
|
||||
|
||||
This function handles this example:
|
||||
```
|
||||
Image (3d array): 256 x 256 x 3
|
||||
Scale (1d array): 3
|
||||
Result (3d array): 256 x 256 x 3
|
||||
```
|
||||
|
||||
Other interesting examples to consider:
|
||||
- `can_broadcast_shape_to([3], [1, 1, 1, 1, 3]) ... ok`
|
||||
- `can_broadcast_shape_to([3], [3, 1]) == false`
|
||||
- `can_broadcast_shape_to([256, 256, 3], [256, 1, 3]) ... ok`
|
||||
|
||||
In cases when the shapes contain zero(es):
|
||||
- `can_broadcast_shape_to([0], [1]) ... ok`
|
||||
- `can_broadcast_shape_to([0], [2]) == false`
|
||||
- `can_broadcast_shape_to([0, 4, 0, 0], [1]) ... ok`
|
||||
- `can_broadcast_shape_to([0, 4, 0, 0], [1, 1, 1, 1]) ... ok`
|
||||
- `can_broadcast_shape_to([0, 4, 0, 0], [1, 4, 1, 1]) ... ok`
|
||||
- `can_broadcast_shape_to([4, 3], [0, 3]) == false`
|
||||
- `can_broadcast_shape_to([4, 3], [0, 0]) == false`
|
||||
*/
|
||||
|
||||
// Target ndims must not be smaller than source ndims
|
||||
// e.g., `np.broadcast_to(np.zeros((1, 1, 1, 1)), (1, ))` is prohibited by numpy
|
||||
if (target_ndims < src_ndims) {
|
||||
// Error copied from python by doing the `np.broadcast_to(np.zeros((1, 1, 1, 1)), (1, ))`
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"input operand has more dimensions than allowed by the axis remapping"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
// Implements the rules in https://numpy.org/doc/stable/user/basics.broadcasting.html
|
||||
for (SizeT i = 0; i < src_ndims; i++) {
|
||||
SizeT target_axis = target_ndims - i - 1;
|
||||
SizeT src_axis = src_ndims - i - 1;
|
||||
|
||||
bool target_dim_exists = target_axis >= 0;
|
||||
bool src_dim_exists = src_axis >= 0;
|
||||
|
||||
SizeT target_dim = target_dim_exists ? target_shape[target_axis] : 1;
|
||||
SizeT src_dim = src_dim_exists ? src_shape[src_axis] : 1;
|
||||
|
||||
bool ok = src_dim == 1 || target_dim == src_dim;
|
||||
if (!ok) {
|
||||
// Error copied from python by doing `np.broadcast_to(np.zeros((3, 1)), (1, 1)),
|
||||
// but this is the true numpy error:
|
||||
// "ValueError: operands could not be broadcast together with remapped shapes [original->remapped]: (3,1) and requested shape (1,1)"
|
||||
// TODO: we cannot show more than 3 parameters!!
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"operands could not be broadcast together with remapping shapes [original->remapped]"
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Similar to `np.broadcast_to(<ndarray>, <target_shape>)`
|
||||
// Assumptions:
|
||||
// - `src_ndarray` has to be fully initialized.
|
||||
// - `dst_ndarray->ndims` has to be set.
|
||||
// - `dst_ndarray->shape` has to be set, this determines the shape `this` broadcasts to.
|
||||
//
|
||||
// Other notes:
|
||||
// - `dst_ndarray->data` does not have to be set, it will be set to `src_ndarray->data`.
|
||||
// - `dst_ndarray->itemsize` does not have to be set, it will be set to `src_ndarray->data`.
|
||||
// - `dst_ndarray->strides` does not have to be set, it will be overwritten.
|
||||
//
|
||||
// Cautions:
|
||||
// ```
|
||||
// xs = np.zeros((4,))
|
||||
// ys = np.zero((4, 1))
|
||||
// ys[:] = xs # ok
|
||||
//
|
||||
// xs = np.zeros((1, 4))
|
||||
// ys = np.zero((4,))
|
||||
// ys[:] = xs # allowed
|
||||
// # However `np.broadcast_to(xs, (4,))` would fails, as per numpy's broadcasting rule.
|
||||
// # and apparently numpy will "deprecate" this? SEE https://github.com/numpy/numpy/issues/21744
|
||||
// # This implementation will NOT support this assignment.
|
||||
// ```
|
||||
template <typename SizeT>
|
||||
void broadcast_to(ErrorContext* errctx, NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
ndarray::broadcast::util::assert_broadcast_shape_to(
|
||||
errctx,
|
||||
dst_ndarray->ndims,
|
||||
dst_ndarray->shape,
|
||||
src_ndarray->ndims,
|
||||
src_ndarray->shape
|
||||
);
|
||||
if (errctx->has_error()) {
|
||||
return; // Propagate error
|
||||
}
|
||||
|
||||
SizeT stride_product = 1;
|
||||
for (SizeT i = 0; i < max(src_ndarray->ndims, dst_ndarray->ndims); i++) {
|
||||
SizeT this_dim_i = src_ndarray->ndims - i - 1;
|
||||
SizeT dst_dim_i = dst_ndarray->ndims - i - 1;
|
||||
|
||||
bool this_dim_exists = this_dim_i >= 0;
|
||||
bool dst_dim_exists = dst_dim_i >= 0;
|
||||
|
||||
// TODO: Explain how this works
|
||||
bool c1 = this_dim_exists && src_ndarray->shape[this_dim_i] == 1;
|
||||
bool c2 = dst_dim_exists && dst_ndarray->shape[dst_dim_i] != 1;
|
||||
if (!this_dim_exists || (c1 && c2)) {
|
||||
dst_ndarray->strides[dst_dim_i] = 0; // Freeze it in-place
|
||||
} else {
|
||||
dst_ndarray->strides[dst_dim_i] = stride_product * src_ndarray->itemsize;
|
||||
stride_product *= src_ndarray->shape[this_dim_i]; // NOTE: this_dim_exist must be true here.
|
||||
}
|
||||
}
|
||||
}
|
||||
} } }
|
nac3core/irrt/irrt/numpy/ndarray_def.hpp (new file, 55 lines)
@@ -0,0 +1,55 @@
#pragma once

namespace {
// The NDArray object. `SizeT` is the *signed* size type of this ndarray.
//
// NOTE: The order of fields is IMPORTANT. DON'T TOUCH IT
//
// Some resources you might find helpful:
// - The official numpy implementations:
//   - https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst
// - On strides (about reshaping, slicing, C-contiguousness, etc)
//   - https://ajcr.net/stride-guide-part-1/.
//   - https://ajcr.net/stride-guide-part-2/.
//   - https://ajcr.net/stride-guide-part-3/.
template <typename SizeT>
struct NDArray {
    // The underlying data this `ndarray` is pointing to.
    //
    // NOTE: Formally this should be of type `void *`, but clang
    // translates `void *` to `i8 *` when run with `-S -emit-llvm`,
    // so we will put `uint8_t *` here for clarity.
    //
    // This pointer should point to the first element of the ndarray directly
    uint8_t *data;

    // The number of bytes of a single element in `data`.
    //
    // The `SizeT` is treated as `unsigned`.
    SizeT itemsize;

    // The number of dimensions of this ndarray.
    //
    // The `SizeT` is treated as `unsigned`.
    SizeT ndims;

    // Array shape, with length equal to `ndims`.
    //
    // The `SizeT` is treated as `unsigned`.
    //
    // NOTE: `shape` can contain 0.
    // (those appear when the user makes an out of bounds slice into an ndarray, e.g., `np.zeros((3, 3))[400:].shape == (0, 3)`)
    SizeT *shape;

    // Array strides (stride value is in number of bytes, NOT number of elements), with length equal to `ndims`.
    //
    // The `SizeT` is treated as `signed`.
    //
    // NOTE: `strides` can have negative numbers.
    // (those appear when there is a slice with a negative step, e.g., `my_array[::-1]`)
    SizeT *strides;
};

// Because ndarray is so complicated, its functions are split into
// different files and namespaces.
}
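A short sketch (not part of the diff; compiled the same way as `irrt_test.cpp`, with the variable names made up) of how a plain C buffer is wrapped as an `NDArray` view, matching the field meanings above:

// Sketch: a (2, 3) float64 view over a plain C buffer.
#include <cstdint>
#include <irrt/numpy/ndarray_def.hpp>

void demo_ndarray_view() {
    static double data[6] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0 };
    static int32_t shape[2]   = { 2, 3 };
    static int32_t strides[2] = { 3 * (int32_t) sizeof(double), (int32_t) sizeof(double) };  // bytes, C-contiguous

    NDArray<int32_t> view = {
        .data     = (uint8_t*) data,
        .itemsize = sizeof(double),
        .ndims    = 2,
        .shape    = shape,
        .strides  = strides
    };
    // Element (i, j) lives at view.data + i * view.strides[0] + j * view.strides[1].
    (void) view;
}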
nac3core/irrt/irrt/numpy/ndarray_fill.hpp (new file, 28 lines)
@@ -0,0 +1,28 @@
#pragma once

#include <irrt/numpy/ndarray_def.hpp>
#include <irrt/numpy/ndarray_basic.hpp>

namespace { namespace ndarray { namespace fill {
// Fill the ndarray with a value
template <typename SizeT>
void fill_generic(NDArray<SizeT>* ndarray, const uint8_t* pvalue) {
    const SizeT size = ndarray::basic::size(ndarray);
    for (SizeT i = 0; i < size; i++) {
        uint8_t* pelement = ndarray::basic::get_nth_pelement(ndarray, i); // No need for checked_get_nth_pelement
        ndarray::basic::set_pelement_value(ndarray, pelement, pvalue);
    }
}
} } }

extern "C" {
using namespace ndarray::fill;

void __nac3_ndarray_fill_generic(NDArray<int32_t>* ndarray, uint8_t* pvalue) {
    fill_generic(ndarray, pvalue);
}

void __nac3_ndarray_fill_generic64(NDArray<int64_t>* ndarray, uint8_t* pvalue) {
    fill_generic(ndarray, pvalue);
}
}
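A usage sketch (not part of the diff; the buffer and shape are made up, compiled like `irrt_test.cpp`) showing what `__nac3_ndarray_fill_generic` does with `pvalue`:

// Sketch: fill a 1-D float64 ndarray with a constant via the exported entry point.
#include <cstdint>
#include <irrt_everything.hpp>

void demo_ndarray_fill() {
    static double buffer[4] = {};
    static int32_t shape[1]   = { 4 };
    static int32_t strides[1] = { sizeof(double) };
    NDArray<int32_t> arr = {
        .data = (uint8_t*) buffer, .itemsize = sizeof(double),
        .ndims = 1, .shape = shape, .strides = strides
    };

    double fill_value = 42.0;
    __nac3_ndarray_fill_generic(&arr, (uint8_t*) &fill_value);  // buffer is now {42.0, 42.0, 42.0, 42.0}
}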
169
nac3core/irrt/irrt/numpy/ndarray_subscript.hpp
Normal file
169
nac3core/irrt/irrt/numpy/ndarray_subscript.hpp
Normal file
@ -0,0 +1,169 @@
|
||||
#pragma once
|
||||
|
||||
#include <irrt/slice.hpp>
|
||||
#include <irrt/numpy/ndarray_def.hpp>
|
||||
#include <irrt/numpy/ndarray_basic.hpp>
|
||||
#include <irrt/error_context.hpp>
|
||||
|
||||
namespace {
|
||||
typedef uint8_t NDSubscriptType;
|
||||
|
||||
const NDSubscriptType INPUT_SUBSCRIPT_TYPE_INDEX = 0;
|
||||
const NDSubscriptType INPUT_SUBSCRIPT_TYPE_SLICE = 1;
|
||||
|
||||
struct NDSubscript {
|
||||
// A poor-man's enum variant type
|
||||
NDSubscriptType type;
|
||||
|
||||
/*
|
||||
if type == INPUT_SUBSCRIPT_TYPE_INDEX => `slice` points to a single `SliceIndex`
|
||||
if type == INPUT_SUBSCRIPT_TYPE_SLICE => `slice` points to a single `UserRange`
|
||||
|
||||
`SizeT` is controlled by the caller: `NDSubscript` only cares about where that
|
||||
slice is (the pointer), `NDSubscript` does not care/know about the actual `sizeof()`
|
||||
of the slice value.
|
||||
*/
|
||||
uint8_t* data;
|
||||
};
|
||||
}
|
||||
|
||||
namespace { namespace ndarray { namespace subscript {
|
||||
namespace util {
|
||||
template<typename SizeT>
|
||||
void deduce_ndims_after_slicing(ErrorContext* errctx, SizeT* result, SizeT ndims, SizeT num_ndsubscripts, const NDSubscript* ndsubscripts) {
|
||||
if (num_ndsubscripts > ndims) {
|
||||
// Error copied from python by doing `np.zeros((3, 4))[:, :, :]`
|
||||
errctx->set_error(
|
||||
errctx->error_ids->index_error,
|
||||
"too many indices for array: array is {0}-dimensional, but {1} were indexed",
|
||||
ndims, num_ndsubscripts
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
SizeT final_ndims = ndims;
|
||||
for (SizeT i = 0; i < num_ndsubscripts; i++) {
|
||||
if (ndsubscripts[i].type == INPUT_SUBSCRIPT_TYPE_INDEX) {
|
||||
final_ndims--; // An index demotes the rank by 1
|
||||
}
|
||||
}
|
||||
|
||||
*result = final_ndims;
|
||||
}
|
||||
}
|
||||
|
||||
// To support numpy "basic indexing" https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing
|
||||
// "Advanced indexing" https://numpy.org/doc/stable/user/basics.indexing.html#advanced-indexing is not supported
|
||||
//
|
||||
// This function supports:
|
||||
// - "scalar indexing",
|
||||
// - "slicing and strides",
|
||||
// - and "dimensional indexing tools" (TODO, but this is really easy to implement).
|
||||
//
|
||||
// Things assumed by this function:
|
||||
// - `dst_ndarray` is allocated by the caller
|
||||
// - `dst_ndarray.ndims` has the correct value (according to `ndarray::util::deduce_ndims_after_slicing`).
|
||||
// - ... and `dst_ndarray.shape` and `dst_ndarray.strides` have been allocated by the caller as well
|
||||
//
|
||||
// Other notes:
|
||||
// - `dst_ndarray->data` does not have to be set, it will be derived.
|
||||
// - `dst_ndarray->itemsize` does not have to be set, it will be set to `src_ndarray->itemsize`
|
||||
// - `dst_ndarray->shape` and `dst_ndarray.strides` can contain empty values
|
||||
template <typename SizeT>
|
||||
void subscript(ErrorContext* errctx, SliceIndex num_subscripts, NDSubscript* subscripts, NDArray<SizeT>* src_ndarray, NDArray<SizeT>* dst_ndarray) {
|
||||
// REFERENCE CODE (check out `_index_helper` in `__getitem__`):
|
||||
// https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652
|
||||
|
||||
// irrt_assert(dst_ndarray->ndims == ndarray::util::deduce_ndims_after_slicing(src_ndarray->ndims, num_subscripts, subscripts));
|
||||
|
||||
dst_ndarray->data = src_ndarray->data;
|
||||
dst_ndarray->itemsize = src_ndarray->itemsize;
|
||||
|
||||
SizeT src_axis = 0;
|
||||
SizeT dst_axis = 0;
|
||||
|
||||
for (SliceIndex i = 0; i < num_subscripts; i++) {
|
||||
NDSubscript *ndsubscript = &subscripts[i];
|
||||
if (ndsubscript->type == INPUT_SUBSCRIPT_TYPE_INDEX) {
|
||||
// Handle when the ndsubscript is just a single (possibly negative) integer
|
||||
// e.g., `my_array[::2, -5, ::-1]`
|
||||
// ^^------ like this
|
||||
SliceIndex input_index = *((SliceIndex*) ndsubscript->data);
|
||||
|
||||
SliceIndex index = slice::resolve_index_in_length(src_ndarray->shape[src_axis], input_index);
|
||||
if (index == slice::OUT_OF_BOUNDS) {
|
||||
// Error message copied from numpy by doing `np.zeros((3, 4))[100]`
|
||||
errctx->set_error(
|
||||
errctx->error_ids->index_error,
|
||||
"index {0} is out of bounds for axis {1} with size {2}",
|
||||
input_index, src_axis, src_ndarray->shape[src_axis]
|
||||
);
|
||||
return; // Terminate
|
||||
}
|
||||
|
||||
dst_ndarray->data += index * src_ndarray->strides[src_axis]; // Add offset
|
||||
|
||||
// Next
|
||||
src_axis++;
|
||||
} else if (ndsubscript->type == INPUT_SUBSCRIPT_TYPE_SLICE) {
|
||||
// Handle when the ndsubscript is a slice (represented by UserSlice in IRRT)
|
||||
// e.g., `my_array[::2, -5, ::-1]`
|
||||
// ^^^------^^^^----- like these
|
||||
UserSlice* input_user_slice = (UserSlice*) ndsubscript->data;
|
||||
|
||||
// TODO: use checked indices
|
||||
Slice slice;
|
||||
input_user_slice->indices_checked(errctx, src_ndarray->shape[src_axis], &slice); // To resolve negative indices and other funny stuff written by the user
|
||||
if (errctx->has_error()) {
|
||||
return; // Propagate error
|
||||
}
|
||||
|
||||
// NOTE: There is no need to write special code to handle negative steps/strides.
|
||||
// This simple implementation meticulously handles both positive and negative steps/strides.
|
||||
// Check out the tinynumpy and IRRT's test cases if you are not convinced.
|
||||
dst_ndarray->data += (SizeT) slice.start * src_ndarray->strides[src_axis]; // Add offset (NOTE: no need to `* itemsize`, strides count in # of bytes)
|
||||
dst_ndarray->strides[dst_axis] = ((SizeT) slice.step) * src_ndarray->strides[src_axis]; // Determine stride
|
||||
dst_ndarray->shape[dst_axis] = (SizeT) slice.len(); // Determine shape dimension
|
||||
|
||||
// Next
|
||||
dst_axis++;
|
||||
src_axis++;
|
||||
} else {
|
||||
__builtin_unreachable();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Reference python code:
|
||||
```python
|
||||
dst_ndarray.shape.extend(src_ndarray.shape[src_axis:])
|
||||
dst_ndarray.strides.extend(src_ndarray.strides[src_axis:])
|
||||
```
|
||||
*/
|
||||
|
||||
for (; dst_axis < dst_ndarray->ndims; dst_axis++, src_axis++) {
|
||||
dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis];
|
||||
dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis];
|
||||
}
|
||||
}
|
||||
} } }
|
||||
|
||||
extern "C" {
|
||||
using namespace ndarray::subscript;
|
||||
|
||||
void __nac3_ndarray_subscript_deduce_ndims_after_slicing(ErrorContext* errctx, int32_t* result, int32_t ndims, int32_t num_ndsubscripts, const NDSubscript* ndsubscripts) {
|
||||
ndarray::subscript::util::deduce_ndims_after_slicing(errctx, result, ndims, num_ndsubscripts, ndsubscripts);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_subscript_deduce_ndims_after_slicing64(ErrorContext* errctx, int64_t* result, int64_t ndims, int64_t num_ndsubscripts, const NDSubscript* ndsubscripts) {
|
||||
ndarray::subscript::util::deduce_ndims_after_slicing(errctx, result, ndims, num_ndsubscripts, ndsubscripts);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_subscript(ErrorContext* errctx, SliceIndex num_subscripts, NDSubscript* subscripts, NDArray<int32_t>* src_ndarray, NDArray<int32_t> *dst_ndarray) {
|
||||
subscript(errctx, num_subscripts, subscripts, src_ndarray, dst_ndarray);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_subscript64(ErrorContext* errctx, SliceIndex num_subscripts, NDSubscript* subscripts, NDArray<int64_t>* src_ndarray, NDArray<int64_t> *dst_ndarray) {
|
||||
subscript(errctx, num_subscripts, subscripts, src_ndarray, dst_ndarray);
|
||||
}
|
||||
}
|
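A sketch (not part of the diff; the variable names are made up, compiled like `irrt_test.cpp`) of how a caller is expected to encode `arr[1, ::2]` as `NDSubscript` values before calling `__nac3_ndarray_subscript`:

// Sketch: encoding `arr[1, ::2]` for __nac3_ndarray_subscript.
#include <cstdint>
#include <irrt_everything.hpp>

void demo_encode_subscripts() {
    SliceIndex index_1 = 1;        // the `1` in `arr[1, ::2]`

    UserSlice every_other;         // the `::2` in `arr[1, ::2]`
    every_other.set_step(2);

    NDSubscript subscripts[2] = {
        { .type = INPUT_SUBSCRIPT_TYPE_INDEX, .data = (uint8_t*) &index_1 },
        { .type = INPUT_SUBSCRIPT_TYPE_SLICE, .data = (uint8_t*) &every_other },
    };
    (void) subscripts;
    // __nac3_ndarray_subscript(&errctx, 2, subscripts, &src_ndarray, &dst_ndarray);
}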
143
nac3core/irrt/irrt/slice.hpp
Normal file
143
nac3core/irrt/irrt/slice.hpp
Normal file
@ -0,0 +1,143 @@
|
||||
#pragma once
|
||||
|
||||
#include <irrt/int_defs.hpp>
|
||||
#include <irrt/slice.hpp>
|
||||
|
||||
namespace {
|
||||
struct Slice {
|
||||
SliceIndex start;
|
||||
SliceIndex stop;
|
||||
SliceIndex step;
|
||||
|
||||
// The length/The number of elements of the slice if it were a range,
|
||||
// i.e., the value of `len(range(this->start, this->stop, this->end))`
|
||||
SliceIndex len() {
|
||||
SliceIndex diff = stop - start;
|
||||
if (diff > 0 && step > 0) {
|
||||
return ((diff - 1) / step) + 1;
|
||||
} else if (diff < 0 && step < 0) {
|
||||
return ((diff + 1) / step) + 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
namespace slice {
|
||||
// "Resolve" an index value under a length in Python lists.
|
||||
// If you have a `list` of length 100, `list[-1]` would resolve to `list[100-1] == list[99]`.
|
||||
//
|
||||
// If length == 0, this function returns 0
|
||||
//
|
||||
// If index is out of bounds, this function clamps the value
|
||||
// (to `list[0]` or `list[-1]` in the context of a list and depending on if index is + or -)
|
||||
SliceIndex resolve_index_in_length_clamped(SliceIndex length, SliceIndex index) {
|
||||
if (index < 0) {
|
||||
// Remember that index is negative, so do a plus here
|
||||
return max<SliceIndex>(length + index, 0);
|
||||
} else {
|
||||
return min<SliceIndex>(length, index);
|
||||
}
|
||||
}
|
||||
|
||||
const SliceIndex OUT_OF_BOUNDS = -1;
|
||||
|
||||
// Like `resolve_index_in_length`.
|
||||
// But also checks if the resolved index is in
|
||||
// bounds (function returns true) or out of bounds
|
||||
// (function returns false); `0 <= resolved index < length` is false).
|
||||
SliceIndex resolve_index_in_length(SliceIndex length, SliceIndex index) {
|
||||
SliceIndex resolved = index < 0 ? length + index : index;
|
||||
|
||||
bool in_bounds = 0 <= resolved && resolved < length;
|
||||
return in_bounds ? resolved : OUT_OF_BOUNDS;
|
||||
}
|
||||
}
|
||||
|
||||
// A user-written Python-like slice.
|
||||
//
|
||||
// i.e., this slice is a triple of either an int or nothing. (e.g., `my_array[:10:2]`, `start` is None)
|
||||
//
|
||||
// You can "resolve" a `UserSlice` by using `user_slice.indices(<length>)`
|
||||
struct UserSlice {
|
||||
// Did the user specify `start`? If 0, `start` is undefined (and contains an empty value)
|
||||
bool start_defined;
|
||||
SliceIndex start;
|
||||
|
||||
// Similar to `start_defined`
|
||||
bool stop_defined;
|
||||
SliceIndex stop;
|
||||
|
||||
// Similar to `start_defined`
|
||||
bool step_defined;
|
||||
SliceIndex step;
|
||||
|
||||
// Convenient constructor for C++ internal use only (say testing)
|
||||
UserSlice() {
|
||||
this->reset();
|
||||
}
|
||||
|
||||
void reset() {
|
||||
this->start_defined = false;
|
||||
this->stop_defined = false;
|
||||
this->step_defined = false;
|
||||
}
|
||||
|
||||
void set_start(SliceIndex start) {
|
||||
this->start_defined = true;
|
||||
this->start = start;
|
||||
}
|
||||
|
||||
void set_stop(SliceIndex stop) {
|
||||
this->stop_defined = true;
|
||||
this->stop = stop;
|
||||
}
|
||||
|
||||
void set_step(SliceIndex step) {
|
||||
this->step_defined = true;
|
||||
this->step = step;
|
||||
}
|
||||
|
||||
// Like Python's `slice(start, stop, step).indices(length)`
|
||||
void indices(SliceIndex length, Slice* result) {
|
||||
// NOTE: This function implements Python's `slice.indices` *FAITHFULLY*.
|
||||
// SEE: https://github.com/python/cpython/blob/f62161837e68c1c77961435f1b954412dd5c2b65/Objects/sliceobject.c#L546
|
||||
result->step = step_defined ? step : 1;
|
||||
bool step_is_negative = result->step < 0;
|
||||
|
||||
if (start_defined) {
|
||||
result->start = slice::resolve_index_in_length_clamped(length, start);
|
||||
} else {
|
||||
result->start = step_is_negative ? length - 1 : 0;
|
||||
}
|
||||
|
||||
if (stop_defined) {
|
||||
result->stop = slice::resolve_index_in_length_clamped(length, stop);
|
||||
} else {
|
||||
result->stop = step_is_negative ? -1 : length;
|
||||
}
|
||||
}
|
||||
|
||||
// `indices()` but asserts `this->step != 0` and `this->length >= 0`
|
||||
void indices_checked(ErrorContext* errctx, SliceIndex length, Slice* result) {
|
||||
if (length < 0) {
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"length should not be negative, got {0}", // Edited. Error message copied from python by doing `slice(0, 0, 0).indices(100)`
|
||||
length
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->step_defined && this->step == 0) {
|
||||
// Error message
|
||||
errctx->set_error(
|
||||
errctx->error_ids->value_error,
|
||||
"slice step cannot be zero" // Error message copied from python by doing `slice(0, 0, 0).indices(100)`
|
||||
);
|
||||
return;
|
||||
}
|
||||
this->indices(length, result);
|
||||
}
|
||||
};
|
||||
}
|
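A worked example (not part of the diff, compiled like `irrt_test.cpp`) of `UserSlice::indices` and `Slice::len`, mirroring Python's `slice(None, None, -2).indices(10)`:

// Sketch: resolving `[::-2]` against an axis of length 10.
#include <cstdint>
#include <irrt_everything.hpp>

void demo_user_slice() {
    UserSlice user_slice;        // start/stop/step all undefined, like `[:]`
    user_slice.set_step(-2);     // now it is `[::-2]`

    Slice resolved;
    user_slice.indices(10, &resolved);  // resolved = { .start = 9, .stop = -1, .step = -2 }

    // Same as len(range(9, -1, -2)) in Python: the elements 9, 7, 5, 3, 1.
    SliceIndex n = resolved.len();      // n == 5
    (void) n;
}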
nac3core/irrt/irrt/utils.hpp (new file, 78 lines)
@@ -0,0 +1,78 @@
#pragma once

#include <irrt/int_defs.hpp>

namespace {
template <typename T>
const T& max(const T& a, const T& b) {
    return a > b ? a : b;
}

template <typename T>
const T& min(const T& a, const T& b) {
    return a > b ? b : a;
}

template <typename T>
bool arrays_match(int len, T* as, T* bs) {
    for (int i = 0; i < len; i++) {
        if (as[i] != bs[i]) return false;
    }
    return true;
}

namespace cstr_utils {
bool is_empty(const char* str) {
    return str[0] == '\0';
}

int8_t compare(const char* a, const char* b) {
    uint32_t i = 0;
    while (true) {
        if (a[i] < b[i]) {
            return -1;
        } else if (a[i] > b[i]) {
            return 1;
        } else { // a[i] == b[i]
            if (a[i] == '\0') {
                return 0;
            } else {
                i++;
            }
        }
    }
}

int8_t equal(const char* a, const char* b) {
    return compare(a, b) == 0;
}

uint32_t length(const char* str) {
    uint32_t length = 0;
    while (*str != '\0') {
        length++;
        str++;
    }
    return length;
}

bool copy(const char* src, char* dst, uint32_t dst_max_size) {
    for (uint32_t i = 0; i < dst_max_size; i++) {
        bool is_last = i + 1 == dst_max_size;
        if (is_last && src[i] != '\0') {
            dst[i] = '\0';
            return false;
        }

        if (src[i] == '\0') {
            dst[i] = '\0';
            return true;
        }

        dst[i] = src[i];
    }

    __builtin_unreachable();
}
}
}
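A small sketch (not part of the diff; assumes it is compiled like `irrt_test.cpp`, so `<cstdint>` provides the integer typedefs) of the truncation contract of `cstr_utils::copy`:

// Sketch: cstr_utils::copy never writes past dst and reports truncation via its return value.
#include <cstdint>
#include <irrt/utils.hpp>

void demo_cstr_copy() {
    char dst[4];
    bool ok_full  = cstr_utils::copy("ab", dst, 4);      // true,  dst == "ab"
    bool ok_trunc = cstr_utils::copy("abcdef", dst, 4);  // false, dst == "abc" (truncated, still NUL-terminated)
    (void) ok_full; (void) ok_trunc;
}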
nac3core/irrt/irrt_everything.hpp (new file, 12 lines)
@@ -0,0 +1,12 @@
#pragma once

#include <irrt/core.hpp>
#include <irrt/error_context.hpp>
#include <irrt/int_defs.hpp>
#include <irrt/numpy/ndarray_basic.hpp>
#include <irrt/numpy/ndarray_broadcast.hpp>
#include <irrt/numpy/ndarray_def.hpp>
#include <irrt/numpy/ndarray_fill.hpp>
#include <irrt/numpy/ndarray_subscript.hpp>
#include <irrt/slice.hpp>
#include <irrt/utils.hpp>
nac3core/irrt/irrt_test.cpp (new file, 25 lines)
@@ -0,0 +1,25 @@
// This file will be compiled like a real C++ program,
// so we have the luxury of using the standard library.
// That is, if the nix flakes do not have issues... especially on msys2...
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#include <irrt_everything.hpp>

#include <test/core.hpp>
#include <test/test_core.hpp>
#include <test/test_ndarray_basic.hpp>
#include <test/test_ndarray_subscript.hpp>
#include <test/test_ndarray_broadcast.hpp>
#include <test/test_slice.hpp>

int main() {
    // Be wise about the order of suites!!
    test::core::run();
    test::slice::run();
    test::ndarray_basic::run();
    test::ndarray_subscript::run();
    test::ndarray_broadcast::run();
    return 0;
}
143
nac3core/irrt/test/core.hpp
Normal file
143
nac3core/irrt/test/core.hpp
Normal file
@ -0,0 +1,143 @@
|
||||
#pragma once
|
||||
|
||||
// Include this header for every test_*.cpp
|
||||
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <test/print.hpp>
|
||||
|
||||
// Some utils can be used here
|
||||
#include <irrt/utils.hpp>
|
||||
|
||||
void __begin_test(const char* function_name, const char* file, int line) {
|
||||
printf("######### Running %s @ %s:%d\n", function_name, file, line);
|
||||
}
|
||||
|
||||
#define BEGIN_TEST() __begin_test(__FUNCTION__, __FILE__, __LINE__)
|
||||
|
||||
void test_fail() {
|
||||
printf("[!] Test failed. Exiting with status code 1.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
void print_assertion_passed(const char* file, int line) {
|
||||
printf("[*] Assertion passed on %s:%d\n", file, line);
|
||||
}
|
||||
|
||||
void print_assertion_failed(const char* file, int line) {
|
||||
printf("[!] Assertion failed on %s:%d\n", file, line);
|
||||
}
|
||||
|
||||
void __assert_true(const char* file, int line, bool cond) {
|
||||
if (cond) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_true(cond) __assert_true(__FILE__, __LINE__, cond)
|
||||
|
||||
template <typename T>
|
||||
void __assert_arrays_match(const char* file, int line, int len, const T* expected, const T* got) {
|
||||
if (arrays_match(len, expected, got)) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expect = ");
|
||||
print_array(len, expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
print_array(len, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_arrays_match(len, expected, got) __assert_arrays_match(__FILE__, __LINE__, len, expected, got)
|
||||
|
||||
template <typename T>
|
||||
void __assert_values_match(const char* file, int line, T expected, T got) {
|
||||
if (expected == got) {
|
||||
print_assertion_passed(file, line);
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expect = ");
|
||||
print_value(expected);
|
||||
printf("\n");
|
||||
printf(" Got = ");
|
||||
print_value(got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_values_match(expected, got) __assert_values_match(__FILE__, __LINE__, expected, got)
|
||||
|
||||
// A fake set of ErrorIds for testing only
|
||||
const ErrorIds TEST_ERROR_IDS = {
|
||||
.index_error = 0,
|
||||
.value_error = 1,
|
||||
.assertion_error = 2,
|
||||
.runtime_error = 3,
|
||||
};
|
||||
|
||||
ErrorContext create_testing_errctx() {
|
||||
// Everything is global so it is fine to directly return a struct ErrorContext
|
||||
ErrorContext errctx;
|
||||
errctx.initialize(&TEST_ERROR_IDS);
|
||||
return errctx;
|
||||
}
|
||||
|
||||
void print_errctx_content(ErrorContext* errctx) {
|
||||
if (errctx->has_error()) {
|
||||
printf(
|
||||
"(Error ID %d): %s ... where param1 = %ld, param2 = %ld, param3 = %ld\n",
|
||||
errctx->error_id,
|
||||
errctx->message_template,
|
||||
errctx->param1,
|
||||
errctx->param2,
|
||||
errctx->param3
|
||||
);
|
||||
} else {
|
||||
printf("<no error>\n");
|
||||
}
|
||||
}
|
||||
|
||||
void __assert_errctx_no_error(const char* file, int line, ErrorContext* errctx) {
|
||||
if (errctx->has_error()) {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expecting no error but caught the following:\n\n");
|
||||
print_errctx_content(errctx);
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_errctx_no_error(errctx) __assert_errctx_no_error(__FILE__, __LINE__, errctx)
|
||||
|
||||
void __assert_errctx_has_error(const char* file, int line, ErrorContext* errctx, ErrorId expected_error_id) {
|
||||
if (errctx->has_error()) {
|
||||
if (errctx->error_id == expected_error_id) {
|
||||
// OK
|
||||
} else {
|
||||
// Otherwise it got the wrong kind of error
|
||||
print_assertion_failed(file, line);
|
||||
printf(
|
||||
"Expecting error id %d but got error id %d. Error caught:\n\n",
|
||||
expected_error_id,
|
||||
errctx->error_id
|
||||
);
|
||||
print_errctx_content(errctx);
|
||||
test_fail();
|
||||
}
|
||||
} else {
|
||||
print_assertion_failed(file, line);
|
||||
printf("Expecting an error, but there is none.");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
#define assert_errctx_has_error(errctx, expected_error_id) __assert_errctx_has_error(__FILE__, __LINE__, errctx, expected_error_id)
|
118
nac3core/irrt/test/print.hpp
Normal file
118
nac3core/irrt/test/print.hpp
Normal file
@ -0,0 +1,118 @@
|
||||
#pragma once
|
||||
|
||||
#include <cstdlib>
|
||||
#include <cstdio>
|
||||
|
||||
template <class T>
|
||||
void print_value(const T& value);
|
||||
|
||||
template <>
|
||||
void print_value(const bool& value) {
|
||||
printf("%s", value ? "true" : "false");
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const int8_t& value) {
|
||||
printf("%d", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const int32_t& value) {
|
||||
printf("%d", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const uint8_t& value) {
|
||||
printf("%u", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const uint32_t& value) {
|
||||
printf("%u", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const float& value) {
|
||||
printf("%f", value);
|
||||
}
|
||||
|
||||
template <>
|
||||
void print_value(const double& value) {
|
||||
printf("%f", value);
|
||||
}
|
||||
|
||||
void print_repeated(const char *str, int count) {
|
||||
for (int i = 0; i < count; i++) {
|
||||
printf("%s", str);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void print_array(int len, const T* as) {
|
||||
printf("[");
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (i != 0) printf(", ");
|
||||
print_value(as[i]);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
template<typename ElementT, typename SizeT>
|
||||
void __print_ndarray_aux(bool first, bool last, SizeT* cursor, SizeT depth, NDArray<SizeT>* ndarray) {
|
||||
// A really lazy recursive implementation
|
||||
|
||||
// Add left padding unless its the first entry (since there would be "[[[" before it)
|
||||
if (!first) {
|
||||
print_repeated(" ", depth);
|
||||
}
|
||||
|
||||
const SizeT dim = ndarray->shape[depth];
|
||||
if (depth + 1 == ndarray->ndims) {
|
||||
// Recursed down to last dimension, print the values in a nice list
|
||||
printf("[");
|
||||
|
||||
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndarray->ndims);
|
||||
for (SizeT i = 0; i < dim; i++) {
|
||||
ndarray::basic::util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, *cursor);
|
||||
ElementT* pelement = (ElementT*) ndarray::basic::get_pelement_by_indices<SizeT>(ndarray, indices);
|
||||
ElementT element = *pelement;
|
||||
|
||||
if (i != 0) printf(", "); // List delimiter
|
||||
print_value(element);
|
||||
printf("(@");
|
||||
print_array(ndarray->ndims, indices);
|
||||
printf(")");
|
||||
|
||||
(*cursor)++;
|
||||
}
|
||||
printf("]");
|
||||
} else {
|
||||
printf("[");
|
||||
for (SizeT i = 0; i < ndarray->shape[depth]; i++) {
|
||||
__print_ndarray_aux<ElementT, SizeT>(
|
||||
i == 0, // first?
|
||||
i + 1 == dim, // last?
|
||||
cursor,
|
||||
depth + 1,
|
||||
ndarray
|
||||
);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
// Add newline unless its the last entry (since there will be "]]]" after it)
|
||||
if (!last) {
|
||||
print_repeated("\n", depth);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename ElementT, typename SizeT>
|
||||
void print_ndarray(NDArray<SizeT>* ndarray) {
|
||||
if (ndarray->ndims == 0) {
|
||||
printf("<empty ndarray>");
|
||||
} else {
|
||||
SizeT cursor = 0;
|
||||
__print_ndarray_aux<ElementT, SizeT>(true, true, &cursor, 0, ndarray);
|
||||
}
|
||||
printf("\n");
|
||||
}
|
nac3core/irrt/test/test_core.hpp (new file, 19 lines)
@@ -0,0 +1,19 @@
#pragma once

#include <test/core.hpp>
#include <irrt_everything.hpp>

namespace test {
namespace core {
void test_int_exp() {
    BEGIN_TEST();

    assert_values_match(125, __nac3_int_exp_impl<int32_t>(5, 3));
    assert_values_match(3125, __nac3_int_exp_impl<int32_t>(5, 5));
}

void run() {
    test_int_exp();
}
}
}
47
nac3core/irrt/test/test_ndarray_basic.hpp
Normal file
47
nac3core/irrt/test/test_ndarray_basic.hpp
Normal file
@ -0,0 +1,47 @@
|
||||
#pragma once
|
||||
|
||||
#include <test/core.hpp>
|
||||
#include <irrt_everything.hpp>
|
||||
|
||||
namespace test {
|
||||
namespace ndarray_basic {
|
||||
void test_calc_size_from_shape_normal() {
|
||||
// Test shapes with normal values
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 3, 5, 7 };
|
||||
assert_values_match(210, ndarray::basic::util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_has_zero() {
|
||||
// Test shapes with 0 in them
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 0, 5, 7 };
|
||||
assert_values_match(0, ndarray::basic::util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_set_strides_by_shape() {
|
||||
// Test `set_strides_by_shape()`
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 99, 3, 5, 7 };
|
||||
int32_t strides[4] = { 0 };
|
||||
ndarray::basic::util::set_strides_by_shape((int32_t) sizeof(int32_t), 4, strides, shape);
|
||||
|
||||
int32_t expected_strides[4] = {
|
||||
105 * sizeof(int32_t),
|
||||
35 * sizeof(int32_t),
|
||||
7 * sizeof(int32_t),
|
||||
1 * sizeof(int32_t)
|
||||
};
|
||||
assert_arrays_match(4, expected_strides, strides);
|
||||
}
|
||||
|
||||
void run() {
|
||||
test_calc_size_from_shape_normal();
|
||||
test_calc_size_from_shape_has_zero();
|
||||
test_set_strides_by_shape();
|
||||
}
|
||||
}
|
||||
}
|
72 nac3core/irrt/test/test_ndarray_broadcast.hpp Normal file
@ -0,0 +1,72 @@
#pragma once

#include <test/core.hpp>
#include <irrt_everything.hpp>

namespace test { namespace ndarray_broadcast {
void test_ndarray_broadcast_1() {
    /*
    ```python
    array = np.array([[19.9, 29.9, 39.9, 49.9]], dtype=np.float64)
    >>> [[19.9 29.9 39.9 49.9]]

    array = np.broadcast_to(array, (2, 3, 4))
    >>> [[[19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]]
    >>>  [[19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]
    >>>   [19.9 29.9 39.9 49.9]]]

    assert array.strides == (0, 0, 8)
    # and then pick some values in `array` and check them...
    ```
    */
    BEGIN_TEST();

    // Prepare src_ndarray
    double src_data[4] = { 19.9, 29.9, 39.9, 49.9 };
    const int32_t src_ndims = 2;
    int32_t src_shape[src_ndims] = {1, 4};
    int32_t src_strides[src_ndims] = {};
    NDArray<int32_t> src_ndarray = {
        .data = (uint8_t*) src_data,
        .itemsize = sizeof(double),
        .ndims = src_ndims,
        .shape = src_shape,
        .strides = src_strides
    };
    ndarray::basic::set_strides_by_shape(&src_ndarray);

    // Prepare dst_ndarray
    const int32_t dst_ndims = 3;
    int32_t dst_shape[dst_ndims] = {2, 3, 4};
    int32_t dst_strides[dst_ndims] = {};
    NDArray<int32_t> dst_ndarray = {
        .ndims = dst_ndims,
        .shape = dst_shape,
        .strides = dst_strides
    };

    // Broadcast
    ErrorContext errctx = create_testing_errctx();
    ndarray::broadcast::broadcast_to(&errctx, &src_ndarray, &dst_ndarray);
    assert_errctx_no_error(&errctx);

    assert_arrays_match(dst_ndims, ((int32_t[]) { 0, 0, 8 }), dst_ndarray.strides);

    assert_values_match(19.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 0, 0}))));
    assert_values_match(29.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 0, 1}))));
    assert_values_match(39.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 0, 2}))));
    assert_values_match(49.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 0, 3}))));
    assert_values_match(19.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 1, 0}))));
    assert_values_match(29.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 1, 1}))));
    assert_values_match(39.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 1, 2}))));
    assert_values_match(49.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {0, 1, 3}))));
    assert_values_match(49.9, *((double*) ndarray::basic::get_pelement_by_indices(&dst_ndarray, ((int32_t[]) {1, 2, 3}))));
}

void run() {
    test_ndarray_broadcast_1();
}
}}
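The expected `(0, 0, 8)` strides follow from the standard NumPy-style broadcasting rule: axes are aligned on the right, and any source axis of length 1 (or a newly prepended axis) gets stride 0, so every index along it maps back to the same source element. A standalone sketch of that stride rule under the same shapes as the test; this describes the general rule, not necessarily the exact code path of `ndarray::broadcast::broadcast_to`:

```cpp
#include <cassert>
#include <cstdint>

// Broadcast-to stride rule: align shapes on the right; a source axis of
// length 1 (or a missing leading axis) contributes stride 0.
// Returns false if the shapes are not broadcast-compatible.
bool broadcast_strides_sketch(int32_t src_ndims, const int32_t* src_shape,
                              const int32_t* src_strides,
                              int32_t dst_ndims, const int32_t* dst_shape,
                              int32_t* dst_strides) {
    for (int32_t i = 0; i < dst_ndims; i++) {
        int32_t j = i - (dst_ndims - src_ndims); // matching source axis, may be < 0
        if (j < 0 || src_shape[j] == 1) {
            dst_strides[i] = 0;
        } else if (src_shape[j] == dst_shape[i]) {
            dst_strides[i] = src_strides[j];
        } else {
            return false; // incompatible shapes
        }
    }
    return true;
}

int main() {
    // float64 ndarray of shape (1, 4) broadcast to (2, 3, 4), as in the test.
    int32_t src_shape[2] = { 1, 4 }, src_strides[2] = { 32, 8 };
    int32_t dst_shape[3] = { 2, 3, 4 }, dst_strides[3] = { 0 };
    assert(broadcast_strides_sketch(2, src_shape, src_strides, 3, dst_shape, dst_strides));
    assert(dst_strides[0] == 0 && dst_strides[1] == 0 && dst_strides[2] == 8);
    return 0;
}
```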
233 nac3core/irrt/test/test_ndarray_subscript.hpp Normal file
@ -0,0 +1,233 @@
|
||||
#pragma once
|
||||
|
||||
#include <test/core.hpp>
|
||||
#include <irrt_everything.hpp>
|
||||
|
||||
namespace test { namespace ndarray_subscript {
|
||||
void test_ndsubscript_normal_1() {
|
||||
/*
|
||||
Reference Python code:
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4));
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[-2:, 1::2]
|
||||
# array([[ 5., 7.],
|
||||
# [ 9., 11.]])
|
||||
|
||||
assert dst_ndarray.shape == (2, 2)
|
||||
assert dst_ndarray.strides == (32, 16)
|
||||
assert dst_ndarray[0, 0] == 5.0
|
||||
assert dst_ndarray[0, 1] == 7.0
|
||||
assert dst_ndarray[1, 0] == 9.0
|
||||
assert dst_ndarray[1, 1] == 11.0
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
double src_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
|
||||
int32_t src_itemsize = sizeof(double);
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = { 3, 4 };
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {
|
||||
.data = (uint8_t*) src_data,
|
||||
.itemsize = src_itemsize,
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides
|
||||
};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 2;
|
||||
int32_t dst_shape[dst_ndims] = {999, 999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999, 999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
// Create the subscripts in `ndarray[-2::, 1::2]`
|
||||
UserSlice subscript_1;
|
||||
subscript_1.set_start(-2);
|
||||
|
||||
UserSlice subscript_2;
|
||||
subscript_2.set_start(1);
|
||||
subscript_2.set_step(2);
|
||||
|
||||
const int32_t num_ndsubscripts = 2;
|
||||
NDSubscript ndsubscripts[num_ndsubscripts] = {
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_SLICE, .data = (uint8_t*) &subscript_1 },
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_SLICE, .data = (uint8_t*) &subscript_2 }
|
||||
};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::subscript::subscript(&errctx, num_ndsubscripts, ndsubscripts, &src_ndarray, &dst_ndarray);
|
||||
assert_errctx_no_error(&errctx);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = { 2, 2 };
|
||||
int32_t expected_strides[dst_ndims] = { 32, 16 };
|
||||
|
||||
assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
// dst_ndarray[0, 0]
|
||||
assert_values_match(
|
||||
5.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 0, 0 }))
|
||||
);
|
||||
// dst_ndarray[0, 1]
|
||||
assert_values_match(
|
||||
7.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 0, 1 }))
|
||||
);
|
||||
// dst_ndarray[1, 0]
|
||||
assert_values_match(
|
||||
9.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 1, 0 }))
|
||||
);
|
||||
// dst_ndarray[1, 1]
|
||||
assert_values_match(
|
||||
11.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 1, 1 }))
|
||||
);
|
||||
}
|
||||
|
||||
void test_ndsubscript_normal_2() {
|
||||
/*
|
||||
```python
|
||||
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4))
|
||||
# array([[ 0., 1., 2., 3.],
|
||||
# [ 4., 5., 6., 7.],
|
||||
# [ 8., 9., 10., 11.]])
|
||||
|
||||
dst_ndarray = ndarray[2, ::-2]
|
||||
# array([11., 9.])
|
||||
|
||||
assert dst_ndarray.shape == (2,)
|
||||
assert dst_ndarray.strides == (-16,)
|
||||
assert dst_ndarray[0] == 11.0
|
||||
assert dst_ndarray[1] == 9.0
|
||||
```
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
double src_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
|
||||
int32_t src_itemsize = sizeof(double);
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = { 3, 4 };
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {
|
||||
.data = (uint8_t*) src_data,
|
||||
.itemsize = src_itemsize,
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides
|
||||
};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 1;
|
||||
int32_t dst_shape[dst_ndims] = {999}; // Empty values
|
||||
int32_t dst_strides[dst_ndims] = {999}; // Empty values
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr,
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
// Create the subscripts in `ndarray[2, ::-2]`
|
||||
int32_t subscript_1 = 2;
|
||||
|
||||
UserSlice subscript_2;
|
||||
subscript_2.set_step(-2);
|
||||
|
||||
const int32_t num_ndsubscripts = 2;
|
||||
NDSubscript ndsubscripts[num_ndsubscripts] = {
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_INDEX, .data = (uint8_t*) &subscript_1 },
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_SLICE, .data = (uint8_t*) &subscript_2 }
|
||||
};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::subscript::subscript(&errctx, num_ndsubscripts, ndsubscripts, &src_ndarray, &dst_ndarray);
|
||||
assert_errctx_no_error(&errctx);
|
||||
|
||||
int32_t expected_shape[dst_ndims] = { 2 };
|
||||
int32_t expected_strides[dst_ndims] = { -16 };
|
||||
assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape);
|
||||
assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides);
|
||||
|
||||
assert_values_match(
|
||||
11.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 0 }))
|
||||
);
|
||||
assert_values_match(
|
||||
9.0,
|
||||
*((double *) ndarray::basic::get_pelement_by_indices(&dst_ndarray, (int32_t[dst_ndims]) { 1 }))
|
||||
);
|
||||
}
|
||||
|
||||
void test_ndsubscript_index_subscript_out_of_bounds() {
|
||||
/*
|
||||
# Consider `my_array`
|
||||
|
||||
print(my_array.shape)
|
||||
# (4, 5, 6)
|
||||
|
||||
my_array[2, 100] # error, index subscript at axis 1 is out of bounds
|
||||
*/
|
||||
BEGIN_TEST();
|
||||
|
||||
// Prepare src_ndarray
|
||||
const int32_t src_ndims = 2;
|
||||
int32_t src_shape[src_ndims] = { 3, 4 };
|
||||
int32_t src_strides[src_ndims] = {};
|
||||
NDArray<int32_t> src_ndarray = {
|
||||
.data = (uint8_t*) nullptr, // placeholder, we wouldn't access it
|
||||
.itemsize = sizeof(double), // placeholder
|
||||
.ndims = src_ndims,
|
||||
.shape = src_shape,
|
||||
.strides = src_strides
|
||||
};
|
||||
ndarray::basic::set_strides_by_shape(&src_ndarray);
|
||||
|
||||
// Create the subscripts in `my_array[2, 100]`
|
||||
int32_t subscript_1 = 2;
|
||||
int32_t subscript_2 = 100;
|
||||
|
||||
const int32_t num_ndsubscripts = 2;
|
||||
NDSubscript ndsubscripts[num_ndsubscripts] = {
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_INDEX, .data = (uint8_t*) &subscript_1 },
|
||||
{ .type = INPUT_SUBSCRIPT_TYPE_INDEX, .data = (uint8_t*) &subscript_2 }
|
||||
};
|
||||
|
||||
// Prepare dst_ndarray
|
||||
const int32_t dst_ndims = 0;
|
||||
int32_t dst_shape[dst_ndims] = {};
|
||||
int32_t dst_strides[dst_ndims] = {};
|
||||
NDArray<int32_t> dst_ndarray = {
|
||||
.data = nullptr, // placeholder
|
||||
.ndims = dst_ndims,
|
||||
.shape = dst_shape,
|
||||
.strides = dst_strides
|
||||
};
|
||||
|
||||
ErrorContext errctx = create_testing_errctx();
|
||||
ndarray::subscript::subscript(&errctx, num_ndsubscripts, ndsubscripts, &src_ndarray, &dst_ndarray);
|
||||
assert_errctx_has_error(&errctx, errctx.error_ids->index_error);
|
||||
}
|
||||
|
||||
void run() {
|
||||
test_ndsubscript_normal_1();
|
||||
test_ndsubscript_normal_2();
|
||||
test_ndsubscript_index_subscript_out_of_bounds();
|
||||
}
|
||||
} }
|
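The shape and stride expectations in these subscript tests all come from the same per-axis arithmetic: for a sliced axis with resolved `(start, stop, step)`, the result length is the number of iterations of the slice, the result stride is the source stride times `step`, and the data pointer advances by `start * src_stride` bytes; an integer index drops the axis and only contributes to the offset. A standalone sketch of that arithmetic (names here are illustrative, not the IRRT API) reproduces the `(2, 2)` / `(32, 16)` expectation of `test_ndsubscript_normal_1`:

```cpp
#include <cassert>
#include <cstdint>

struct SlicedAxis { int32_t length; int32_t stride; int32_t byte_offset; };

// One sliced axis, given resolved (start, stop, step) and the source stride.
SlicedAxis slice_axis_sketch(int32_t start, int32_t stop, int32_t step,
                             int32_t src_stride) {
    int32_t span = (step > 0) ? (stop - start) : (start - stop);
    int32_t abs_step = (step > 0) ? step : -step;
    int32_t length = (span <= 0) ? 0 : (span + abs_step - 1) / abs_step;
    return { length, src_stride * step, start * src_stride };
}

int main() {
    // ndarray[-2:, 1::2] on a float64 (3, 4) array with strides (32, 8),
    // matching test_ndsubscript_normal_1: "-2:" resolves to start=1, stop=3.
    SlicedAxis axis0 = slice_axis_sketch(1, 3, 1, 32);
    SlicedAxis axis1 = slice_axis_sketch(1, 4, 2, 8);
    assert(axis0.length == 2 && axis0.stride == 32);
    assert(axis1.length == 2 && axis1.stride == 16);
    // data offset: 1*32 + 1*8 = 40 bytes, i.e. element (1, 1) == 5.0.
    assert(axis0.byte_offset + axis1.byte_offset == 40);
    return 0;
}
```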
96 nac3core/irrt/test/test_slice.hpp Normal file
@ -0,0 +1,96 @@
#pragma once

#include <test/core.hpp>
#include <irrt_everything.hpp>

namespace test {
namespace slice {
void test_slice_normal() {
    // Normal situation
    BEGIN_TEST();

    UserSlice user_slice;
    user_slice.set_stop(5);

    Slice slice;
    user_slice.indices(100, &slice);

    printf("%d, %d, %d\n", slice.start, slice.stop, slice.step);

    assert_values_match(0, slice.start);
    assert_values_match(5, slice.stop);
    assert_values_match(1, slice.step);
}

void test_slice_start_too_large() {
    // Start is too large and should be clamped to length
    BEGIN_TEST();

    UserSlice user_slice;
    user_slice.set_start(400);

    Slice slice;
    user_slice.indices(100, &slice);

    assert_values_match(100, slice.start);
    assert_values_match(100, slice.stop);
    assert_values_match(1, slice.step);
}

void test_slice_negative_start_stop() {
    // Negative start/stop should be resolved
    BEGIN_TEST();

    UserSlice user_slice;
    user_slice.set_start(-10);
    user_slice.set_stop(-5);

    Slice slice;
    user_slice.indices(100, &slice);

    assert_values_match(90, slice.start);
    assert_values_match(95, slice.stop);
    assert_values_match(1, slice.step);
}

void test_slice_only_negative_step() {
    // Things like `[::-5]` should be handled correctly
    BEGIN_TEST();

    UserSlice user_slice;
    user_slice.set_step(-5);

    Slice slice;
    user_slice.indices(100, &slice);

    assert_values_match(99, slice.start);
    assert_values_match(-1, slice.stop);
    assert_values_match(-5, slice.step);
}

void test_slice_step_zero() {
    // Step = 0 is a value error
    BEGIN_TEST();

    ErrorContext errctx = create_testing_errctx();

    UserSlice user_slice;
    user_slice.set_start(2);
    user_slice.set_stop(12);
    user_slice.set_step(0);

    Slice slice;
    user_slice.indices_checked(&errctx, 100, &slice);

    assert_errctx_has_error(&errctx, errctx.error_ids->value_error);
}

void run() {
    test_slice_normal();
    test_slice_start_too_large();
    test_slice_negative_start_stop();
    test_slice_only_negative_step();
    test_slice_step_zero();
}
}
}
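These cases mirror Python's `slice.indices(length)`: omitted fields default to `0`/`length`/`1` (or `length - 1`/`-1` for a negative step), negative start/stop count from the end, out-of-range values are clamped into range, and a zero step is rejected as a `ValueError`. A standalone sketch of the defaulting and clamping rules covering the non-error cases above; the checked variant that reports through an `ErrorContext` is left out:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

struct ResolvedSlice { int32_t start, stop, step; };

// Resolve an optional (start, stop, step) against `length`, following the
// rules of CPython's slice.indices(). step == 0 is assumed to be rejected
// earlier (the ValueError path exercised by test_slice_step_zero).
ResolvedSlice resolve_slice_sketch(std::optional<int32_t> start,
                                   std::optional<int32_t> stop,
                                   std::optional<int32_t> step_opt,
                                   int32_t length) {
    int32_t step = step_opt.value_or(1);
    int32_t lo = (step > 0) ? 0 : -1;              // clamp floor
    int32_t hi = (step > 0) ? length : length - 1; // clamp ceiling
    auto resolve = [&](int32_t v) {
        if (v < 0) v += length; // count from the end
        if (v < lo) return lo;
        if (v > hi) return hi;
        return v;
    };
    int32_t r_start = start ? resolve(*start) : ((step > 0) ? 0 : length - 1);
    int32_t r_stop = stop ? resolve(*stop) : ((step > 0) ? length : -1);
    return { r_start, r_stop, step };
}

int main() {
    ResolvedSlice a = resolve_slice_sketch({}, 5, {}, 100);   // [:5]
    assert(a.start == 0 && a.stop == 5 && a.step == 1);
    ResolvedSlice b = resolve_slice_sketch(400, {}, {}, 100); // [400:]
    assert(b.start == 100 && b.stop == 100 && b.step == 1);
    ResolvedSlice c = resolve_slice_sketch(-10, -5, {}, 100); // [-10:-5]
    assert(c.start == 90 && c.stop == 95 && c.step == 1);
    ResolvedSlice d = resolve_slice_sketch({}, {}, -5, 100);  // [::-5]
    assert(d.start == 99 && d.stop == -1 && d.step == -5);
    return 0;
}
```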
@ -4,11 +4,18 @@ use crate::{
    codegen::{
        classes::{
            ArrayLikeIndexer, ArrayLikeValue, ListType, ListValue, NDArrayValue, ProxyType,
            ProxyValue, RangeValue, TypedArrayLikeAccessor, UntypedArrayLikeAccessor,
            ProxyValue, RangeValue, UntypedArrayLikeAccessor,
        },
        concrete_type::{ConcreteFuncArg, ConcreteTypeEnum, ConcreteTypeStore},
        gen_in_range_check, get_llvm_abi_type, get_llvm_type,
        irrt::*,
        irrt::{
            numpy::{
                ndarray::{self, alloca_ndarray_and_init},
                slice::{RustUserSlice, SliceIndexModel},
                subscript::{call_nac3_ndarray_subscript, RustNDSubscript},
            },
            *,
        },
        llvm_intrinsics::{
            call_expect, call_float_floor, call_float_pow, call_float_powi, call_int_smax,
            call_memcpy_generic,
@ -18,31 +25,33 @@ use crate::{
        gen_for_callback_incrementing, gen_if_callback, gen_if_else_expr_callback, gen_raise,
        gen_var,
    },
    CodeGenContext, CodeGenTask, CodeGenerator,
    CodeGenContext, CodeGenTask, CodeGenerator, Int32,
    },
    symbol_resolver::{SymbolValue, ValueEnum},
    toplevel::{
        helper::PrimDef,
        numpy::{make_ndarray_ty, unpack_ndarray_var_tys},
        DefinitionId, TopLevelDef,
    },
    toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, DefinitionId, TopLevelDef},
    typecheck::{
        magic_methods::{Binop, BinopVariant, HasOpInfo},
        typedef::{FunSignature, FuncArg, Type, TypeEnum, TypeVarId, Unifier, VarMap},
        typedef::{
            iter_type_vars, FunSignature, FuncArg, Type, TypeEnum, TypeVarId, Unifier, VarMap,
        },
    },
};
use inkwell::{
    attributes::{Attribute, AttributeLoc},
    types::{AnyType, BasicType, BasicTypeEnum},
    values::{BasicValueEnum, CallSiteValue, FunctionValue, IntValue, PointerValue},
    values::{
        AnyValue, BasicValue, BasicValueEnum, CallSiteValue, FunctionValue, IntValue, PointerValue,
    },
    AddressSpace, IntPredicate, OptimizationLevel,
};
use itertools::{chain, izip, Either, Itertools};
use nac3parser::ast::{
    self, Boolop, Cmpop, Comprehension, Constant, Expr, ExprKind, Location, Operator, StrRef,
    Unaryop,
    self, Boolop, Cmpop, Comprehension, Constant, Expr, ExprKind, Located, Location, Operator,
    StrRef, Unaryop,
};

use super::{irrt::numpy::ndarray::NpArray, IntModel, Model, Pointer, PointerModel, StructModel};

pub fn get_subst_key(
    unifier: &mut Unifier,
    obj: Option<Type>,
@ -576,6 +585,21 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> {
        params: [Option<IntValue<'ctx>>; 3],
        loc: Location,
    ) {
        let error_id = self.resolver.get_string_id(name);
        let error_id = self.ctx.i32_type().const_int(error_id as u64, false);
        self.raise_exn_by_id(generator, error_id, msg, params, loc);
    }

    pub fn raise_exn_by_id<G: CodeGenerator + ?Sized>(
        &mut self,
        generator: &mut G,
        error_id: IntValue<'ctx>,
        msg: BasicValueEnum<'ctx>,
        params: [Option<IntValue<'ctx>>; 3],
        loc: Location,
    ) {
        assert_eq!(error_id.get_type().get_bit_width(), 32);

        let zelf = if let Some(exception_val) = self.exception_val {
            exception_val
        } else {
@ -588,8 +612,7 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> {
        let zero = int32.const_zero();
        unsafe {
            let id_ptr = self.builder.build_in_bounds_gep(zelf, &[zero, zero], "exn.id").unwrap();
            let id = self.resolver.get_string_id(name);
            self.builder.build_store(id_ptr, int32.const_int(id as u64, false)).unwrap();
            self.builder.build_store(id_ptr, error_id).unwrap();
            let ptr = self
                .builder
                .build_in_bounds_gep(zelf, &[zero, int32.const_int(5, false)], "exn.msg")
@ -652,6 +675,32 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> {
        self.raise_exn(generator, err_name, err_msg, params, loc);
        self.builder.position_at_end(then_block);
    }

    pub fn make_assert_impl_by_id<G: CodeGenerator + ?Sized>(
        &mut self,
        generator: &mut G,
        cond: IntValue<'ctx>,
        err_id: IntValue<'ctx>,
        err_msg: BasicValueEnum<'ctx>,
        params: [Option<IntValue<'ctx>>; 3],
        loc: Location,
    ) {
        let i1 = self.ctx.bool_type();
        let i1_true = i1.const_all_ones();
        // we assume that the condition is most probably true, so the normal path is the most
        // probable path
        // even if this assumption is violated, it does not matter as exception unwinding is
        // slow anyway...
        let cond = call_expect(self, cond, i1_true, Some("expect"));
        let current_bb = self.builder.get_insert_block().unwrap();
        let current_fun = current_bb.get_parent().unwrap();
        let then_block = self.ctx.insert_basic_block_after(current_bb, "succ");
        let exn_block = self.ctx.append_basic_block(current_fun, "fail");
        self.builder.build_conditional_branch(cond, then_block, exn_block).unwrap();
        self.builder.position_at_end(exn_block);
        self.raise_exn_by_id(generator, err_id, err_msg, params, loc);
        self.builder.position_at_end(then_block);
    }
}

/// See [`CodeGenerator::gen_constructor`].
@ -1014,98 +1063,132 @@ pub fn gen_comprehension<'ctx, G: CodeGenerator>(
|
||||
ctx.builder.build_store(index, zero_size_t).unwrap();
|
||||
|
||||
let elem_ty = ctx.get_llvm_type(generator, elt.custom.unwrap());
|
||||
let is_range = ctx.unifier.unioned(iter.custom.unwrap(), ctx.primitives.range);
|
||||
let list;
|
||||
let list_content;
|
||||
|
||||
if is_range {
|
||||
let iter_val = RangeValue::from_ptr_val(iter_val.into_pointer_value(), Some("range"));
|
||||
let (start, stop, step) = destructure_range(ctx, iter_val);
|
||||
let diff = ctx.builder.build_int_sub(stop, start, "diff").unwrap();
|
||||
// add 1 to the length as the value is rounded to zero
|
||||
// the length may be 1 more than the actual length if the division is exact, but the
|
||||
// length is a upper bound only anyway so it does not matter.
|
||||
let length = ctx.builder.build_int_signed_div(diff, step, "div").unwrap();
|
||||
let length = ctx.builder.build_int_add(length, int32.const_int(1, false), "add1").unwrap();
|
||||
// in case length is non-positive
|
||||
let is_valid =
|
||||
ctx.builder.build_int_compare(IntPredicate::SGT, length, zero_32, "check").unwrap();
|
||||
// The implementation of the for loop logic depends on
|
||||
// the typechecker type of `iter`.
|
||||
let iter_ty = iter.custom.unwrap();
|
||||
match &*ctx.unifier.get_ty(iter_ty) {
|
||||
TypeEnum::TObj { obj_id, params, .. }
|
||||
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// `iter` is a `List[T]`, and `T` is the element type
|
||||
|
||||
let list_alloc_size = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
is_valid,
|
||||
ctx.builder.build_int_z_extend_or_bit_cast(length, size_t, "z_ext_len").unwrap(),
|
||||
zero_size_t,
|
||||
"listcomp.alloc_size",
|
||||
// Get the `T` out of `List[T]` - it is defined to be the 1st param.
|
||||
let list_elem_ty = iter_type_vars(params).nth(0).unwrap().ty;
|
||||
|
||||
let length = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero_size_t, int32.const_int(1, false)],
|
||||
Some("length"),
|
||||
)
|
||||
.into_int_value();
|
||||
list = allocate_list(generator, ctx, Some(elem_ty), length, Some("listcomp"));
|
||||
list_content = list.data().base_ptr(ctx, generator);
|
||||
let counter = generator.gen_var_alloc(ctx, size_t.into(), Some("counter.addr"))?;
|
||||
// counter = -1
|
||||
ctx.builder.build_store(counter, size_t.const_int(u64::MAX, true)).unwrap();
|
||||
ctx.builder.build_unconditional_branch(test_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(test_bb);
|
||||
let tmp =
|
||||
ctx.builder.build_load(counter, "i").map(BasicValueEnum::into_int_value).unwrap();
|
||||
let tmp = ctx.builder.build_int_add(tmp, size_t.const_int(1, false), "inc").unwrap();
|
||||
ctx.builder.build_store(counter, tmp).unwrap();
|
||||
let cmp = ctx.builder.build_int_compare(IntPredicate::SLT, tmp, length, "cmp").unwrap();
|
||||
ctx.builder.build_conditional_branch(cmp, body_bb, cont_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
let arr_ptr = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero_size_t, zero_32],
|
||||
Some("arr.addr"),
|
||||
)
|
||||
.into_pointer_value();
|
||||
let val = ctx.build_gep_and_load(arr_ptr, &[tmp], Some("val"));
|
||||
generator.gen_assign(ctx, target, val.into(), list_elem_ty)?;
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. }
|
||||
if *obj_id == ctx.primitives.range.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// `iter` is a `range(start, stop, step)`, and `int32` is the element type
|
||||
|
||||
let iter_val = RangeValue::from_ptr_val(iter_val.into_pointer_value(), Some("range"));
|
||||
let (start, stop, step) = destructure_range(ctx, iter_val);
|
||||
let diff = ctx.builder.build_int_sub(stop, start, "diff").unwrap();
|
||||
// add 1 to the length as the value is rounded to zero
|
||||
// the length may be 1 more than the actual length if the division is exact, but the
|
||||
// length is a upper bound only anyway so it does not matter.
|
||||
let length = ctx.builder.build_int_signed_div(diff, step, "div").unwrap();
|
||||
let length =
|
||||
ctx.builder.build_int_add(length, int32.const_int(1, false), "add1").unwrap();
|
||||
// in case length is non-positive
|
||||
let is_valid =
|
||||
ctx.builder.build_int_compare(IntPredicate::SGT, length, zero_32, "check").unwrap();
|
||||
|
||||
let list_alloc_size = ctx
|
||||
.builder
|
||||
.build_select(
|
||||
is_valid,
|
||||
ctx.builder
|
||||
.build_int_z_extend_or_bit_cast(length, size_t, "z_ext_len")
|
||||
.unwrap(),
|
||||
zero_size_t,
|
||||
"listcomp.alloc_size",
|
||||
)
|
||||
.unwrap();
|
||||
list = allocate_list(
|
||||
generator,
|
||||
ctx,
|
||||
Some(elem_ty),
|
||||
list_alloc_size.into_int_value(),
|
||||
Some("listcomp.addr"),
|
||||
);
|
||||
list_content = list.data().base_ptr(ctx, generator);
|
||||
|
||||
let i = generator.gen_store_target(ctx, target, Some("i.addr"))?.unwrap();
|
||||
ctx.builder
|
||||
.build_store(i, ctx.builder.build_int_sub(start, step, "start_init").unwrap())
|
||||
.unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_conditional_branch(
|
||||
gen_in_range_check(ctx, start, stop, step),
|
||||
test_bb,
|
||||
cont_bb,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
ctx.builder.position_at_end(test_bb);
|
||||
// add and test
|
||||
let tmp = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
ctx.builder.build_load(i, "i").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
step,
|
||||
"start_loop",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.builder.build_store(i, tmp).unwrap();
|
||||
ctx.builder
|
||||
.build_conditional_branch(
|
||||
gen_in_range_check(ctx, tmp, stop, step),
|
||||
body_bb,
|
||||
cont_bb,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
}
|
||||
_ => {
|
||||
panic!(
|
||||
"unsupported iterator type in list comprehension: {}",
|
||||
ctx.unifier.stringify(iter_ty)
|
||||
)
|
||||
.unwrap();
|
||||
list = allocate_list(
|
||||
generator,
|
||||
ctx,
|
||||
Some(elem_ty),
|
||||
list_alloc_size.into_int_value(),
|
||||
Some("listcomp.addr"),
|
||||
);
|
||||
list_content = list.data().base_ptr(ctx, generator);
|
||||
|
||||
let i = generator.gen_store_target(ctx, target, Some("i.addr"))?.unwrap();
|
||||
ctx.builder
|
||||
.build_store(i, ctx.builder.build_int_sub(start, step, "start_init").unwrap())
|
||||
.unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_conditional_branch(gen_in_range_check(ctx, start, stop, step), test_bb, cont_bb)
|
||||
.unwrap();
|
||||
|
||||
ctx.builder.position_at_end(test_bb);
|
||||
// add and test
|
||||
let tmp = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
ctx.builder.build_load(i, "i").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
step,
|
||||
"start_loop",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.builder.build_store(i, tmp).unwrap();
|
||||
ctx.builder
|
||||
.build_conditional_branch(gen_in_range_check(ctx, tmp, stop, step), body_bb, cont_bb)
|
||||
.unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
} else {
|
||||
let length = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero_size_t, int32.const_int(1, false)],
|
||||
Some("length"),
|
||||
)
|
||||
.into_int_value();
|
||||
list = allocate_list(generator, ctx, Some(elem_ty), length, Some("listcomp"));
|
||||
list_content = list.data().base_ptr(ctx, generator);
|
||||
let counter = generator.gen_var_alloc(ctx, size_t.into(), Some("counter.addr"))?;
|
||||
// counter = -1
|
||||
ctx.builder.build_store(counter, size_t.const_int(u64::MAX, true)).unwrap();
|
||||
ctx.builder.build_unconditional_branch(test_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(test_bb);
|
||||
let tmp = ctx.builder.build_load(counter, "i").map(BasicValueEnum::into_int_value).unwrap();
|
||||
let tmp = ctx.builder.build_int_add(tmp, size_t.const_int(1, false), "inc").unwrap();
|
||||
ctx.builder.build_store(counter, tmp).unwrap();
|
||||
let cmp = ctx.builder.build_int_compare(IntPredicate::SLT, tmp, length, "cmp").unwrap();
|
||||
ctx.builder.build_conditional_branch(cmp, body_bb, cont_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
let arr_ptr = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero_size_t, zero_32],
|
||||
Some("arr.addr"),
|
||||
)
|
||||
.into_pointer_value();
|
||||
let val = ctx.build_gep_and_load(arr_ptr, &[tmp], Some("val"));
|
||||
generator.gen_assign(ctx, target, val.into())?;
|
||||
}
|
||||
}
|
||||
|
||||
// Emits the content of `cont_bb`
|
||||
@ -2090,322 +2173,153 @@ pub fn gen_cmpop_expr<'ctx, G: CodeGenerator>(
|
||||
|
||||
/// Generates code for a subscript expression on an `ndarray`.
|
||||
///
|
||||
/// * `ty` - The `Type` of the `NDArray` elements.
|
||||
/// * `elem_ty` - The `Type` of the `NDArray` elements.
|
||||
/// * `ndims` - The `Type` of the `NDArray` number-of-dimensions `Literal`.
|
||||
/// * `v` - The `NDArray` value.
|
||||
/// * `src_ndarray` - The `NDArray` value.
|
||||
/// * `slice` - The slice expression used to subscript into the `ndarray`.
|
||||
fn gen_ndarray_subscript_expr<'ctx, G: CodeGenerator>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ty: Type,
|
||||
elem_ty: Type,
|
||||
ndims: Type,
|
||||
v: NDArrayValue<'ctx>,
|
||||
src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
slice: &Expr<Option<Type>>,
|
||||
) -> Result<Option<ValueEnum<'ctx>>, String> {
|
||||
let llvm_i1 = ctx.ctx.bool_type();
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
// TODO: Support https://numpy.org/doc/stable/user/basics.indexing.html#dimensional-indexing-tools
|
||||
|
||||
let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else {
|
||||
let sizet = src_ndarray.element.0.sizet;
|
||||
debug_assert_eq!(sizet.0, generator.get_size_type(ctx.ctx)); // The ndarray's size type must match `generator.get_size_type()`; anything else is a bug
|
||||
|
||||
let slice_index_model = SliceIndexModel::default();
|
||||
|
||||
// Annoying notes about `slice`
|
||||
// - `my_array[5]`
|
||||
// - slice is a `Constant`
|
||||
// - `my_array[:5]`
|
||||
// - slice is a `Slice`
|
||||
// - `my_array[:]`
|
||||
// - slice is a `Slice`, but lower upper step would all be `Option::None`
|
||||
// - `my_array[:, :]`
|
||||
// - slice is now a `Tuple` of two `Slice`-s
|
||||
//
|
||||
// In summary:
|
||||
// - when there is a comma "," within [], `slice` will be a `Tuple` of the entries.
|
||||
// - when there is no comma "," within [] (i.e., just a single entry), `slice` will be that entry itself.
|
||||
//
|
||||
// So we first "flatten" out the slice expression
|
||||
let subscript_exprs = match &slice.node {
|
||||
ExprKind::Tuple { elts, .. } => elts.iter().collect_vec(),
|
||||
_ => vec![slice],
|
||||
};
|
||||
|
||||
// Process all subscript expressions in subscripts
|
||||
let mut rust_ndsubscripts: Vec<RustNDSubscript> = Vec::with_capacity(subscript_exprs.len()); // Not using iterators here because `?` is used here.
|
||||
for subscript_expr in subscript_exprs {
|
||||
// NOTE: Currently nac3core's slices do not have an object representation,
|
||||
// so the code/implementation looks awkward - we have to do pattern matching on the expression
|
||||
let ndsubscript =
|
||||
if let ExprKind::Slice { lower: start, upper: stop, step } = &subscript_expr.node {
|
||||
// Helper function to reduce code duplication
|
||||
type ValueExpr = Option<Box<Located<ExprKind<Option<Type>>, Option<Type>>>>;
|
||||
let mut help = |value_expr: &ValueExpr| -> Result<_, String> {
|
||||
Ok(match value_expr {
|
||||
None => None,
|
||||
Some(value_expr) => Some(
|
||||
slice_index_model.review(
|
||||
ctx.ctx,
|
||||
generator
|
||||
.gen_expr(ctx, value_expr)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ctx.primitives.int32)?
|
||||
.as_any_value_enum(),
|
||||
),
|
||||
),
|
||||
})
|
||||
};
|
||||
|
||||
let start = help(start)?;
|
||||
let stop = help(stop)?;
|
||||
let step = help(step)?;
|
||||
|
||||
RustNDSubscript::Slice(RustUserSlice { start, stop, step })
|
||||
} else {
|
||||
// Anything else that is not a slice (might be illegal values),
|
||||
// For nac3core, this should be e.g., an int32 constant, an int32 variable, otherwise its an error
|
||||
|
||||
let index = slice_index_model.review(
|
||||
ctx.ctx,
|
||||
generator
|
||||
.gen_expr(ctx, subscript_expr)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ctx.primitives.int32)?
|
||||
.as_any_value_enum(),
|
||||
);
|
||||
|
||||
RustNDSubscript::Index(index)
|
||||
};
|
||||
rust_ndsubscripts.push(ndsubscript);
|
||||
}
|
||||
|
||||
// Extract the `ndims` from a `Type` to `i128`
|
||||
// We *HAVE* to know this statically, this is used to determine
|
||||
// whether the subscript returns a scalar or an ndarray
|
||||
let TypeEnum::TLiteral { values: ndims_values, .. } = &*ctx.unifier.get_ty_immutable(ndims)
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
assert_eq!(ndims_values.len(), 1);
|
||||
let src_ndims = i128::try_from(ndims_values[0].clone()).unwrap();
|
||||
|
||||
let ndims = values
|
||||
.iter()
|
||||
.map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone()))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|val| {
|
||||
format!(
|
||||
"Expected non-negative literal for ndarray.ndims, got {}",
|
||||
i128::try_from(val).unwrap()
|
||||
)
|
||||
})?;
|
||||
|
||||
assert!(!ndims.is_empty());
|
||||
|
||||
// The number of dimensions subscripted by the index expression.
|
||||
// Slicing a ndarray will yield the same number of dimensions, whereas indexing into a
|
||||
// dimension will remove a dimension.
|
||||
let subscripted_dims = match &slice.node {
|
||||
ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| {
|
||||
if let ExprKind::Slice { .. } = &value_subexpr.node {
|
||||
acc
|
||||
} else {
|
||||
acc + 1
|
||||
}
|
||||
}),
|
||||
|
||||
ExprKind::Slice { .. } => 0,
|
||||
_ => 1,
|
||||
};
|
||||
|
||||
let ndarray_ndims_ty = ctx.unifier.get_fresh_literal(
|
||||
ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(),
|
||||
None,
|
||||
);
|
||||
let ndarray_ty =
|
||||
make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty));
|
||||
let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type();
|
||||
let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type();
|
||||
let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum();
|
||||
|
||||
// Check that len is non-zero
|
||||
let len = v.load_ndims(ctx);
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(),
|
||||
"0:IndexError",
|
||||
"too many indices for array: array is {0}-dimensional but 1 were indexed",
|
||||
[Some(len), None, None],
|
||||
slice.location,
|
||||
);
|
||||
|
||||
// Normalizes a possibly-negative index to its corresponding positive index
|
||||
let normalize_index = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
index: IntValue<'ctx>,
|
||||
dim: u64| {
|
||||
gen_if_else_expr_callback(
|
||||
// Check for "too many indices for array: array is ..." error
|
||||
if src_ndims < rust_ndsubscripts.len() as i128 {
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx,
|
||||
|_, ctx| {
|
||||
Ok(ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "")
|
||||
.unwrap())
|
||||
},
|
||||
|_, _| Ok(Some(index)),
|
||||
|generator, ctx| {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
ctx.ctx.bool_type().const_int(1, false),
|
||||
"0:IndexError",
|
||||
"too many indices for array: array is {0}-dimensional, but {1} were indexed",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
}
|
||||
|
||||
let len = unsafe {
|
||||
v.dim_sizes().get_typed_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(dim, true),
|
||||
None,
|
||||
)
|
||||
};
|
||||
// Statically determine `dst_ndims`
|
||||
let dst_ndims =
|
||||
RustNDSubscript::deduce_ndims_after_slicing(&rust_ndsubscripts, src_ndims as i32);
|
||||
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
len,
|
||||
ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(),
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
// Prepare dst_ndarray
|
||||
let elem_llvm_ty = ctx.get_llvm_type(generator, elem_ty);
|
||||
let dst_ndarray = alloca_ndarray_and_init(
|
||||
generator,
|
||||
ctx,
|
||||
elem_llvm_ty,
|
||||
ndarray::NDArrayInitMode::NDims { ndims: sizet.constant(dst_ndims as u64) },
|
||||
"subndarray",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap()))
|
||||
},
|
||||
)
|
||||
.map(|v| v.map(BasicValueEnum::into_int_value))
|
||||
};
|
||||
// Prepare the subscripts
|
||||
let ndsubscript_array = RustNDSubscript::alloca_subscripts(ctx, &rust_ndsubscripts);
|
||||
|
||||
// Converts a slice expression into a slice-range tuple
|
||||
let expr_to_slice = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
node: &ExprKind<Option<Type>>,
|
||||
dim: u64| {
|
||||
match node {
|
||||
ExprKind::Constant { value: Constant::Int(v), .. } => {
|
||||
let Some(index) =
|
||||
normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
// NOTE: IRRT does check for indexing errors
|
||||
call_nac3_ndarray_subscript(
|
||||
generator,
|
||||
ctx,
|
||||
ndsubscript_array.num_elements.signed_cast_to_fixed(ctx, Int32, "num_ndsubscripts"),
|
||||
ndsubscript_array.pointer,
|
||||
src_ndarray,
|
||||
dst_ndarray,
|
||||
);
|
||||
|
||||
Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
}
|
||||
|
||||
ExprKind::Slice { lower, upper, step } => {
|
||||
let dim_sz = unsafe {
|
||||
v.dim_sizes().get_typed_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(dim, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
|
||||
handle_slice_indices(lower, upper, step, ctx, generator, dim_sz)
|
||||
}
|
||||
|
||||
_ => {
|
||||
let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) };
|
||||
let index = index
|
||||
.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
|
||||
.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, dim)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let make_indices_arr = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>|
|
||||
-> Result<_, String> {
|
||||
Ok(if let ExprKind::Tuple { elts, .. } = &slice.node {
|
||||
let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap());
|
||||
let index_addr = generator.gen_array_var_alloc(
|
||||
ctx,
|
||||
llvm_int_ty,
|
||||
llvm_usize.const_int(elts.len() as u64, false),
|
||||
None,
|
||||
)?;
|
||||
|
||||
for (i, elt) in elts.iter().enumerate() {
|
||||
let Some(index) = generator.gen_expr(ctx, elt)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let index = index
|
||||
.to_basic_value_enum(ctx, generator, elt.custom.unwrap())?
|
||||
.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, 0)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let store_ptr = unsafe {
|
||||
index_addr.ptr_offset_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(i as u64, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
}
|
||||
|
||||
Some(index_addr)
|
||||
} else if let Some(index) = generator.gen_expr(ctx, slice)? {
|
||||
let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap());
|
||||
let index_addr = generator.gen_array_var_alloc(
|
||||
ctx,
|
||||
llvm_int_ty,
|
||||
llvm_usize.const_int(1u64, false),
|
||||
None,
|
||||
)?;
|
||||
|
||||
let index =
|
||||
index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) };
|
||||
|
||||
let store_ptr = unsafe {
|
||||
index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
|
||||
Some(index_addr)
|
||||
} else {
|
||||
None
|
||||
})
|
||||
};
|
||||
|
||||
Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 {
|
||||
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
|
||||
v.data().get(ctx, generator, &index_addr, None).into()
|
||||
// ...and return the result, with two cases
|
||||
let result_llvm_value = if dst_ndims == 0 {
|
||||
// 1) ndims == 0 (this happens when you do `np.zeros((3, 4))[1, 1]`), return *THE ELEMENT*
|
||||
let element_ptr = dst_ndarray.gep(ctx, |f| f.data).load(ctx, "pelement"); // `*data` points to the first element by definition
|
||||
element_ptr.cast_opaque_to(ctx, elem_llvm_ty, "").load_opaque(ctx, "element")
|
||||
} else {
|
||||
match &slice.node {
|
||||
ExprKind::Tuple { elts, .. } => {
|
||||
let slices = elts
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64))
|
||||
.take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
if slices.len() < elts.len() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let slices = slices.into_iter().map(Option::unwrap).collect_vec();
|
||||
|
||||
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into()
|
||||
}
|
||||
|
||||
ExprKind::Slice { .. } => {
|
||||
let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into()
|
||||
}
|
||||
|
||||
_ => {
|
||||
// Accessing an element from a multi-dimensional `ndarray`
|
||||
|
||||
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
|
||||
// Create a new array, remove the top dimension from the dimension-size-list, and copy the
|
||||
// elements over
|
||||
let subscripted_ndarray =
|
||||
generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?;
|
||||
let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None);
|
||||
|
||||
let num_dims = v.load_ndims(ctx);
|
||||
ndarray.store_ndims(
|
||||
ctx,
|
||||
generator,
|
||||
ctx.builder
|
||||
.build_int_sub(num_dims, llvm_usize.const_int(1, false), "")
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims);
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let v_dims_src_ptr = unsafe {
|
||||
v.dim_sizes().ptr_offset_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(1, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
call_memcpy_generic(
|
||||
ctx,
|
||||
ndarray.dim_sizes().base_ptr(ctx, generator),
|
||||
v_dims_src_ptr,
|
||||
ctx.builder
|
||||
.build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "")
|
||||
.map(Into::into)
|
||||
.unwrap(),
|
||||
llvm_i1.const_zero(),
|
||||
);
|
||||
|
||||
let ndarray_num_elems = call_ndarray_calc_size(
|
||||
generator,
|
||||
ctx,
|
||||
&ndarray.dim_sizes().as_slice_value(ctx, generator),
|
||||
(None, None),
|
||||
);
|
||||
ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems);
|
||||
|
||||
let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None);
|
||||
call_memcpy_generic(
|
||||
ctx,
|
||||
ndarray.data().base_ptr(ctx, generator),
|
||||
v_data_src_ptr,
|
||||
ctx.builder
|
||||
.build_int_mul(
|
||||
ndarray_num_elems,
|
||||
llvm_ndarray_data_t.size_of().unwrap(),
|
||||
"",
|
||||
)
|
||||
.map(Into::into)
|
||||
.unwrap(),
|
||||
llvm_i1.const_zero(),
|
||||
);
|
||||
|
||||
ndarray.as_base_value().into()
|
||||
}
|
||||
}
|
||||
}))
|
||||
// 2) ndims > 0 (other cases), return subndarray
|
||||
dst_ndarray.value.as_basic_value_enum()
|
||||
};
|
||||
Ok(Some(ValueEnum::Dynamic(result_llvm_value)))
|
||||
}
|
||||
|
||||
/// See [`CodeGenerator::gen_expr`].
|
||||
@ -3048,17 +2962,26 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
            }
        }
        TypeEnum::TObj { obj_id, params, .. } if *obj_id == PrimDef::NDArray.id() => {
            let (ty, ndims) = params.iter().map(|(_, ty)| ty).collect_tuple().unwrap();
            let (elem_ty, ndims) = params.iter().map(|(_, ty)| ty).collect_tuple().unwrap();

            let v = if let Some(v) = generator.gen_expr(ctx, value)? {
                v.to_basic_value_enum(ctx, generator, value.custom.unwrap())?
                    .into_pointer_value()
            let ndarray_ptr = if let Some(v) = generator.gen_expr(ctx, value)? {
                let sizet = IntModel(generator.get_size_type(ctx.ctx));
                let ndarray_ptr_model = PointerModel(StructModel(NpArray { sizet }));

                let v = v.to_basic_value_enum(ctx, generator, value.custom.unwrap())?;
                ndarray_ptr_model.review(ctx.ctx, v.as_any_value_enum())
            } else {
                return Ok(None);
            };
            let v = NDArrayValue::from_ptr_val(v, usize, None);

            return gen_ndarray_subscript_expr(generator, ctx, *ty, *ndims, v, slice);
            return gen_ndarray_subscript_expr(
                generator,
                ctx,
                *elem_ty,
                *ndims,
                ndarray_ptr,
                slice,
            );
        }
        TypeEnum::TTuple { .. } => {
            let index: u32 =
@ -123,11 +123,12 @@ pub trait CodeGenerator {
        ctx: &mut CodeGenContext<'ctx, '_>,
        target: &Expr<Option<Type>>,
        value: ValueEnum<'ctx>,
        value_ty: Type,
    ) -> Result<(), String>
    where
        Self: Sized,
    {
        gen_assign(self, ctx, target, value)
        gen_assign(self, ctx, target, value, value_ty)
    }

    /// Generate code for a while expression.
187 nac3core/src/codegen/irrt/error_context.rs Normal file
@ -0,0 +1,187 @@
use crate::codegen::{model::*, CodeGenContext, CodeGenerator};

use super::util::{get_sized_dependent_function_name, FunctionBuilder};

pub struct StrFields<'ctx> {
    pub content: Field<PointerModel<FixedIntModel<Byte>>>,
    pub length: Field<IntModel<'ctx>>,
}

#[derive(Debug, Clone, Copy)]
pub struct Str<'ctx> {
    pub sizet: IntModel<'ctx>,
}

impl<'ctx> IsStruct<'ctx> for Str<'ctx> {
    type Fields = StrFields<'ctx>;

    fn struct_name(&self) -> &'static str {
        "Str"
    }

    fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
        Self::Fields {
            content: builder.add_field_auto("content"),
            length: builder.add_field("length", self.sizet),
        }
    }
}

type ErrorId = Int32;
pub struct ErrorIdsFields {
    pub index_error: Field<FixedIntModel<ErrorId>>,
    pub value_error: Field<FixedIntModel<ErrorId>>,
    pub assertion_error: Field<FixedIntModel<ErrorId>>,
    pub runtime_error: Field<FixedIntModel<ErrorId>>,
    pub type_error: Field<FixedIntModel<ErrorId>>,
}

#[derive(Debug, Clone, Copy)]
pub struct ErrorIds;

impl<'ctx> IsStruct<'ctx> for ErrorIds {
    type Fields = ErrorIdsFields;

    fn struct_name(&self) -> &'static str {
        "ErrorIds"
    }

    fn build_fields(&self, builder: &mut FieldBuilder) -> Self::Fields {
        Self::Fields {
            index_error: builder.add_field_auto("index_error"),
            value_error: builder.add_field_auto("value_error"),
            assertion_error: builder.add_field_auto("assertion_error"),
            runtime_error: builder.add_field_auto("runtime_error"),
            type_error: builder.add_field_auto("type_error"),
        }
    }
}

pub struct ErrorContextFields {
    pub error_id: Field<FixedIntModel<ErrorId>>,
    pub message_template: Field<PointerModel<FixedIntModel<Byte>>>,
    pub param1: Field<FixedIntModel<Int64>>,
    pub param2: Field<FixedIntModel<Int64>>,
    pub param3: Field<FixedIntModel<Int64>>,
}

#[derive(Debug, Clone, Copy)]
pub struct ErrorContext;

impl<'ctx> IsStruct<'ctx> for ErrorContext {
    type Fields = ErrorContextFields;

    fn struct_name(&self) -> &'static str {
        "ErrorContext"
    }

    fn build_fields(&self, builder: &mut FieldBuilder) -> Self::Fields {
        Self::Fields {
            error_id: builder.add_field_auto("error_id"),
            message_template: builder.add_field_auto("message_template"),
            param1: builder.add_field_auto("param1"),
            param2: builder.add_field_auto("param2"),
            param3: builder.add_field_auto("param3"),
        }
    }
}

// Prepare ErrorIds
fn build_error_ids<'ctx>(ctx: &CodeGenContext<'ctx, '_>) -> Pointer<'ctx, StructModel<ErrorIds>> {
    let error_ids = StructModel(ErrorIds).alloca(ctx, "error_ids");
    let i32_model = FixedIntModel(Int32);

    let get_string_id =
        |string_id| i32_model.constant(ctx.ctx, ctx.resolver.get_string_id(string_id) as u64);

    error_ids.gep(ctx, |f| f.index_error).store(ctx, get_string_id("0:IndexError"));
    error_ids.gep(ctx, |f| f.value_error).store(ctx, get_string_id("0:ValueError"));
    error_ids.gep(ctx, |f| f.assertion_error).store(ctx, get_string_id("0:AssertionError"));
    error_ids.gep(ctx, |f| f.runtime_error).store(ctx, get_string_id("0:RuntimeError"));
    error_ids.gep(ctx, |f| f.type_error).store(ctx, get_string_id("0:TypeError"));

    error_ids
}

pub fn call_nac3_error_context_initialize<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
    perrctx: Pointer<'ctx, StructModel<ErrorContext>>,
    perror_ids: Pointer<'ctx, StructModel<ErrorIds>>,
) {
    FunctionBuilder::begin(ctx, "__nac3_error_context_initialize")
        .arg("errctx", PointerModel(StructModel(ErrorContext)), perrctx)
        .arg("error_ids", PointerModel(StructModel(ErrorIds)), perror_ids)
        .returning_void();
}

pub fn call_nac3_error_context_has_no_error<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
    errctx: Pointer<'ctx, StructModel<ErrorContext>>,
) -> FixedInt<'ctx, Bool> {
    FunctionBuilder::begin(ctx, "__nac3_error_context_has_no_error")
        .arg("errctx", PointerModel(StructModel(ErrorContext)), errctx)
        .returning("has_no_error", FixedIntModel(Bool))
}

pub fn call_nac3_error_context_get_error_str<'ctx>(
    sizet: IntModel<'ctx>,
    ctx: &CodeGenContext<'ctx, '_>,
    errctx: Pointer<'ctx, StructModel<ErrorContext>>,
    dst_str: Pointer<'ctx, StructModel<Str<'ctx>>>,
) {
    FunctionBuilder::begin(
        ctx,
        &get_sized_dependent_function_name(sizet, "__nac3_error_context_get_error_str"),
    )
    .arg("errctx", PointerModel(StructModel(ErrorContext)), errctx)
    .arg("dst_str", PointerModel(StructModel(Str { sizet })), dst_str)
    .returning_void();
}

pub fn prepare_error_context<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
) -> Pointer<'ctx, StructModel<ErrorContext>> {
    let error_ids = build_error_ids(ctx);
    let errctx_ptr = StructModel(ErrorContext).alloca(ctx, "errctx");
    call_nac3_error_context_initialize(ctx, errctx_ptr, error_ids);
    errctx_ptr
}

pub fn check_error_context<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    errctx_ptr: Pointer<'ctx, StructModel<ErrorContext>>,
) {
    let sizet = IntModel(generator.get_size_type(ctx.ctx));

    let has_no_error = call_nac3_error_context_has_no_error(ctx, errctx_ptr);
    let pstr = StructModel(Str { sizet }).alloca(ctx, "error_str");
    call_nac3_error_context_get_error_str(sizet, ctx, errctx_ptr, pstr);

    let error_id = errctx_ptr.gep(ctx, |f| f.error_id).load(ctx, "error_id");
    let error_str = pstr.load(ctx, "error_str");
    let param1 = errctx_ptr.gep(ctx, |f| f.param1).load(ctx, "param1");
    let param2 = errctx_ptr.gep(ctx, |f| f.param2).load(ctx, "param2");
    let param3 = errctx_ptr.gep(ctx, |f| f.param3).load(ctx, "param3");
    ctx.make_assert_impl_by_id(
        generator,
        has_no_error.value,
        error_id.value,
        error_str.get_llvm_value(),
        [Some(param1.value), Some(param2.value), Some(param3.value)],
        ctx.current_loc,
    );
}

pub fn call_nac3_dummy_raise<G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext,
) {
    let errctx = prepare_error_context(ctx);
    FunctionBuilder::begin(ctx, "__nac3_error_dummy_raise")
        .arg("errctx", PointerModel(StructModel(ErrorContext)), errctx)
        .returning_void();
    check_error_context(generator, ctx, errctx);
}
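The Rust models above mirror structures that the IRRT side fills in: a table of exception IDs resolved at compile time via `get_string_id`, and an `ErrorContext` that a failing IRRT routine populates, which `check_error_context` then turns into a NAC3 exception through `make_assert_impl_by_id`. A rough C++ view of that layout, under stated assumptions: 32-bit error IDs and 64-bit parameters come from the Rust field declarations, while the `error_ids` handle and the `set_error` helper are inferred from the tests' use of `errctx.error_ids->index_error` and are illustrative rather than the exact IRRT definition:

```cpp
#include <cstdint>

// Mirrors ErrorIdsFields: exception IDs the host resolves at compile time.
struct ErrorIds {
    int32_t index_error;
    int32_t value_error;
    int32_t assertion_error;
    int32_t runtime_error;
    int32_t type_error;
};

// Mirrors ErrorContextFields: what a failing IRRT routine reports back.
struct ErrorContext {
    const ErrorIds* error_ids;   // set up by __nac3_error_context_initialize (assumed layout)
    int32_t error_id;            // unset means "no error"
    const char* message_template;
    int64_t param1;
    int64_t param2;
    int64_t param3;

    // Hypothetical convenience setter showing how a routine would report failure.
    void set_error(int32_t id, const char* msg,
                   int64_t p1 = 0, int64_t p2 = 0, int64_t p3 = 0) {
        error_id = id;
        message_template = msg;
        param1 = p1; param2 = p2; param3 = p3;
    }
};
```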
@ -1,5 +1,10 @@
use crate::typecheck::typedef::Type;

pub mod error_context;
pub mod numpy;
mod test;
mod util;

use super::{
    classes::{
        ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue, NDArrayValue,
4 nac3core/src/codegen/irrt/numpy/mod.rs Normal file
@ -0,0 +1,4 @@
pub mod ndarray;
pub mod shape;
pub mod slice;
pub mod subscript;
254 nac3core/src/codegen/irrt/numpy/ndarray.rs Normal file
@ -0,0 +1,254 @@
|
||||
use inkwell::types::{BasicType, BasicTypeEnum};
|
||||
|
||||
use crate::codegen::{
|
||||
irrt::{
|
||||
error_context::{check_error_context, prepare_error_context, ErrorContext},
|
||||
util::{get_sized_dependent_function_name, FunctionBuilder},
|
||||
},
|
||||
model::*,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
use super::{
|
||||
shape::Producer,
|
||||
slice::{SliceIndex, SliceIndexModel},
|
||||
};
|
||||
|
||||
pub struct NpArrayFields<'ctx> {
|
||||
pub data: Field<PointerModel<ByteModel>>,
|
||||
pub itemsize: Field<IntModel<'ctx>>,
|
||||
pub ndims: Field<IntModel<'ctx>>,
|
||||
pub shape: Field<PointerModel<IntModel<'ctx>>>,
|
||||
pub strides: Field<PointerModel<IntModel<'ctx>>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NpArray<'ctx> {
|
||||
pub sizet: IntModel<'ctx>,
|
||||
}
|
||||
|
||||
impl<'ctx> IsStruct<'ctx> for NpArray<'ctx> {
|
||||
type Fields = NpArrayFields<'ctx>;
|
||||
|
||||
fn struct_name(&self) -> &'static str {
|
||||
"NDArray"
|
||||
}
|
||||
|
||||
fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
|
||||
NpArrayFields {
|
||||
data: builder.add_field_auto("data"),
|
||||
itemsize: builder.add_field("itemsize", self.sizet),
|
||||
ndims: builder.add_field("ndims", self.sizet),
|
||||
shape: builder.add_field("shape", PointerModel(self.sizet)),
|
||||
strides: builder.add_field("strides", PointerModel(self.sizet)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> Pointer<'ctx, StructModel<NpArray<'ctx>>> {
|
||||
pub fn shape_slice(&self, ctx: &CodeGenContext<'ctx, '_>) -> ArraySlice<'ctx, IntModel<'ctx>> {
|
||||
let ndims = self.gep(ctx, |f| f.ndims).load(ctx, "ndims");
|
||||
let shape_base_ptr = self.gep(ctx, |f| f.shape).load(ctx, "shape");
|
||||
ArraySlice { num_elements: ndims, pointer: shape_base_ptr }
|
||||
}
|
||||
|
||||
pub fn strides_slice(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
) -> ArraySlice<'ctx, IntModel<'ctx>> {
|
||||
let ndims = self.gep(ctx, |f| f.ndims).load(ctx, "ndims");
|
||||
let strides_base_ptr = self.gep(ctx, |f| f.strides).load(ctx, "strides");
|
||||
ArraySlice { num_elements: ndims, pointer: strides_base_ptr }
|
||||
}
|
||||
}
|
||||
|
||||
pub fn alloca_ndarray<'ctx, G>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
elem_type: BasicTypeEnum<'ctx>,
|
||||
ndims: Int<'ctx>,
|
||||
name: &str,
|
||||
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
{
|
||||
let sizet = IntModel(generator.get_size_type(ctx.ctx));
|
||||
|
||||
// Allocate ndarray
|
||||
let ndarray_ptr = StructModel(NpArray { sizet }).alloca(ctx, name);
|
||||
|
||||
// Set ndims
|
||||
ndarray_ptr.gep(ctx, |f| f.ndims).store(ctx, ndims);
|
||||
|
||||
// Set itemsize
|
||||
let itemsize = Int(elem_type.size_of().unwrap());
|
||||
ndarray_ptr.gep(ctx, |f| f.itemsize).store(ctx, itemsize.signed_cast_to_int(ctx, sizet, ""));
|
||||
|
||||
// Allocate and set shape
|
||||
let shape_array = sizet.array_alloca(ctx, ndims, "shape");
|
||||
ndarray_ptr.gep(ctx, |f| f.shape).store(ctx, shape_array.pointer);
|
||||
|
||||
// Allocate and set strides
|
||||
let strides_array = sizet.array_alloca(ctx, ndims, "strides");
|
||||
ndarray_ptr.gep(ctx, |f| f.strides).store(ctx, strides_array.pointer);
|
||||
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
|
||||
pub enum NDArrayInitMode<'ctx, G: CodeGenerator + ?Sized> {
|
||||
NDims { ndims: Int<'ctx> },
|
||||
Shape { shape: Producer<'ctx, G, IntModel<'ctx>> },
|
||||
ShapeAndAllocaData { shape: Producer<'ctx, G, IntModel<'ctx>> },
|
||||
}
|
||||
|
||||
/// Allocate an ndarray on the stack and initialize it according to `init_mode`:
/// with only `ndims` set, with its `shape` filled in (strides left untouched),
/// or with `shape` filled in, `data` alloca-ed, and `strides` derived from the shape.
|
||||
pub fn alloca_ndarray_and_init<'ctx, G>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
elem_type: BasicTypeEnum<'ctx>,
|
||||
init_mode: NDArrayInitMode<'ctx, G>,
|
||||
name: &str,
|
||||
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
{
|
||||
// It is implemented verbosely in order to make the initialization modes super clear in their intent.
|
||||
match init_mode {
|
||||
NDArrayInitMode::NDims { ndims } => {
|
||||
let ndarray_ptr = alloca_ndarray(generator, ctx, elem_type, ndims, name)?;
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
NDArrayInitMode::Shape { shape } => {
|
||||
let ndims = shape.count;
|
||||
let ndarray_ptr = alloca_ndarray(generator, ctx, elem_type, ndims, name)?;
|
||||
|
||||
// Fill `ndarray.shape`
|
||||
(shape.write_to_array)(generator, ctx, &ndarray_ptr.shape_slice(ctx))?;
|
||||
|
||||
// Check if `shape` has bad inputs
|
||||
call_nac3_ndarray_util_assert_shape_no_negative(
|
||||
generator,
|
||||
ctx,
|
||||
ndims,
|
||||
ndarray_ptr.gep(ctx, |f| f.shape).load(ctx, "shape"),
|
||||
);
|
||||
|
||||
// NOTE: DO NOT DO `set_strides_by_shape` HERE.
|
||||
// Simply this is because we specified that `SetShape` wouldn't do `set_strides_by_shape`
|
||||
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
NDArrayInitMode::ShapeAndAllocaData { shape } => {
|
||||
let ndims = shape.count;
|
||||
let ndarray_ptr = alloca_ndarray(generator, ctx, elem_type, ndims, name)?;
|
||||
|
||||
// Fill `ndarray.shape`
|
||||
(shape.write_to_array)(generator, ctx, &ndarray_ptr.shape_slice(ctx))?;
|
||||
|
||||
// Check if `shape` has bad inputs
|
||||
call_nac3_ndarray_util_assert_shape_no_negative(
|
||||
generator,
|
||||
ctx,
|
||||
ndims,
|
||||
ndarray_ptr.gep(ctx, |f| f.shape).load(ctx, "shape"),
|
||||
);
|
||||
|
||||
// Now we populate `ndarray.data` by alloca-ing.
|
||||
// But first, we need to know the size of the ndarray to know how many elements to alloca,
|
||||
// since calculating nbytes of an ndarray requires `ndarray.shape` to be set.
|
||||
let ndarray_nbytes = call_nac3_ndarray_nbytes(ctx, ndarray_ptr);
|
||||
|
||||
// Alloca `data` and assign it to `ndarray.data`
|
||||
let data_array = FixedIntModel(Byte).array_alloca(ctx, ndarray_nbytes, "data");
|
||||
ndarray_ptr.gep(ctx, |f| f.data).store(ctx, data_array.pointer);
|
||||
|
||||
// Finally, do `set_strides_by_shape`
|
||||
// Check out https://ajcr.net/stride-guide-part-1/ to see what numpy "strides" are.
|
||||
call_nac3_ndarray_set_strides_by_shape(ctx, ndarray_ptr);
|
||||
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndims: Int<'ctx>,
|
||||
shape_ptr: Pointer<'ctx, IntModel<'ctx>>,
|
||||
) {
|
||||
let sizet = IntModel(generator.get_size_type(ctx.ctx));
|
||||
|
||||
let errctx = prepare_error_context(ctx);
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_util_assert_shape_no_negative"),
|
||||
)
|
||||
.arg("errctx", PointerModel(StructModel(ErrorContext)), errctx)
|
||||
.arg("ndims", sizet, ndims)
|
||||
.arg("shape", PointerModel(sizet), shape_ptr)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, errctx);
|
||||
}
|
||||
|
||||
fn call_nac3_ndarray_set_strides_by_shape<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) {
|
||||
let sizet = ndarray_ptr.element.0.sizet;
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_set_strides_by_shape"),
|
||||
)
|
||||
.arg("ndarray", PointerModel(StructModel(NpArray { sizet })), ndarray_ptr)
|
||||
.returning_void();
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_nbytes<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> Int<'ctx> {
|
||||
let sizet = ndarray_ptr.element.0.sizet;
|
||||
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_nbytes"))
|
||||
.arg("ndarray", PointerModel(StructModel(NpArray { sizet })), ndarray_ptr)
|
||||
.returning("nbytes", sizet)
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_fill_generic<'ctx>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
fill_value_ptr: Pointer<'ctx, ByteModel>,
|
||||
) {
|
||||
let sizet = ndarray_ptr.element.0.sizet;
|
||||
|
||||
FunctionBuilder::begin(
|
||||
ctx,
|
||||
&get_sized_dependent_function_name(sizet, "__nac3_ndarray_fill_generic"),
|
||||
)
|
||||
.arg("ndarray", PointerModel(StructModel(NpArray { sizet })), ndarray_ptr)
|
||||
.arg("pvalue", PointerModel(FixedIntModel(Byte)), fill_value_ptr)
|
||||
.returning_void();
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_len<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray_ptr: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
|
||||
) -> SliceIndex<'ctx> {
|
||||
let sizet = ndarray_ptr.element.0.sizet;
|
||||
let slice_index_model = SliceIndexModel::default();
|
||||
|
||||
let dst_len = slice_index_model.alloca(ctx, "dst_len");
|
||||
|
||||
let errctx = prepare_error_context(ctx);
|
||||
FunctionBuilder::begin(ctx, &get_sized_dependent_function_name(sizet, "__nac3_ndarray_len"))
|
||||
.arg("errctx", PointerModel(StructModel(ErrorContext)), errctx)
|
||||
.arg("ndarray", PointerModel(StructModel(NpArray { sizet })), ndarray_ptr)
|
||||
.arg("dst_len", PointerModel(slice_index_model), dst_len)
|
||||
.returning_void();
|
||||
check_error_context(generator, ctx, errctx);
|
||||
|
||||
dst_len.load(ctx, "len")
|
||||
}
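
// A minimal usage sketch of the functions above (caller-side names such as `shape_value`,
// `shape_ty` and `elem_llvm_ty` are illustrative assumptions, not part of this diff):
//
//     let shape_producer = parse_input_shape_arg(generator, ctx, shape_value, shape_ty);
//     let ndarray_ptr = alloca_ndarray_and_init(
//         generator,
//         ctx,
//         elem_llvm_ty,
//         NDArrayInitMode::ShapeAndAllocaData { shape: shape_producer },
//         "ndarray",
//     )?;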
nac3core/src/codegen/irrt/numpy/shape.rs (new file, 162 lines)
@ -0,0 +1,162 @@
use inkwell::values::BasicValueEnum;

use crate::{
    codegen::{
        classes::{ListValue, UntypedArrayLikeAccessor},
        model::*,
        stmt::gen_for_callback_incrementing,
        CodeGenContext, CodeGenerator,
    },
    typecheck::typedef::{Type, TypeEnum},
};

pub type ProducerWriteToArray<'ctx, G, E> = Box<
    dyn Fn(&mut G, &mut CodeGenContext<'ctx, '_>, &ArraySlice<'ctx, E>) -> Result<(), String>
        + 'ctx,
>;

pub struct Producer<'ctx, G: CodeGenerator + ?Sized, E: Model<'ctx>> {
    pub count: Int<'ctx>,
    pub write_to_array: ProducerWriteToArray<'ctx, G, E>,
}

/// TODO: UPDATE DOCUMENTATION
/// LLVM-typed implementation for generating a [`Producer`] that sets a list of ints.
///
/// * `elem_ty` - The element type of the `NDArray`.
/// * `shape` - The `shape` parameter used to construct the `NDArray`.
///
/// ### Notes on `shape`
///
/// Just like numpy, the `shape` argument can be:
/// 1. A list of `int32`; e.g., `np.empty([600, 800, 3])`
/// 2. A tuple of `int32`; e.g., `np.empty((600, 800, 3))`
/// 3. A scalar `int32`; e.g., `np.empty(3)`, this is functionally equivalent to `np.empty([3])`
///
/// See also [`typecheck::type_inferencer::fold_numpy_function_call_shape_argument`] to
/// learn how `shape` gets from being a Python user expression to here.
pub fn parse_input_shape_arg<'ctx, G>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    shape: BasicValueEnum<'ctx>,
    shape_ty: Type,
) -> Producer<'ctx, G, IntModel<'ctx>>
where
    G: CodeGenerator + ?Sized,
{
    let sizet = IntModel(generator.get_size_type(ctx.ctx));

    match &*ctx.unifier.get_ty(shape_ty) {
        TypeEnum::TObj { obj_id, .. }
            if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
        {
            // 1. A list of ints; e.g., `np.empty([600, 800, 3])`

            // A list has to be a PointerValue
            let shape_list = ListValue::from_ptr_val(shape.into_pointer_value(), sizet.0, None);

            // Create `Producer`
            let ndims = Int(shape_list.load_size(ctx, Some("count")));
            Producer {
                count: ndims,
                write_to_array: Box::new(move |ctx, generator, dst_array| {
                    // Basically iterate through the list and write to `dst_array` accordingly
                    let init_val = sizet.constant(0).0;
                    let max_val = (ndims.0, false);
                    let incr_val = sizet.constant(1).0;
                    gen_for_callback_incrementing(
                        ctx,
                        generator,
                        init_val,
                        max_val,
                        |generator, ctx, _hooks, axis| {
                            let axis = Int(axis);

                            // Get the dimension at `axis`
                            let dim = shape_list
                                .data()
                                .get(ctx, generator, &axis.0, None)
                                .into_int_value();

                            // Cast `dim` to SizeT
                            let dim = ctx
                                .builder
                                .build_int_s_extend_or_bit_cast(dim, sizet.0, "dim_casted")
                                .unwrap();

                            // Write
                            dst_array.ix(generator, ctx, axis, "dim").store(ctx, Int(dim));
                            Ok(())
                        },
                        incr_val,
                    )
                }),
            }
        }
        TypeEnum::TTuple { ty: tuple_types } => {
            // 2. A tuple of ints; e.g., `np.empty((600, 800, 3))`

            // Get the length/size of the tuple, which also happens to be the value of `ndims`.
            let ndims = tuple_types.len();

            // A tuple has to be a StructValue
            // Read [`codegen::expr::gen_expr`] to see how `nac3core` translates a Python tuple into LLVM.
            let shape_tuple = shape.into_struct_value();

            Producer {
                count: sizet.constant(ndims as u64),
                write_to_array: Box::new(move |generator, ctx, dst_array| {
                    for axis in 0..ndims {
                        // Get the dimension at `axis`
                        let dim = ctx
                            .builder
                            .build_extract_value(
                                shape_tuple,
                                axis as u32,
                                format!("dim{axis}").as_str(),
                            )
                            .unwrap()
                            .into_int_value();

                        // Cast `dim` to SizeT
                        let dim = ctx
                            .builder
                            .build_int_s_extend_or_bit_cast(dim, sizet.0, "dim_casted")
                            .unwrap();

                        // Write
                        dst_array
                            .ix(generator, ctx, sizet.constant(axis as u64), "dim")
                            .store(ctx, Int(dim));
                    }
                    Ok(())
                }),
            }
        }
        TypeEnum::TObj { obj_id, .. }
            if *obj_id == ctx.primitives.int32.obj_id(&ctx.unifier).unwrap() =>
        {
            // 3. A scalar int; e.g., `np.empty(3)`, this is functionally equivalent to `np.empty([3])`

            // The value has to be an integer
            let shape_int = shape.into_int_value();

            Producer {
                count: sizet.constant(1),
                write_to_array: Box::new(move |generator, ctx, dst_array| {
                    // Cast `shape_int` to SizeT
                    let dim = ctx
                        .builder
                        .build_int_s_extend_or_bit_cast(shape_int, sizet.0, "dim_casted")
                        .unwrap();

                    // Set shape[0] = shape_int
                    dst_array.ix(generator, ctx, sizet.constant(0), "dim").store(ctx, Int(dim));

                    Ok(())
                }),
            }
        }
        _ => panic!("parse_input_shape_arg encountered unknown type"),
    }
}
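
// A minimal sketch of consuming a `Producer` directly (the variable names are assumed):
// `count` carries the number of dimensions and `write_to_array` fills a destination slice
// of that length.
//
//     let producer = parse_input_shape_arg(generator, ctx, shape_value, shape_ty);
//     let dst = sizet.array_alloca(ctx, producer.count, "shape");
//     (producer.write_to_array)(generator, ctx, &dst)?;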
nac3core/src/codegen/irrt/numpy/slice.rs (new file, 86 lines)
@ -0,0 +1,86 @@
use crate::codegen::{model::*, CodeGenContext};

// nac3core's slicing index/length values are always int32_t
pub type SliceIndexInt = Int32;
pub type SliceIndexModel = FixedIntModel<SliceIndexInt>;
pub type SliceIndex<'ctx> = FixedInt<'ctx, SliceIndexInt>;

#[derive(Debug, Clone)]
pub struct UserSliceFields {
    pub start_defined: Field<BoolModel>,
    pub start: Field<SliceIndexModel>,
    pub stop_defined: Field<BoolModel>,
    pub stop: Field<SliceIndexModel>,
    pub step_defined: Field<BoolModel>,
    pub step: Field<SliceIndexModel>,
}

#[derive(Debug, Clone, Copy, Default)]
pub struct UserSlice;

impl<'ctx> IsStruct<'ctx> for UserSlice {
    type Fields = UserSliceFields;

    fn struct_name(&self) -> &'static str {
        "UserSlice"
    }

    fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
        Self::Fields {
            start_defined: builder.add_field_auto("start_defined"),
            start: builder.add_field_auto("start"),
            stop_defined: builder.add_field_auto("stop_defined"),
            stop: builder.add_field_auto("stop"),
            step_defined: builder.add_field_auto("step_defined"),
            step: builder.add_field_auto("step"),
        }
    }
}

#[derive(Debug, Clone)]
pub struct RustUserSlice<'ctx> {
    pub start: Option<SliceIndex<'ctx>>,
    pub stop: Option<SliceIndex<'ctx>>,
    pub step: Option<SliceIndex<'ctx>>,
}

impl<'ctx> RustUserSlice<'ctx> {
    // Set the values of an LLVM UserSlice
    // in the format of Python's `slice()`
    pub fn write_to_user_slice(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        dst_slice_ptr: Pointer<'ctx, StructModel<UserSlice>>,
    ) {
        // TODO: make this neater, with a helper lambda?

        let bool_model = BoolModel::default();

        let false_ = bool_model.constant(ctx.ctx, 0);
        let true_ = bool_model.constant(ctx.ctx, 1);

        match self.start {
            Some(start) => {
                dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, true_);
                dst_slice_ptr.gep(ctx, |f| f.start).store(ctx, start);
            }
            None => dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, false_),
        }

        match self.stop {
            Some(stop) => {
                dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, true_);
                dst_slice_ptr.gep(ctx, |f| f.stop).store(ctx, stop);
            }
            None => dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, false_),
        }

        match self.step {
            Some(step) => {
                dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, true_);
                dst_slice_ptr.gep(ctx, |f| f.step).store(ctx, step);
            }
            None => dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, false_),
        }
    }
}
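
// A minimal sketch (the `SliceIndex` values `start`/`stop` are assumed to be in scope):
//
//     let user_slice_ptr = StructModel(UserSlice).alloca(ctx, "user_slice");
//     RustUserSlice { start: Some(start), stop: Some(stop), step: None }
//         .write_to_user_slice(ctx, user_slice_ptr);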
nac3core/src/codegen/irrt/numpy/subscript.rs (new file, 181 lines)
@ -0,0 +1,181 @@
use crate::codegen::{
    irrt::{
        error_context::{check_error_context, prepare_error_context, ErrorContext},
        util::{get_sized_dependent_function_name, FunctionBuilder},
    },
    model::*,
    CodeGenContext, CodeGenerator,
};

use super::{
    ndarray::NpArray,
    slice::{RustUserSlice, SliceIndex, SliceIndexModel, UserSlice},
};

#[derive(Debug, Clone, Copy)]
pub struct NDSubscriptFields {
    pub type_: Field<ByteModel>, // Defined to be uint8_t in IRRT
    pub data: Field<PointerModel<ByteModel>>,
}

#[derive(Debug, Clone, Copy, Default)]
pub struct NDSubscript;

impl<'ctx> IsStruct<'ctx> for NDSubscript {
    type Fields = NDSubscriptFields;

    fn struct_name(&self) -> &'static str {
        "NDSubscript"
    }

    fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
        Self::Fields { type_: builder.add_field_auto("type"), data: builder.add_field_auto("data") }
    }
}

// An enum variant to store the content
// and type of an NDSubscript in high level.
#[derive(Debug, Clone)]
pub enum RustNDSubscript<'ctx> {
    Index(SliceIndex<'ctx>),
    Slice(RustUserSlice<'ctx>),
}

impl<'ctx> RustNDSubscript<'ctx> {
    fn irrt_subscript_id(&self) -> u64 {
        // Defined in IRRT
        match self {
            RustNDSubscript::Index(_) => 0,
            RustNDSubscript::Slice(_) => 1,
        }
    }

    fn write_to_ndsubscript(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        dst_ndsubscript_ptr: Pointer<'ctx, StructModel<NDSubscript>>,
    ) {
        let byte_model = ByteModel::default();
        let slice_index_model = SliceIndexModel::default();
        let user_slice_model = StructModel(UserSlice);

        // Set `dst_ndsubscript_ptr->type`
        dst_ndsubscript_ptr
            .gep(ctx, |f| f.type_)
            .store(ctx, byte_model.constant(ctx.ctx, self.irrt_subscript_id()));

        // Set `dst_ndsubscript_ptr->data`
        let data = match self {
            RustNDSubscript::Index(in_index) => {
                let index_ptr = slice_index_model.alloca(ctx, "index");
                index_ptr.store(ctx, *in_index);
                index_ptr.cast_to(ctx, FixedIntModel(Byte), "")
            }
            RustNDSubscript::Slice(in_rust_slice) => {
                let user_slice_ptr = user_slice_model.alloca(ctx, "user_slice");
                in_rust_slice.write_to_user_slice(ctx, user_slice_ptr);
                user_slice_ptr.cast_to(ctx, FixedIntModel(Byte), "")
            }
        };
        dst_ndsubscript_ptr.gep(ctx, |f| f.data).store(ctx, data);
    }

    // Allocate an array of subscripts onto the stack and return its stack pointer
    pub fn alloca_subscripts(
        ctx: &CodeGenContext<'ctx, '_>,
        subscripts: &[RustNDSubscript<'ctx>],
    ) -> ArraySlice<'ctx, StructModel<NDSubscript>> {
        let index_model = Int32Model::default();

        let ndsubscript_model = StructModel(NDSubscript);
        let ndsubscript_array = ndsubscript_model.array_alloca(
            ctx,
            index_model.constant(ctx.ctx, subscripts.len() as u64).to_int(),
            "ndsubscripts",
        );

        for (i, rust_ndsubscript) in subscripts.iter().enumerate() {
            let ndsubscript_ptr = ndsubscript_array.ix_unchecked(
                ctx,
                index_model.constant(ctx.ctx, i as u64).to_int(),
                "",
            );
            rust_ndsubscript.write_to_ndsubscript(ctx, ndsubscript_ptr);
        }

        ndsubscript_array
    }

    #[must_use]
    pub fn deduce_ndims_after_slicing(slices: &[RustNDSubscript], original_ndims: i32) -> i32 {
        let mut final_ndims: i32 = original_ndims;
        for slice in slices {
            match slice {
                RustNDSubscript::Index(_) => {
                    // An index subscript demotes the rank by 1
                    final_ndims -= 1;
                }
                RustNDSubscript::Slice(_) => {
                    // Nothing
                }
            }
        }
        final_ndims
    }
}

pub fn call_nac3_ndarray_subscript_deduce_ndims_after_slicing<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    sizet: IntModel<'ctx>,
    ndims: Int<'ctx>,
    num_ndsubscripts: Int<'ctx>,
    ndsubscripts: Pointer<'ctx, StructModel<NDSubscript>>,
) -> Int<'ctx> {
    let result = sizet.alloca(ctx, "result");

    let errctx_ptr = prepare_error_context(ctx);
    FunctionBuilder::begin(
        ctx,
        &get_sized_dependent_function_name(
            sizet,
            "__nac3_ndarray_subscript_deduce_ndims_after_slicing",
        ),
    )
    .arg("errctx", PointerModel(StructModel(ErrorContext)), errctx_ptr)
    .arg("result", PointerModel(sizet), result)
    .arg("ndims", sizet, ndims)
    .arg("num_ndsubscripts", sizet, num_ndsubscripts)
    .arg("ndsubscripts", PointerModel(StructModel(NDSubscript)), ndsubscripts)
    .returning_void();
    check_error_context(generator, ctx, errctx_ptr);

    result.load(ctx, "final_ndims")
}

pub fn call_nac3_ndarray_subscript<'ctx, G: CodeGenerator + ?Sized>(
    generator: &mut G,
    ctx: &mut CodeGenContext<'ctx, '_>,
    num_subscripts: SliceIndex<'ctx>,
    subscripts: Pointer<'ctx, StructModel<NDSubscript>>,
    src_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
    dst_ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>,
) {
    let sizet = src_ndarray.element.0.sizet;
    assert!(sizet.same_as(dst_ndarray.element.0.sizet)); // SizeT of src_ndarray and dst_ndarray must match

    let errctx_ptr = prepare_error_context(ctx);

    FunctionBuilder::begin(
        ctx,
        &get_sized_dependent_function_name(sizet, "__nac3_ndarray_subscript"),
    )
    .arg("errctx", PointerModel(StructModel(ErrorContext)), errctx_ptr)
    .arg("num_subscripts", SliceIndexModel::default(), num_subscripts)
    .arg("subscripts", PointerModel(StructModel(NDSubscript)), subscripts)
    .arg("src_ndarray", PointerModel(StructModel(NpArray { sizet })), src_ndarray)
    .arg("dst_ndarray", PointerModel(StructModel(NpArray { sizet })), dst_ndarray)
    .returning_void();

    check_error_context(generator, ctx, errctx_ptr);
}
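
// A minimal sketch of building the subscripts for something like `ndarray[2, 1:5]`
// (the `SliceIndex` constants `two`, `one` and `five` are assumed to exist already):
//
//     let subscripts = [
//         RustNDSubscript::Index(two),
//         RustNDSubscript::Slice(RustUserSlice { start: Some(one), stop: Some(five), step: None }),
//     ];
//     let subscripts_array = RustNDSubscript::alloca_subscripts(ctx, &subscripts);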
nac3core/src/codegen/irrt/test.rs (new file, 26 lines)
@ -0,0 +1,26 @@
#[cfg(test)]
mod tests {
    use std::{path::Path, process::Command};

    #[test]
    fn run_irrt_test() {
        assert!(
            cfg!(feature = "test"),
            "Please do `cargo test -F test` to compile `irrt_test.out` and run test"
        );

        let irrt_test_out_path = Path::new(concat!(env!("OUT_DIR"), "/irrt_test.out"));
        let output = Command::new(irrt_test_out_path.to_str().unwrap()).output().unwrap();

        if !output.status.success() {
            eprintln!("irrt_test failed with status {}:", output.status);
            eprintln!("====== stdout ======");
            eprintln!("{}", String::from_utf8(output.stdout).unwrap());
            eprintln!("====== stderr ======");
            eprintln!("{}", String::from_utf8(output.stderr).unwrap());
            eprintln!("====================");

            panic!("irrt_test failed");
        }
    }
}
nac3core/src/codegen/irrt/util.rs (new file, 79 lines)
@ -0,0 +1,79 @@
use inkwell::{
    types::{BasicMetadataTypeEnum, BasicType, IntType},
    values::{AnyValue, BasicMetadataValueEnum},
};

use crate::{
    codegen::{model::*, CodeGenContext},
    util::SizeVariant,
};

fn get_size_variant(ty: IntType) -> SizeVariant {
    match ty.get_bit_width() {
        32 => SizeVariant::Bits32,
        64 => SizeVariant::Bits64,
        _ => unreachable!("Unsupported int type bit width {}", ty.get_bit_width()),
    }
}

#[must_use]
pub fn get_sized_dependent_function_name(ty: IntModel, fn_name: &str) -> String {
    let mut fn_name = fn_name.to_owned();
    match get_size_variant(ty.0) {
        SizeVariant::Bits32 => {
            // Do nothing, `fn_name` already has the correct name
        }
        SizeVariant::Bits64 => {
            // Append "64", this is the naming convention
            fn_name.push_str("64");
        }
    }
    fn_name
}

// TODO: Variadic argument?
pub struct FunctionBuilder<'ctx, 'a> {
    ctx: &'a CodeGenContext<'ctx, 'a>,
    fn_name: &'a str,
    arguments: Vec<(BasicMetadataTypeEnum<'ctx>, BasicMetadataValueEnum<'ctx>)>,
}

impl<'ctx, 'a> FunctionBuilder<'ctx, 'a> {
    pub fn begin(ctx: &'a CodeGenContext<'ctx, 'a>, fn_name: &'a str) -> Self {
        FunctionBuilder { ctx, fn_name, arguments: Vec::new() }
    }

    // The name is for self-documentation
    #[must_use]
    pub fn arg<M: Model<'ctx>>(mut self, _name: &'static str, model: M, value: M::Value) -> Self {
        self.arguments
            .push((model.get_llvm_type(self.ctx.ctx).into(), value.get_llvm_value().into()));
        self
    }

    pub fn returning<M: Model<'ctx>>(self, name: &'static str, return_model: M) -> M::Value {
        let (param_tys, param_vals): (Vec<_>, Vec<_>) = self.arguments.into_iter().unzip();

        let function = self.ctx.module.get_function(self.fn_name).unwrap_or_else(|| {
            let return_type = return_model.get_llvm_type(self.ctx.ctx);
            let fn_type = return_type.fn_type(&param_tys, false);
            self.ctx.module.add_function(self.fn_name, fn_type, None)
        });

        let ret = self.ctx.builder.build_call(function, &param_vals, name).unwrap();
        return_model.review(self.ctx.ctx, ret.as_any_value_enum())
    }

    // TODO: Code duplication, but otherwise returning<S: Optic<'ctx>> cannot resolve S if return_optic = None
    pub fn returning_void(self) {
        let (param_tys, param_vals): (Vec<_>, Vec<_>) = self.arguments.into_iter().unzip();

        let function = self.ctx.module.get_function(self.fn_name).unwrap_or_else(|| {
            let return_type = self.ctx.ctx.void_type();
            let fn_type = return_type.fn_type(&param_tys, false);
            self.ctx.module.add_function(self.fn_name, fn_type, None)
        });

        self.ctx.builder.build_call(function, &param_vals, "").unwrap();
    }
}
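
// A minimal sketch of the builder in use (mirrors `call_nac3_ndarray_nbytes` elsewhere in
// this changeset; `sizet` and `ndarray_ptr` are assumed to be in scope):
//
//     let nbytes = FunctionBuilder::begin(
//         ctx,
//         &get_sized_dependent_function_name(sizet, "__nac3_ndarray_nbytes"),
//     )
//     .arg("ndarray", PointerModel(StructModel(NpArray { sizet })), ndarray_ptr)
//     .returning("nbytes", sizet);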
@ -35,6 +35,54 @@ fn get_float_intrinsic_repr(ctx: &Context, ft: FloatType) -> &'static str {
    unreachable!()
}

/// Invokes the [`llvm.lifetime.start`](https://releases.llvm.org/14.0.0/docs/LangRef.html#llvm-lifetime-start-intrinsic)
/// intrinsic.
pub fn call_lifetime_start<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
    size: IntValue<'ctx>,
    ptr: PointerValue<'ctx>,
) {
    const FN_NAME: &str = "llvm.lifetime.start";
    // NOTE: inkwell temporary workaround, see [`call_stackrestore`] for details
    let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
        let llvm_void = ctx.ctx.void_type();
        let llvm_i64 = ctx.ctx.i64_type();
        let llvm_p0i8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
        let fn_type = llvm_void.fn_type(&[llvm_i64.into(), llvm_p0i8.into()], false);

        ctx.module.add_function(FN_NAME, fn_type, None)
    });

    ctx.builder
        .build_call(intrinsic_fn, &[size.into(), ptr.into()], "")
        .map(CallSiteValue::try_as_basic_value)
        .unwrap();
}

/// Invokes the [`llvm.lifetime.end`](https://releases.llvm.org/14.0.0/docs/LangRef.html#llvm-lifetime-end-intrinsic)
/// intrinsic.
pub fn call_lifetime_end<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
    size: IntValue<'ctx>,
    ptr: PointerValue<'ctx>,
) {
    const FN_NAME: &str = "llvm.lifetime.end";
    // NOTE: inkwell temporary workaround, see [`call_stackrestore`] for details
    let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
        let llvm_void = ctx.ctx.void_type();
        let llvm_i64 = ctx.ctx.i64_type();
        let llvm_p0i8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
        let fn_type = llvm_void.fn_type(&[llvm_i64.into(), llvm_p0i8.into()], false);

        ctx.module.add_function(FN_NAME, fn_type, None)
    });

    ctx.builder
        .build_call(intrinsic_fn, &[size.into(), ptr.into()], "")
        .map(CallSiteValue::try_as_basic_value)
        .unwrap();
}

/// Invokes the [`llvm.stacksave`](https://llvm.org/docs/LangRef.html#llvm-stacksave-intrinsic)
/// intrinsic.
pub fn call_stacksave<'ctx>(
@ -1,7 +1,7 @@
use crate::{
    codegen::classes::{ListType, NDArrayType, ProxyType, RangeType},
    codegen::classes::{ListType, ProxyType, RangeType},
    symbol_resolver::{StaticValue, SymbolResolver},
    toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, TopLevelContext, TopLevelDef},
    toplevel::{helper::PrimDef, TopLevelContext, TopLevelDef},
    typecheck::{
        type_inferencer::{CodeLocation, PrimitiveStore},
        typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
@ -23,7 +23,9 @@ use inkwell::{
    values::{BasicValueEnum, FunctionValue, IntValue, PhiValue, PointerValue},
    AddressSpace, IntPredicate, OptimizationLevel,
};
use irrt::{error_context::Str, numpy::ndarray::NpArray};
use itertools::Itertools;
use model::*;
use nac3parser::ast::{Location, Stmt, StrRef};
use parking_lot::{Condvar, Mutex};
use std::collections::{HashMap, HashSet};
@ -41,7 +43,9 @@ pub mod extern_fns;
mod generator;
pub mod irrt;
pub mod llvm_intrinsics;
pub mod model;
pub mod numpy;
pub mod numpy_new;
pub mod stmt;

#[cfg(test)]
@ -471,12 +475,8 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
            }

            TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
                let (dtype, _) = unpack_ndarray_var_tys(unifier, ty);
                let element_type = get_llvm_type(
                    ctx, module, generator, unifier, top_level, type_cache, dtype,
                );

                NDArrayType::new(generator, ctx, element_type).as_base_type().into()
                let sizet = IntModel(generator.get_size_type(ctx));
                PointerModel(StructModel(NpArray { sizet })).get_llvm_type(ctx)
            }

            _ => unreachable!(
@ -646,7 +646,7 @@ pub fn gen_func_impl<
        ..primitives
    };

    let mut type_cache: HashMap<_, _> = [
    let mut type_cache: HashMap<_, BasicTypeEnum<'_>> = [
        (primitives.int32, context.i32_type().into()),
        (primitives.int64, context.i64_type().into()),
        (primitives.uint32, context.i32_type().into()),
@ -654,19 +654,8 @@ pub fn gen_func_impl<
        (primitives.float, context.f64_type().into()),
        (primitives.bool, context.i8_type().into()),
        (primitives.str, {
            let name = "str";
            match module.get_struct_type(name) {
                None => {
                    let str_type = context.opaque_struct_type("str");
                    let fields = [
                        context.i8_type().ptr_type(AddressSpace::default()).into(),
                        generator.get_size_type(context).into(),
                    ];
                    str_type.set_body(&fields, false);
                    str_type.into()
                }
                Some(t) => t.as_basic_type_enum(),
            }
            let sizet = IntModel(generator.get_size_type(context));
            StructModel(Str { sizet }).get_llvm_type(context)
        }),
        (primitives.range, RangeType::new(context).as_base_type().into()),
        (primitives.exception, {
@ -674,10 +663,12 @@ pub fn gen_func_impl<
            if let Some(t) = module.get_struct_type(name) {
                t.ptr_type(AddressSpace::default()).as_basic_type_enum()
            } else {
                let sizet = IntModel(generator.get_size_type(context));
                let str_ty = StructModel(Str { sizet }).get_llvm_type(context);

                let exception = context.opaque_struct_type("Exception");
                let int32 = context.i32_type().into();
                let int64 = context.i64_type().into();
                let str_ty = module.get_struct_type("str").unwrap().as_basic_type_enum();
                let fields = [int32, str_ty, int32, int32, str_ty, str_ty, int64, int64, int64];
                exception.set_body(&fields, false);
                exception.ptr_type(AddressSpace::default()).as_basic_type_enum()
nac3core/src/codegen/model/core.rs (new file, 70 lines)
@ -0,0 +1,70 @@
use inkwell::{
    context::Context,
    types::{AnyTypeEnum, BasicTypeEnum},
    values::{AnyValueEnum, BasicValueEnum},
};

use crate::codegen::CodeGenContext;

use super::{slice::ArraySlice, Int, Pointer};

pub trait ModelValue<'ctx>: Clone + Copy {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx>;
}

// Should have been part of [`Model<'ctx>`],
// but Rust object-safety requirements made it necessary to
// split this interface out
pub trait CanCheckLLVMType<'ctx> {
    /// Check if `scrutinee` matches the same LLVM type of this [`Model<'ctx>`].
    ///
    /// If they don't match, a human-readable error message is returned.
    fn check_llvm_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String>;
}

/// A [`Model`] is a type-safe concrete representation of a complex LLVM type.
pub trait Model<'ctx>: Clone + Copy + CanCheckLLVMType<'ctx> + Sized {
    /// The values that inhabit this [`Model<'ctx>`].
    ///
    /// ...that is, the type of wrapper that wraps the LLVM values that inhabit [`Model<'ctx>::get_llvm_type()`].
    type Value: ModelValue<'ctx>;

    /// Get the [`BasicTypeEnum<'ctx>`] this [`Model<'ctx>`] is representing.
    fn get_llvm_type(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx>;

    /// Cast an [`AnyValueEnum<'ctx>`] into a [`Self::Value`].
    ///
    /// Panics if `value` cannot pass [`CanCheckLLVMType::check_llvm_type()`].
    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value;

    /// Build an instruction to allocate a value of [`Model::get_llvm_type`].
    fn alloca(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> Pointer<'ctx, Self> {
        Pointer {
            element: *self,
            value: ctx.builder.build_alloca(self.get_llvm_type(ctx.ctx), name).unwrap(),
        }
    }

    /// Build an instruction to allocate an array of [`Model::get_llvm_type`].
    fn array_alloca(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        count: Int<'ctx>,
        name: &str,
    ) -> ArraySlice<'ctx, Self> {
        ArraySlice {
            num_elements: count,
            pointer: Pointer {
                element: *self,
                value: ctx
                    .builder
                    .build_array_alloca(self.get_llvm_type(ctx.ctx), count.0, name)
                    .unwrap(),
            },
        }
    }
}
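
// A minimal sketch of the Model API round trip (assuming `generator` and `ctx` are in scope):
//
//     let sizet = IntModel(generator.get_size_type(ctx.ctx));
//     let counter = sizet.alloca(ctx, "counter"); // Pointer<'ctx, IntModel<'ctx>>
//     counter.store(ctx, sizet.constant(0));
//     let value = counter.load(ctx, "value");     // Int<'ctx>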
nac3core/src/codegen/model/gep.rs (new file, 220 lines)
@ -0,0 +1,220 @@
use inkwell::{
    context::Context,
    types::{AnyType, AnyTypeEnum, BasicType, BasicTypeEnum, StructType},
    values::{AnyValueEnum, BasicValue, BasicValueEnum, StructValue},
};
use itertools::{izip, Itertools};

use crate::codegen::CodeGenContext;

use super::{core::CanCheckLLVMType, Model, ModelValue, Pointer};

#[derive(Debug, Clone, Copy)]
pub struct Field<E> {
    pub gep_index: u64,
    pub name: &'static str,
    pub element: E,
}

// Like [`Field<E>`] but element must be [`BasicTypeEnum<'ctx>`]
struct FieldLLVM<'ctx> {
    gep_index: u64,
    name: &'ctx str,
    llvm_type: BasicTypeEnum<'ctx>,

    // Only CanCheckLLVMType is needed, don't put in the whole `Model<'ctx>`
    llvm_type_model: Box<dyn CanCheckLLVMType<'ctx> + 'ctx>,
}

pub struct FieldBuilder<'ctx> {
    pub ctx: &'ctx Context,
    gep_index_counter: u64,
    struct_name: &'ctx str,
    fields: Vec<FieldLLVM<'ctx>>,
}

impl<'ctx> FieldBuilder<'ctx> {
    #[must_use]
    pub fn new(ctx: &'ctx Context, struct_name: &'ctx str) -> Self {
        FieldBuilder { ctx, gep_index_counter: 0, struct_name, fields: Vec::new() }
    }

    fn next_gep_index(&mut self) -> u64 {
        let index = self.gep_index_counter;
        self.gep_index_counter += 1;
        index
    }

    pub fn add_field<E: Model<'ctx> + 'ctx>(&mut self, name: &'static str, element: E) -> Field<E> {
        let gep_index = self.next_gep_index();

        self.fields.push(FieldLLVM {
            gep_index,
            name,
            llvm_type: element.get_llvm_type(self.ctx),
            llvm_type_model: Box::new(element),
        });

        Field { gep_index, name, element }
    }

    pub fn add_field_auto<E: Model<'ctx> + Default + 'ctx>(
        &mut self,
        name: &'static str,
    ) -> Field<E> {
        self.add_field(name, E::default())
    }
}

/// A marker trait to mark singleton struct that describes a particular LLVM structure.
pub trait IsStruct<'ctx>: Clone + Copy {
    /// The type of the Rust `struct` that holds all the fields of this LLVM struct.
    type Fields;

    /// A cosmetic name for this struct.
    /// TODO: Currently unused. To be used in error reporting.
    fn struct_name(&self) -> &'static str;

    fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields;

    fn get_fields(&self, ctx: &'ctx Context) -> Self::Fields {
        let mut builder = FieldBuilder::new(ctx, self.struct_name());
        self.build_fields(&mut builder)
    }

    /// Get the LLVM struct type this [`IsStruct<'ctx>`] is representing.
    fn get_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
        let mut builder = FieldBuilder::new(ctx, self.struct_name());
        self.build_fields(&mut builder); // Self::Fields is discarded

        let field_types = builder.fields.iter().map(|f| f.llvm_type).collect_vec();
        ctx.struct_type(&field_types, false)
    }

    /// Check if `scrutinee` matches the [`StructType<'ctx>`] this [`IsStruct<'ctx>`] is representing.
    fn check_struct_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: StructType<'ctx>,
    ) -> Result<(), String> {
        // Details about scrutinee
        let scrutinee_field_types = scrutinee.get_field_types();

        // Details about the defined specifications of this struct
        // We will access them through builder
        let mut builder = FieldBuilder::new(ctx, self.struct_name());
        self.build_fields(&mut builder);

        // Check # of fields
        if builder.fields.len() != scrutinee_field_types.len() {
            return Err(format!(
                "Expecting struct to have {} field(s), but scrutinee has {} field(s)",
                builder.fields.len(),
                scrutinee_field_types.len()
            ));
        }

        // Check the types of each field
        // TODO: Traceback?
        for (f, scrutinee_field_type) in izip!(builder.fields, scrutinee_field_types) {
            f.llvm_type_model.check_llvm_type(ctx, scrutinee_field_type.as_any_type_enum())?;
        }

        Ok(())
    }
}

/// A [`Model<'ctx>`] that represents an LLVM struct.
///
/// `self.0` contains a [`IsStruct<'ctx>`] that gives the details of the LLVM struct.
#[derive(Debug, Clone, Copy)]
pub struct StructModel<S>(pub S);

impl<'ctx, S: IsStruct<'ctx>> CanCheckLLVMType<'ctx> for StructModel<S> {
    fn check_llvm_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String> {
        // Check if scrutinee is even a struct type
        let AnyTypeEnum::StructType(scrutinee) = scrutinee else {
            return Err(format!("Expecting a struct type, but got {scrutinee:?}"));
        };

        // Ok. now check the struct type *thoroughly*
        self.0.check_struct_type(ctx, scrutinee)
    }
}

impl<'ctx, S: IsStruct<'ctx>> Model<'ctx> for StructModel<S> {
    type Value = Struct<'ctx, S>; // TODO: enrich it

    fn get_llvm_type(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        self.0.get_struct_type(ctx).as_basic_type_enum()
    }

    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value {
        // Check that `value` is not some bogus values or an incorrect StructValue
        self.check_llvm_type(ctx, value.get_type()).unwrap();

        Struct { structure: self.0, value: value.into_struct_value() }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct Struct<'ctx, S> {
    pub structure: S,
    pub value: StructValue<'ctx>,
}

impl<'ctx, S: IsStruct<'ctx>> ModelValue<'ctx> for Struct<'ctx, S> {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx> {
        self.value.as_basic_value_enum()
    }
}

impl<'ctx, S: IsStruct<'ctx>> Pointer<'ctx, StructModel<S>> {
    /// Build an instruction that does `getelementptr` on an LLVM structure referenced by this pointer.
    ///
    /// This provides a nice syntax to chain up `getelementptr` in an intuitive and type-safe way:
    ///
    /// ```ignore
    /// let ctx: &CodeGenContext<'ctx, '_>;
    /// let ndarray: Pointer<'ctx, StructModel<NpArray<'ctx>>>;
    /// ndarray.gep(ctx, |f| f.ndims).store();
    /// ```
    ///
    /// You might even write chains of `gep`, i.e.,
    /// ```ignore
    /// my_struct
    ///     .gep(ctx, |f| f.thing1)
    ///     .gep(ctx, |f| f.value)
    ///     .store(ctx, my_value) // Equivalent to `my_struct.thing1.value = my_value`
    /// ```
    pub fn gep<E, GetFieldFn>(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        get_field: GetFieldFn,
    ) -> Pointer<'ctx, E>
    where
        E: Model<'ctx>,
        GetFieldFn: FnOnce(S::Fields) -> Field<E>,
    {
        let fields = self.element.0.get_fields(ctx.ctx);
        let field = get_field(fields);

        let llvm_i32 = ctx.ctx.i32_type(); // TODO: I think I'm not supposed to *just* use i32 for GEP like that

        let ptr = unsafe {
            ctx.builder
                .build_in_bounds_gep(
                    self.value,
                    &[llvm_i32.const_zero(), llvm_i32.const_int(field.gep_index, false)],
                    field.name,
                )
                .unwrap()
        };

        Pointer { element: field.element, value: ptr }
    }
}
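
// A hypothetical `Pair` struct (illustrative only, not part of this changeset) showing the
// `IsStruct`/`FieldBuilder` flow end to end:
//
//     #[derive(Debug, Clone, Copy)]
//     pub struct PairFields {
//         pub first: Field<Int32Model>,
//         pub second: Field<Int32Model>,
//     }
//
//     #[derive(Debug, Clone, Copy, Default)]
//     pub struct Pair;
//
//     impl<'ctx> IsStruct<'ctx> for Pair {
//         type Fields = PairFields;
//
//         fn struct_name(&self) -> &'static str {
//             "Pair"
//         }
//
//         fn build_fields(&self, builder: &mut FieldBuilder<'ctx>) -> Self::Fields {
//             PairFields {
//                 first: builder.add_field_auto("first"),
//                 second: builder.add_field_auto("second"),
//             }
//         }
//     }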
nac3core/src/codegen/model/int.rs (new file, 279 lines)
@ -0,0 +1,279 @@
use inkwell::{
    context::Context,
    types::{AnyType, AnyTypeEnum, BasicType, BasicTypeEnum, IntType},
    values::{AnyValueEnum, BasicValue, BasicValueEnum, IntValue},
};

use crate::codegen::CodeGenContext;

use super::core::*;

/// Helper function to check if `scrutinee` is the same as `expected_int_type`
fn check_int_llvm_type<'ctx>(
    scrutinee: AnyTypeEnum<'ctx>,
    expected_int_type: IntType<'ctx>,
) -> Result<(), String> {
    // Check if llvm_type is int type
    let AnyTypeEnum::IntType(scrutinee) = scrutinee else {
        return Err(format!("Expecting an int type but got {scrutinee:?}"));
    };

    // Check bit width
    if scrutinee.get_bit_width() != expected_int_type.get_bit_width() {
        return Err(format!(
            "Expecting an int type of {}-bit(s) but got int type {}-bit(s)",
            expected_int_type.get_bit_width(),
            scrutinee.get_bit_width()
        ));
    }

    Ok(())
}

/// Helper function to cast `value` into an [`IntValue<'ctx>`].
/// The LLVM type of `value` will be checked with [`check_int_llvm_type`].
fn review_int_llvm_value<'ctx>(
    value: AnyValueEnum<'ctx>,
    expected_int_type: IntType<'ctx>,
) -> Result<IntValue<'ctx>, String> {
    // Check if value is of int type, error if that is anything else
    check_int_llvm_type(value.get_type().as_any_type_enum(), expected_int_type)?;

    // Ok, it must be an int
    Ok(value.into_int_value())
}

/// A model representing an [`IntType<'ctx>`].
///
/// Also see [`FixedIntModel`], which is more constrained than [`IntModel`]
/// but provides more type-safe mechanisms and even auto-derivation of [`BasicTypeEnum<'ctx>`]
/// for creating LLVM structures.
#[derive(Debug, Clone, Copy)]
pub struct IntModel<'ctx>(pub IntType<'ctx>);

/// An inhabitant of an [`IntModel<'ctx>`]
#[derive(Debug, Clone, Copy)]
pub struct Int<'ctx>(pub IntValue<'ctx>);

impl<'ctx> CanCheckLLVMType<'ctx> for IntModel<'ctx> {
    fn check_llvm_type(
        &self,
        _ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String> {
        check_int_llvm_type(scrutinee, self.0)
    }
}

impl<'ctx> Model<'ctx> for IntModel<'ctx> {
    type Value = Int<'ctx>;

    fn get_llvm_type(&self, _ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        self.0.as_basic_type_enum()
    }

    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value {
        let int = value.into_int_value();
        self.check_llvm_type(ctx, int.get_type().as_any_type_enum()).unwrap();
        Int(int)
    }
}

impl<'ctx> ModelValue<'ctx> for Int<'ctx> {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx> {
        self.0.as_basic_value_enum()
    }
}

// Extra utilities for [`Int<'ctx>`]
impl<'ctx> Int<'ctx> {
    #[must_use]
    pub fn signed_cast_to_int(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        target_int: IntModel<'ctx>,
        name: &str,
    ) -> Int<'ctx> {
        Int(ctx.builder.build_int_s_extend_or_bit_cast(self.0, target_int.0, name).unwrap())
    }

    #[must_use]
    pub fn signed_cast_to_fixed<T: IsFixedInt>(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        target_fixed: T,
        name: &str,
    ) -> FixedInt<'ctx, T> {
        FixedInt {
            int: target_fixed,
            value: ctx
                .builder
                .build_int_s_extend_or_bit_cast(self.0, T::get_int_type(ctx.ctx), name)
                .unwrap(),
        }
    }
}

// Extra utilities for [`IntModel<'ctx>`]
impl<'ctx> IntModel<'ctx> {
    /// Create a constant value that inhabits this [`IntModel<'ctx>`].
    #[must_use]
    pub fn constant(&self, value: u64) -> Int<'ctx> {
        Int(self.0.const_int(value, false))
    }

    /// Check if `other` is fully compatible with this [`IntModel<'ctx>`].
    ///
    /// This simply checks if the underlying [`IntType<'ctx>`] has
    /// the same number of bits.
    #[must_use]
    pub fn same_as(&self, other: IntModel<'ctx>) -> bool {
        // TODO: or `self.0 == other.0` would also work?
        self.0.get_bit_width() == other.0.get_bit_width()
    }
}

/// A model representing a compile-time known [`IntType<'ctx>`].
///
/// Also see [`IntModel`], which is less constrained than [`FixedIntModel`],
/// but enables one to handle [`IntType<'ctx>`] that could be dynamic
#[derive(Debug, Clone, Copy, Default)]
pub struct FixedIntModel<T>(pub T);

impl<T: IsFixedInt> FixedIntModel<T> {
    pub fn to_int_model(self, ctx: &Context) -> IntModel<'_> {
        IntModel(T::get_int_type(ctx))
    }
}

impl<'ctx, T: IsFixedInt> CanCheckLLVMType<'ctx> for FixedIntModel<T> {
    fn check_llvm_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String> {
        check_int_llvm_type(scrutinee, T::get_int_type(ctx))
    }
}

impl<'ctx, T: IsFixedInt> Model<'ctx> for FixedIntModel<T> {
    type Value = FixedInt<'ctx, T>;

    fn get_llvm_type(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        T::get_int_type(ctx).as_basic_type_enum()
    }

    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value {
        let value = review_int_llvm_value(value, T::get_int_type(ctx)).unwrap();
        FixedInt { int: self.0, value }
    }
}

impl<'ctx, T: IsFixedInt> FixedIntModel<T> {
    pub fn constant(&self, ctx: &'ctx Context, value: u64) -> FixedInt<'ctx, T> {
        FixedInt { int: self.0, value: T::get_int_type(ctx).const_int(value, false) }
    }
}

/// An inhabitant of [`FixedIntModel<'ctx>`]
#[derive(Debug, Clone, Copy)]
pub struct FixedInt<'ctx, T: IsFixedInt> {
    pub int: T,
    pub value: IntValue<'ctx>,
}

/// A marker trait to mark singleton struct that describes a particular fixed integer type.
/// See [`Bool`], [`Byte`], [`Int32`], etc.
///
/// The [`Default`] trait is to enable auto-derivations for utilities like
/// [`FieldBuilder::add_field_auto`]
pub trait IsFixedInt: Clone + Copy + Default {
    fn get_int_type(ctx: &Context) -> IntType<'_>;
    fn get_bit_width() -> u32; // This is required, instead of only relying on get_int_type
}

impl<'ctx, T: IsFixedInt> ModelValue<'ctx> for FixedInt<'ctx, T> {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx> {
        self.value.as_basic_value_enum()
    }
}

// Extra utilities for [`FixedInt<'ctx, T>`]
impl<'ctx, T: IsFixedInt> FixedInt<'ctx, T> {
    pub fn to_int(self) -> Int<'ctx> {
        Int(self.value)
    }

    pub fn signed_cast_to_fixed<R: IsFixedInt>(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        target_fixed_int: R,
        name: &str,
    ) -> FixedInt<'ctx, R> {
        FixedInt {
            int: target_fixed_int,
            value: ctx
                .builder
                .build_int_s_extend_or_bit_cast(self.value, R::get_int_type(ctx.ctx), name)
                .unwrap(),
        }
    }
}

// Some pre-defined fixed integers

#[derive(Debug, Clone, Copy, Default)]
pub struct Bool;
pub type BoolModel = FixedIntModel<Bool>;

impl IsFixedInt for Bool {
    fn get_int_type(ctx: &Context) -> IntType<'_> {
        ctx.bool_type()
    }

    fn get_bit_width() -> u32 {
        1
    }
}

#[derive(Debug, Clone, Copy, Default)]
pub struct Byte;
pub type ByteModel = FixedIntModel<Byte>;

impl IsFixedInt for Byte {
    fn get_int_type(ctx: &Context) -> IntType<'_> {
        ctx.i8_type()
    }

    fn get_bit_width() -> u32 {
        8
    }
}

#[derive(Debug, Clone, Copy, Default)]
pub struct Int32;
pub type Int32Model = FixedIntModel<Int32>;

impl IsFixedInt for Int32 {
    fn get_int_type(ctx: &Context) -> IntType<'_> {
        ctx.i32_type()
    }

    fn get_bit_width() -> u32 {
        32
    }
}

#[derive(Debug, Clone, Copy, Default)]
pub struct Int64;
pub type Int64Model = FixedIntModel<Int64>;

impl IsFixedInt for Int64 {
    fn get_int_type(ctx: &Context) -> IntType<'_> {
        ctx.i64_type()
    }

    fn get_bit_width() -> u32 {
        64
    }
}
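
// A minimal sketch contrasting the two integer models (assuming `generator` and `ctx`):
//
//     let sizet = IntModel(generator.get_size_type(ctx.ctx)); // width only known at runtime
//     let i32_model = Int32Model::default();                  // width fixed at compile time
//     let forty_two = i32_model.constant(ctx.ctx, 42);
//     let widened = forty_two.to_int().signed_cast_to_int(ctx, sizet, "widened");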
nac3core/src/codegen/model/mod.rs (new file, 11 lines)
@ -0,0 +1,11 @@
pub mod core;
pub mod gep;
pub mod int;
pub mod pointer;
pub mod slice;

pub use core::*;
pub use gep::*;
pub use int::*;
pub use pointer::*;
pub use slice::*;
nac3core/src/codegen/model/pointer.rs (new file, 184 lines)
@ -0,0 +1,184 @@
use inkwell::{
    context::Context,
    types::{AnyTypeEnum, BasicType, BasicTypeEnum},
    values::{AnyValue, AnyValueEnum, BasicValue, BasicValueEnum, PointerValue},
    AddressSpace,
};

use crate::codegen::CodeGenContext;

use super::core::*;

/// An inhabitant of [`PointerModel<E>`]
#[derive(Debug, Clone, Copy)]
pub struct Pointer<'ctx, E: Model<'ctx>> {
    pub element: E,
    pub value: PointerValue<'ctx>,
}

/// A [`Model<'ctx>`] representing an LLVM [`PointerType<'ctx>`]
/// with *full* information on the element it points to.
///
/// [`self.0`] contains the [`Model<'ctx>`] that represents the
/// LLVM type of the element the [`PointerType<'ctx>`] is pointing at
/// (like `PointerType<'ctx>::get_element_type()`, but abstracted as a [`Model<'ctx>`]).
#[derive(Debug, Clone, Copy, Default)]
pub struct PointerModel<E>(pub E);

impl<'ctx, E: Model<'ctx>> ModelValue<'ctx> for Pointer<'ctx, E> {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx> {
        self.value.as_basic_value_enum()
    }
}

impl<'ctx, E: Model<'ctx>> Pointer<'ctx, E> {
    /// Build an instruction to store a value into this pointer
    pub fn store(&self, ctx: &CodeGenContext<'ctx, '_>, val: E::Value) {
        ctx.builder.build_store(self.value, val.get_llvm_value()).unwrap();
    }

    /// Build an instruction to load a value from this pointer
    pub fn load(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> E::Value {
        let val = ctx.builder.build_load(self.value, name).unwrap();
        self.element.review(ctx.ctx, val.as_any_value_enum())
    }

    pub fn to_opaque(self) -> OpaquePointer<'ctx> {
        OpaquePointer(self.value)
    }

    pub fn cast_opaque_to(
        &self,
        ctx: &CodeGenContext<'ctx, '_>,
        element_type: BasicTypeEnum<'ctx>,
        name: &str,
    ) -> OpaquePointer<'ctx> {
        self.to_opaque().cast_opaque_to(ctx, element_type, name)
    }

    pub fn cast_to<R: Model<'ctx>>(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        element_model: R,
        name: &str,
    ) -> Pointer<'ctx, R> {
        let casted_ptr =
            self.to_opaque().cast_opaque_to(ctx, element_model.get_llvm_type(ctx.ctx), name).0;
        Pointer { element: element_model, value: casted_ptr }
    }
}

impl<'ctx, E: Model<'ctx>> CanCheckLLVMType<'ctx> for PointerModel<E> {
    fn check_llvm_type(
        &self,
        ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String> {
        // Check if scrutinee is even a PointerValue
        let AnyTypeEnum::PointerType(scrutinee) = scrutinee else {
            return Err(format!("Expecting a pointer value, but got {scrutinee:?}"));
        };

        // Check the type of what the pointer is pointing at
        // TODO: This will be deprecated by inkwell > llvm14 because `get_element_type()` will be gone
        self.0.check_llvm_type(ctx, scrutinee.get_element_type())?; // TODO: Include backtrace?

        Ok(())
    }
}

impl<'ctx, E: Model<'ctx>> Model<'ctx> for PointerModel<E> {
    type Value = Pointer<'ctx, E>;

    fn get_llvm_type(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        self.0.get_llvm_type(ctx).ptr_type(AddressSpace::default()).as_basic_type_enum()
    }

    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value {
        self.check_llvm_type(ctx, value.get_type()).unwrap();

        // TODO: Check get_element_type()? for LLVM 14 at least...
        Pointer { element: self.0, value: value.into_pointer_value() }
    }
}

// A pointer whose element model is unknown.
#[derive(Debug, Clone, Copy)]
pub struct OpaquePointer<'ctx>(pub PointerValue<'ctx>);

#[derive(Debug, Clone, Copy, Default)]
pub struct OpaquePointerModel;

impl<'ctx> ModelValue<'ctx> for OpaquePointer<'ctx> {
    fn get_llvm_value(&self) -> BasicValueEnum<'ctx> {
        self.0.as_basic_value_enum()
    }
}

impl<'ctx> CanCheckLLVMType<'ctx> for OpaquePointerModel {
    fn check_llvm_type(
        &self,
        _ctx: &'ctx Context,
        scrutinee: AnyTypeEnum<'ctx>,
    ) -> Result<(), String> {
        // OpaquePointerModel only cares that it is a pointer,
        // but not what the pointer is pointing at
        match scrutinee {
            AnyTypeEnum::PointerType(_) => Ok(()),
            _ => Err(format!("Expecting a pointer type, but got {scrutinee:?}")),
        }
    }
}

impl<'ctx> Model<'ctx> for OpaquePointerModel {
    type Value = OpaquePointer<'ctx>;

    fn get_llvm_type(&self, ctx: &'ctx Context) -> BasicTypeEnum<'ctx> {
        ctx.i8_type().ptr_type(AddressSpace::default()).as_basic_type_enum()
    }

    fn review(&self, ctx: &'ctx Context, value: AnyValueEnum<'ctx>) -> Self::Value {
        // Check if value is even of a pointer type
        self.check_llvm_type(ctx, value.get_type()).unwrap();

        OpaquePointer(value.into_pointer_value())
    }
}

impl<'ctx> OpaquePointer<'ctx> {
    pub fn load_opaque(&self, ctx: &CodeGenContext<'ctx, '_>, name: &str) -> BasicValueEnum<'ctx> {
        ctx.builder.build_load(self.0, name).unwrap()
    }

    pub fn store_opaque(&self, ctx: &CodeGenContext<'ctx, '_>, value: BasicValueEnum<'ctx>) {
        ctx.builder.build_store(self.0, value).unwrap();
    }

    #[must_use]
    pub fn cast_opaque_to(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        element_llvm_type: BasicTypeEnum<'ctx>,
        name: &str,
    ) -> OpaquePointer<'ctx> {
        OpaquePointer(
            ctx.builder
                .build_pointer_cast(
                    self.0,
                    element_llvm_type.ptr_type(AddressSpace::default()),
                    name,
                )
                .unwrap(),
        )
    }

    pub fn cast_to<E: Model<'ctx>>(
        self,
        ctx: &CodeGenContext<'ctx, '_>,
        element_model: E,
        name: &str,
    ) -> Pointer<'ctx, E> {
        let ptr = self.cast_opaque_to(ctx, element_model.get_llvm_type(ctx.ctx), name).0;
        Pointer { element: element_model, value: ptr }
    }
}
|
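As a rough plain-Rust analogy (my illustration, not part of the patch): `OpaquePointer` corresponds to an untyped `*mut u8`, and `cast_opaque_to`/`cast_to` reinterpret that address with an element type so that loads and stores become meaningful, much like the pointer casts below.

fn main() {
    let mut slot: u64 = 0;
    let opaque: *mut u8 = (&mut slot as *mut u64).cast(); // like OpaquePointer: an address only
    let typed: *mut u64 = opaque.cast();                  // like cast_to: element type attached
    unsafe { typed.write(42) };                           // store through the typed view
    assert_eq!(slot, 42);
}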
nac3core/src/codegen/model/slice.rs (new file, 73 lines)
@ -0,0 +1,73 @@
|
||||
use crate::codegen::{CodeGenContext, CodeGenerator};
|
||||
|
||||
use super::{Int, Model, Pointer};
|
||||
|
||||
pub struct ArraySlice<'ctx, E: Model<'ctx>> {
|
||||
pub num_elements: Int<'ctx>,
|
||||
pub pointer: Pointer<'ctx, E>,
|
||||
}
|
||||
|
||||
impl<'ctx, E: Model<'ctx>> ArraySlice<'ctx, E> {
|
||||
pub fn ix_unchecked(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
idx: Int<'ctx>,
|
||||
name: &str,
|
||||
) -> Pointer<'ctx, E> {
|
||||
let element_addr =
|
||||
unsafe { ctx.builder.build_in_bounds_gep(self.pointer.value, &[idx.0], name).unwrap() };
|
||||
Pointer { value: element_addr, element: self.pointer.element }
|
||||
}
|
||||
|
||||
pub fn ix<G: CodeGenerator + ?Sized>(
|
||||
&self,
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
idx: Int<'ctx>,
|
||||
name: &str,
|
||||
) -> Pointer<'ctx, E> {
|
||||
let int_type = self.num_elements.0.get_type(); // NOTE: Weird get_type(), see comment under `trait Ixed`
|
||||
|
||||
assert_eq!(int_type.get_bit_width(), idx.0.get_type().get_bit_width()); // Might as well check bit width to catch bugs
|
||||
|
||||
// TODO: SGE or UGE? Or let the implementor decide?
|
||||
|
||||
// Check `0 <= index`
|
||||
let lower_bounded = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
inkwell::IntPredicate::SLE,
|
||||
int_type.const_zero(),
|
||||
idx.0,
|
||||
"lower_bounded",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Check `index < num_elements`
|
||||
let upper_bounded = ctx
|
||||
.builder
|
||||
.build_int_compare(
|
||||
inkwell::IntPredicate::SLT,
|
||||
idx.0,
|
||||
self.num_elements.0,
|
||||
"upper_bounded",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Compute `0 <= index && index < num_elements`
|
||||
let bounded = ctx.builder.build_and(lower_bounded, upper_bounded, "bounded").unwrap();
|
||||
|
||||
// Assert `bounded`
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
bounded,
|
||||
"0:IndexError",
|
||||
"nac3core LLVM codegen attempting to access out of bounds array index {0}. Must satisfy 0 <= index < {2}",
|
||||
[ Some(idx.0), Some(self.num_elements.0), None],
|
||||
ctx.current_loc
|
||||
);
|
||||
|
||||
// ...and finally do indexing
|
||||
self.ix_unchecked(ctx, idx, name)
|
||||
}
|
||||
}
|
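For reference, a plain-Rust rendering (my sketch, not generated code) of the guard that `ix` emits before delegating to `ix_unchecked`: both bounds are checked with signed comparisons, and a failure corresponds to the `0:IndexError` assertion above.

fn checked_index(num_elements: i64, idx: i64) -> Result<i64, String> {
    let lower_bounded = 0 <= idx;           // SLE: 0 <= index
    let upper_bounded = idx < num_elements; // SLT: index < num_elements
    if lower_bounded && upper_bounded {
        Ok(idx)
    } else {
        Err(format!(
            "attempting to access out of bounds array index {idx}. Must satisfy 0 <= index < {num_elements}"
        ))
    }
}

fn main() {
    assert_eq!(checked_index(4, 2), Ok(2));
    assert!(checked_index(4, -1).is_err());
    assert!(checked_index(4, 4).is_err());
}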
@ -9,7 +9,7 @@ use crate::{
|
||||
irrt::{
|
||||
calculate_len_for_slice_range, call_ndarray_calc_broadcast,
|
||||
call_ndarray_calc_broadcast_index, call_ndarray_calc_nd_indices,
|
||||
call_ndarray_calc_size,
|
||||
call_ndarray_calc_size, error_context::call_nac3_dummy_raise,
|
||||
},
|
||||
llvm_intrinsics::{self, call_memcpy_generic},
|
||||
stmt::{gen_for_callback_incrementing, gen_for_range_callback, gen_if_else_expr_callback},
|
||||
@ -538,6 +538,8 @@ fn call_ndarray_zeros_impl<'ctx, G: CodeGenerator + ?Sized>(
|
||||
elem_ty: Type,
|
||||
shape: BasicValueEnum<'ctx>,
|
||||
) -> Result<NDArrayValue<'ctx>, String> {
|
||||
call_nac3_dummy_raise(generator, ctx);
|
||||
|
||||
let supported_types = [
|
||||
ctx.primitives.int32,
|
||||
ctx.primitives.int64,
|
||||
|
nac3core/src/codegen/numpy_new.rs (new file, 195 lines)
@ -0,0 +1,195 @@
|
||||
use inkwell::values::{BasicValue, BasicValueEnum, PointerValue};
|
||||
use nac3parser::ast::StrRef;
|
||||
|
||||
use crate::{
|
||||
symbol_resolver::ValueEnum,
|
||||
toplevel::DefinitionId,
|
||||
typecheck::typedef::{FunSignature, Type},
|
||||
};
|
||||
|
||||
use super::{
|
||||
irrt::numpy::{
|
||||
ndarray::{
|
||||
alloca_ndarray_and_init, call_nac3_ndarray_fill_generic, NDArrayInitMode, NpArray,
|
||||
},
|
||||
shape::parse_input_shape_arg,
|
||||
},
|
||||
model::*,
|
||||
CodeGenContext, CodeGenerator,
|
||||
};
|
||||
|
||||
/// LLVM-typed implementation for constructing an empty `NDArray`.
|
||||
fn call_ndarray_empty_impl<'ctx, G>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
elem_ty: Type,
|
||||
shape: BasicValueEnum<'ctx>,
|
||||
shape_ty: Type,
|
||||
name: &str,
|
||||
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
{
|
||||
let elem_type = ctx.get_llvm_type(generator, elem_ty);
|
||||
let shape = parse_input_shape_arg(generator, ctx, shape, shape_ty);
|
||||
let ndarray_ptr = alloca_ndarray_and_init(
|
||||
generator,
|
||||
ctx,
|
||||
elem_type,
|
||||
NDArrayInitMode::ShapeAndAllocaData { shape },
|
||||
name,
|
||||
)?;
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
|
||||
fn call_ndarray_full_impl<'ctx, G>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
elem_ty: Type,
|
||||
shape: BasicValueEnum<'ctx>,
|
||||
shape_ty: Type,
|
||||
fill_value: BasicValueEnum<'ctx>,
|
||||
name: &str,
|
||||
) -> Result<Pointer<'ctx, StructModel<NpArray<'ctx>>>, String>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
{
|
||||
let ndarray_ptr = call_ndarray_empty_impl(generator, ctx, elem_ty, shape, shape_ty, name)?;
|
||||
|
||||
// NOTE: fill_value's type is not checked, so be careful with the surrounding logic
|
||||
|
||||
// Allocate fill_value on the stack and give the corresponding stack pointer
|
||||
// to call_nac3_ndarray_fill_generic
|
||||
let fill_value_ptr = ctx.builder.build_alloca(fill_value.get_type(), "fill_value_ptr").unwrap();
|
||||
let fill_value_ptr = OpaquePointer(fill_value_ptr);
|
||||
fill_value_ptr.store_opaque(ctx, fill_value);
|
||||
|
||||
let fill_value_ptr = fill_value_ptr.cast_to(ctx, FixedIntModel(Byte), "");
|
||||
call_nac3_ndarray_fill_generic(ctx, ndarray_ptr, fill_value_ptr);
|
||||
|
||||
Ok(ndarray_ptr)
|
||||
}
|
||||
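The fill path above spills `fill_value` to a single stack slot and hands the IRRT routine only a byte pointer, so one routine serves every element type. A plain-Rust analogy of that strategy (my sketch; `call_nac3_ndarray_fill_generic` itself lives in IRRT):

fn fill_generic(data: &mut [u8], itemsize: usize, fill_value: &[u8]) {
    assert_eq!(fill_value.len(), itemsize);
    for element in data.chunks_exact_mut(itemsize) {
        element.copy_from_slice(fill_value); // byte-wise copy; the element type is never inspected
    }
}

fn main() {
    let fill = 1.0_f64.to_ne_bytes();          // the "stack slot" holding fill_value
    let mut data = vec![0_u8; 4 * fill.len()]; // a 4-element float64 buffer
    fill_generic(&mut data, fill.len(), &fill);
    assert!(data.chunks_exact(fill.len()).all(|e| e == fill.as_slice()));
}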
|
||||
/// Generates LLVM IR for `np.empty`.
|
||||
pub fn gen_ndarray_empty<'ctx>(
|
||||
context: &mut CodeGenContext<'ctx, '_>,
|
||||
obj: &Option<(Type, ValueEnum<'ctx>)>,
|
||||
fun: (&FunSignature, DefinitionId),
|
||||
args: &[(Option<StrRef>, ValueEnum<'ctx>)],
|
||||
generator: &mut dyn CodeGenerator,
|
||||
) -> Result<PointerValue<'ctx>, String> {
|
||||
assert!(obj.is_none());
|
||||
assert_eq!(args.len(), 1);
|
||||
|
||||
// Parse arguments
|
||||
let shape_ty = fun.0.args[0].ty;
|
||||
let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
|
||||
|
||||
// Implementation
|
||||
let ndarray_ptr = call_ndarray_empty_impl(
|
||||
generator,
|
||||
context,
|
||||
context.primitives.float,
|
||||
shape,
|
||||
shape_ty,
|
||||
"ndarray",
|
||||
)?;
|
||||
Ok(ndarray_ptr.value)
|
||||
}
|
||||
|
||||
/// Generates LLVM IR for `np.zeros`.
|
||||
pub fn gen_ndarray_zeros<'ctx>(
|
||||
context: &mut CodeGenContext<'ctx, '_>,
|
||||
obj: &Option<(Type, ValueEnum<'ctx>)>,
|
||||
fun: (&FunSignature, DefinitionId),
|
||||
args: &[(Option<StrRef>, ValueEnum<'ctx>)],
|
||||
generator: &mut dyn CodeGenerator,
|
||||
) -> Result<PointerValue<'ctx>, String> {
|
||||
assert!(obj.is_none());
|
||||
assert_eq!(args.len(), 1);
|
||||
|
||||
// Parse arguments
|
||||
let shape_ty = fun.0.args[0].ty;
|
||||
let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
|
||||
|
||||
// Implementation
|
||||
// NOTE: Currently nac3's `np.zeros` is always `float64`.
|
||||
let float64_ty = context.primitives.float;
|
||||
let float64_llvm_type = context.get_llvm_type(generator, float64_ty).into_float_type();
|
||||
|
||||
let ndarray_ptr = call_ndarray_full_impl(
|
||||
generator,
|
||||
context,
|
||||
float64_ty, // `elem_ty` is always `float64`
|
||||
shape,
|
||||
shape_ty,
|
||||
float64_llvm_type.const_zero().as_basic_value_enum(),
|
||||
"ndarray",
|
||||
)?;
|
||||
Ok(ndarray_ptr.value)
|
||||
}
|
||||
|
||||
/// Generates LLVM IR for `np.ones`.
|
||||
pub fn gen_ndarray_ones<'ctx>(
|
||||
context: &mut CodeGenContext<'ctx, '_>,
|
||||
obj: &Option<(Type, ValueEnum<'ctx>)>,
|
||||
fun: (&FunSignature, DefinitionId),
|
||||
args: &[(Option<StrRef>, ValueEnum<'ctx>)],
|
||||
generator: &mut dyn CodeGenerator,
|
||||
) -> Result<PointerValue<'ctx>, String> {
|
||||
assert!(obj.is_none());
|
||||
assert_eq!(args.len(), 1);
|
||||
|
||||
// Parse arguments
|
||||
let shape_ty = fun.0.args[0].ty;
|
||||
let shape = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
|
||||
|
||||
// Implementation
|
||||
// NOTE: Currently nac3's `np.ones` is always `float64`.
|
||||
let float64_ty = context.primitives.float;
|
||||
let float64_llvm_type = context.get_llvm_type(generator, float64_ty).into_float_type();
|
||||
|
||||
let ndarray_ptr = call_ndarray_full_impl(
|
||||
generator,
|
||||
context,
|
||||
float64_ty, // `elem_ty` is always `float64`
|
||||
shape,
|
||||
shape_ty,
|
||||
float64_llvm_type.const_float(1.0).as_basic_value_enum(),
|
||||
"ndarray",
|
||||
)?;
|
||||
Ok(ndarray_ptr.value)
|
||||
}
|
||||
|
||||
/// Generates LLVM IR for `ndarray.full`.
|
||||
pub fn gen_ndarray_full<'ctx>(
|
||||
context: &mut CodeGenContext<'ctx, '_>,
|
||||
obj: &Option<(Type, ValueEnum<'ctx>)>,
|
||||
fun: (&FunSignature, DefinitionId),
|
||||
args: &[(Option<StrRef>, ValueEnum<'ctx>)],
|
||||
generator: &mut dyn CodeGenerator,
|
||||
) -> Result<PointerValue<'ctx>, String> {
|
||||
assert!(obj.is_none());
|
||||
assert_eq!(args.len(), 2);
|
||||
|
||||
// Parse argument #1 shape
|
||||
let shape_ty = fun.0.args[0].ty;
|
||||
let shape_arg = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
|
||||
|
||||
// Parse argument #2 fill_value
|
||||
let fill_value_ty = fun.0.args[1].ty;
|
||||
let fill_value_arg =
|
||||
args[1].1.clone().to_basic_value_enum(context, generator, fill_value_ty)?;
|
||||
|
||||
// Implementation
|
||||
let ndarray_ptr = call_ndarray_full_impl(
|
||||
generator,
|
||||
context,
|
||||
fill_value_ty,
|
||||
shape_arg,
|
||||
shape_ty,
|
||||
fill_value_arg,
|
||||
"ndarray",
|
||||
)?;
|
||||
Ok(ndarray_ptr.value)
|
||||
}
|
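Taken together, the factories above share one code path: `np.zeros`, `np.ones` and `np.full` all go through `call_ndarray_full_impl`, differing only in the fill value, and that in turn builds on `call_ndarray_empty_impl`. A plain-Rust analogy of the layering (my sketch, with `Vec<f64>` standing in for the ndarray):

fn empty(len: usize) -> Vec<f64> {
    vec![0.0; len] // stands in for "allocate shape + data"
}

fn full(len: usize, fill: f64) -> Vec<f64> {
    let mut nd = empty(len);               // like call_ndarray_empty_impl
    nd.iter_mut().for_each(|x| *x = fill); // like call_nac3_ndarray_fill_generic
    nd
}

fn zeros(len: usize) -> Vec<f64> { full(len, 0.0) }
fn ones(len: usize) -> Vec<f64> { full(len, 1.0) }

fn main() {
    assert_eq!(zeros(3), vec![0.0, 0.0, 0.0]);
    assert_eq!(ones(2), vec![1.0, 1.0]);
    assert_eq!(full(2, 7.5), vec![7.5, 7.5]);
}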
@ -13,7 +13,7 @@ use crate::{
|
||||
toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, DefinitionId, TopLevelDef},
|
||||
typecheck::{
|
||||
magic_methods::Binop,
|
||||
typedef::{FunSignature, Type, TypeEnum},
|
||||
typedef::{iter_type_vars, FunSignature, Type, TypeEnum},
|
||||
},
|
||||
};
|
||||
use inkwell::{
|
||||
@ -189,10 +189,6 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
|
||||
v.data().ptr_offset(ctx, generator, &index, name)
|
||||
}
|
||||
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
todo!()
|
||||
}
|
||||
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
@ -206,90 +202,168 @@ pub fn gen_assign<'ctx, G: CodeGenerator>(
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
target: &Expr<Option<Type>>,
|
||||
value: ValueEnum<'ctx>,
|
||||
value_ty: Type,
|
||||
) -> Result<(), String> {
|
||||
/*
|
||||
To handle assignment statements `target = value`, taking special
care of the targets that `gen_store_target` cannot handle; these are:
|
||||
- Case 1. target is a Tuple
|
||||
- e.g., `(x, y, z, w) = value`
|
||||
- Case 2. *Sliced* list assignment `list.__setitem__`
|
||||
- e.g., `my_list[1:3] = [100, 101]`, BUT NOT `my_list[0] = 99` (gen_store_target knows how to handle these),
|
||||
- Case 3. Indexed ndarray assignment `ndarray.__setitem__`
|
||||
- e.g., `my_ndarray[::-1, :] = 3`, `my_ndarray[:, 3::-1] = their_ndarray[10::2]`
|
||||
- NOTE: Technically speaking, if `target` is sliced in such a way that it references a
|
||||
single element/scalar, we *could* implement gen_store_target for this special case
|
||||
(to point to the raw address of that scalar in the ndarray); but it is much,
|
||||
*much* simpler to generalize all indexed ndarray assignment without
|
||||
special handling for that edge case.
|
||||
- Otherwise, use `gen_store_target`
|
||||
*/
|
||||
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
match &target.node {
|
||||
ExprKind::Tuple { elts, .. } => {
|
||||
let BasicValueEnum::StructValue(v) =
|
||||
value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
if let ExprKind::Tuple { elts, .. } = &target.node {
|
||||
// Handle Case 1. target is a Tuple
|
||||
let BasicValueEnum::StructValue(v) =
|
||||
value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
for (i, elt) in elts.iter().enumerate() {
|
||||
let v = ctx
|
||||
.builder
|
||||
.build_extract_value(v, u32::try_from(i).unwrap(), "struct_elem")
|
||||
.unwrap();
|
||||
generator.gen_assign(ctx, elt, v.into())?;
|
||||
for (i, elt) in elts.iter().enumerate() {
|
||||
let elem_ty = elt.custom.unwrap();
|
||||
|
||||
let v = ctx
|
||||
.builder
|
||||
.build_extract_value(v, u32::try_from(i).unwrap(), "struct_elem")
|
||||
.unwrap();
|
||||
generator.gen_assign(ctx, elt, v.into(), elem_ty)?;
|
||||
}
|
||||
|
||||
return Ok(()); // Terminate
|
||||
}
|
||||
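Case 1 above unpacks the tuple value element by element and recurses into `gen_assign` for each target, falling back to `gen_store_target` for plain names. In plain Rust terms (my sketch, not the generated code), the recursion looks like:

#[derive(Debug)]
enum Target {
    Name(String),
    Tuple(Vec<Target>),
}

#[derive(Debug, PartialEq)]
enum Value {
    Int(i64),
    Tuple(Vec<Value>),
}

fn assign(bindings: &mut std::collections::HashMap<String, Value>, target: &Target, value: Value) {
    match target {
        Target::Name(name) => {
            bindings.insert(name.clone(), value); // the gen_store_target fallback
        }
        Target::Tuple(elts) => {
            let Value::Tuple(values) = value else { unreachable!() };
            for (elt, v) in elts.iter().zip(values) {
                assign(bindings, elt, v); // generator.gen_assign(ctx, elt, v, elem_ty)
            }
        }
    }
}

fn main() {
    let mut env = std::collections::HashMap::new();
    let target = Target::Tuple(vec![Target::Name("x".into()), Target::Name("y".into())]);
    assign(&mut env, &target, Value::Tuple(vec![Value::Int(1), Value::Int(2)]));
    assert_eq!(env["x"], Value::Int(1));
    assert_eq!(env["y"], Value::Int(2));
}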
|
||||
// Otherwise, check for Case 2 or 3; they can *only*
// occur if `target.node` is an `ExprKind::Subscript`, so do a special check
|
||||
if let ExprKind::Subscript { value: target_without_slice, slice, .. } = &target.node {
|
||||
// Get the type of target
|
||||
let target_ty = target.custom.unwrap();
|
||||
let target_ty_enum = &*ctx.unifier.get_ty(target_ty);
|
||||
|
||||
// Pattern match on this pair; Case 2 requires `slice.node` to have a specific shape,
// which is why the type and the slice expression are matched together.
|
||||
match (target_ty_enum, &slice.node) {
|
||||
(TypeEnum::TObj { obj_id, .. }, ExprKind::Slice { lower, upper, step })
|
||||
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// Case 2. *Sliced* list assignment
|
||||
|
||||
let ls = generator
|
||||
.gen_expr(ctx, target_without_slice)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, target_without_slice.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let ls = ListValue::from_ptr_val(ls, llvm_usize, None);
|
||||
let Some((start, end, step)) = handle_slice_indices(
|
||||
lower,
|
||||
upper,
|
||||
step,
|
||||
ctx,
|
||||
generator,
|
||||
ls.load_size(ctx, None),
|
||||
)?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
let value = value
|
||||
.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let value = ListValue::from_ptr_val(value, llvm_usize, None);
|
||||
let ty = match &*ctx.unifier.get_ty_immutable(target.custom.unwrap()) {
|
||||
TypeEnum::TObj { obj_id, params, .. } if *obj_id == PrimDef::List.id() => {
|
||||
*params.iter().next().unwrap().1
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, target.custom.unwrap()).0
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let ty = ctx.get_llvm_type(generator, ty);
|
||||
let Some(src_ind) = handle_slice_indices(
|
||||
&None,
|
||||
&None,
|
||||
&None,
|
||||
ctx,
|
||||
generator,
|
||||
value.load_size(ctx, None),
|
||||
)?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
list_slice_assignment(generator, ctx, ty, ls, (start, end, step), value, src_ind);
|
||||
|
||||
return Ok(()); // Terminate
|
||||
}
|
||||
(TypeEnum::TObj { obj_id, .. }, _)
|
||||
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// Case 3. Indexed ndarray assignment
|
||||
|
||||
let target = generator.gen_expr(ctx, target)?.unwrap().to_basic_value_enum(
|
||||
ctx,
|
||||
generator,
|
||||
target.custom.unwrap(),
|
||||
)?;
|
||||
|
||||
match &*ctx.unifier.get_ty(value_ty) {
|
||||
TypeEnum::TObj { obj_id, .. }
|
||||
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// `value` is an `ndarray[dtype, ndims]`
|
||||
todo!()
|
||||
}
|
||||
_ => {
|
||||
// TODO: Inferencer's assignment forces `target` and `value` to have the same type
|
||||
// NOTE: gen_assign() has already been extended accordingly; keep it in place
// in preparation for when this restriction is lifted.
|
||||
todo!("support scalar assignment")
|
||||
// panic!(
|
||||
// "Unsupported ndarray assignment value: {}",
|
||||
// ctx.unifier.stringify(value_ty)
|
||||
// );
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(()); // Terminate
|
||||
}
|
||||
_ => {
|
||||
// Fallthrough
|
||||
}
|
||||
}
|
||||
ExprKind::Subscript { value: ls, slice, .. }
|
||||
if matches!(&slice.node, ExprKind::Slice { .. }) =>
|
||||
{
|
||||
let ExprKind::Slice { lower, upper, step } = &slice.node else { unreachable!() };
|
||||
}
|
||||
|
||||
let ls = generator
|
||||
.gen_expr(ctx, ls)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ls.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let ls = ListValue::from_ptr_val(ls, llvm_usize, None);
|
||||
let Some((start, end, step)) =
|
||||
handle_slice_indices(lower, upper, step, ctx, generator, ls.load_size(ctx, None))?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
let value = value
|
||||
.to_basic_value_enum(ctx, generator, target.custom.unwrap())?
|
||||
.into_pointer_value();
|
||||
let value = ListValue::from_ptr_val(value, llvm_usize, None);
|
||||
let ty = match &*ctx.unifier.get_ty_immutable(target.custom.unwrap()) {
|
||||
TypeEnum::TObj { obj_id, params, .. } if *obj_id == PrimDef::List.id() => {
|
||||
*params.iter().next().unwrap().1
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, target.custom.unwrap()).0
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let ty = ctx.get_llvm_type(generator, ty);
|
||||
let Some(src_ind) = handle_slice_indices(
|
||||
&None,
|
||||
&None,
|
||||
&None,
|
||||
ctx,
|
||||
generator,
|
||||
value.load_size(ctx, None),
|
||||
)?
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
list_slice_assignment(generator, ctx, ty, ls, (start, end, step), value, src_ind);
|
||||
}
|
||||
_ => {
|
||||
let name = if let ExprKind::Name { id, .. } = &target.node {
|
||||
format!("{id}.addr")
|
||||
} else {
|
||||
String::from("target.addr")
|
||||
};
|
||||
let Some(ptr) = generator.gen_store_target(ctx, target, Some(name.as_str()))? else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if let ExprKind::Name { id, .. } = &target.node {
|
||||
let (_, static_value, counter) = ctx.var_assignment.get_mut(id).unwrap();
|
||||
*counter += 1;
|
||||
if let ValueEnum::Static(s) = &value {
|
||||
*static_value = Some(s.clone());
|
||||
}
|
||||
}
|
||||
let val = value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?;
|
||||
ctx.builder.build_store(ptr, val).unwrap();
|
||||
}
|
||||
// The assignment expression matches none of the special cases.
|
||||
// We should actually use `gen_store_target`.
|
||||
let name = if let ExprKind::Name { id, .. } = &target.node {
|
||||
format!("{id}.addr")
|
||||
} else {
|
||||
String::from("target.addr")
|
||||
};
|
||||
let Some(ptr) = generator.gen_store_target(ctx, target, Some(name.as_str()))? else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
if let ExprKind::Name { id, .. } = &target.node {
|
||||
let (_, static_value, counter) = ctx.var_assignment.get_mut(id).unwrap();
|
||||
*counter += 1;
|
||||
if let ValueEnum::Static(s) = &value {
|
||||
*static_value = Some(s.clone());
|
||||
}
|
||||
}
|
||||
let val = value.to_basic_value_enum(ctx, generator, target.custom.unwrap())?;
|
||||
ctx.builder.build_store(ptr, val).unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -315,9 +389,6 @@ pub fn gen_for<G: CodeGenerator>(
|
||||
let orelse_bb =
|
||||
if orelse.is_empty() { cont_bb } else { ctx.ctx.append_basic_block(current, "for.orelse") };
|
||||
|
||||
// Whether the iterable is a range() expression
|
||||
let is_iterable_range_expr = ctx.unifier.unioned(iter.custom.unwrap(), ctx.primitives.range);
|
||||
|
||||
// The BB containing the increment expression
|
||||
let incr_bb = ctx.ctx.append_basic_block(current, "for.incr");
|
||||
// The BB containing the loop condition check
|
||||
@ -331,108 +402,136 @@ pub fn gen_for<G: CodeGenerator>(
|
||||
} else {
|
||||
return Ok(());
|
||||
};
|
||||
if is_iterable_range_expr {
|
||||
let iter_val = RangeValue::from_ptr_val(iter_val.into_pointer_value(), Some("range"));
|
||||
// Internal variable for loop; Cannot be assigned
|
||||
let i = generator.gen_var_alloc(ctx, int32.into(), Some("for.i.addr"))?;
|
||||
// Variable declared in "target" expression of the loop; Can be reassigned *or* shadowed
|
||||
let Some(target_i) = generator.gen_store_target(ctx, target, Some("for.target.addr"))?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
let (start, stop, step) = destructure_range(ctx, iter_val);
|
||||
|
||||
ctx.builder.build_store(i, start).unwrap();
|
||||
|
||||
// Check "If step is zero, ValueError is raised."
|
||||
let rangenez =
|
||||
ctx.builder.build_int_compare(IntPredicate::NE, step, int32.const_zero(), "").unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
rangenez,
|
||||
"ValueError",
|
||||
"range() arg 3 must not be zero",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
// The implementation of the for loop logic depends on
|
||||
// the typechecker type of `iter`.
|
||||
let iter_ty = iter.custom.unwrap();
|
||||
match &*ctx.unifier.get_ty(iter_ty) {
|
||||
TypeEnum::TObj { obj_id, params, .. }
|
||||
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// `iter` is a `List[T]`, and `T` is the element type
|
||||
|
||||
// Get the `T` out of `List[T]` - it is defined to be the 1st param.
|
||||
let list_elem_ty = iter_type_vars(params).nth(0).unwrap().ty;
|
||||
|
||||
// Implementation
|
||||
let index_addr = generator.gen_var_alloc(ctx, size_t.into(), Some("for.index.addr"))?;
|
||||
ctx.builder.build_store(index_addr, size_t.const_zero()).unwrap();
|
||||
let len = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero, int32.const_int(1, false)],
|
||||
Some("len"),
|
||||
)
|
||||
.into_int_value();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(cond_bb);
|
||||
ctx.builder
|
||||
.build_conditional_branch(
|
||||
gen_in_range_check(
|
||||
ctx,
|
||||
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
stop,
|
||||
step,
|
||||
),
|
||||
body_bb,
|
||||
orelse_bb,
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_load(index_addr, "for.index")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let cmp = ctx.builder.build_int_compare(IntPredicate::SLT, index, len, "cond").unwrap();
|
||||
ctx.builder.build_conditional_branch(cmp, body_bb, orelse_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(incr_bb);
|
||||
let index =
|
||||
ctx.builder.build_load(index_addr, "").map(BasicValueEnum::into_int_value).unwrap();
|
||||
let inc = ctx.builder.build_int_add(index, size_t.const_int(1, true), "inc").unwrap();
|
||||
ctx.builder.build_store(index_addr, inc).unwrap();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
let arr_ptr = ctx
|
||||
.build_gep_and_load(iter_val.into_pointer_value(), &[zero, zero], Some("arr.addr"))
|
||||
.into_pointer_value();
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_load(index_addr, "for.index")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let val = ctx.build_gep_and_load(arr_ptr, &[index], Some("val"));
|
||||
generator.gen_assign(ctx, target, val.into(), list_elem_ty)?;
|
||||
generator.gen_block(ctx, body.iter())?;
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. }
|
||||
if *obj_id == ctx.primitives.range.obj_id(&ctx.unifier).unwrap() =>
|
||||
{
|
||||
// `iter` is a `range(start, stop, step)`, and `int32` is the element type
|
||||
|
||||
let iter_val = RangeValue::from_ptr_val(iter_val.into_pointer_value(), Some("range"));
|
||||
// Internal variable for loop; Cannot be assigned
|
||||
let i = generator.gen_var_alloc(ctx, int32.into(), Some("for.i.addr"))?;
|
||||
// Variable declared in "target" expression of the loop; Can be reassigned *or* shadowed
|
||||
let Some(target_i) =
|
||||
generator.gen_store_target(ctx, target, Some("for.target.addr"))?
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
let (start, stop, step) = destructure_range(ctx, iter_val);
|
||||
|
||||
ctx.builder.build_store(i, start).unwrap();
|
||||
|
||||
// Check "If step is zero, ValueError is raised."
|
||||
let rangenez = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::NE, step, int32.const_zero(), "")
|
||||
.unwrap();
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
rangenez,
|
||||
"ValueError",
|
||||
"range() arg 3 must not be zero",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
{
|
||||
ctx.builder.position_at_end(cond_bb);
|
||||
ctx.builder
|
||||
.build_conditional_branch(
|
||||
gen_in_range_check(
|
||||
ctx,
|
||||
ctx.builder
|
||||
.build_load(i, "")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap(),
|
||||
stop,
|
||||
step,
|
||||
),
|
||||
body_bb,
|
||||
orelse_bb,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
ctx.builder.position_at_end(incr_bb);
|
||||
let next_i = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
step,
|
||||
"inc",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.builder.build_store(i, next_i).unwrap();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
ctx.builder
|
||||
.build_store(
|
||||
target_i,
|
||||
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
generator.gen_block(ctx, body.iter())?;
|
||||
}
|
||||
_ => {
|
||||
panic!("unsupported iterator type in for loop: {}", ctx.unifier.stringify(iter_ty))
|
||||
}
|
||||
|
||||
ctx.builder.position_at_end(incr_bb);
|
||||
let next_i = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
step,
|
||||
"inc",
|
||||
)
|
||||
.unwrap();
|
||||
ctx.builder.build_store(i, next_i).unwrap();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
ctx.builder
|
||||
.build_store(
|
||||
target_i,
|
||||
ctx.builder.build_load(i, "").map(BasicValueEnum::into_int_value).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
generator.gen_block(ctx, body.iter())?;
|
||||
} else {
|
||||
let index_addr = generator.gen_var_alloc(ctx, size_t.into(), Some("for.index.addr"))?;
|
||||
ctx.builder.build_store(index_addr, size_t.const_zero()).unwrap();
|
||||
let len = ctx
|
||||
.build_gep_and_load(
|
||||
iter_val.into_pointer_value(),
|
||||
&[zero, int32.const_int(1, false)],
|
||||
Some("len"),
|
||||
)
|
||||
.into_int_value();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(cond_bb);
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_load(index_addr, "for.index")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let cmp = ctx.builder.build_int_compare(IntPredicate::SLT, index, len, "cond").unwrap();
|
||||
ctx.builder.build_conditional_branch(cmp, body_bb, orelse_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(incr_bb);
|
||||
let index =
|
||||
ctx.builder.build_load(index_addr, "").map(BasicValueEnum::into_int_value).unwrap();
|
||||
let inc = ctx.builder.build_int_add(index, size_t.const_int(1, true), "inc").unwrap();
|
||||
ctx.builder.build_store(index_addr, inc).unwrap();
|
||||
ctx.builder.build_unconditional_branch(cond_bb).unwrap();
|
||||
|
||||
ctx.builder.position_at_end(body_bb);
|
||||
let arr_ptr = ctx
|
||||
.build_gep_and_load(iter_val.into_pointer_value(), &[zero, zero], Some("arr.addr"))
|
||||
.into_pointer_value();
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_load(index_addr, "for.index")
|
||||
.map(BasicValueEnum::into_int_value)
|
||||
.unwrap();
|
||||
let val = ctx.build_gep_and_load(arr_ptr, &[index], Some("val"));
|
||||
generator.gen_assign(ctx, target, val.into())?;
|
||||
generator.gen_block(ctx, body.iter())?;
|
||||
}
|
||||
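The range path of the for-loop lowering above builds the usual three blocks (`cond_bb`, `body_bb`, `incr_bb`) plus the zero-step assertion. Written as ordinary Rust (my sketch; `in_range` is a stand-in for `gen_in_range_check`), the emitted control flow amounts to:

fn in_range(i: i32, stop: i32, step: i32) -> bool {
    // direction of the comparison depends on the sign of `step`
    if step > 0 { i < stop } else { i > stop }
}

fn emitted_range_loop(start: i32, stop: i32, step: i32, mut body: impl FnMut(i32)) {
    assert!(step != 0, "range() arg 3 must not be zero"); // the ValueError assert
    let mut i = start;                  // for.i.addr
    while in_range(i, stop, step) {     // cond_bb
        body(i);                        // body_bb: store into the loop target, run the body
        i += step;                      // incr_bb
    }
}

fn main() {
    let mut seen = Vec::new();
    emitted_range_loop(10, 0, -3, |i| seen.push(i));
    assert_eq!(seen, vec![10, 7, 4, 1]);
}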
|
||||
for (k, (_, _, counter)) in &var_assignment {
|
||||
@ -1575,14 +1674,18 @@ pub fn gen_stmt<G: CodeGenerator>(
|
||||
}
|
||||
StmtKind::AnnAssign { target, value, .. } => {
|
||||
if let Some(value) = value {
|
||||
let value_ty = value.custom.unwrap();
|
||||
let Some(value) = generator.gen_expr(ctx, value)? else { return Ok(()) };
|
||||
generator.gen_assign(ctx, target, value)?;
|
||||
generator.gen_assign(ctx, target, value, value_ty)?;
|
||||
}
|
||||
}
|
||||
StmtKind::Assign { targets, value, .. } => {
|
||||
// TODO: Is the implementation wrong? It looks very strange.
|
||||
let value_ty = value.custom.unwrap();
|
||||
let Some(value) = generator.gen_expr(ctx, value)? else { return Ok(()) };
|
||||
|
||||
for target in targets {
|
||||
generator.gen_assign(ctx, target, value.clone())?;
|
||||
generator.gen_assign(ctx, target, value.clone(), value_ty)?;
|
||||
}
|
||||
}
|
||||
StmtKind::Continue { .. } => {
|
||||
@ -1596,6 +1699,7 @@ pub fn gen_stmt<G: CodeGenerator>(
|
||||
StmtKind::For { .. } => generator.gen_for(ctx, stmt)?,
|
||||
StmtKind::With { .. } => generator.gen_with(ctx, stmt)?,
|
||||
StmtKind::AugAssign { target, op, value, .. } => {
|
||||
let value_ty = value.custom.unwrap();
|
||||
let value = gen_binop_expr(
|
||||
generator,
|
||||
ctx,
|
||||
@ -1604,7 +1708,7 @@ pub fn gen_stmt<G: CodeGenerator>(
|
||||
value,
|
||||
stmt.location,
|
||||
)?;
|
||||
generator.gen_assign(ctx, target, value.unwrap())?;
|
||||
generator.gen_assign(ctx, target, value.unwrap(), value_ty)?;
|
||||
}
|
||||
StmtKind::Try { .. } => gen_try(generator, ctx, stmt)?,
|
||||
StmtKind::Raise { exc, .. } => {
|
||||
|
@ -23,3 +23,4 @@ pub mod codegen;
|
||||
pub mod symbol_resolver;
|
||||
pub mod toplevel;
|
||||
pub mod typecheck;
|
||||
pub(crate) mod util;
|
||||
|
@ -5,7 +5,7 @@ use indexmap::IndexMap;
|
||||
use inkwell::{
|
||||
attributes::{Attribute, AttributeLoc},
|
||||
types::{BasicMetadataTypeEnum, BasicType},
|
||||
values::{BasicMetadataValueEnum, BasicValue, CallSiteValue},
|
||||
values::{AnyValue, BasicMetadataValueEnum, BasicValue, CallSiteValue},
|
||||
IntPredicate,
|
||||
};
|
||||
use itertools::Either;
|
||||
@ -14,15 +14,24 @@ use strum::IntoEnumIterator;
|
||||
use crate::{
|
||||
codegen::{
|
||||
builtin_fns,
|
||||
classes::{ArrayLikeValue, NDArrayValue, ProxyValue, RangeValue, TypedArrayLikeAccessor},
|
||||
classes::{ProxyValue, RangeValue},
|
||||
expr::destructure_range,
|
||||
irrt::*,
|
||||
numpy::*,
|
||||
irrt::{
|
||||
calculate_len_for_slice_range,
|
||||
numpy::ndarray::{call_nac3_ndarray_len, NpArray},
|
||||
},
|
||||
model::*,
|
||||
numpy::{
|
||||
gen_ndarray_array, gen_ndarray_copy, gen_ndarray_eye, gen_ndarray_fill,
|
||||
gen_ndarray_identity,
|
||||
},
|
||||
numpy_new,
|
||||
stmt::exn_constructor,
|
||||
},
|
||||
symbol_resolver::SymbolValue,
|
||||
toplevel::{helper::PrimDef, numpy::make_ndarray_ty},
|
||||
typecheck::typedef::{into_var_map, iter_type_vars, TypeVar, VarMap},
|
||||
util::SizeVariant,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@ -278,19 +287,10 @@ pub fn get_builtins(unifier: &mut Unifier, primitives: &PrimitiveStore) -> Built
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// A helper enum used by [`BuiltinBuilder`]
|
||||
#[derive(Clone, Copy)]
|
||||
enum SizeVariant {
|
||||
Bits32,
|
||||
Bits64,
|
||||
}
|
||||
|
||||
impl SizeVariant {
|
||||
fn of_int(self, primitives: &PrimitiveStore) -> Type {
|
||||
match self {
|
||||
SizeVariant::Bits32 => primitives.int32,
|
||||
SizeVariant::Bits64 => primitives.int64,
|
||||
}
|
||||
fn get_size_variant_of_int(size_variant: SizeVariant, primitives: &PrimitiveStore) -> Type {
|
||||
match size_variant {
|
||||
SizeVariant::Bits32 => primitives.int32,
|
||||
SizeVariant::Bits64 => primitives.int64,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1061,7 +1061,7 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
);
|
||||
|
||||
// The size variant of the function determines the size of the returned int.
|
||||
let int_sized = size_variant.of_int(self.primitives);
|
||||
let int_sized = get_size_variant_of_int(size_variant, self.primitives);
|
||||
|
||||
let ndarray_int_sized =
|
||||
make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));
|
||||
@ -1086,7 +1086,7 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
let arg_ty = fun.0.args[0].ty;
|
||||
let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;
|
||||
|
||||
let ret_elem_ty = size_variant.of_int(&ctx.primitives);
|
||||
let ret_elem_ty = get_size_variant_of_int(size_variant, &ctx.primitives);
|
||||
Ok(Some(builtin_fns::call_round(generator, ctx, (arg_ty, arg), ret_elem_ty)?))
|
||||
}),
|
||||
)
|
||||
@ -1127,7 +1127,7 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
make_ndarray_ty(self.unifier, self.primitives, Some(float), Some(common_ndim.ty));
|
||||
|
||||
// The size variant of the function determines the type of int returned
|
||||
let int_sized = size_variant.of_int(self.primitives);
|
||||
let int_sized = get_size_variant_of_int(size_variant, self.primitives);
|
||||
let ndarray_int_sized =
|
||||
make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));
|
||||
|
||||
@ -1150,7 +1150,7 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
let arg_ty = fun.0.args[0].ty;
|
||||
let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;
|
||||
|
||||
let ret_elem_ty = size_variant.of_int(&ctx.primitives);
|
||||
let ret_elem_ty = get_size_variant_of_int(size_variant, &ctx.primitives);
|
||||
let func = match kind {
|
||||
Kind::Ceil => builtin_fns::call_ceil,
|
||||
Kind::Floor => builtin_fns::call_floor,
|
||||
@ -1202,9 +1202,9 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
&[(self.ndarray_factory_fn_shape_arg_tvar.ty, "shape")],
|
||||
Box::new(move |ctx, obj, fun, args, generator| {
|
||||
let func = match prim {
|
||||
PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => gen_ndarray_empty,
|
||||
PrimDef::FunNpZeros => gen_ndarray_zeros,
|
||||
PrimDef::FunNpOnes => gen_ndarray_ones,
|
||||
PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => numpy_new::gen_ndarray_empty,
|
||||
PrimDef::FunNpZeros => numpy_new::gen_ndarray_zeros,
|
||||
PrimDef::FunNpOnes => numpy_new::gen_ndarray_ones,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
func(ctx, &obj, fun, &args, generator).map(|val| Some(val.as_basic_value_enum()))
|
||||
@ -1272,7 +1272,7 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
// type variable
|
||||
&[(self.list_int32, "shape"), (tv.ty, "fill_value")],
|
||||
Box::new(move |ctx, obj, fun, args, generator| {
|
||||
gen_ndarray_full(ctx, &obj, fun, &args, generator)
|
||||
numpy_new::gen_ndarray_full(ctx, &obj, fun, &args, generator)
|
||||
.map(|val| Some(val.as_basic_value_enum()))
|
||||
}),
|
||||
)
|
||||
@ -1464,51 +1464,19 @@ impl<'a> BuiltinBuilder<'a> {
|
||||
}
|
||||
}
|
||||
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
// Parse `arg`
|
||||
let sizet = IntModel(generator.get_size_type(ctx.ctx));
|
||||
|
||||
let arg = NDArrayValue::from_ptr_val(
|
||||
arg.into_pointer_value(),
|
||||
llvm_usize,
|
||||
None,
|
||||
);
|
||||
let ndarray_ptr_model =
|
||||
PointerModel(StructModel(NpArray { sizet }));
|
||||
let ndarray_ptr =
|
||||
ndarray_ptr_model.review(ctx.ctx, arg.as_any_value_enum());
|
||||
|
||||
let ndims = arg.dim_sizes().size(ctx, generator);
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx.builder
|
||||
.build_int_compare(
|
||||
IntPredicate::NE,
|
||||
ndims,
|
||||
llvm_usize.const_zero(),
|
||||
"",
|
||||
)
|
||||
.unwrap(),
|
||||
"0:TypeError",
|
||||
&format!("{name}() of unsized object", name = prim.name()),
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
let len = unsafe {
|
||||
arg.dim_sizes().get_typed_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_zero(),
|
||||
None,
|
||||
)
|
||||
};
|
||||
|
||||
if len.get_type().get_bit_width() == 32 {
|
||||
Some(len.into())
|
||||
} else {
|
||||
Some(
|
||||
ctx.builder
|
||||
.build_int_truncate(len, llvm_i32, "len")
|
||||
.map(Into::into)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
// Calculate len
|
||||
// NOTE: the unsized-object case is rejected by an assert inside IRRT
|
||||
let len = call_nac3_ndarray_len(generator, ctx, ndarray_ptr);
|
||||
let len = len.signed_cast_to_fixed(ctx, Int32, "len_i32");
|
||||
Some(len.value.as_basic_value_enum())
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
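The reworked `len()` arm above reads the length through IRRT (`call_nac3_ndarray_len`) and then narrows the machine-sized result to `i32`. A plain-Rust analogy (my sketch; the generated code performs a signed cast via `signed_cast_to_fixed` rather than the checked conversion used here, and the unsized-object check happens inside IRRT):

fn ndarray_len(shape: &[usize]) -> Result<i32, String> {
    // len() of an ndarray is its first dimension; a 0-d array has no length
    let first = shape.first().ok_or_else(|| "len() of unsized object".to_string())?;
    i32::try_from(*first).map_err(|e| e.to_string())
}

fn main() {
    assert_eq!(ndarray_len(&[4, 2]), Ok(4));
    assert!(ndarray_len(&[]).is_err());
}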
|
nac3core/src/util.rs (new file, 6 lines)
@ -0,0 +1,6 @@
|
||||
/// A helper enum used by [`BuiltinBuilder`]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum SizeVariant {
|
||||
Bits32,
|
||||
Bits64,
|
||||
}
|
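A small usage sketch of the relocated helper (my illustration; `get_size_variant_of_int` pairs each variant with the matching primitive int type, stood in here by plain bit widths, and the enum is re-declared locally so the sketch is self-contained):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SizeVariant {
    Bits32,
    Bits64,
}

fn bit_width(size_variant: SizeVariant) -> u32 {
    match size_variant {
        SizeVariant::Bits32 => 32, // -> primitives.int32
        SizeVariant::Bits64 => 64, // -> primitives.int64
    }
}

fn main() {
    assert_eq!(bit_width(SizeVariant::Bits32), 32);
    assert_eq!(bit_width(SizeVariant::Bits64), 64);
}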