Compare commits


9 Commits

Author SHA1 Message Date
lyken e719d9396d asd 2024-07-11 00:34:06 +08:00
lyken 27f2e8b391 core: irrt general numpy broadcasting 2024-07-10 20:10:14 +08:00
lyken 5f4c406b37 core: irrt general numpy slicing 2024-07-10 20:10:14 +08:00
lyken 31ab9675ca core: more irrt 2024-07-10 20:10:14 +08:00
lyken 5fd5d65377 core: build.rs rewrite regex to capture `= type` 2024-07-10 20:10:14 +08:00
lyken 01042aecfb core: move irrt c++ sources to /nac3core/irrt 2024-07-10 20:10:14 +08:00
lyken 8754f252f6 core: IRRT -Werror=return-type 2024-07-10 20:10:14 +08:00
lyken 17207a4ebe core: add irrt_test 2024-07-10 20:10:14 +08:00
lyken e3a4675fc6 core: comment out numpy 2024-07-10 20:10:05 +08:00
26 changed files with 6084 additions and 3663 deletions

View File

@@ -13,6 +13,7 @@
 ''
   mkdir -p $out/bin
   ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
+  ln -s ${pkgs.llvmPackages_14.clang}/bin/clang $out/bin/clang-irrt-test
   ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
 '';
 nac3artiq = pkgs.python3Packages.toPythonModule (
@@ -23,6 +24,7 @@
   cargoLock = {
     lockFile = ./Cargo.lock;
   };
+  cargoTestFlags = [ "--features" "test" ];
   passthru.cargoLock = cargoLock;
   nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
   buildInputs = [ pkgs.python3 llvm-nac3 ];
@@ -161,7 +163,10 @@
   clippy
   pre-commit
   rustfmt
+  rust-analyzer
 ];
+# https://nixos.wiki/wiki/Rust#Shell.nix_example
+RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
 };
 devShells.x86_64-linux.msys2 = pkgs.mkShell {
   name = "nac3-dev-shell-msys2";

View File

@@ -1,3 +1,6 @@
+[features]
+test = []
+
 [package]
 name = "nac3core"
 version = "0.1.0"

View File

@@ -7,8 +7,8 @@ use std::{
     process::{Command, Stdio},
 };
 
-fn main() {
-    const FILE: &str = "src/codegen/irrt/irrt.cpp";
+fn compile_irrt(irrt_dir: &Path, out_dir: &Path) {
+    let irrt_cpp_path = irrt_dir.join("irrt.cpp");
 
     /*
      * HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
@@ -16,7 +16,7 @@ fn main() {
      */
     let flags: &[&str] = &[
         "--target=wasm32",
-        FILE,
+        irrt_cpp_path.to_str().unwrap(),
         "-x",
         "c++",
         "-fno-discard-value-names",
@@ -31,13 +31,14 @@ fn main() {
         "-S",
         "-Wall",
         "-Wextra",
+        "-Werror=return-type",
+        "-I",
+        irrt_dir.to_str().unwrap(),
         "-o",
         "-",
     ];
 
-    println!("cargo:rerun-if-changed={FILE}");
-    let out_dir = env::var("OUT_DIR").unwrap();
-    let out_path = Path::new(&out_dir);
+    println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
 
     let output = Command::new("clang-irrt")
         .args(flags)
@@ -52,7 +53,11 @@ fn main() {
     let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n");
     let mut filtered_output = String::with_capacity(output.len());
 
-    let regex_filter = Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap();
+    // (?ms:^define.*?\}$) to capture `define` blocks
+    // (?m:^declare.*?$) to capture `declare` blocks
+    // (?m:^%.+?=\s*type\s*\{.+?\}$) to capture `type` declarations
+    let regex_filter =
+        Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)|(?m:^%.+?=\s*type\s*\{.+?\}$)").unwrap();
     for f in regex_filter.captures_iter(&output) {
         assert_eq!(f.len(), 1);
         filtered_output.push_str(&f[0]);
@@ -65,18 +70,65 @@ fn main() {
     println!("cargo:rerun-if-env-changed=DEBUG_DUMP_IRRT");
     if env::var("DEBUG_DUMP_IRRT").is_ok() {
-        let mut file = File::create(out_path.join("irrt.ll")).unwrap();
+        let mut file = File::create(out_dir.join("irrt.ll")).unwrap();
         file.write_all(output.as_bytes()).unwrap();
-        let mut file = File::create(out_path.join("irrt-filtered.ll")).unwrap();
+        let mut file = File::create(out_dir.join("irrt-filtered.ll")).unwrap();
         file.write_all(filtered_output.as_bytes()).unwrap();
     }
 
     let mut llvm_as = Command::new("llvm-as-irrt")
         .stdin(Stdio::piped())
         .arg("-o")
-        .arg(out_path.join("irrt.bc"))
+        .arg(out_dir.join("irrt.bc"))
         .spawn()
         .unwrap();
     llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
     assert!(llvm_as.wait().unwrap().success());
 }
+
+fn compile_irrt_test(irrt_dir: &Path, out_dir: &Path) {
+    let irrt_test_cpp_path = irrt_dir.join("irrt_test.cpp");
+    let exe_path = out_dir.join("irrt_test.out");
+
+    let flags: &[&str] = &[
+        irrt_test_cpp_path.to_str().unwrap(),
+        "-x",
+        "c++",
+        "-I",
+        irrt_dir.to_str().unwrap(),
+        "-g",
+        "-fno-discard-value-names",
+        "-O0",
+        "-Wall",
+        "-Wextra",
+        "-Werror=return-type",
+        "-lm", // for `tgamma()`, `lgamma()`
+        "-o",
+        exe_path.to_str().unwrap(),
+    ];
+
+    Command::new("clang-irrt-test")
+        .args(flags)
+        .output()
+        .map(|o| {
+            assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
+            o
+        })
+        .unwrap();
+    println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
+}
+
+fn main() {
+    let out_dir = env::var("OUT_DIR").unwrap();
+    let out_dir = Path::new(&out_dir);
+
+    let irrt_dir = Path::new("./irrt");
+
+    compile_irrt(irrt_dir, out_dir);
+
+    // https://github.com/rust-lang/cargo/issues/2549
+    // `cargo test -F test` to also build `irrt_test.cpp`
+    if cfg!(feature = "test") {
+        compile_irrt_test(irrt_dir, out_dir);
+    }
+}
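For reference, the three alternatives of the regex filter above keep IR shaped like the following (illustrative lines only, not actual output of this build): a named type declaration such as `%struct.NDArray = type { i8*, i32, i32 }` (the newly added `= type` alternative), a `declare double @tgamma(double)` line, and a full `define ... { ... }` block including its body. Everything else in clang's textual IR, such as `target datalayout` and `target triple` lines, is dropped, which is what keeps the emitted bitcode target-generic.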

nac3core/irrt/irrt.cpp Normal file
View File

@@ -0,0 +1,5 @@
#include "irrt_everything.hpp"
/*
This file will be read by `clang-irrt` to conveniently produce LLVM IR for `nac3core/codegen`.
*/

nac3core/irrt/irrt.hpp Normal file
View File

@@ -0,0 +1,437 @@
#ifndef IRRT_DONT_TYPEDEF_INTS
typedef _BitInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t;
typedef _BitInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t;
typedef _BitInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t;
#endif
// NDArray indices are always `uint32_t`.
typedef uint32_t NDIndex;
// The type of an index or a value describing the length of a range/slice is
// always `int32_t`.
typedef int32_t SliceIndex;
template <typename T>
static T max(T a, T b) {
return a > b ? a : b;
}
template <typename T>
static T min(T a, T b) {
return a > b ? b : a;
}
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
static T __nac3_int_exp_impl(T base, T exp) {
T res = 1;
/* repeated squaring method */
do {
if (exp & 1) {
res *= base; /* for n odd */
}
exp >>= 1;
base *= base;
} while (exp);
return res;
}
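// Worked example of the repeated-squaring loop above (a sketch, not part of the
// original source): base = 3, exp = 5 (binary 101):
//   iteration 1: exp=5 is odd  -> res = 3,   then base = 9,    exp = 2
//   iteration 2: exp=2 is even ->              then base = 81,   exp = 1
//   iteration 3: exp=1 is odd  -> res = 243, then base = 6561, exp = 0 (loop ends)
// so __nac3_int_exp_impl(3, 5) == 243 == 3^5, using O(log exp) multiplications.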
template <typename SizeT>
static SizeT __nac3_ndarray_calc_size_impl(
const SizeT *list_data,
SizeT list_len,
SizeT begin_idx,
SizeT end_idx
) {
__builtin_assume(end_idx <= list_len);
SizeT num_elems = 1;
for (SizeT i = begin_idx; i < end_idx; ++i) {
SizeT val = list_data[i];
__builtin_assume(val > 0);
num_elems *= val;
}
return num_elems;
}
template <typename SizeT>
static void __nac3_ndarray_calc_nd_indices_impl(
SizeT index,
const SizeT *dims,
SizeT num_dims,
NDIndex *idxs
) {
SizeT stride = 1;
for (SizeT dim = 0; dim < num_dims; dim++) {
SizeT i = num_dims - dim - 1;
__builtin_assume(dims[i] > 0);
idxs[i] = (index / stride) % dims[i];
stride *= dims[i];
}
}
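// Worked example (a sketch, not part of the original source):
// for dims = {2, 3, 4} and flat index = 17, walking from the innermost dimension:
//   i=2: stride=1  -> idxs[2] = (17 / 1)  % 4 = 1
//   i=1: stride=4  -> idxs[1] = (17 / 4)  % 3 = 1
//   i=0: stride=12 -> idxs[0] = (17 / 12) % 2 = 1
// i.e., flat index 17 of a C-contiguous (2, 3, 4) array lies at indices (1, 1, 1).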
template <typename SizeT>
static SizeT __nac3_ndarray_flatten_index_impl(
const SizeT *dims,
SizeT num_dims,
const NDIndex *indices,
SizeT num_indices
) {
SizeT idx = 0;
SizeT stride = 1;
for (SizeT i = 0; i < num_dims; ++i) {
SizeT ri = num_dims - i - 1;
if (ri < num_indices) {
idx += stride * indices[ri];
}
__builtin_assume(dims[i] > 0);
stride *= dims[ri];
}
return idx;
}
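// Worked example (a sketch): the inverse of the function above, with
// dims = {2, 3, 4} and indices = {1, 1, 1}:
//   idx = 1*1 + 1*4 + 1*12 = 17
// Dimensions at positions >= num_indices contribute nothing, i.e., missing
// trailing indices are treated as 0.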
template <typename SizeT>
static void __nac3_ndarray_calc_broadcast_impl(
const SizeT *lhs_dims,
SizeT lhs_ndims,
const SizeT *rhs_dims,
SizeT rhs_ndims,
SizeT *out_dims
) {
SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;
for (SizeT i = 0; i < max_ndims; ++i) {
const SizeT *lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
const SizeT *rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;
SizeT *out_dim = &out_dims[max_ndims - i - 1];
if (lhs_dim_sz == nullptr) {
*out_dim = *rhs_dim_sz;
} else if (rhs_dim_sz == nullptr) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == 1) {
*out_dim = *rhs_dim_sz;
} else if (*rhs_dim_sz == 1) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == *rhs_dim_sz) {
*out_dim = *lhs_dim_sz;
} else {
__builtin_unreachable();
}
}
}
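// Worked example (a sketch): broadcasting lhs_dims = {2, 1, 4} against
// rhs_dims = {3, 1}, aligned from the right:
//   i=0: lhs 4 vs rhs 1      -> out_dims[2] = 4
//   i=1: lhs 1 vs rhs 3      -> out_dims[1] = 3
//   i=2: lhs 2 vs rhs absent -> out_dims[0] = 2
// giving the broadcast shape (2, 3, 4). Incompatible dimensions reach
// __builtin_unreachable(), so shapes must be validated before calling this.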
template <typename SizeT>
static void __nac3_ndarray_calc_broadcast_idx_impl(
const SizeT *src_dims,
SizeT src_ndims,
const NDIndex *in_idx,
NDIndex *out_idx
) {
for (SizeT i = 0; i < src_ndims; ++i) {
SizeT src_i = src_ndims - i - 1;
out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
}
}
template<typename SizeT>
static void __nac3_ndarray_strides_from_shape_impl(
SizeT ndims,
SizeT *shape,
SizeT *dst_strides
) {
SizeT stride_product = 1;
for (SizeT i = 0; i < ndims; i++) {
int dim_i = ndims - i - 1;
dst_strides[dim_i] = stride_product;
stride_product *= shape[dim_i];
}
}
extern "C" {
#define DEF_nac3_int_exp_(T) \
T __nac3_int_exp_##T(T base, T exp) {\
return __nac3_int_exp_impl(base, exp);\
}
DEF_nac3_int_exp_(int32_t)
DEF_nac3_int_exp_(int64_t)
DEF_nac3_int_exp_(uint32_t)
DEF_nac3_int_exp_(uint64_t)
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
if (i < 0) {
i = len + i;
}
if (i < 0) {
return 0;
} else if (i > len) {
return len;
}
return i;
}
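// Examples (a sketch), with len = 5:
//   __nac3_slice_index_bound(-1, 5) == 4   (negative indices count from the end)
//   __nac3_slice_index_bound(-9, 5) == 0   (clamped at the front)
//   __nac3_slice_index_bound(7, 5)  == 5   (clamped at the back)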
SliceIndex __nac3_range_slice_len(
const SliceIndex start,
const SliceIndex end,
const SliceIndex step
) {
SliceIndex diff = end - start;
if (diff > 0 && step > 0) {
return ((diff - 1) / step) + 1;
} else if (diff < 0 && step < 0) {
return ((diff + 1) / step) + 1;
} else {
return 0;
}
}
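// Examples (a sketch), matching Python's len(range(start, end, step)):
//   __nac3_range_slice_len(0, 10, 3)  == 4   // 0, 3, 6, 9
//   __nac3_range_slice_len(10, 0, -3) == 4   // 10, 7, 4, 1
//   __nac3_range_slice_len(0, 10, -1) == 0   // wrong direction, empty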
// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - None of the indices may be out-of-bounds or negative,
// - The end index is *inclusive*,
// - The lengths of the src and dest slices should already have
//   been checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
SliceIndex dest_start,
SliceIndex dest_end,
SliceIndex dest_step,
uint8_t *dest_arr,
SliceIndex dest_arr_len,
SliceIndex src_start,
SliceIndex src_end,
SliceIndex src_step,
uint8_t *src_arr,
SliceIndex src_arr_len,
const SliceIndex size
) {
/* if dest_arr_len == 0, do nothing since we do not support extending list */
if (dest_arr_len == 0) return dest_arr_len;
/* if both steps are 1, memmove directly, handle the dropping of the list, and shrink size */
if (src_step == dest_step && dest_step == 1) {
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
if (src_len > 0) {
__builtin_memmove(
dest_arr + dest_start * size,
src_arr + src_start * size,
src_len * size
);
}
if (dest_len > 0) {
/* dropping */
__builtin_memmove(
dest_arr + (dest_start + src_len) * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
}
/* shrink size */
return dest_arr_len - (dest_len - src_len);
}
/* if the two ranges overlap, we need alloca */
uint8_t need_alloca =
(dest_arr == src_arr)
&& !(
max(dest_start, dest_end) < min(src_start, src_end)
|| max(src_start, src_end) < min(dest_start, dest_end)
);
if (need_alloca) {
uint8_t *tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
src_arr = tmp;
}
SliceIndex src_ind = src_start;
SliceIndex dest_ind = dest_start;
for (;
(src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
src_ind += src_step, dest_ind += dest_step
) {
/* for constant optimization */
if (size == 1) {
__builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
} else if (size == 4) {
__builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
} else if (size == 8) {
__builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
} else {
/* memcpy for var size, cannot overlap after previous alloca */
__builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
}
}
/* only when dest_step == 1 can we shrink the dest list. */
/* size should be ensured prior to calling this function */
if (dest_step == 1 && dest_end >= dest_start) {
__builtin_memmove(
dest_arr + dest_ind * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
return dest_arr_len - (dest_end - dest_ind) - 1;
}
return dest_arr_len;
}
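// Worked example of the contiguous fast path above (a sketch):
// dest_arr = [a, b, c, d, e, f] (dest_arr_len = 6), assigning a 2-element src
// slice (src_start = 0, src_end = 1) into dest[1:4] (dest_start = 1,
// dest_end = 3), both steps 1:
//   src_len = 2, dest_len = 3
//   first memmove copies src into place       -> [a, s0, s1, d, e, f]
//   second memmove closes the gap ("dropping") -> [a, s0, s1, e, f, f]
//   returns 6 - (3 - 2) = 5, the new logical length: [a, s0, s1, e, f]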
int32_t __nac3_isinf(double x) {
return __builtin_isinf(x);
}
int32_t __nac3_isnan(double x) {
return __builtin_isnan(x);
}
double tgamma(double arg);
double __nac3_gamma(double z) {
// Handling for special values:
//     | x                 | Python gamma(x) | C tgamma(x) |
// --- | ----------------- | --------------- | ----------- |
// (1) | nan               | nan             | nan         |
// (2) | -inf              | -inf            | inf         |
// (3) | inf               | inf             | inf         |
// (4) | 0.0               | inf             | inf         |
// (5) | {-1.0, -2.0, ...} | inf             | nan         |
// (1)-(3)
if (__builtin_isinf(z) || __builtin_isnan(z)) {
return z;
}
double v = tgamma(z);
// (4)-(5)
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}
double lgamma(double arg);
double __nac3_gammaln(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: gammaln(-inf) -> -inf
// - libm : lgamma(-inf) -> inf
if (__builtin_isinf(x)) {
return x;
}
return lgamma(x);
}
double j0(double x);
double __nac3_j0(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: j0(inf) -> nan
// - libm : j0(inf) -> 0.0
if (__builtin_isinf(x)) {
return __builtin_nan("");
}
return j0(x);
}
uint32_t __nac3_ndarray_calc_size(
const uint32_t *list_data,
uint32_t list_len,
uint32_t begin_idx,
uint32_t end_idx
) {
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}
uint64_t __nac3_ndarray_calc_size64(
const uint64_t *list_data,
uint64_t list_len,
uint64_t begin_idx,
uint64_t end_idx
) {
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}
void __nac3_ndarray_calc_nd_indices(
uint32_t index,
const uint32_t* dims,
uint32_t num_dims,
NDIndex* idxs
) {
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}
void __nac3_ndarray_calc_nd_indices64(
uint64_t index,
const uint64_t* dims,
uint64_t num_dims,
NDIndex* idxs
) {
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}
uint32_t __nac3_ndarray_flatten_index(
const uint32_t* dims,
uint32_t num_dims,
const NDIndex* indices,
uint32_t num_indices
) {
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}
uint64_t __nac3_ndarray_flatten_index64(
const uint64_t* dims,
uint64_t num_dims,
const NDIndex* indices,
uint64_t num_indices
) {
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}
void __nac3_ndarray_calc_broadcast(
const uint32_t *lhs_dims,
uint32_t lhs_ndims,
const uint32_t *rhs_dims,
uint32_t rhs_ndims,
uint32_t *out_dims
) {
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}
void __nac3_ndarray_calc_broadcast64(
const uint64_t *lhs_dims,
uint64_t lhs_ndims,
const uint64_t *rhs_dims,
uint64_t rhs_ndims,
uint64_t *out_dims
) {
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}
void __nac3_ndarray_calc_broadcast_idx(
const uint32_t *src_dims,
uint32_t src_ndims,
const NDIndex *in_idx,
NDIndex *out_idx
) {
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
void __nac3_ndarray_calc_broadcast_idx64(
const uint64_t *src_dims,
uint64_t src_ndims,
const NDIndex *in_idx,
NDIndex *out_idx
) {
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
void __nac3_ndarray_strides_from_shape(uint32_t ndims, uint32_t* shape, uint32_t* dst_strides) {
__nac3_ndarray_strides_from_shape_impl(ndims, shape, dst_strides);
}
void __nac3_ndarray_strides_from_shape64(uint64_t ndims, uint64_t* shape, uint64_t* dst_strides) {
__nac3_ndarray_strides_from_shape_impl(ndims, shape, dst_strides);
}
}

View File

@@ -0,0 +1,216 @@
#pragma once
#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"
/*
This header contains IRRT implementations
that do not deserve to be categorized (e.g., into numpy, etc.).
Check the other *.hpp files before adding anything here!!
*/
// The type of an index or a value describing the length of a range/slice is
// always `int32_t`.
namespace {
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
T __nac3_int_exp_impl(T base, T exp) {
T res = 1;
/* repeated squaring method */
do {
if (exp & 1) {
res *= base; /* for n odd */
}
exp >>= 1;
base *= base;
} while (exp);
return res;
}
}
extern "C" {
#define DEF_nac3_int_exp_(T) \
T __nac3_int_exp_##T(T base, T exp) {\
return __nac3_int_exp_impl(base, exp);\
}
DEF_nac3_int_exp_(int32_t)
DEF_nac3_int_exp_(int64_t)
DEF_nac3_int_exp_(uint32_t)
DEF_nac3_int_exp_(uint64_t)
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
if (i < 0) {
i = len + i;
}
if (i < 0) {
return 0;
} else if (i > len) {
return len;
}
return i;
}
SliceIndex __nac3_range_slice_len(
const SliceIndex start,
const SliceIndex end,
const SliceIndex step
) {
SliceIndex diff = end - start;
if (diff > 0 && step > 0) {
return ((diff - 1) / step) + 1;
} else if (diff < 0 && step < 0) {
return ((diff + 1) / step) + 1;
} else {
return 0;
}
}
// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - None of the indices may be out-of-bounds or negative,
// - The end index is *inclusive*,
// - The lengths of the src and dest slices should already have
//   been checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
SliceIndex dest_start,
SliceIndex dest_end,
SliceIndex dest_step,
uint8_t *dest_arr,
SliceIndex dest_arr_len,
SliceIndex src_start,
SliceIndex src_end,
SliceIndex src_step,
uint8_t *src_arr,
SliceIndex src_arr_len,
const SliceIndex size
) {
/* if dest_arr_len == 0, do nothing since we do not support extending list */
if (dest_arr_len == 0) return dest_arr_len;
/* if both steps are 1, memmove directly, handle the dropping of the list, and shrink size */
if (src_step == dest_step && dest_step == 1) {
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
if (src_len > 0) {
__builtin_memmove(
dest_arr + dest_start * size,
src_arr + src_start * size,
src_len * size
);
}
if (dest_len > 0) {
/* dropping */
__builtin_memmove(
dest_arr + (dest_start + src_len) * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
}
/* shrink size */
return dest_arr_len - (dest_len - src_len);
}
/* if the two ranges overlap, we need alloca */
uint8_t need_alloca =
(dest_arr == src_arr)
&& !(
max(dest_start, dest_end) < min(src_start, src_end)
|| max(src_start, src_end) < min(dest_start, dest_end)
);
if (need_alloca) {
uint8_t *tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
src_arr = tmp;
}
SliceIndex src_ind = src_start;
SliceIndex dest_ind = dest_start;
for (;
(src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
src_ind += src_step, dest_ind += dest_step
) {
/* for constant optimization */
if (size == 1) {
__builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
} else if (size == 4) {
__builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
} else if (size == 8) {
__builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
} else {
/* memcpy for var size, cannot overlap after previous alloca */
__builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
}
}
/* only when dest_step == 1 can we shrink the dest list. */
/* size should be ensured prior to calling this function */
if (dest_step == 1 && dest_end >= dest_start) {
__builtin_memmove(
dest_arr + dest_ind * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
return dest_arr_len - (dest_end - dest_ind) - 1;
}
return dest_arr_len;
}
int32_t __nac3_isinf(double x) {
return __builtin_isinf(x);
}
int32_t __nac3_isnan(double x) {
return __builtin_isnan(x);
}
double tgamma(double arg);
double __nac3_gamma(double z) {
// Handling for special values:
//     | x                 | Python gamma(x) | C tgamma(x) |
// --- | ----------------- | --------------- | ----------- |
// (1) | nan               | nan             | nan         |
// (2) | -inf              | -inf            | inf         |
// (3) | inf               | inf             | inf         |
// (4) | 0.0               | inf             | inf         |
// (5) | {-1.0, -2.0, ...} | inf             | nan         |
// (1)-(3)
if (__builtin_isinf(z) || __builtin_isnan(z)) {
return z;
}
double v = tgamma(z);
// (4)-(5)
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}
double lgamma(double arg);
double __nac3_gammaln(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: gammaln(-inf) -> -inf
// - libm : lgamma(-inf) -> inf
if (__builtin_isinf(x)) {
return x;
}
return lgamma(x);
}
double j0(double x);
double __nac3_j0(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: j0(inf) -> nan
// - libm : j0(inf) -> 0.0
if (__builtin_isinf(x)) {
return __builtin_nan("");
}
return j0(x);
}
}

View File

@@ -0,0 +1,14 @@
#pragma once
#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"
#include "irrt_basic.hpp"
#include "irrt_slice.hpp"
#include "irrt_numpy_ndarray.hpp"
/*
All IRRT implementations.
We don't have any pre-compiled objects, so we are writing all implementations in headers and
concatenate them with `#include` into one massive source file that contains all the IRRT stuff.
*/

View File

@@ -0,0 +1,466 @@
#pragma once
#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"
#include "irrt_slice.hpp"
/*
NDArray-related implementations.
*/
// NDArray indices are always `uint32_t`.
using NDIndex = uint32_t;
namespace {
namespace ndarray_util {
template <typename SizeT>
static void set_indices_by_nth(SizeT ndims, const SizeT* shape, SizeT* indices, SizeT nth) {
for (int32_t i = 0; i < ndims; i++) {
int32_t dim_i = ndims - i - 1;
int32_t dim = shape[dim_i];
indices[dim_i] = nth % dim;
nth /= dim;
}
}
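// Worked example (a sketch, not part of the original source): shape = {2, 3}, nth = 5:
//   dim_i=1: indices[1] = 5 % 3 = 2, nth becomes 1
//   dim_i=0: indices[0] = 1 % 2 = 1, nth becomes 0
// i.e., the 5th element (0-based) of a (2, 3) array sits at indices (1, 2).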
// Compute the strides of an ndarray given an ndarray `shape`
// and assuming that the ndarray is *fully C-contiguous*.
//
// You might want to read up on https://ajcr.net/stride-guide-part-1/.
template <typename SizeT>
static void set_strides_by_shape(SizeT itemsize, SizeT ndims, SizeT* dst_strides, const SizeT* shape) {
SizeT stride_product = 1;
for (SizeT i = 0; i < ndims; i++) {
int dim_i = ndims - i - 1;
dst_strides[dim_i] = stride_product * itemsize;
stride_product *= shape[dim_i];
}
}
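// Worked example (a sketch): shape = {3, 4}, itemsize = 8 (e.g., a float64 array):
//   dim_i=1: dst_strides[1] = 1 * 8 = 8,  stride_product becomes 4
//   dim_i=0: dst_strides[0] = 4 * 8 = 32, stride_product becomes 12
// i.e., a C-contiguous (3, 4) float64 ndarray has byte strides (32, 8).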
// Compute the size/# of elements of an ndarray given its shape
template <typename SizeT>
static SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
SizeT size = 1;
for (SizeT dim_i = 0; dim_i < ndims; dim_i++) size *= shape[dim_i];
return size;
}
template <typename SizeT>
static bool can_broadcast_shape_to(
const SizeT target_ndims,
const SizeT *target_shape,
const SizeT src_ndims,
const SizeT *src_shape
) {
/*
// See https://numpy.org/doc/stable/user/basics.broadcasting.html
This function handles this example:
```
Image (3d array): 256 x 256 x 3
Scale (1d array): 3
Result (3d array): 256 x 256 x 3
```
Other interesting examples to consider:
- `can_broadcast_shape_to([3], [1, 1, 1, 1, 3]) == true`
- `can_broadcast_shape_to([3], [3, 1]) == false`
- `can_broadcast_shape_to([256, 256, 3], [256, 1, 3]) == true`
In cases when the shapes contain zero(es):
- `can_broadcast_shape_to([0], [1]) == true`
- `can_broadcast_shape_to([0], [2]) == false`
- `can_broadcast_shape_to([0, 4, 0, 0], [1]) == true`
- `can_broadcast_shape_to([0, 4, 0, 0], [1, 1, 1, 1]) == true`
- `can_broadcast_shape_to([0, 4, 0, 0], [1, 4, 1, 1]) == true`
- `can_broadcast_shape_to([4, 3], [0, 3]) == false`
- `can_broadcast_shape_to([4, 3], [0, 0]) == false`
*/
// This is essentially doing the following in Python:
// `for target_dim, src_dim in itertools.zip_longest(target_shape[::-1], src_shape[::-1], fillvalue=1)`
for (SizeT i = 0; i < max(target_ndims, src_ndims); i++) {
SizeT target_dim_i = target_ndims - i - 1;
SizeT src_dim_i = src_ndims - i - 1;
bool target_dim_exists = target_dim_i >= 0;
bool src_dim_exists = src_dim_i >= 0;
SizeT target_dim = target_dim_exists ? target_shape[target_dim_i] : 1;
SizeT src_dim = src_dim_exists ? src_shape[src_dim_i] : 1;
bool ok = src_dim == 1 || target_dim == src_dim;
if (!ok) return false;
}
return true;
}
}
typedef uint8_t NDSliceType;
extern "C" {
const NDSliceType INPUT_SLICE_TYPE_INDEX = 0;
const NDSliceType INPUT_SLICE_TYPE_SLICE = 1;
}
struct NDSlice {
// A poor-man's `std::variant<int, UserRange>`
NDSliceType type;
/*
if type == INPUT_SLICE_TYPE_INDEX => `slice` points to a single `SizeT`
if type == INPUT_SLICE_TYPE_SLICE => `slice` points to a single `UserRange`
*/
uint8_t *slice;
};
namespace ndarray_util {
template<typename SizeT>
SizeT deduce_ndims_after_slicing(SizeT ndims, SizeT num_slices, const NDSlice *slices) {
irrt_assert(num_slices <= ndims);
SizeT final_ndims = ndims;
for (SizeT i = 0; i < num_slices; i++) {
if (slices[i].type == INPUT_SLICE_TYPE_INDEX) {
final_ndims--; // An integer slice demotes the rank by 1
}
}
return final_ndims;
}
}
template <typename SizeT>
struct NDArrayIndicesIter {
SizeT ndims;
const SizeT *shape;
SizeT *indices;
void set_indices_zero() {
__builtin_memset(indices, 0, sizeof(SizeT) * ndims);
}
void next() {
for (SizeT i = 0; i < ndims; i++) {
SizeT dim_i = ndims - i - 1;
indices[dim_i]++;
if (indices[dim_i] < shape[dim_i]) {
break;
} else {
indices[dim_i] = 0;
}
}
}
};
// The NDArray object. `SizeT` is the *signed* size type of this ndarray.
//
// NOTE: The order of fields is IMPORTANT. DON'T TOUCH IT
//
// Some resources you might find helpful:
// - The official numpy implementations:
// - https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst
// - On strides (about reshaping, slicing, C-contiguousness, etc)
// - https://ajcr.net/stride-guide-part-1/.
// - https://ajcr.net/stride-guide-part-2/.
// - https://ajcr.net/stride-guide-part-3/.
template <typename SizeT>
struct NDArray {
// The underlying data this `ndarray` is pointing to.
//
// NOTE: Formally this should be of type `void *`, but clang
// translates `void *` to `i8 *` when run with `-S -emit-llvm`,
// so we will put `uint8_t *` here for clarity.
uint8_t *data;
// The number of bytes of a single element in `data`.
//
// The `SizeT` is treated as `unsigned`.
SizeT itemsize;
// The number of dimensions of this shape.
//
// The `SizeT` is treated as `unsigned`.
SizeT ndims;
// Array shape, with length equal to `ndims`.
//
// The `SizeT` is treated as `unsigned`.
//
// NOTE: `shape` can contain 0.
// (those appear when the user makes an out of bounds slice into an ndarray, e.g., `np.zeros((3, 3))[400:].shape == (0, 3)`)
SizeT *shape;
// Array strides (stride value is in number of bytes, NOT number of elements), with length equal to `ndims`.
//
// The `SizeT` is treated as `signed`.
//
// NOTE: `strides` can have negative numbers.
// (those appear when there is a slice with a negative step, e.g., `my_array[::-1]`)
SizeT *strides;
// Calculate the size/# of elements of an `ndarray`.
// This function corresponds to `np.size(<ndarray>)` or `ndarray.size`
SizeT size() {
return ndarray_util::calc_size_from_shape(ndims, shape);
}
// Calculate the number of bytes of the content of an `ndarray` *in its view*.
// This function corresponds to `ndarray.nbytes`
SizeT nbytes() {
return this->size() * itemsize;
}
void set_pelement_value(uint8_t* pelement, const uint8_t* pvalue) {
__builtin_memcpy(pelement, pvalue, itemsize);
}
uint8_t* get_pelement_by_indices(const SizeT *indices) {
uint8_t* element = data;
for (SizeT dim_i = 0; dim_i < ndims; dim_i++)
element += indices[dim_i] * strides[dim_i];
return element;
}
uint8_t* get_nth_pelement(SizeT nth) {
irrt_assert(0 <= nth);
irrt_assert(nth < this->size());
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * this->ndims);
ndarray_util::set_indices_by_nth(this->ndims, this->shape, indices, nth);
return get_pelement_by_indices(indices);
}
// Get pointer to the first element of this ndarray, assuming
// `this->size() > 0` (i.e., not "degenerate" due to zeroes in `this->shape`).
//
// This is particularly useful for when the ndarray is just containing a single scalar.
uint8_t* get_first_pelement() {
irrt_assert(this->size() > 0);
return this->data; // ...It is simply `this->data`
}
// Is the given `indices` valid/in-bounds?
bool in_bounds(const SizeT *indices) {
for (SizeT dim_i = 0; dim_i < ndims; dim_i++) {
bool dim_ok = indices[dim_i] < shape[dim_i];
if (!dim_ok) return false;
}
return true;
}
// Fill the ndarray with a value
void fill_generic(const uint8_t* pvalue) {
NDArrayIndicesIter<SizeT> iter;
iter.ndims = this->ndims;
iter.shape = this->shape;
iter.indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndims);
iter.set_indices_zero();
for (SizeT i = 0; i < this->size(); i++, iter.next()) {
uint8_t* pelement = get_pelement_by_indices(iter.indices);
set_pelement_value(pelement, pvalue);
}
}
// Set the strides of the ndarray with `ndarray_util::set_strides_by_shape`
void set_strides_by_shape() {
ndarray_util::set_strides_by_shape(itemsize, ndims, strides, shape);
}
// https://numpy.org/doc/stable/reference/generated/numpy.eye.html
void set_to_eye(SizeT k, const uint8_t* zero_pvalue, const uint8_t* one_pvalue) {
__builtin_assume(ndims == 2);
// TODO: Better implementation
fill_generic(zero_pvalue);
for (SizeT i = 0; i < min(shape[0], shape[1]); i++) {
SizeT row = i;
SizeT col = i + k;
SizeT indices[2] = { row, col };
if (!in_bounds(indices)) continue;
uint8_t* pelement = get_pelement_by_indices(indices);
set_pelement_value(pelement, one_pvalue);
}
}
// To support numpy complex slices (e.g., `my_array[:50:2,4,:2:-1]`)
//
// Things assumed by this function:
// - `dst_ndarray` is allocated by the caller
// - `dst_ndarray.ndims` has the correct value (according to `ndarray_util::deduce_ndims_after_slicing`).
// - ... and `dst_ndarray.shape` and `dst_ndarray.strides` have been allocated by the caller as well
//
// Other notes:
// - `dst_ndarray->data` does not have to be set, it will be derived.
// - `dst_ndarray->itemsize` does not have to be set, it will be set to `this->itemsize`
// - `dst_ndarray->shape` and `dst_ndarray.strides` can contain empty values
void slice(SizeT num_ndslices, NDSlice* ndslices, NDArray<SizeT>* dst_ndarray) {
// REFERENCE CODE (check out `_index_helper` in `__getitem__`):
// https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652
irrt_assert(dst_ndarray->ndims == ndarray_util::deduce_ndims_after_slicing(this->ndims, num_ndslices, ndslices));
dst_ndarray->data = this->data;
SizeT this_axis = 0;
SizeT dst_axis = 0;
for (SizeT i = 0; i < num_ndslices; i++) {
NDSlice *ndslice = &ndslices[i];
if (ndslice->type == INPUT_SLICE_TYPE_INDEX) {
// Handle when the ndslice is just a single (possibly negative) integer
// e.g., `my_array[::2, -5, ::-1]`
// ^^------ like this
SizeT index_user = *((SizeT*) ndslice->slice);
SizeT index = resolve_index_in_length(this->shape[this_axis], index_user);
dst_ndarray->data += index * this->strides[this_axis]; // Add offset
// Next
this_axis++;
} else if (ndslice->type == INPUT_SLICE_TYPE_SLICE) {
// Handle when the ndslice is a slice (represented by UserSlice in IRRT)
// e.g., `my_array[::2, -5, ::-1]`
// ^^^------^^^^----- like these
UserSlice<SizeT>* user_slice = (UserSlice<SizeT>*) ndslice->slice;
Slice<SizeT> slice = user_slice->indices(this->shape[this_axis]); // To resolve negative indices and other funny stuff written by the user
// NOTE: There is no need to write special code to handle negative steps/strides.
// This simple implementation meticulously handles both positive and negative steps/strides.
// Check out the tinynumpy and IRRT's test cases if you are not convinced.
dst_ndarray->data += slice.start * this->strides[this_axis]; // Add offset (NOTE: no need to `* itemsize`, strides count in # of bytes)
dst_ndarray->strides[dst_axis] = slice.step * this->strides[this_axis]; // Determine stride
dst_ndarray->shape[dst_axis] = slice.len(); // Determine shape dimension
// Next
dst_axis++;
this_axis++;
} else {
__builtin_unreachable();
}
}
irrt_assert(dst_axis == dst_ndarray->ndims); // Sanity check on the implementation
}
// Similar to `np.broadcast_to(<ndarray>, <target_shape>)`
// Assumptions:
// - `this` has to be fully initialized.
// - `dst_ndarray->ndims` has to be set.
// - `dst_ndarray->shape` has to be set, this determines the shape `this` broadcasts to.
//
// Other notes:
// - `dst_ndarray->data` does not have to be set, it will be set to `this->data`.
// - `dst_ndarray->itemsize` does not have to be set, it will be set to `this->itemsize`.
// - `dst_ndarray->strides` does not have to be set, it will be overwritten.
//
// Cautions:
// ```
// xs = np.zeros((4,))
// ys = np.zeros((4, 1))
// ys[:] = xs # ok
//
// xs = np.zeros((1, 4))
// ys = np.zeros((4,))
// ys[:] = xs # allowed
// # However `np.broadcast_to(xs, (4,))` would fail, as per numpy's broadcasting rule.
// # and apparently numpy will "deprecate" this? SEE https://github.com/numpy/numpy/issues/21744
// # This implementation will NOT support this assignment.
// ```
void broadcast_to(NDArray<SizeT>* dst_ndarray) {
dst_ndarray->data = this->data;
dst_ndarray->itemsize = this->itemsize;
irrt_assert(
ndarray_util::can_broadcast_shape_to(
dst_ndarray->ndims,
dst_ndarray->shape,
this->ndims,
this->shape
)
);
SizeT stride_product = 1;
for (SizeT i = 0; i < max(this->ndims, dst_ndarray->ndims); i++) {
SizeT this_dim_i = this->ndims - i - 1;
SizeT dst_dim_i = dst_ndarray->ndims - i - 1;
bool this_dim_exists = this_dim_i >= 0;
bool dst_dim_exists = dst_dim_i >= 0;
// TODO: Explain how this works
bool c1 = this_dim_exists && this->shape[this_dim_i] == 1;
bool c2 = dst_dim_exists && dst_ndarray->shape[dst_dim_i] != 1;
if (!this_dim_exists || (c1 && c2)) {
dst_ndarray->strides[dst_dim_i] = 0; // Freeze it in-place
} else {
dst_ndarray->strides[dst_dim_i] = stride_product * this->itemsize;
stride_product *= this->shape[this_dim_i]; // NOTE: this_dim_exists must be true here.
}
}
}
// Simulates `this_ndarray[:] = src_ndarray`, with automatic broadcasting.
// Caution on https://github.com/numpy/numpy/issues/21744
// Also see `NDArray::broadcast_to`
void assign_with(NDArray<SizeT>* src_ndarray) {
irrt_assert(
ndarray_util::can_broadcast_shape_to(
this->ndims,
this->shape,
src_ndarray->ndims,
src_ndarray->shape
)
);
// Broadcast the `src_ndarray` to make the reading process *much* easier
SizeT* broadcasted_src_ndarray_strides = (SizeT*) __builtin_alloca(sizeof(SizeT) * this->ndims); // Remember to allocate strides beforehand
NDArray<SizeT> broadcasted_src_ndarray = {
.ndims = this->ndims,
.shape = this->shape,
.strides = broadcasted_src_ndarray_strides
};
src_ndarray->broadcast_to(&broadcasted_src_ndarray);
// Using iter instead of `get_nth_pelement` because it is slightly faster
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * this->ndims);
auto iter = NDArrayIndicesIter<SizeT> {
.ndims = this->ndims,
.shape = this->shape,
.indices = indices
};
const SizeT this_size = this->size();
for (SizeT i = 0; i < this_size; i++, iter.next()) {
uint8_t* src_pelement = broadcasted_src_ndarray.get_pelement_by_indices(indices);
uint8_t* this_pelement = this->get_pelement_by_indices(indices);
// Copy the broadcasted source element into this ndarray's element
this->set_pelement_value(this_pelement, src_pelement);
}
}
};
}
extern "C" {
uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
return ndarray->size();
}
uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
return ndarray->size();
}
void __nac3_ndarray_fill_generic(NDArray<int32_t>* ndarray, uint8_t* pvalue) {
ndarray->fill_generic(pvalue);
}
void __nac3_ndarray_fill_generic64(NDArray<int64_t>* ndarray, uint8_t* pvalue) {
ndarray->fill_generic(pvalue);
}
// void __nac3_ndarray_slice(NDArray<int32_t>* ndarray, int32_t num_slices, NDSlice<int32_t> *slices, NDArray<int32_t> *dst_ndarray) {
// // ndarray->slice(num_slices, slices, dst_ndarray);
// }
}

View File

@@ -0,0 +1,80 @@
#pragma once
#include "irrt_utils.hpp"
#include "irrt_typedefs.hpp"
namespace {
// A proper slice in IRRT; all negative indices have been resolved to absolute values.
// Even though nac3core's slices are always `int32_t`, we will template slice anyway
// since this struct is used as a general utility.
template <typename T>
struct Slice {
T start;
T stop;
T step;
// The length/The number of elements of the slice if it were a range,
// i.e., the value of `len(range(this->start, this->stop, this->step))`
T len() {
T diff = stop - start;
if (diff > 0 && step > 0) {
return ((diff - 1) / step) + 1;
} else if (diff < 0 && step < 0) {
return ((diff + 1) / step) + 1;
} else {
return 0;
}
}
};
template<typename T>
T resolve_index_in_length(T length, T index) {
irrt_assert(length >= 0);
if (index < 0) {
// Remember that index is negative, so do a plus here
return max(length + index, 0);
} else {
return min(length, index);
}
}
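// Examples (a sketch), with length = 5:
//   resolve_index_in_length(5, -2) == 3   (negative index resolved from the end)
//   resolve_index_in_length(5, -9) == 0   (clamped to 0)
//   resolve_index_in_length(5, 7)  == 5   (clamped to length)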
// NOTE: using a bitfield for the `*_defined` is better, at the
// cost of a more annoying implementation in nac3core inkwell
template <typename T>
struct UserSlice {
uint8_t start_defined;
T start;
uint8_t stop_defined;
T stop;
uint8_t step_defined;
T step;
// Like Python's `slice(start, stop, step).indices(length)`
Slice<T> indices(T length) {
// NOTE: This function implements Python's `slice.indices` *FAITHFULLY*.
// SEE: https://github.com/python/cpython/blob/f62161837e68c1c77961435f1b954412dd5c2b65/Objects/sliceobject.c#L546
irrt_assert(length >= 0);
irrt_assert(!step_defined || step != 0); // step_defined -> step != 0; step cannot be zero if specified by user
Slice<T> result;
result.step = step_defined ? step : 1;
bool step_is_negative = result.step < 0;
if (start_defined) {
result.start = resolve_index_in_length(length, start);
} else {
result.start = step_is_negative ? length - 1 : 0;
}
if (stop_defined) {
result.stop = resolve_index_in_length(length, stop);
} else {
result.stop = step_is_negative ? -1 : length;
}
return result;
}
};
}

nac3core/irrt/irrt_test.cpp Normal file
View File

@@ -0,0 +1,648 @@
// This file will be compiled like a real C++ program,
// and we do have the luxury to use the standard libraries.
// That is if the nix flakes do not have issues... especially on msys2...
#include <cstdint>
#include <cstdio>
#include <cstdlib>
// Set `IRRT_DONT_TYPEDEF_INTS` because `cstdint` defines them
#define IRRT_DONT_TYPEDEF_INTS
#include "irrt_everything.hpp"
void test_fail() {
printf("[!] Test failed\n");
exit(1);
}
void __begin_test(const char* function_name, const char* file, int line) {
printf("######### Running %s @ %s:%d\n", function_name, file, line);
}
#define BEGIN_TEST() __begin_test(__FUNCTION__, __FILE__, __LINE__)
template <typename T>
void debug_print_array(const char* format, int len, T* as) {
printf("[");
for (int i = 0; i < len; i++) {
if (i != 0) printf(", ");
printf(format, as[i]);
}
printf("]");
}
template <typename T>
void assert_arrays_match(const char* label, const char* format, int len, T* expected, T* got) {
if (!arrays_match(len, expected, got)) {
printf(">>>>>>> %s\n", label);
printf(" Expecting = ");
debug_print_array(format, len, expected);
printf("\n");
printf(" Got = ");
debug_print_array(format, len, got);
printf("\n");
test_fail();
}
}
template <typename T>
void assert_values_match(const char* label, const char* format, T expected, T got) {
if (expected != got) {
printf(">>>>>>> %s\n", label);
printf(" Expecting = ");
printf(format, expected);
printf("\n");
printf(" Got = ");
printf(format, got);
printf("\n");
test_fail();
}
}
void print_repeated(const char *str, int count) {
for (int i = 0; i < count; i++) {
printf("%s", str);
}
}
template<typename SizeT, typename ElementT>
void __print_ndarray_aux(const char *format, bool first, bool last, SizeT* cursor, SizeT depth, NDArray<SizeT>* ndarray) {
// A really lazy recursive implementation
// Add left padding unless it's the first entry (since there would be "[[[" before it)
if (!first) {
print_repeated(" ", depth);
}
const SizeT dim = ndarray->shape[depth];
if (depth + 1 == ndarray->ndims) {
// Recursed down to last dimension, print the values in a nice list
printf("[");
SizeT* indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndarray->ndims);
for (SizeT i = 0; i < dim; i++) {
ndarray_util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, *cursor);
ElementT* pelement = (ElementT*) ndarray->get_pelement_by_indices(indices);
ElementT element = *pelement;
if (i != 0) printf(", "); // List delimiter
printf(format, element);
printf("(@");
debug_print_array("%d", ndarray->ndims, indices);
printf(")");
(*cursor)++;
}
printf("]");
} else {
printf("[");
for (SizeT i = 0; i < ndarray->shape[depth]; i++) {
__print_ndarray_aux<SizeT, ElementT>(
format,
i == 0, // first?
i + 1 == dim, // last?
cursor,
depth + 1,
ndarray
);
}
printf("]");
}
// Add newline unless it's the last entry (since there will be "]]]" after it)
if (!last) {
print_repeated("\n", depth);
}
}
template<typename SizeT, typename ElementT>
void print_ndarray(const char *format, NDArray<SizeT>* ndarray) {
if (ndarray->ndims == 0) {
printf("<empty ndarray>");
} else {
SizeT cursor = 0;
__print_ndarray_aux<SizeT, ElementT>(format, true, true, &cursor, 0, ndarray);
}
printf("\n");
}
void test_calc_size_from_shape_normal() {
// Test shapes with normal values
BEGIN_TEST();
int32_t shape[4] = { 2, 3, 5, 7 };
assert_values_match("size", "%d", 210, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
}
void test_calc_size_from_shape_has_zero() {
// Test shapes with 0 in them
BEGIN_TEST();
int32_t shape[4] = { 2, 0, 5, 7 };
assert_values_match("size", "%d", 0, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
}
void test_set_strides_by_shape() {
// Test `set_strides_by_shape()`
BEGIN_TEST();
int32_t shape[4] = { 99, 3, 5, 7 };
int32_t strides[4] = { 0 };
ndarray_util::set_strides_by_shape((int32_t) sizeof(int32_t), 4, strides, shape);
int32_t expected_strides[4] = {
105 * sizeof(int32_t),
35 * sizeof(int32_t),
7 * sizeof(int32_t),
1 * sizeof(int32_t)
};
assert_arrays_match("strides", "%u", 4u, expected_strides, strides);
}
void test_ndarray_indices_iter_normal() {
// Test NDArrayIndicesIter normal behavior
BEGIN_TEST();
int32_t shape[3] = { 1, 2, 3 };
int32_t indices[3] = { 0, 0, 0 };
auto iter = NDArrayIndicesIter<int32_t> {
.ndims = 3,
.shape = shape,
.indices = indices
};
assert_arrays_match("indices #0", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 });
iter.next();
assert_arrays_match("indices #1", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
iter.next();
assert_arrays_match("indices #2", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 2 });
iter.next();
assert_arrays_match("indices #3", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 0 });
iter.next();
assert_arrays_match("indices #4", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 1 });
iter.next();
assert_arrays_match("indices #5", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 2 });
iter.next();
assert_arrays_match("indices #6", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 }); // Loops back
iter.next();
assert_arrays_match("indices #7", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
}
void test_ndarray_fill_generic() {
// Test ndarray fill_generic
BEGIN_TEST();
// Choose a type that's neither int32_t nor uint64_t (candidates of SizeT) to spice it up
// Also make all the octets non-zero, to see if `memcpy` in `fill_generic` is working perfectly.
uint16_t fill_value = 0xFACE;
uint16_t in_data[6] = { 100, 101, 102, 103, 104, 105 }; // Initialize `data` with values that differ from `fill_value`
int32_t in_itemsize = sizeof(uint16_t);
const int32_t in_ndims = 2;
int32_t in_shape[in_ndims] = { 2, 3 };
int32_t in_strides[in_ndims] = {};
NDArray<int32_t> ndarray = {
.data = (uint8_t*) in_data,
.itemsize = in_itemsize,
.ndims = in_ndims,
.shape = in_shape,
.strides = in_strides,
};
ndarray.set_strides_by_shape();
ndarray.fill_generic((uint8_t*) &fill_value); // `fill_generic` here
uint16_t expected_data[6] = { fill_value, fill_value, fill_value, fill_value, fill_value, fill_value };
assert_arrays_match("data", "0x%hX", 6, expected_data, in_data);
}
void test_ndarray_set_to_eye() {
// Test `set_to_eye` behavior (helper function to implement `np.eye()`)
BEGIN_TEST();
double in_data[9] = { 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0 };
int32_t in_itemsize = sizeof(double);
const int32_t in_ndims = 2;
int32_t in_shape[in_ndims] = { 3, 3 };
int32_t in_strides[in_ndims] = {};
NDArray<int32_t> ndarray = {
.data = (uint8_t*) in_data,
.itemsize = in_itemsize,
.ndims = in_ndims,
.shape = in_shape,
.strides = in_strides,
};
ndarray.set_strides_by_shape();
double zero = 0.0;
double one = 1.0;
ndarray.set_to_eye(1, (uint8_t*) &zero, (uint8_t*) &one);
assert_values_match("in_data[0]", "%f", 0.0, in_data[0]);
assert_values_match("in_data[1]", "%f", 1.0, in_data[1]);
assert_values_match("in_data[2]", "%f", 0.0, in_data[2]);
assert_values_match("in_data[3]", "%f", 0.0, in_data[3]);
assert_values_match("in_data[4]", "%f", 0.0, in_data[4]);
assert_values_match("in_data[5]", "%f", 1.0, in_data[5]);
assert_values_match("in_data[6]", "%f", 0.0, in_data[6]);
assert_values_match("in_data[7]", "%f", 0.0, in_data[7]);
assert_values_match("in_data[8]", "%f", 0.0, in_data[8]);
}
void test_slice_1() {
// Test `slice(5, None, None).indices(100) == slice(5, 100, 1)`
BEGIN_TEST();
UserSlice<int> user_slice = {
.start_defined = 1,
.start = 5,
.stop_defined = 0,
.step_defined = 0,
};
auto slice = user_slice.indices(100);
assert_values_match("start", "%d", 5, slice.start);
assert_values_match("stop", "%d", 100, slice.stop);
assert_values_match("step", "%d", 1, slice.step);
}
void test_slice_2() {
// Test `slice(400, 999, None).indices(100) == slice(100, 100, 1)`
BEGIN_TEST();
UserSlice<int> user_slice = {
.start_defined = 1,
.start = 400,
.stop_defined = 0,
.step_defined = 0,
};
auto slice = user_slice.indices(100);
assert_values_match("start", "%d", 100, slice.start);
assert_values_match("stop", "%d", 100, slice.stop);
assert_values_match("step", "%d", 1, slice.step);
}
void test_slice_3() {
// Test `slice(-10, -5, None).indices(100) == slice(90, 95, 1)`
BEGIN_TEST();
UserSlice<int> user_slice = {
.start_defined = 1,
.start = -10,
.stop_defined = 1,
.stop = -5,
.step_defined = 0,
};
auto slice = user_slice.indices(100);
assert_values_match("start", "%d", 90, slice.start);
assert_values_match("stop", "%d", 95, slice.stop);
assert_values_match("step", "%d", 1, slice.step);
}
void test_slice_4() {
// Test `slice(None, None, -5).indices(100) == (99, -1, -5)`
BEGIN_TEST();
UserSlice<int> user_slice = {
.start_defined = 0,
.stop_defined = 0,
.step_defined = 1,
.step = -5
};
auto slice = user_slice.indices(100);
assert_values_match("start", "%d", 99, slice.start);
assert_values_match("stop", "%d", -1, slice.stop);
assert_values_match("step", "%d", -5, slice.step);
}
void test_ndslice_1() {
/*
Reference Python code:
```python
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4));
# array([[ 0., 1., 2., 3.],
# [ 4., 5., 6., 7.],
# [ 8., 9., 10., 11.]])
dst_ndarray = ndarray[-2:, 1::2]
# array([[ 5., 7.],
# [ 9., 11.]])
assert dst_ndarray.shape == (2, 2)
assert dst_ndarray.strides == (32, 16)
assert dst_ndarray[0, 0] == 5.0
assert dst_ndarray[0, 1] == 7.0
assert dst_ndarray[1, 0] == 9.0
assert dst_ndarray[1, 1] == 11.0
```
*/
BEGIN_TEST();
double in_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
int32_t in_itemsize = sizeof(double);
const int32_t in_ndims = 2;
int32_t in_shape[in_ndims] = { 3, 4 };
int32_t in_strides[in_ndims] = {};
NDArray<int32_t> ndarray = {
.data = (uint8_t*) in_data,
.itemsize = in_itemsize,
.ndims = in_ndims,
.shape = in_shape,
.strides = in_strides
};
ndarray.set_strides_by_shape();
// Destination ndarray
// As documented, ndims and shape & strides must be allocated and determined by the caller.
const int32_t dst_ndims = 2;
int32_t dst_shape[dst_ndims] = {999, 999}; // Empty values
int32_t dst_strides[dst_ndims] = {999, 999}; // Empty values
NDArray<int32_t> dst_ndarray = {
.data = nullptr,
.ndims = dst_ndims,
.shape = dst_shape,
.strides = dst_strides
};
// Create the slice in `ndarray[-2::, 1::2]`
UserSlice<int32_t> user_slice_1 = {
.start_defined = 1,
.start = -2,
.stop_defined = 0,
.step_defined = 0
};
UserSlice<int32_t> user_slice_2 = {
.start_defined = 1,
.start = 1,
.stop_defined = 0,
.step_defined = 1,
.step = 2
};
const int32_t num_ndslices = 2;
NDSlice ndslices[num_ndslices] = {
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_1 },
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_2 }
};
ndarray.slice(num_ndslices, ndslices, &dst_ndarray);
int32_t expected_shape[dst_ndims] = { 2, 2 };
int32_t expected_strides[dst_ndims] = { 32, 16 };
assert_arrays_match("shape", "%d", dst_ndims, expected_shape, dst_ndarray.shape);
assert_arrays_match("strides", "%d", dst_ndims, expected_strides, dst_ndarray.strides);
assert_values_match("dst_ndarray[0, 0]", "%f", 5.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0, 0 })));
assert_values_match("dst_ndarray[0, 1]", "%f", 7.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0, 1 })));
assert_values_match("dst_ndarray[1, 0]", "%f", 9.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1, 0 })));
assert_values_match("dst_ndarray[1, 1]", "%f", 11.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1, 1 })));
}
void test_ndslice_2() {
/*
```python
ndarray = np.arange(12, dtype=np.float64).reshape((3, 4))
# array([[ 0., 1., 2., 3.],
# [ 4., 5., 6., 7.],
# [ 8., 9., 10., 11.]])
dst_ndarray = ndarray[2, ::-2]
# array([11., 9.])
assert dst_ndarray.shape == (2,)
assert dst_ndarray.strides == (-16,)
assert dst_ndarray[0] == 11.0
assert dst_ndarray[1] == 9.0
dst_ndarray[1] = 99 # If you write to `dst_ndarray`
assert ndarray[2, 1] == 99 # `ndarray` also updates!!
```
*/
BEGIN_TEST();
double in_data[12] = { 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0 };
int32_t in_itemsize = sizeof(double);
const int32_t in_ndims = 2;
int32_t in_shape[in_ndims] = { 3, 4 };
int32_t in_strides[in_ndims] = {};
NDArray<int32_t> ndarray = {
.data = (uint8_t*) in_data,
.itemsize = in_itemsize,
.ndims = in_ndims,
.shape = in_shape,
.strides = in_strides
};
ndarray.set_strides_by_shape();
// Destination ndarray
// As documented, ndims and shape & strides must be allocated and determined by the caller.
const int32_t dst_ndims = 1;
int32_t dst_shape[dst_ndims] = {999}; // Empty values
int32_t dst_strides[dst_ndims] = {999}; // Empty values
NDArray<int32_t> dst_ndarray = {
.data = nullptr,
.ndims = dst_ndims,
.shape = dst_shape,
.strides = dst_strides
};
// Create the slice in `ndarray[2, ::-2]`
int32_t user_slice_1 = 2;
UserSlice<int32_t> user_slice_2 = {
.start_defined = 0,
.stop_defined = 0,
.step_defined = 1,
.step = -2
};
const int32_t num_ndslices = 2;
NDSlice ndslices[num_ndslices] = {
{ .type = INPUT_SLICE_TYPE_INDEX, .slice = (uint8_t*) &user_slice_1 },
{ .type = INPUT_SLICE_TYPE_SLICE, .slice = (uint8_t*) &user_slice_2 }
};
ndarray.slice(num_ndslices, ndslices, &dst_ndarray);
int32_t expected_shape[dst_ndims] = { 2 };
int32_t expected_strides[dst_ndims] = { -16 };
assert_arrays_match("shape", "%d", dst_ndims, expected_shape, dst_ndarray.shape);
assert_arrays_match("strides", "%d", dst_ndims, expected_strides, dst_ndarray.strides);
// [5.0, 3.0]
assert_values_match("dst_ndarray[0]", "%f", 11.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 0 })));
assert_values_match("dst_ndarray[1]", "%f", 9.0, *((double *) dst_ndarray.get_pelement_by_indices((int32_t[dst_ndims]) { 1 })));
}
void test_can_broadcast_shape() {
BEGIN_TEST();
assert_values_match(
"can_broadcast_shape_to([3], [1, 1, 1, 1, 3]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 5, (int32_t[]) { 1, 1, 1, 1, 3 })
);
assert_values_match(
"can_broadcast_shape_to([3], [3, 1]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 2, (int32_t[]) { 3, 1 }));
assert_values_match(
"can_broadcast_shape_to([3], [3]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 3 }, 1, (int32_t[]) { 3 }));
assert_values_match(
"can_broadcast_shape_to([1], [3]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 1 }, 1, (int32_t[]) { 3 }));
assert_values_match(
"can_broadcast_shape_to([1], [1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 1 }, 1, (int32_t[]) { 1 }));
assert_values_match(
"can_broadcast_shape_to([256, 256, 3], [256, 1, 3]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 3, (int32_t[]) { 256, 1, 3 })
);
assert_values_match(
"can_broadcast_shape_to([256, 256, 3], [3]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 3 })
);
assert_values_match(
"can_broadcast_shape_to([256, 256, 3], [2]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 2 })
);
assert_values_match(
"can_broadcast_shape_to([256, 256, 3], [1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(3, (int32_t[]) { 256, 256, 3 }, 1, (int32_t[]) { 1 })
);
// In cases when the shapes contain zero(es)
assert_values_match(
"can_broadcast_shape_to([0], [1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 0 }, 1, (int32_t[]) { 1 })
);
assert_values_match(
"can_broadcast_shape_to([0], [2]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(1, (int32_t[]) { 0 }, 1, (int32_t[]) { 2 })
);
assert_values_match(
"can_broadcast_shape_to([0, 4, 0, 0], [1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 1, (int32_t[]) { 1 })
);
assert_values_match(
"can_broadcast_shape_to([0, 4, 0, 0], [1, 1, 1, 1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 4, (int32_t[]) { 1, 1, 1, 1 })
);
assert_values_match(
"can_broadcast_shape_to([0, 4, 0, 0], [1, 4, 1, 1]) == true",
"%d",
true,
ndarray_util::can_broadcast_shape_to(4, (int32_t[]) { 0, 4, 0, 0 }, 4, (int32_t[]) { 1, 4, 1, 1 })
);
assert_values_match(
"can_broadcast_shape_to([4, 3], [0, 3]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(2, (int32_t[]) { 4, 3 }, 2, (int32_t[]) { 0, 3 })
);
assert_values_match(
"can_broadcast_shape_to([4, 3], [0, 0]) == false",
"%d",
false,
ndarray_util::can_broadcast_shape_to(2, (int32_t[]) { 4, 3 }, 2, (int32_t[]) { 0, 0 })
);
}
void test_ndarray_broadcast_1() {
/*
# array = np.array([[19.9, 29.9, 39.9, 49.9]], dtype=np.float64)
# >>> [[19.9 29.9 39.9 49.9]]
#
# array = np.broadcast_to(array, (2, 3, 4))
# >>> [[[19.9 29.9 39.9 49.9]
# >>> [19.9 29.9 39.9 49.9]
# >>> [19.9 29.9 39.9 49.9]]
# >>> [[19.9 29.9 39.9 49.9]
# >>> [19.9 29.9 39.9 49.9]
# >>> [19.9 29.9 39.9 49.9]]]
#
# assert array.strides == (0, 0, 8)
*/
BEGIN_TEST();
double in_data[4] = { 19.9, 29.9, 39.9, 49.9 };
const int32_t in_ndims = 2;
int32_t in_shape[in_ndims] = {1, 4};
int32_t in_strides[in_ndims] = {};
NDArray<int32_t> ndarray = {
.data = (uint8_t*) in_data,
.itemsize = sizeof(double),
.ndims = in_ndims,
.shape = in_shape,
.strides = in_strides
};
ndarray.set_strides_by_shape();
const int32_t dst_ndims = 3;
int32_t dst_shape[dst_ndims] = {2, 3, 4};
int32_t dst_strides[dst_ndims] = {};
NDArray<int32_t> dst_ndarray = {
.ndims = dst_ndims,
.shape = dst_shape,
.strides = dst_strides
};
ndarray.broadcast_to(&dst_ndarray);
assert_arrays_match("dst_ndarray->strides", "%d", dst_ndims, (int32_t[]) { 0, 0, 8 }, dst_ndarray.strides);
assert_values_match("dst_ndarray[0, 0, 0]", "%f", 19.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 0})));
assert_values_match("dst_ndarray[0, 0, 1]", "%f", 29.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 1})));
assert_values_match("dst_ndarray[0, 0, 2]", "%f", 39.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 2})));
assert_values_match("dst_ndarray[0, 0, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 0, 3})));
assert_values_match("dst_ndarray[0, 1, 0]", "%f", 19.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 0})));
assert_values_match("dst_ndarray[0, 1, 1]", "%f", 29.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 1})));
assert_values_match("dst_ndarray[0, 1, 2]", "%f", 39.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 2})));
assert_values_match("dst_ndarray[0, 1, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {0, 1, 3})));
assert_values_match("dst_ndarray[1, 2, 3]", "%f", 49.9, *((double*) dst_ndarray.get_pelement_by_indices((int32_t[]) {1, 2, 3})));
}
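// How the (0, 0, 8) strides above arise: right-align the source shape with
// the target shape; axes where the source dimension is 1 (or absent) get
// stride 0, so every index along that axis resolves to the same bytes. A
// hypothetical standalone helper, not the IRRT implementation:
void broadcast_strides_ref(int32_t src_ndims, const int32_t* src_shape,
                           const int32_t* src_strides,
                           int32_t dst_ndims, int32_t* dst_strides) {
    for (int32_t i = 0; i < dst_ndims; i++) {
        bool stretched = i >= src_ndims || src_shape[src_ndims - i - 1] == 1;
        dst_strides[dst_ndims - i - 1] = stretched ? 0 : src_strides[src_ndims - i - 1];
    }
}
// e.g. the (1, 4) source above, assuming C-order strides (32, 8), broadcast
// to shape (2, 3, 4) yields strides (0, 0, 8), matching the assertion.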
int main() {
test_calc_size_from_shape_normal();
test_calc_size_from_shape_has_zero();
test_set_strides_by_shape();
test_ndarray_indices_iter_normal();
test_ndarray_fill_generic();
test_ndarray_set_to_eye();
test_slice_1();
test_slice_2();
test_slice_3();
test_slice_4();
test_ndslice_1();
test_ndslice_2();
test_can_broadcast_shape();
test_ndarray_broadcast_1();
return 0;
}

View File

@ -0,0 +1,14 @@
#pragma once
// This is made toggleable because `irrt_test.cpp` itself includes standard
// headers that already define the `intN_t` family.
#ifndef IRRT_DONT_TYPEDEF_INTS
typedef _BitInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t;
typedef _BitInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t;
typedef _BitInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t;
#endif
typedef int32_t SliceIndex;
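// Usage sketch of the toggle: a host-side test file defines the guard before
// including this header, so the _BitInt typedefs do not clash with the
// platform's own <cstdint> (the exact include layout here is assumed):
//
// // irrt_test.cpp
// #include <cstdint>
// #define IRRT_DONT_TYPEDEF_INTS
// #include "irrt_typedefs.hpp"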

View File

@ -0,0 +1,37 @@
#pragma once
#include "irrt_typedefs.hpp"
namespace {
template <typename T>
T max(T a, T b) {
return a > b ? a : b;
}
template <typename T>
T min(T a, T b) {
return a > b ? b : a;
}
template <typename T>
bool arrays_match(int len, T *as, T *bs) {
for (int i = 0; i < len; i++) {
if (as[i] != bs[i]) return false;
}
return true;
}
void irrt_panic() {
// Crash the program for now.
// TODO: Don't crash the program,
// or at least produce a useful message when testing IRRT
uint8_t* death = nullptr;
*death = 0; // TODO: address 0 on hardware might be writable?
}
// TODO: Make this a macro and allow it to be toggled on/off (e.g., debug vs release)
void irrt_assert(bool condition) {
if (!condition) irrt_panic();
}
}
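// One possible shape for the toggle mentioned in the TODO above (macro names
// assumed, not part of the current code):
//
// #ifdef IRRT_DEBUG
// #define irrt_assert(cond) do { if (!(cond)) irrt_panic(); } while (0)
// #else
// #define irrt_assert(cond) ((void) 0)
// #endif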

File diff suppressed because it is too large

View File

@ -1,8 +1,9 @@
use crate::codegen::{ use crate::codegen::{
irrt::{call_ndarray_calc_size, call_ndarray_flatten_index}, // irrt::{call_ndarray_calc_size, call_ndarray_flatten_index},
llvm_intrinsics::call_int_umin, llvm_intrinsics::call_int_umin,
stmt::gen_for_callback_incrementing, stmt::gen_for_callback_incrementing,
CodeGenContext, CodeGenerator, CodeGenContext,
CodeGenerator,
}; };
use inkwell::context::Context; use inkwell::context::Context;
use inkwell::types::{ArrayType, BasicType, StructType}; use inkwell::types::{ArrayType, BasicType, StructType};
@ -12,6 +13,7 @@ use inkwell::{
values::{BasicValueEnum, IntValue, PointerValue}, values::{BasicValueEnum, IntValue, PointerValue},
AddressSpace, IntPredicate, AddressSpace, IntPredicate,
}; };
use itertools::Itertools;
/// A LLVM type that is used to represent a non-primitive type in NAC3. /// A LLVM type that is used to represent a non-primitive type in NAC3.
pub trait ProxyType<'ctx>: Into<Self::Base> { pub trait ProxyType<'ctx>: Into<Self::Base> {
@ -1208,25 +1210,27 @@ impl<'ctx> NDArrayType<'ctx> {
ctx: &'ctx Context, ctx: &'ctx Context,
dtype: BasicTypeEnum<'ctx>, dtype: BasicTypeEnum<'ctx>,
) -> Self { ) -> Self {
let llvm_usize = generator.get_size_type(ctx); todo!()
// struct NDArray { num_dims: size_t, dims: size_t*, data: T* } // let llvm_usize = generator.get_size_type(ctx);
//
// * num_dims: Number of dimensions in the array
// * dims: Pointer to an array containing the size of each dimension
// * data: Pointer to an array containing the array data
let llvm_ndarray = ctx
.struct_type(
&[
llvm_usize.into(),
llvm_usize.ptr_type(AddressSpace::default()).into(),
dtype.ptr_type(AddressSpace::default()).into(),
],
false,
)
.ptr_type(AddressSpace::default());
NDArrayType::from_type(llvm_ndarray, llvm_usize) // // struct NDArray { num_dims: size_t, dims: size_t*, data: T* }
// //
// // * num_dims: Number of dimensions in the array
// // * dims: Pointer to an array containing the size of each dimension
// // * data: Pointer to an array containing the array data
// let llvm_ndarray = ctx
// .struct_type(
// &[
// llvm_usize.into(),
// llvm_usize.ptr_type(AddressSpace::default()).into(),
// dtype.ptr_type(AddressSpace::default()).into(),
// ],
// false,
// )
// .ptr_type(AddressSpace::default());
// NDArrayType::from_type(llvm_ndarray, llvm_usize)
} }
/// Creates an [`NDArrayType`] from a [`PointerType`]. /// Creates an [`NDArrayType`] from a [`PointerType`].
@ -1601,7 +1605,8 @@ impl<'ctx> ArrayLikeValue<'ctx> for NDArrayDataProxy<'ctx, '_> {
ctx: &CodeGenContext<'ctx, '_>, ctx: &CodeGenContext<'ctx, '_>,
generator: &G, generator: &G,
) -> IntValue<'ctx> { ) -> IntValue<'ctx> {
call_ndarray_calc_size(generator, ctx, &self.as_slice_value(ctx, generator), (None, None)) todo!()
// call_ndarray_calc_size(generator, ctx, &self.as_slice_value(ctx, generator), (None, None))
} }
} }
@ -1659,33 +1664,34 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> ArrayLikeIndexer<'ctx, Index>
indices: &Index, indices: &Index,
name: Option<&str>, name: Option<&str>,
) -> PointerValue<'ctx> { ) -> PointerValue<'ctx> {
let llvm_usize = generator.get_size_type(ctx.ctx); todo!()
// let llvm_usize = generator.get_size_type(ctx.ctx);
let indices_elem_ty = indices // let indices_elem_ty = indices
.ptr_offset(ctx, generator, &llvm_usize.const_zero(), None) // .ptr_offset(ctx, generator, &llvm_usize.const_zero(), None)
.get_type() // .get_type()
.get_element_type(); // .get_element_type();
let Ok(indices_elem_ty) = IntType::try_from(indices_elem_ty) else { // let Ok(indices_elem_ty) = IntType::try_from(indices_elem_ty) else {
panic!("Expected list[int32] but got {indices_elem_ty}") // panic!("Expected list[int32] but got {indices_elem_ty}")
}; // };
assert_eq!( // assert_eq!(
indices_elem_ty.get_bit_width(), // indices_elem_ty.get_bit_width(),
32, // 32,
"Expected list[int32] but got list[int{}]", // "Expected list[int32] but got list[int{}]",
indices_elem_ty.get_bit_width() // indices_elem_ty.get_bit_width()
); // );
let index = call_ndarray_flatten_index(generator, ctx, *self.0, indices); // let index = call_ndarray_flatten_index(generator, ctx, *self.0, indices);
unsafe { // unsafe {
ctx.builder // ctx.builder
.build_in_bounds_gep( // .build_in_bounds_gep(
self.base_ptr(ctx, generator), // self.base_ptr(ctx, generator),
&[index], // &[index],
name.unwrap_or_default(), // name.unwrap_or_default(),
) // )
.unwrap() // .unwrap()
} // }
} }
fn ptr_offset<G: CodeGenerator + ?Sized>( fn ptr_offset<G: CodeGenerator + ?Sized>(
@ -1761,3 +1767,341 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> UntypedArrayLikeMutator<'ctx,
for NDArrayDataProxy<'ctx, '_> for NDArrayDataProxy<'ctx, '_>
{ {
} }
#[derive(Debug, Clone, Copy)]
pub struct StructField<'ctx> {
/// The GEP index of this struct field.
pub gep_index: u32,
/// Name of this struct field.
///
/// Used for generating names.
pub name: &'static str,
/// The type of this struct field.
pub ty: BasicTypeEnum<'ctx>,
}
pub struct StructFields<'ctx> {
/// Name of the struct.
///
/// Used for generating names.
pub name: &'static str,
/// All the [`StructField`]s of this struct.
///
/// **NOTE:** The index position of a [`StructField`]
/// matches the element's [`StructField::gep_index`].
pub fields: Vec<StructField<'ctx>>,
}
struct StructFieldsBuilder<'ctx> {
gep_index_counter: u32,
/// Name of the struct to be built.
name: &'static str,
fields: Vec<StructField<'ctx>>,
}
impl<'ctx> StructField<'ctx> {
/// Returns a pointer to this field within the struct pointed to by `struct_ptr`.
pub fn gep(
&self,
ctx: &CodeGenContext<'ctx, '_>,
struct_ptr: PointerValue<'ctx>,
) -> PointerValue<'ctx> {
let index_type = ctx.ctx.i32_type(); // TODO: I think I'm not supposed to use i32 for GEP like that
unsafe {
ctx.builder
.build_in_bounds_gep(
struct_ptr,
&[index_type.const_zero(), index_type.const_int(self.gep_index as u64, false)],
self.name,
)
.unwrap()
}
}
/// Loads the value of this field from the struct pointed to by `struct_ptr`.
pub fn load(
&self,
ctx: &CodeGenContext<'ctx, '_>,
struct_ptr: PointerValue<'ctx>,
) -> BasicValueEnum<'ctx> {
ctx.builder.build_load(self.gep(ctx, struct_ptr), self.name).unwrap()
}
/// Stores `value` into this field of the struct pointed to by `struct_ptr`.
pub fn store<V>(&self, ctx: &CodeGenContext<'ctx, '_>, struct_ptr: PointerValue<'ctx>, value: V)
where
V: BasicValue<'ctx>,
{
ctx.builder.build_store(self.gep(ctx, struct_ptr), value).unwrap();
}
}
type IsInstanceError = String;
type IsInstanceResult = Result<(), IsInstanceError>;
pub fn check_basic_types_match<'ctx, A, B>(expected: A, got: B) -> IsInstanceResult
where
A: BasicType<'ctx>,
B: BasicType<'ctx>,
{
let expected = expected.as_basic_type_enum();
let got = got.as_basic_type_enum();
// Handle the type-specific cases explicitly here;
// otherwise the fallback arm below reports a generic mismatch
match (expected, got) {
(BasicTypeEnum::IntType(expected), BasicTypeEnum::IntType(got)) => {
if expected.get_bit_width() != got.get_bit_width() {
return Err(format!(
"Expected IntType ({expected}-bit(s)), got IntType ({got}-bit(s))"
));
}
}
(expected, got) => {
if expected != got {
return Err(format!("Expected {expected}, got {got}"));
}
}
}
Ok(())
}
impl<'ctx> StructFields<'ctx> {
pub fn num_fields(&self) -> u32 {
self.fields.len() as u32
}
pub fn get_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
let llvm_fields = self.fields.iter().map(|field| field.ty).collect_vec();
ctx.struct_type(llvm_fields.as_slice(), false)
}
pub fn is_type(&self, scrutinee: StructType<'ctx>) -> IsInstanceResult {
// Check scrutinee's number of struct fields
if scrutinee.count_fields() != self.num_fields() {
return Err(format!(
"Expected {expected_count} field(s) in `{struct_name}` type, got {got_count}",
struct_name = self.name,
expected_count = self.num_fields(),
got_count = scrutinee.count_fields(),
));
}
// Check the scrutinee's field types
for field in self.fields.iter() {
let expected_field_ty = field.ty;
let got_field_ty = scrutinee.get_field_type_at_index(field.gep_index).unwrap();
if let Err(field_err) = check_basic_types_match(expected_field_ty, got_field_ty) {
return Err(format!(
"Field GEP index {gep_index} does not match the expected type of ({struct_name}::{field_name}): {field_err}",
gep_index = field.gep_index,
struct_name = self.name,
field_name = field.name,
));
}
}
// Done
Ok(())
}
}
impl<'ctx> StructFieldsBuilder<'ctx> {
fn start(name: &'static str) -> Self {
StructFieldsBuilder { gep_index_counter: 0, name, fields: Vec::new() }
}
fn add_field(&mut self, name: &'static str, ty: BasicTypeEnum<'ctx>) -> StructField<'ctx> {
let index = self.gep_index_counter;
self.gep_index_counter += 1;
let field = StructField { gep_index: index, name, ty };
self.fields.push(field); // Register into self.fields
field // Return the field so the caller can keep a handle to it
}
fn end(self) -> StructFields<'ctx> {
StructFields { name: self.name, fields: self.fields }
}
}
#[derive(Debug, Clone, Copy)]
pub struct NpArrayType<'ctx> {
pub size_type: IntType<'ctx>,
pub elem_type: BasicTypeEnum<'ctx>,
}
pub struct NpArrayStructFields<'ctx> {
pub whole_struct: StructFields<'ctx>,
pub data: StructField<'ctx>,
pub itemsize: StructField<'ctx>,
pub ndims: StructField<'ctx>,
pub shape: StructField<'ctx>,
pub strides: StructField<'ctx>,
}
impl<'ctx> NpArrayType<'ctx> {
pub fn new_opaque_elem(
ctx: &CodeGenContext<'ctx, '_>,
size_type: IntType<'ctx>,
) -> NpArrayType<'ctx> {
NpArrayType { size_type, elem_type: ctx.ctx.i8_type().as_basic_type_enum() }
}
pub fn get_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
self.fields().whole_struct.get_struct_type(ctx)
}
pub fn fields(&self) -> NpArrayStructFields<'ctx> {
let mut builder = StructFieldsBuilder::start("NpArray");
let addrspace = AddressSpace::default();
let byte_type = self.size_type.get_context().i8_type();
// NOTE: The field order and types must match the struct defined in `nac3core/irrt` exactly.
let data = builder.add_field("data", byte_type.ptr_type(addrspace).into());
let itemsize = builder.add_field("itemsize", self.size_type.into());
let ndims = builder.add_field("ndims", self.size_type.into());
let shape = builder.add_field("shape", self.size_type.ptr_type(addrspace).into());
let strides = builder.add_field("strides", self.size_type.ptr_type(addrspace).into());
NpArrayStructFields { whole_struct: builder.end(), data, itemsize, ndims, shape, strides }
}
/// Allocate an `ndarray` on stack, with the following notes:
///
/// - `ndarray.ndims` will be initialized to `in_ndims`.
/// - `ndarray.itemsize` will be initialized to `self.elem_type.size_of()`.
/// - `ndarray.shape` and `ndarray.strides` will be allocated on the stack with `in_ndims`
///   elements each, left uninitialized.
pub fn var_alloc<G>(
&self,
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
in_ndims: IntValue<'ctx>,
name: Option<&str>,
) -> NpArrayValue<'ctx>
where
G: CodeGenerator + ?Sized,
{
let ptr = generator
.gen_var_alloc(ctx, self.get_struct_type(ctx.ctx).as_basic_type_enum(), name)
.unwrap();
// Allocate `in_dims` number of `size_type` on the stack for `shape` and `strides`
let allocated_shape = generator
.gen_array_var_alloc(
ctx,
self.size_type.as_basic_type_enum(),
in_ndims,
Some("allocated_shape"),
)
.unwrap();
let allocated_strides = generator
.gen_array_var_alloc(
ctx,
self.size_type.as_basic_type_enum(),
in_ndims,
Some("allocated_strides"),
)
.unwrap();
let value = NpArrayValue { ty: *self, ptr };
value.store_ndims(ctx, in_ndims);
value.store_itemsize(ctx, self.elem_type.size_of().unwrap());
value.store_shape(ctx, allocated_shape.base_ptr(ctx, generator));
value.store_strides(ctx, allocated_strides.base_ptr(ctx, generator));
value
}
}
#[derive(Debug, Clone, Copy)]
pub struct NpArrayValue<'ctx> {
pub ty: NpArrayType<'ctx>,
pub ptr: PointerValue<'ctx>,
}
impl<'ctx> NpArrayValue<'ctx> {
pub fn load_ndims(&self, ctx: &CodeGenContext<'ctx, '_>) -> IntValue<'ctx> {
let field = self.ty.fields().ndims;
field.load(ctx, self.ptr).into_int_value()
}
pub fn store_ndims(&self, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>) {
let field = self.ty.fields().ndims;
field.store(ctx, self.ptr, value);
}
pub fn load_itemsize(&self, ctx: &CodeGenContext<'ctx, '_>) -> IntValue<'ctx> {
let field = self.ty.fields().itemsize;
field.load(ctx, self.ptr).into_int_value()
}
pub fn store_itemsize(&self, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>) {
let field = self.ty.fields().itemsize;
field.store(ctx, self.ptr, value);
}
pub fn load_shape(&self, ctx: &CodeGenContext<'ctx, '_>) -> PointerValue<'ctx> {
let field = self.ty.fields().shape;
field.load(ctx, self.ptr).into_pointer_value()
}
pub fn store_shape(&self, ctx: &CodeGenContext<'ctx, '_>, value: PointerValue<'ctx>) {
let field = self.ty.fields().shape;
field.store(ctx, self.ptr, value);
}
pub fn load_strides(&self, ctx: &CodeGenContext<'ctx, '_>) -> PointerValue<'ctx> {
let field = self.ty.fields().strides;
field.load(ctx, self.ptr).into_pointer_value()
}
pub fn store_strides(&self, ctx: &CodeGenContext<'ctx, '_>, value: PointerValue<'ctx>) {
let field = self.ty.fields().strides;
field.store(ctx, self.ptr, value);
}
/// Returns `shape` as a [`TypedArrayLikeAdapter`] of `ndims` elements.
///
/// `ndims` is fixed when the ndarray is allocated and never changes, so the
/// length of the returned slice stays valid.
pub fn shape_slice(
&self,
ctx: &CodeGenContext<'ctx, '_>,
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
// Get the pointer to `shape`
let field = self.ty.fields().shape;
let shape = field.load(ctx, self.ptr).into_pointer_value();
// Load `ndims`
let ndims = self.load_ndims(ctx);
TypedArrayLikeAdapter {
adapted: ArraySliceValue(shape, ndims, Some(field.name)),
downcast_fn: Box::new(|_ctx, x| x.into_int_value()),
upcast_fn: Box::new(|_ctx, x| x.as_basic_value_enum()),
}
}
/// Returns `strides` as a [`TypedArrayLikeAdapter`] of `ndims` elements.
///
/// `ndims` is fixed when the ndarray is allocated and never changes, so the
/// length of the returned slice stays valid.
pub fn strides_slice(
&self,
ctx: &CodeGenContext<'ctx, '_>,
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
// Get the pointer to `strides`
let field = self.ty.fields().strides;
let strides = field.load(ctx, self.ptr).into_pointer_value();
// Load `ndims`
let ndims = self.load_ndims(ctx);
TypedArrayLikeAdapter {
adapted: ArraySliceValue(strides, ndims, Some(field.name)),
downcast_fn: Box::new(|_ctx, x| x.into_int_value()),
upcast_fn: Box::new(|_ctx, x| x.as_basic_value_enum()),
}
}
}
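// For reference, a sketch of the C++ struct this layout mirrors (field order
// and types inferred from `fields()` above, with `SizeT` standing in for the
// configured size type):
//
//     template <typename SizeT>
//     struct NDArray {
//         uint8_t* data;
//         SizeT itemsize;
//         SizeT ndims;
//         SizeT* shape;
//         SizeT* strides;
//     };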

View File

@ -1362,100 +1362,101 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
} else if ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) } else if ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|| ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) || ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
{ {
let llvm_usize = generator.get_size_type(ctx.ctx); todo!()
// let llvm_usize = generator.get_size_type(ctx.ctx);
let is_ndarray1 = ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); // let is_ndarray1 = ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
let is_ndarray2 = ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); // let is_ndarray2 = ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
if is_ndarray1 && is_ndarray2 { // if is_ndarray1 && is_ndarray2 {
let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty1); // let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty1);
let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty2); // let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty2);
assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2)); // assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
let left_val = // let left_val =
NDArrayValue::from_ptr_val(left_val.into_pointer_value(), llvm_usize, None); // NDArrayValue::from_ptr_val(left_val.into_pointer_value(), llvm_usize, None);
let right_val = // let right_val =
NDArrayValue::from_ptr_val(right_val.into_pointer_value(), llvm_usize, None); // NDArrayValue::from_ptr_val(right_val.into_pointer_value(), llvm_usize, None);
let res = if op.base == Operator::MatMult { // let res = if op.base == Operator::MatMult {
// MatMult is the only binop which is not an elementwise op // // MatMult is the only binop which is not an elementwise op
numpy::ndarray_matmul_2d( // numpy::ndarray_matmul_2d(
generator, // generator,
ctx, // ctx,
ndarray_dtype1, // ndarray_dtype1,
match op.variant { // match op.variant {
BinopVariant::Normal => None, // BinopVariant::Normal => None,
BinopVariant::AugAssign => Some(left_val), // BinopVariant::AugAssign => Some(left_val),
}, // },
left_val, // left_val,
right_val, // right_val,
)? // )?
} else { // } else {
numpy::ndarray_elementwise_binop_impl( // numpy::ndarray_elementwise_binop_impl(
generator, // generator,
ctx, // ctx,
ndarray_dtype1, // ndarray_dtype1,
match op.variant { // match op.variant {
BinopVariant::Normal => None, // BinopVariant::Normal => None,
BinopVariant::AugAssign => Some(left_val), // BinopVariant::AugAssign => Some(left_val),
}, // },
(left_val.as_base_value().into(), false), // (left_val.as_base_value().into(), false),
(right_val.as_base_value().into(), false), // (right_val.as_base_value().into(), false),
|generator, ctx, (lhs, rhs)| { // |generator, ctx, (lhs, rhs)| {
gen_binop_expr_with_values( // gen_binop_expr_with_values(
generator, // generator,
ctx, // ctx,
(&Some(ndarray_dtype1), lhs), // (&Some(ndarray_dtype1), lhs),
op, // op,
(&Some(ndarray_dtype2), rhs), // (&Some(ndarray_dtype2), rhs),
ctx.current_loc, // ctx.current_loc,
)? // )?
.unwrap() // .unwrap()
.to_basic_value_enum( // .to_basic_value_enum(
ctx, // ctx,
generator, // generator,
ndarray_dtype1, // ndarray_dtype1,
) // )
}, // },
)? // )?
}; // };
Ok(Some(res.as_base_value().into())) // Ok(Some(res.as_base_value().into()))
} else { // } else {
let (ndarray_dtype, _) = // let (ndarray_dtype, _) =
unpack_ndarray_var_tys(&mut ctx.unifier, if is_ndarray1 { ty1 } else { ty2 }); // unpack_ndarray_var_tys(&mut ctx.unifier, if is_ndarray1 { ty1 } else { ty2 });
let ndarray_val = NDArrayValue::from_ptr_val( // let ndarray_val = NDArrayValue::from_ptr_val(
if is_ndarray1 { left_val } else { right_val }.into_pointer_value(), // if is_ndarray1 { left_val } else { right_val }.into_pointer_value(),
llvm_usize, // llvm_usize,
None, // None,
); // );
let res = numpy::ndarray_elementwise_binop_impl( // let res = numpy::ndarray_elementwise_binop_impl(
generator, // generator,
ctx, // ctx,
ndarray_dtype, // ndarray_dtype,
match op.variant { // match op.variant {
BinopVariant::Normal => None, // BinopVariant::Normal => None,
BinopVariant::AugAssign => Some(ndarray_val), // BinopVariant::AugAssign => Some(ndarray_val),
}, // },
(left_val, !is_ndarray1), // (left_val, !is_ndarray1),
(right_val, !is_ndarray2), // (right_val, !is_ndarray2),
|generator, ctx, (lhs, rhs)| { // |generator, ctx, (lhs, rhs)| {
gen_binop_expr_with_values( // gen_binop_expr_with_values(
generator, // generator,
ctx, // ctx,
(&Some(ndarray_dtype), lhs), // (&Some(ndarray_dtype), lhs),
op, // op,
(&Some(ndarray_dtype), rhs), // (&Some(ndarray_dtype), rhs),
ctx.current_loc, // ctx.current_loc,
)? // )?
.unwrap() // .unwrap()
.to_basic_value_enum(ctx, generator, ndarray_dtype) // .to_basic_value_enum(ctx, generator, ndarray_dtype)
}, // },
)?; // )?;
Ok(Some(res.as_base_value().into())) // Ok(Some(res.as_base_value().into()))
} // }
} else { } else {
let left_ty_enum = ctx.unifier.get_ty_immutable(left_ty.unwrap()); let left_ty_enum = ctx.unifier.get_ty_immutable(left_ty.unwrap());
let TypeEnum::TObj { fields, obj_id, .. } = left_ty_enum.as_ref() else { let TypeEnum::TObj { fields, obj_id, .. } = left_ty_enum.as_ref() else {
@ -1612,40 +1613,41 @@ pub fn gen_unaryop_expr_with_values<'ctx, G: CodeGenerator>(
_ => val.into(), _ => val.into(),
} }
} else if ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) { } else if ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) {
let llvm_usize = generator.get_size_type(ctx.ctx); todo!()
let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty); // let llvm_usize = generator.get_size_type(ctx.ctx);
// let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
let val = NDArrayValue::from_ptr_val(val.into_pointer_value(), llvm_usize, None); // let val = NDArrayValue::from_ptr_val(val.into_pointer_value(), llvm_usize, None);
// ndarray uses `~` rather than `not` to perform elementwise inversion, convert it before // // ndarray uses `~` rather than `not` to perform elementwise inversion, convert it before
// passing it to the elementwise codegen function // // passing it to the elementwise codegen function
let op = if ndarray_dtype.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::Bool.id()) { // let op = if ndarray_dtype.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::Bool.id()) {
if op == ast::Unaryop::Invert { // if op == ast::Unaryop::Invert {
ast::Unaryop::Not // ast::Unaryop::Not
} else { // } else {
unreachable!( // unreachable!(
"ufunc {} not supported for ndarray[bool, N]", // "ufunc {} not supported for ndarray[bool, N]",
op.op_info().method_name, // op.op_info().method_name,
) // )
} // }
} else { // } else {
op // op
}; // };
let res = numpy::ndarray_elementwise_unaryop_impl( // let res = numpy::ndarray_elementwise_unaryop_impl(
generator, // generator,
ctx, // ctx,
ndarray_dtype, // ndarray_dtype,
None, // None,
val, // val,
|generator, ctx, val| { // |generator, ctx, val| {
gen_unaryop_expr_with_values(generator, ctx, op, (&Some(ndarray_dtype), val))? // gen_unaryop_expr_with_values(generator, ctx, op, (&Some(ndarray_dtype), val))?
.unwrap() // .unwrap()
.to_basic_value_enum(ctx, generator, ndarray_dtype) // .to_basic_value_enum(ctx, generator, ndarray_dtype)
}, // },
)?; // )?;
res.as_base_value().into() // res.as_base_value().into()
} else { } else {
unimplemented!() unimplemented!()
})) }))
@ -1688,85 +1690,86 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
if left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) if left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|| right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) || right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
{ {
let llvm_usize = generator.get_size_type(ctx.ctx); todo!()
// let llvm_usize = generator.get_size_type(ctx.ctx);
let (Some(left_ty), lhs) = left else { unreachable!() }; // let (Some(left_ty), lhs) = left else { unreachable!() };
let (Some(right_ty), rhs) = comparators[0] else { unreachable!() }; // let (Some(right_ty), rhs) = comparators[0] else { unreachable!() };
let op = ops[0]; // let op = ops[0];
let is_ndarray1 = // let is_ndarray1 =
left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); // left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
let is_ndarray2 = // let is_ndarray2 =
right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); // right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
return if is_ndarray1 && is_ndarray2 { // return if is_ndarray1 && is_ndarray2 {
let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, left_ty); // let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, left_ty);
let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, right_ty); // let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, right_ty);
assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2)); // assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
let left_val = // let left_val =
NDArrayValue::from_ptr_val(lhs.into_pointer_value(), llvm_usize, None); // NDArrayValue::from_ptr_val(lhs.into_pointer_value(), llvm_usize, None);
let res = numpy::ndarray_elementwise_binop_impl( // let res = numpy::ndarray_elementwise_binop_impl(
generator, // generator,
ctx, // ctx,
ctx.primitives.bool, // ctx.primitives.bool,
None, // None,
(left_val.as_base_value().into(), false), // (left_val.as_base_value().into(), false),
(rhs, false), // (rhs, false),
|generator, ctx, (lhs, rhs)| { // |generator, ctx, (lhs, rhs)| {
let val = gen_cmpop_expr_with_values( // let val = gen_cmpop_expr_with_values(
generator, // generator,
ctx, // ctx,
(Some(ndarray_dtype1), lhs), // (Some(ndarray_dtype1), lhs),
&[op], // &[op],
&[(Some(ndarray_dtype2), rhs)], // &[(Some(ndarray_dtype2), rhs)],
)? // )?
.unwrap() // .unwrap()
.to_basic_value_enum( // .to_basic_value_enum(
ctx, // ctx,
generator, // generator,
ctx.primitives.bool, // ctx.primitives.bool,
)?; // )?;
Ok(generator.bool_to_i8(ctx, val.into_int_value()).into()) // Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
}, // },
)?; // )?;
Ok(Some(res.as_base_value().into())) // Ok(Some(res.as_base_value().into()))
} else { // } else {
let (ndarray_dtype, _) = unpack_ndarray_var_tys( // let (ndarray_dtype, _) = unpack_ndarray_var_tys(
&mut ctx.unifier, // &mut ctx.unifier,
if is_ndarray1 { left_ty } else { right_ty }, // if is_ndarray1 { left_ty } else { right_ty },
); // );
let res = numpy::ndarray_elementwise_binop_impl( // let res = numpy::ndarray_elementwise_binop_impl(
generator, // generator,
ctx, // ctx,
ctx.primitives.bool, // ctx.primitives.bool,
None, // None,
(lhs, !is_ndarray1), // (lhs, !is_ndarray1),
(rhs, !is_ndarray2), // (rhs, !is_ndarray2),
|generator, ctx, (lhs, rhs)| { // |generator, ctx, (lhs, rhs)| {
let val = gen_cmpop_expr_with_values( // let val = gen_cmpop_expr_with_values(
generator, // generator,
ctx, // ctx,
(Some(ndarray_dtype), lhs), // (Some(ndarray_dtype), lhs),
&[op], // &[op],
&[(Some(ndarray_dtype), rhs)], // &[(Some(ndarray_dtype), rhs)],
)? // )?
.unwrap() // .unwrap()
.to_basic_value_enum( // .to_basic_value_enum(
ctx, // ctx,
generator, // generator,
ctx.primitives.bool, // ctx.primitives.bool,
)?; // )?;
Ok(generator.bool_to_i8(ctx, val.into_int_value()).into()) // Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
}, // },
)?; // )?;
Ok(Some(res.as_base_value().into())) // Ok(Some(res.as_base_value().into()))
}; // };
} }
} }
@ -2102,310 +2105,312 @@ fn gen_ndarray_subscript_expr<'ctx, G: CodeGenerator>(
v: NDArrayValue<'ctx>, v: NDArrayValue<'ctx>,
slice: &Expr<Option<Type>>, slice: &Expr<Option<Type>>,
) -> Result<Option<ValueEnum<'ctx>>, String> { ) -> Result<Option<ValueEnum<'ctx>>, String> {
let llvm_i1 = ctx.ctx.bool_type(); todo!()
let llvm_i32 = ctx.ctx.i32_type();
let llvm_usize = generator.get_size_type(ctx.ctx);
let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else { // let llvm_i1 = ctx.ctx.bool_type();
unreachable!() // let llvm_i32 = ctx.ctx.i32_type();
}; // let llvm_usize = generator.get_size_type(ctx.ctx);
let ndims = values // let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else {
.iter() // unreachable!()
.map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone())) // };
.collect::<Result<Vec<_>, _>>()
.map_err(|val| {
format!(
"Expected non-negative literal for ndarray.ndims, got {}",
i128::try_from(val).unwrap()
)
})?;
assert!(!ndims.is_empty()); // let ndims = values
// .iter()
// .map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone()))
// .collect::<Result<Vec<_>, _>>()
// .map_err(|val| {
// format!(
// "Expected non-negative literal for ndarray.ndims, got {}",
// i128::try_from(val).unwrap()
// )
// })?;
// The number of dimensions subscripted by the index expression. // assert!(!ndims.is_empty());
// Slicing a ndarray will yield the same number of dimensions, whereas indexing into a
// dimension will remove a dimension.
let subscripted_dims = match &slice.node {
ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| {
if let ExprKind::Slice { .. } = &value_subexpr.node {
acc
} else {
acc + 1
}
}),
ExprKind::Slice { .. } => 0, // // The number of dimensions subscripted by the index expression.
_ => 1, // // Slicing a ndarray will yield the same number of dimensions, whereas indexing into a
}; // // dimension will remove a dimension.
// let subscripted_dims = match &slice.node {
// ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| {
// if let ExprKind::Slice { .. } = &value_subexpr.node {
// acc
// } else {
// acc + 1
// }
// }),
let ndarray_ndims_ty = ctx.unifier.get_fresh_literal( // ExprKind::Slice { .. } => 0,
ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(), // _ => 1,
None, // };
);
let ndarray_ty =
make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty));
let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type();
let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type();
let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum();
// Check that len is non-zero // let ndarray_ndims_ty = ctx.unifier.get_fresh_literal(
let len = v.load_ndims(ctx); // ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(),
ctx.make_assert( // None,
generator, // );
ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(), // let ndarray_ty =
"0:IndexError", // make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty));
"too many indices for array: array is {0}-dimensional but 1 were indexed", // let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type();
[Some(len), None, None], // let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type();
slice.location, // let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum();
);
// Normalizes a possibly-negative index to its corresponding positive index // // Check that len is non-zero
let normalize_index = |generator: &mut G, // let len = v.load_ndims(ctx);
ctx: &mut CodeGenContext<'ctx, '_>, // ctx.make_assert(
index: IntValue<'ctx>, // generator,
dim: u64| { // ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(),
gen_if_else_expr_callback( // "0:IndexError",
generator, // "too many indices for array: array is {0}-dimensional but 1 were indexed",
ctx, // [Some(len), None, None],
|_, ctx| { // slice.location,
Ok(ctx // );
.builder
.build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "")
.unwrap())
},
|_, _| Ok(Some(index)),
|generator, ctx| {
let llvm_i32 = ctx.ctx.i32_type();
let len = unsafe { // // Normalizes a possibly-negative index to its corresponding positive index
v.dim_sizes().get_typed_unchecked( // let normalize_index = |generator: &mut G,
ctx, // ctx: &mut CodeGenContext<'ctx, '_>,
generator, // index: IntValue<'ctx>,
&llvm_usize.const_int(dim, true), // dim: u64| {
None, // gen_if_else_expr_callback(
) // generator,
}; // ctx,
// |_, ctx| {
// Ok(ctx
// .builder
// .build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "")
// .unwrap())
// },
// |_, _| Ok(Some(index)),
// |generator, ctx| {
// let llvm_i32 = ctx.ctx.i32_type();
let index = ctx // let len = unsafe {
.builder // v.dim_sizes().get_typed_unchecked(
.build_int_add( // ctx,
len, // generator,
ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(), // &llvm_usize.const_int(dim, true),
"", // None,
) // )
.unwrap(); // };
Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap())) // let index = ctx
}, // .builder
) // .build_int_add(
.map(|v| v.map(BasicValueEnum::into_int_value)) // len,
}; // ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(),
// "",
// )
// .unwrap();
// Converts a slice expression into a slice-range tuple // Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap()))
let expr_to_slice = |generator: &mut G, // },
ctx: &mut CodeGenContext<'ctx, '_>, // )
node: &ExprKind<Option<Type>>, // .map(|v| v.map(BasicValueEnum::into_int_value))
dim: u64| { // };
match node {
ExprKind::Constant { value: Constant::Int(v), .. } => {
let Some(index) =
normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)?
else {
return Ok(None);
};
Ok(Some((index, index, llvm_i32.const_int(1, true)))) // // Converts a slice expression into a slice-range tuple
} // let expr_to_slice = |generator: &mut G,
// ctx: &mut CodeGenContext<'ctx, '_>,
// node: &ExprKind<Option<Type>>,
// dim: u64| {
// match node {
// ExprKind::Constant { value: Constant::Int(v), .. } => {
// let Some(index) =
// normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)?
// else {
// return Ok(None);
// };
ExprKind::Slice { lower, upper, step } => { // Ok(Some((index, index, llvm_i32.const_int(1, true))))
let dim_sz = unsafe { // }
v.dim_sizes().get_typed_unchecked(
ctx,
generator,
&llvm_usize.const_int(dim, false),
None,
)
};
handle_slice_indices(lower, upper, step, ctx, generator, dim_sz) // ExprKind::Slice { lower, upper, step } => {
} // let dim_sz = unsafe {
// v.dim_sizes().get_typed_unchecked(
// ctx,
// generator,
// &llvm_usize.const_int(dim, false),
// None,
// )
// };
_ => { // handle_slice_indices(lower, upper, step, ctx, generator, dim_sz)
let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) }; // }
let index = index
.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
.into_int_value();
let Some(index) = normalize_index(generator, ctx, index, dim)? else {
return Ok(None);
};
Ok(Some((index, index, llvm_i32.const_int(1, true)))) // _ => {
} // let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) };
} // let index = index
}; // .to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
// .into_int_value();
// let Some(index) = normalize_index(generator, ctx, index, dim)? else {
// return Ok(None);
// };
let make_indices_arr = |generator: &mut G, // Ok(Some((index, index, llvm_i32.const_int(1, true))))
ctx: &mut CodeGenContext<'ctx, '_>| // }
-> Result<_, String> { // }
Ok(if let ExprKind::Tuple { elts, .. } = &slice.node { // };
let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap());
let index_addr = generator.gen_array_var_alloc(
ctx,
llvm_int_ty,
llvm_usize.const_int(elts.len() as u64, false),
None,
)?;
for (i, elt) in elts.iter().enumerate() { // let make_indices_arr = |generator: &mut G,
let Some(index) = generator.gen_expr(ctx, elt)? else { // ctx: &mut CodeGenContext<'ctx, '_>|
return Ok(None); // -> Result<_, String> {
}; // Ok(if let ExprKind::Tuple { elts, .. } = &slice.node {
// let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap());
// let index_addr = generator.gen_array_var_alloc(
// ctx,
// llvm_int_ty,
// llvm_usize.const_int(elts.len() as u64, false),
// None,
// )?;
let index = index // for (i, elt) in elts.iter().enumerate() {
.to_basic_value_enum(ctx, generator, elt.custom.unwrap())? // let Some(index) = generator.gen_expr(ctx, elt)? else {
.into_int_value(); // return Ok(None);
let Some(index) = normalize_index(generator, ctx, index, 0)? else { // };
return Ok(None);
};
let store_ptr = unsafe { // let index = index
index_addr.ptr_offset_unchecked( // .to_basic_value_enum(ctx, generator, elt.custom.unwrap())?
ctx, // .into_int_value();
generator, // let Some(index) = normalize_index(generator, ctx, index, 0)? else {
&llvm_usize.const_int(i as u64, false), // return Ok(None);
None, // };
)
};
ctx.builder.build_store(store_ptr, index).unwrap();
}
Some(index_addr) // let store_ptr = unsafe {
} else if let Some(index) = generator.gen_expr(ctx, slice)? { // index_addr.ptr_offset_unchecked(
let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap()); // ctx,
let index_addr = generator.gen_array_var_alloc( // generator,
ctx, // &llvm_usize.const_int(i as u64, false),
llvm_int_ty, // None,
llvm_usize.const_int(1u64, false), // )
None, // };
)?; // ctx.builder.build_store(store_ptr, index).unwrap();
// }
let index = // Some(index_addr)
index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value(); // } else if let Some(index) = generator.gen_expr(ctx, slice)? {
let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) }; // let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap());
// let index_addr = generator.gen_array_var_alloc(
// ctx,
// llvm_int_ty,
// llvm_usize.const_int(1u64, false),
// None,
// )?;
let store_ptr = unsafe { // let index =
index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None) // index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value();
}; // let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) };
ctx.builder.build_store(store_ptr, index).unwrap();
Some(index_addr) // let store_ptr = unsafe {
} else { // index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
None // };
}) // ctx.builder.build_store(store_ptr, index).unwrap();
};
Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 { // Some(index_addr)
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) }; // } else {
// None
// })
// };
v.data().get(ctx, generator, &index_addr, None).into() // Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 {
} else { // let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
match &slice.node {
ExprKind::Tuple { elts, .. } => {
let slices = elts
.iter()
.enumerate()
.map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64))
.take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some))
.collect::<Result<Vec<_>, _>>()?;
if slices.len() < elts.len() {
return Ok(None);
}
let slices = slices.into_iter().map(Option::unwrap).collect_vec(); // v.data().get(ctx, generator, &index_addr, None).into()
// } else {
// match &slice.node {
// ExprKind::Tuple { elts, .. } => {
// let slices = elts
// .iter()
// .enumerate()
// .map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64))
// .take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some))
// .collect::<Result<Vec<_>, _>>()?;
// if slices.len() < elts.len() {
// return Ok(None);
// }
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into() // let slices = slices.into_iter().map(Option::unwrap).collect_vec();
}
ExprKind::Slice { .. } => { // numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into()
let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else { // }
return Ok(None);
};
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into() // ExprKind::Slice { .. } => {
} // let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else {
// return Ok(None);
// };
_ => { // numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into()
// Accessing an element from a multi-dimensional `ndarray` // }
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) }; // _ => {
// // Accessing an element from a multi-dimensional `ndarray`
// Create a new array, remove the top dimension from the dimension-size-list, and copy the // let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
// elements over
let subscripted_ndarray =
generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?;
let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None);
let num_dims = v.load_ndims(ctx); // // Create a new array, remove the top dimension from the dimension-size-list, and copy the
ndarray.store_ndims( // // elements over
ctx, // let subscripted_ndarray =
generator, // generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?;
ctx.builder // let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None);
.build_int_sub(num_dims, llvm_usize.const_int(1, false), "")
.unwrap(),
);
let ndarray_num_dims = ndarray.load_ndims(ctx); // let num_dims = v.load_ndims(ctx);
ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims); // ndarray.store_ndims(
// ctx,
// generator,
// ctx.builder
// .build_int_sub(num_dims, llvm_usize.const_int(1, false), "")
// .unwrap(),
// );
let ndarray_num_dims = ndarray.load_ndims(ctx); // let ndarray_num_dims = ndarray.load_ndims(ctx);
let v_dims_src_ptr = unsafe { // ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims);
v.dim_sizes().ptr_offset_unchecked(
ctx,
generator,
&llvm_usize.const_int(1, false),
None,
)
};
call_memcpy_generic(
ctx,
ndarray.dim_sizes().base_ptr(ctx, generator),
v_dims_src_ptr,
ctx.builder
.build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "")
.map(Into::into)
.unwrap(),
llvm_i1.const_zero(),
);
let ndarray_num_elems = call_ndarray_calc_size( // let ndarray_num_dims = ndarray.load_ndims(ctx);
generator, // let v_dims_src_ptr = unsafe {
ctx, // v.dim_sizes().ptr_offset_unchecked(
&ndarray.dim_sizes().as_slice_value(ctx, generator), // ctx,
(None, None), // generator,
); // &llvm_usize.const_int(1, false),
ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems); // None,
// )
// };
// call_memcpy_generic(
// ctx,
// ndarray.dim_sizes().base_ptr(ctx, generator),
// v_dims_src_ptr,
// ctx.builder
// .build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "")
// .map(Into::into)
// .unwrap(),
// llvm_i1.const_zero(),
// );
let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None); // let ndarray_num_elems = call_ndarray_calc_size(
call_memcpy_generic( // generator,
ctx, // ctx,
ndarray.data().base_ptr(ctx, generator), // &ndarray.dim_sizes().as_slice_value(ctx, generator),
v_data_src_ptr, // (None, None),
ctx.builder // );
.build_int_mul( // ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems);
ndarray_num_elems,
llvm_ndarray_data_t.size_of().unwrap(),
"",
)
.map(Into::into)
.unwrap(),
llvm_i1.const_zero(),
);
ndarray.as_base_value().into() // let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None);
} // call_memcpy_generic(
} // ctx,
})) // ndarray.data().base_ptr(ctx, generator),
// v_data_src_ptr,
// ctx.builder
// .build_int_mul(
// ndarray_num_elems,
// llvm_ndarray_data_t.size_of().unwrap(),
// "",
// )
// .map(Into::into)
// .unwrap(),
// llvm_i1.const_zero(),
// );
// ndarray.as_base_value().into()
// }
// }
// }))
} }
/// See [`CodeGenerator::gen_expr`]. /// See [`CodeGenerator::gen_expr`].

View File

@ -1,414 +0,0 @@
using int8_t = _BitInt(8);
using uint8_t = unsigned _BitInt(8);
using int32_t = _BitInt(32);
using uint32_t = unsigned _BitInt(32);
using int64_t = _BitInt(64);
using uint64_t = unsigned _BitInt(64);
// NDArray indices are always `uint32_t`.
using NDIndex = uint32_t;
// The type of an index or a value describing the length of a range/slice is always `int32_t`.
using SliceIndex = int32_t;
namespace {
template <typename T>
const T& max(const T& a, const T& b) {
return a > b ? a : b;
}
template <typename T>
const T& min(const T& a, const T& b) {
return a > b ? b : a;
}
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
T __nac3_int_exp_impl(T base, T exp) {
T res = 1;
/* repeated squaring method */
do {
if (exp & 1) {
res *= base; /* for n odd */
}
exp >>= 1;
base *= base;
} while (exp);
return res;
}
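// Spot checks (values assumed, not from a test suite) showing the
// O(log exp) cost of repeated squaring:
//   __nac3_int_exp_impl<int32_t>(3, 5);   // == 243
//   __nac3_int_exp_impl<int32_t>(2, 10);  // == 1024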
template <typename SizeT>
SizeT __nac3_ndarray_calc_size_impl(
const SizeT* list_data,
SizeT list_len,
SizeT begin_idx,
SizeT end_idx
) {
__builtin_assume(end_idx <= list_len);
SizeT num_elems = 1;
for (SizeT i = begin_idx; i < end_idx; ++i) {
SizeT val = list_data[i];
__builtin_assume(val > 0);
num_elems *= val;
}
return num_elems;
}
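// Worked example (values assumed): the element count is the product of the
// dimensions in [begin_idx, end_idx):
//   uint32_t dims[3] = {2, 3, 4};
//   __nac3_ndarray_calc_size_impl<uint32_t>(dims, 3, 0, 3);  // == 24
//   __nac3_ndarray_calc_size_impl<uint32_t>(dims, 3, 1, 3);  // == 12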
template <typename SizeT>
void __nac3_ndarray_calc_nd_indices_impl(
SizeT index,
const SizeT* dims,
SizeT num_dims,
NDIndex* idxs
) {
SizeT stride = 1;
for (SizeT dim = 0; dim < num_dims; dim++) {
SizeT i = num_dims - dim - 1;
__builtin_assume(dims[i] > 0);
idxs[i] = (index / stride) % dims[i];
stride *= dims[i];
}
}
template <typename SizeT>
SizeT __nac3_ndarray_flatten_index_impl(
const SizeT* dims,
SizeT num_dims,
const NDIndex* indices,
SizeT num_indices
) {
SizeT idx = 0;
SizeT stride = 1;
for (SizeT i = 0; i < num_dims; ++i) {
SizeT ri = num_dims - i - 1;
if (ri < num_indices) {
idx += stride * indices[ri];
}
__builtin_assume(dims[i] > 0);
stride *= dims[ri];
}
return idx;
}
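// Round-trip sketch (values assumed): for in-range indices, flatten_index
// inverts calc_nd_indices under C (row-major) ordering:
//   uint32_t dims[3] = {2, 3, 4};
//   NDIndex idxs[3];
//   __nac3_ndarray_calc_nd_indices_impl<uint32_t>(17, dims, 3, idxs);  // idxs == {1, 1, 1}
//   __nac3_ndarray_flatten_index_impl<uint32_t>(dims, 3, idxs, 3);     // == 17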
template <typename SizeT>
void __nac3_ndarray_calc_broadcast_impl(
const SizeT* lhs_dims,
SizeT lhs_ndims,
const SizeT* rhs_dims,
SizeT rhs_ndims,
SizeT* out_dims
) {
SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;
for (SizeT i = 0; i < max_ndims; ++i) {
const SizeT* lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
const SizeT* rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;
SizeT* out_dim = &out_dims[max_ndims - i - 1];
if (lhs_dim_sz == nullptr) {
*out_dim = *rhs_dim_sz;
} else if (rhs_dim_sz == nullptr) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == 1) {
*out_dim = *rhs_dim_sz;
} else if (*rhs_dim_sz == 1) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == *rhs_dim_sz) {
*out_dim = *lhs_dim_sz;
} else {
__builtin_unreachable();
}
}
}
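// Worked example (values assumed): right-aligned, a 1 stretches to match the
// other operand, and a missing dimension copies the other operand's size:
//   uint32_t lhs[3] = {256, 1, 3}, rhs[2] = {4, 1}, out[3];
//   __nac3_ndarray_calc_broadcast_impl<uint32_t>(lhs, 3, rhs, 2, out);
//   // out == {256, 4, 3}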
template <typename SizeT>
void __nac3_ndarray_calc_broadcast_idx_impl(
const SizeT* src_dims,
SizeT src_ndims,
const NDIndex* in_idx,
NDIndex* out_idx
) {
for (SizeT i = 0; i < src_ndims; ++i) {
SizeT src_i = src_ndims - i - 1;
out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
}
}
} // namespace
extern "C" {
#define DEF_nac3_int_exp_(T) \
T __nac3_int_exp_##T(T base, T exp) {\
return __nac3_int_exp_impl(base, exp);\
}
DEF_nac3_int_exp_(int32_t)
DEF_nac3_int_exp_(int64_t)
DEF_nac3_int_exp_(uint32_t)
DEF_nac3_int_exp_(uint64_t)
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
if (i < 0) {
i = len + i;
}
if (i < 0) {
return 0;
} else if (i > len) {
return len;
}
return i;
}
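// Spot checks (values assumed), mirroring Python's index clamping for a
// sequence of length 5:
//   __nac3_slice_index_bound(-1, 5);  // == 4  (negative indices count from the end)
//   __nac3_slice_index_bound(-9, 5);  // == 0  (clamped to the front)
//   __nac3_slice_index_bound(7, 5);   // == 5  (clamped to the length)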
SliceIndex __nac3_range_slice_len(
const SliceIndex start,
const SliceIndex end,
const SliceIndex step
) {
SliceIndex diff = end - start;
if (diff > 0 && step > 0) {
return ((diff - 1) / step) + 1;
} else if (diff < 0 && step < 0) {
return ((diff + 1) / step) + 1;
} else {
return 0;
}
}
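// Spot checks (values assumed): the Python slice 1:10:3 selects {1, 4, 7}:
//   __nac3_range_slice_len(1, 10, 3);   // == 3  ((9 - 1) / 3 + 1)
//   __nac3_range_slice_len(10, 1, -4);  // == 3  ({10, 6, 2})
//   __nac3_range_slice_len(5, 5, 1);    // == 0  (empty range)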
// Handle list slice assignment, dropping part of the list when
// both dest_step and src_step are +1.
// - No index may be out-of-bounds or negative,
// - The end indices are *inclusive*,
// - The relative sizes of the src and dest slices must already have been
//   checked: if dest.step == 1 then len(src) <= len(dest), else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
SliceIndex dest_start,
SliceIndex dest_end,
SliceIndex dest_step,
uint8_t* dest_arr,
SliceIndex dest_arr_len,
SliceIndex src_start,
SliceIndex src_end,
SliceIndex src_step,
uint8_t* src_arr,
SliceIndex src_arr_len,
const SliceIndex size
) {
/* if dest_arr_len == 0, do nothing since we do not support extending lists */
if (dest_arr_len == 0) return dest_arr_len;
/* if both steps are 1, memmove directly, handle the dropping of the list, and shrink the size */
if (src_step == dest_step && dest_step == 1) {
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
if (src_len > 0) {
__builtin_memmove(
dest_arr + dest_start * size,
src_arr + src_start * size,
src_len * size
);
}
if (dest_len > 0) {
/* dropping */
__builtin_memmove(
dest_arr + (dest_start + src_len) * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
}
/* shrink size */
return dest_arr_len - (dest_len - src_len);
}
/* if two range overlaps, need alloca */
uint8_t need_alloca =
(dest_arr == src_arr)
&& !(
max(dest_start, dest_end) < min(src_start, src_end)
|| max(src_start, src_end) < min(dest_start, dest_end)
);
if (need_alloca) {
uint8_t* tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
src_arr = tmp;
}
SliceIndex src_ind = src_start;
SliceIndex dest_ind = dest_start;
for (;
(src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
src_ind += src_step, dest_ind += dest_step
) {
/* for constant optimization */
if (size == 1) {
__builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
} else if (size == 4) {
__builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
} else if (size == 8) {
__builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
} else {
/* memcpy for var size, cannot overlap after previous alloca */
__builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
}
}
/* only when dest_step == 1 can we shrink the dest list. */
/* the size precondition must be ensured prior to calling this function */
if (dest_step == 1 && dest_end >= dest_start) {
__builtin_memmove(
dest_arr + dest_ind * size,
dest_arr + (dest_end + 1) * size,
(dest_arr_len - dest_end - 1) * size
);
return dest_arr_len - (dest_end - dest_ind) - 1;
}
return dest_arr_len;
}
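// Usage sketch (values assumed): the Python statement `lst[0:2] = [9]` on a
// byte list {1, 2, 3, 4} copies one element, shifts the tail down, and
// returns the new length 3 (end indices are inclusive, so slice 0:2 becomes
// dest_start = 0, dest_end = 1):
//   uint8_t dest[4] = {1, 2, 3, 4};
//   uint8_t src[1] = {9};
//   __nac3_list_slice_assign_var_size(0, 1, 1, dest, 4,
//                                     0, 0, 1, src, 1,
//                                     /* size */ 1);
//   // returns 3; dest now begins with {9, 3, 4}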
int32_t __nac3_isinf(double x) {
return __builtin_isinf(x);
}
int32_t __nac3_isnan(double x) {
return __builtin_isnan(x);
}
double tgamma(double arg);
double __nac3_gamma(double z) {
// Handling of special values
// | x | Python gamma(x) | C tgamma(x) |
// --- | ----------------- | --------------- | ----------- |
// (1) | nan | nan | nan |
// (2) | -inf | -inf | inf |
// (3) | inf | inf | inf |
// (4) | 0.0 | inf | inf |
// (5) | {-1.0, -2.0, ...} | inf | nan |
// (1)-(3)
if (__builtin_isinf(z) || __builtin_isnan(z)) {
return z;
}
double v = tgamma(z);
// (4)-(5)
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}
double lgamma(double arg);
double __nac3_gammaln(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: gammaln(-inf) -> -inf
// - libm : lgamma(-inf) -> inf
if (__builtin_isinf(x)) {
return x;
}
return lgamma(x);
}
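// Spot checks (values assumed) of the scipy-compatible behaviour the two
// wrappers above pin down:
//   __nac3_gamma(0.0);          // == +inf (tgamma agrees)
//   __nac3_gamma(-2.0);         // == +inf (tgamma alone would give nan)
//   __nac3_gammaln(-INFINITY);  // == -inf (lgamma alone would give +inf)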
double j0(double x);
double __nac3_j0(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: j0(inf) -> nan
// - libm : j0(inf) -> 0.0
if (__builtin_isinf(x)) {
return __builtin_nan("");
}
return j0(x);
}
uint32_t __nac3_ndarray_calc_size(
const uint32_t* list_data,
uint32_t list_len,
uint32_t begin_idx,
uint32_t end_idx
) {
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}
uint64_t __nac3_ndarray_calc_size64(
const uint64_t* list_data,
uint64_t list_len,
uint64_t begin_idx,
uint64_t end_idx
) {
return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}
void __nac3_ndarray_calc_nd_indices(
uint32_t index,
const uint32_t* dims,
uint32_t num_dims,
NDIndex* idxs
) {
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}
void __nac3_ndarray_calc_nd_indices64(
uint64_t index,
const uint64_t* dims,
uint64_t num_dims,
NDIndex* idxs
) {
__nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}
uint32_t __nac3_ndarray_flatten_index(
const uint32_t* dims,
uint32_t num_dims,
const NDIndex* indices,
uint32_t num_indices
) {
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}
uint64_t __nac3_ndarray_flatten_index64(
const uint64_t* dims,
uint64_t num_dims,
const NDIndex* indices,
uint64_t num_indices
) {
return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}
void __nac3_ndarray_calc_broadcast(
const uint32_t* lhs_dims,
uint32_t lhs_ndims,
const uint32_t* rhs_dims,
uint32_t rhs_ndims,
uint32_t* out_dims
) {
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}
void __nac3_ndarray_calc_broadcast64(
const uint64_t* lhs_dims,
uint64_t lhs_ndims,
const uint64_t* rhs_dims,
uint64_t rhs_ndims,
uint64_t* out_dims
) {
return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}
void __nac3_ndarray_calc_broadcast_idx(
const uint32_t* src_dims,
uint32_t src_ndims,
const NDIndex* in_idx,
NDIndex* out_idx
) {
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
void __nac3_ndarray_calc_broadcast_idx64(
const uint64_t* src_dims,
uint64_t src_ndims,
const NDIndex* in_idx,
NDIndex* out_idx
) {
__nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
} // extern "C"

View File

@ -1,9 +1,11 @@
use crate::typecheck::typedef::Type; use crate::{typecheck::typedef::Type, util::SizeVariant};
mod test;
use super::{ use super::{
classes::{ classes::{
ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue, NDArrayValue, check_basic_types_match, ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue,
TypedArrayLikeAdapter, UntypedArrayLikeAccessor, NDArrayValue, NpArrayType, NpArrayValue, TypedArrayLikeAdapter, UntypedArrayLikeAccessor,
}, },
llvm_intrinsics, CodeGenContext, CodeGenerator, llvm_intrinsics, CodeGenContext, CodeGenerator,
}; };
@ -14,8 +16,8 @@ use inkwell::{
context::Context, context::Context,
memory_buffer::MemoryBuffer, memory_buffer::MemoryBuffer,
module::Module, module::Module,
types::{BasicTypeEnum, IntType}, types::{BasicType, BasicTypeEnum, FunctionType, IntType, PointerType},
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue}, values::{BasicValue, BasicValueEnum, CallSiteValue, FloatValue, FunctionValue, IntValue},
AddressSpace, IntPredicate, AddressSpace, IntPredicate,
}; };
use itertools::Either; use itertools::Either;
@ -563,367 +565,475 @@ pub fn call_j0<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> Flo
.unwrap() .unwrap()
} }
// /// Generates a call to `__nac3_ndarray_calc_size`. Returns an [`IntValue`] representing the
// /// calculated total size.
// ///
// /// * `dims` - An [`ArrayLikeIndexer`] containing the size of each dimension.
// /// * `range` - The dimension index to begin and end (exclusively) calculating the dimensions for,
// /// or [`None`] if starting from the first dimension and ending at the last dimension respectively.
// pub fn call_ndarray_calc_size<'ctx, G, Dims>(
// generator: &G,
// ctx: &CodeGenContext<'ctx, '_>,
// dims: &Dims,
// (begin, end): (Option<IntValue<'ctx>>, Option<IntValue<'ctx>>),
// ) -> IntValue<'ctx>
// where
// G: CodeGenerator + ?Sized,
// Dims: ArrayLikeIndexer<'ctx>,
// {
// let llvm_usize = generator.get_size_type(ctx.ctx);
// let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
//
// let ndarray_calc_size_fn_name = match llvm_usize.get_bit_width() {
// 32 => "__nac3_ndarray_calc_size",
// 64 => "__nac3_ndarray_calc_size64",
// bw => unreachable!("Unsupported size type bit width: {}", bw),
// };
// let ndarray_calc_size_fn_t = llvm_usize.fn_type(
// &[llvm_pusize.into(), llvm_usize.into(), llvm_usize.into(), llvm_usize.into()],
// false,
// );
// let ndarray_calc_size_fn =
// ctx.module.get_function(ndarray_calc_size_fn_name).unwrap_or_else(|| {
// ctx.module.add_function(ndarray_calc_size_fn_name, ndarray_calc_size_fn_t, None)
// });
//
// let begin = begin.unwrap_or_else(|| llvm_usize.const_zero());
// let end = end.unwrap_or_else(|| dims.size(ctx, generator));
// ctx.builder
// .build_call(
// ndarray_calc_size_fn,
// &[
// dims.base_ptr(ctx, generator).into(),
// dims.size(ctx, generator).into(),
// begin.into(),
// end.into(),
// ],
// "",
// )
// .map(CallSiteValue::try_as_basic_value)
// .map(|v| v.map_left(BasicValueEnum::into_int_value))
// .map(Either::unwrap_left)
// .unwrap()
// }
//
// /// Generates a call to `__nac3_ndarray_calc_nd_indices`. Returns a [`TypedArrayLikeAdapter`]
// /// containing `i32` indices of the flattened index.
// ///
// /// * `index` - The index to compute the multidimensional index for.
// /// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
// /// `NDArray`.
// pub fn call_ndarray_calc_nd_indices<'ctx, G: CodeGenerator + ?Sized>(
// generator: &G,
// ctx: &mut CodeGenContext<'ctx, '_>,
// index: IntValue<'ctx>,
// ndarray: NDArrayValue<'ctx>,
// ) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
// let llvm_void = ctx.ctx.void_type();
// let llvm_i32 = ctx.ctx.i32_type();
// let llvm_usize = generator.get_size_type(ctx.ctx);
// let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
// let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
//
// let ndarray_calc_nd_indices_fn_name = match llvm_usize.get_bit_width() {
// 32 => "__nac3_ndarray_calc_nd_indices",
// 64 => "__nac3_ndarray_calc_nd_indices64",
// bw => unreachable!("Unsupported size type bit width: {}", bw),
// };
// let ndarray_calc_nd_indices_fn =
// ctx.module.get_function(ndarray_calc_nd_indices_fn_name).unwrap_or_else(|| {
// let fn_type = llvm_void.fn_type(
// &[llvm_usize.into(), llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into()],
// false,
// );
//
// ctx.module.add_function(ndarray_calc_nd_indices_fn_name, fn_type, None)
// });
//
// let ndarray_num_dims = ndarray.load_ndims(ctx);
// let ndarray_dims = ndarray.dim_sizes();
//
// let indices = ctx.builder.build_array_alloca(llvm_i32, ndarray_num_dims, "").unwrap();
//
// ctx.builder
// .build_call(
// ndarray_calc_nd_indices_fn,
// &[
// index.into(),
// ndarray_dims.base_ptr(ctx, generator).into(),
// ndarray_num_dims.into(),
// indices.into(),
// ],
// "",
// )
// .unwrap();
//
// TypedArrayLikeAdapter::from(
// ArraySliceValue::from_ptr_val(indices, ndarray_num_dims, None),
// Box::new(|_, v| v.into_int_value()),
// Box::new(|_, v| v.into()),
// )
// }
//
// fn call_ndarray_flatten_index_impl<'ctx, G, Indices>(
// generator: &G,
// ctx: &CodeGenContext<'ctx, '_>,
// ndarray: NDArrayValue<'ctx>,
// indices: &Indices,
// ) -> IntValue<'ctx>
// where
// G: CodeGenerator + ?Sized,
// Indices: ArrayLikeIndexer<'ctx>,
// {
// let llvm_i32 = ctx.ctx.i32_type();
// let llvm_usize = generator.get_size_type(ctx.ctx);
//
// let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
// let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
//
// debug_assert_eq!(
// IntType::try_from(indices.element_type(ctx, generator))
// .map(IntType::get_bit_width)
// .unwrap_or_default(),
// llvm_i32.get_bit_width(),
// "Expected i32 value for argument `indices` to `call_ndarray_flatten_index_impl`"
// );
// debug_assert_eq!(
// indices.size(ctx, generator).get_type().get_bit_width(),
// llvm_usize.get_bit_width(),
// "Expected usize integer value for argument `indices_size` to `call_ndarray_flatten_index_impl`"
// );
//
// let ndarray_flatten_index_fn_name = match llvm_usize.get_bit_width() {
// 32 => "__nac3_ndarray_flatten_index",
// 64 => "__nac3_ndarray_flatten_index64",
// bw => unreachable!("Unsupported size type bit width: {}", bw),
// };
// let ndarray_flatten_index_fn =
// ctx.module.get_function(ndarray_flatten_index_fn_name).unwrap_or_else(|| {
// let fn_type = llvm_usize.fn_type(
// &[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_usize.into()],
// false,
// );
//
// ctx.module.add_function(ndarray_flatten_index_fn_name, fn_type, None)
// });
//
// let ndarray_num_dims = ndarray.load_ndims(ctx);
// let ndarray_dims = ndarray.dim_sizes();
//
// let index = ctx
// .builder
// .build_call(
// ndarray_flatten_index_fn,
// &[
// ndarray_dims.base_ptr(ctx, generator).into(),
// ndarray_num_dims.into(),
// indices.base_ptr(ctx, generator).into(),
// indices.size(ctx, generator).into(),
// ],
// "",
// )
// .map(CallSiteValue::try_as_basic_value)
// .map(|v| v.map_left(BasicValueEnum::into_int_value))
// .map(Either::unwrap_left)
// .unwrap();
//
// index
// }
//
// /// Generates a call to `__nac3_ndarray_flatten_index`. Returns the flattened index for the
// /// multidimensional index.
// ///
// /// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
// /// `NDArray`.
// /// * `indices` - The multidimensional index to compute the flattened index for.
// pub fn call_ndarray_flatten_index<'ctx, G, Index>(
// generator: &mut G,
// ctx: &mut CodeGenContext<'ctx, '_>,
// ndarray: NDArrayValue<'ctx>,
// indices: &Index,
// ) -> IntValue<'ctx>
// where
// G: CodeGenerator + ?Sized,
// Index: ArrayLikeIndexer<'ctx>,
// {
// call_ndarray_flatten_index_impl(generator, ctx, ndarray, indices)
// }
//
// /// Generates a call to `__nac3_ndarray_calc_broadcast`. Returns a tuple containing the number of
// /// dimension and size of each dimension of the resultant `ndarray`.
// pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>(
// generator: &mut G,
// ctx: &mut CodeGenContext<'ctx, '_>,
// lhs: NDArrayValue<'ctx>,
// rhs: NDArrayValue<'ctx>,
// ) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
// let llvm_usize = generator.get_size_type(ctx.ctx);
// let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
//
// let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
// 32 => "__nac3_ndarray_calc_broadcast",
// 64 => "__nac3_ndarray_calc_broadcast64",
// bw => unreachable!("Unsupported size type bit width: {}", bw),
// };
// let ndarray_calc_broadcast_fn =
// ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
// let fn_type = llvm_usize.fn_type(
// &[
// llvm_pusize.into(),
// llvm_usize.into(),
// llvm_pusize.into(),
// llvm_usize.into(),
// llvm_pusize.into(),
// ],
// false,
// );
//
// ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
// });
//
// let lhs_ndims = lhs.load_ndims(ctx);
// let rhs_ndims = rhs.load_ndims(ctx);
// let min_ndims = llvm_intrinsics::call_int_umin(ctx, lhs_ndims, rhs_ndims, None);
//
// gen_for_callback_incrementing(
// generator,
// ctx,
// llvm_usize.const_zero(),
// (min_ndims, false),
// |generator, ctx, _, idx| {
// let idx = ctx.builder.build_int_sub(min_ndims, idx, "").unwrap();
// let (lhs_dim_sz, rhs_dim_sz) = unsafe {
// (
// lhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
// rhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
// )
// };
//
// let llvm_usize_const_one = llvm_usize.const_int(1, false);
// let lhs_eqz = ctx
// .builder
// .build_int_compare(IntPredicate::EQ, lhs_dim_sz, llvm_usize_const_one, "")
// .unwrap();
// let rhs_eqz = ctx
// .builder
// .build_int_compare(IntPredicate::EQ, rhs_dim_sz, llvm_usize_const_one, "")
// .unwrap();
// let lhs_or_rhs_eqz = ctx.builder.build_or(lhs_eqz, rhs_eqz, "").unwrap();
//
// let lhs_eq_rhs = ctx
// .builder
// .build_int_compare(IntPredicate::EQ, lhs_dim_sz, rhs_dim_sz, "")
// .unwrap();
//
// let is_compatible = ctx.builder.build_or(lhs_or_rhs_eqz, lhs_eq_rhs, "").unwrap();
//
// ctx.make_assert(
// generator,
// is_compatible,
// "0:ValueError",
// "operands could not be broadcast together",
// [None, None, None],
// ctx.current_loc,
// );
//
// Ok(())
// },
// llvm_usize.const_int(1, false),
// )
// .unwrap();
//
// let max_ndims = llvm_intrinsics::call_int_umax(ctx, lhs_ndims, rhs_ndims, None);
// let lhs_dims = lhs.dim_sizes().base_ptr(ctx, generator);
// let lhs_ndims = lhs.load_ndims(ctx);
// let rhs_dims = rhs.dim_sizes().base_ptr(ctx, generator);
// let rhs_ndims = rhs.load_ndims(ctx);
// let out_dims = ctx.builder.build_array_alloca(llvm_usize, max_ndims, "").unwrap();
// let out_dims = ArraySliceValue::from_ptr_val(out_dims, max_ndims, None);
//
// ctx.builder
// .build_call(
// ndarray_calc_broadcast_fn,
// &[
// lhs_dims.into(),
// lhs_ndims.into(),
// rhs_dims.into(),
// rhs_ndims.into(),
// out_dims.base_ptr(ctx, generator).into(),
// ],
// "",
// )
// .unwrap();
//
// TypedArrayLikeAdapter::from(
// out_dims,
// Box::new(|_, v| v.into_int_value()),
// Box::new(|_, v| v.into()),
// )
// }
//
// /// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns an [`ArrayAllocaValue`]
// /// containing the indices used for accessing `array` corresponding to the index of the broadcasted
// /// array `broadcast_idx`.
// pub fn call_ndarray_calc_broadcast_index<
// 'ctx,
// G: CodeGenerator + ?Sized,
// BroadcastIdx: UntypedArrayLikeAccessor<'ctx>,
// >(
// generator: &mut G,
// ctx: &mut CodeGenContext<'ctx, '_>,
// array: NDArrayValue<'ctx>,
// broadcast_idx: &BroadcastIdx,
// ) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
// let llvm_i32 = ctx.ctx.i32_type();
// let llvm_usize = generator.get_size_type(ctx.ctx);
// let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
// let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
//
// let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
// 32 => "__nac3_ndarray_calc_broadcast_idx",
// 64 => "__nac3_ndarray_calc_broadcast_idx64",
// bw => unreachable!("Unsupported size type bit width: {}", bw),
// };
// let ndarray_calc_broadcast_fn =
// ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
// let fn_type = llvm_usize.fn_type(
// &[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_pi32.into()],
// false,
// );
//
// ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
// });
//
// let broadcast_size = broadcast_idx.size(ctx, generator);
// let out_idx = ctx.builder.build_array_alloca(llvm_i32, broadcast_size, "").unwrap();
//
// let array_dims = array.dim_sizes().base_ptr(ctx, generator);
// let array_ndims = array.load_ndims(ctx);
// let broadcast_idx_ptr = unsafe {
// broadcast_idx.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
// };
//
// ctx.builder
// .build_call(
// ndarray_calc_broadcast_fn,
// &[array_dims.into(), array_ndims.into(), broadcast_idx_ptr.into(), out_idx.into()],
// "",
// )
// .unwrap();
//
// TypedArrayLikeAdapter::from(
// ArraySliceValue::from_ptr_val(out_idx, broadcast_size, None),
// Box::new(|_, v| v.into_int_value()),
// Box::new(|_, v| v.into()),
// )
// }
fn get_size_variant<'ctx>(ty: IntType<'ctx>) -> SizeVariant {
match ty.get_bit_width() {
32 => SizeVariant::Bits32,
64 => SizeVariant::Bits64,
_ => unreachable!("Unsupported int type bit width {}", ty.get_bit_width()),
}
}

fn get_size_type_dependent_function<'ctx, BuildFuncTypeFn>(
ctx: &CodeGenContext<'ctx, '_>,
size_type: IntType<'ctx>,
base_name: &str,
build_func_type: BuildFuncTypeFn,
) -> FunctionValue<'ctx>
where
BuildFuncTypeFn: Fn() -> FunctionType<'ctx>,
{
let mut fn_name = base_name.to_owned();
match get_size_variant(size_type) {
SizeVariant::Bits32 => {
// The original fn_name is the correct function name
}
SizeVariant::Bits64 => {
// Append "64" at the end, this is the naming convention for 64-bit
fn_name.push_str("64");
}
}
// Get the corresponding function, declaring it first if it does not exist yet
ctx.module.get_function(&fn_name).unwrap_or_else(|| {
let fn_type = build_func_type();
ctx.module.add_function(&fn_name, fn_type, None)
})
}
fn get_irrt_ndarray_ptr_type<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
size_type: IntType<'ctx>,
) -> PointerType<'ctx> {
let i8_type = ctx.ctx.i8_type();
let ndarray_ty = NpArrayType { size_type, elem_type: i8_type.as_basic_type_enum() };
let struct_ty = ndarray_ty.get_struct_type(ctx.ctx);
struct_ty.ptr_type(AddressSpace::default())
}
fn get_irrt_opaque_uint8_ptr_type<'ctx>(ctx: &CodeGenContext<'ctx, '_>) -> PointerType<'ctx> {
ctx.ctx.i8_type().ptr_type(AddressSpace::default())
}
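Reviewer note: the element type is deliberately fixed to `i8` here. The IRRT entry points receive the ndarray as an `NDArray<SizeT>*` plus raw `uint8_t*` data pointers (see the `uint8_t* pvalue` parameter below), so a single exported symbol can serve any dtype; the struct type built here presumably only needs to match layout, not element semantics.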
pub fn call_nac3_ndarray_size<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
ndarray: NpArrayValue<'ctx>,
) -> IntValue<'ctx> {
let size_type = ndarray.ty.size_type;
let function = get_size_type_dependent_function(ctx, size_type, "__nac3_ndarray_size", || {
size_type.fn_type(&[get_irrt_ndarray_ptr_type(ctx, size_type).into()], false)
});

ctx.builder
.build_call(function, &[ndarray.ptr.into()], "size")
.unwrap()
.try_as_basic_value()
.unwrap_left()
.into_int_value()
}
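A usage illustration only (hypothetical call site, not part of this diff), assuming an `ndarray: NpArrayValue<'ctx>` built elsewhere:

// let num_elements = call_nac3_ndarray_size(ctx, ndarray);
// `num_elements` has the ndarray's size type (i.e. usize on the target)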
pub fn call_nac3_ndarray_fill_generic<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
ndarray: NpArrayValue<'ctx>,
fill_value: BasicValueEnum<'ctx>,
) {
// Sanity check on type of `fill_value`
check_basic_types_match(ndarray.ty.elem_type, fill_value.get_type().as_basic_type_enum())
.unwrap();

let size_type = ndarray.ty.size_type;
let function =
get_size_type_dependent_function(ctx, size_type, "__nac3_ndarray_fill_generic", || {
ctx.ctx.void_type().fn_type(
&[
get_irrt_ndarray_ptr_type(ctx, size_type).into(), // NDArray<SizeT>* ndarray
get_irrt_opaque_uint8_ptr_type(ctx).into(), // uint8_t* pvalue
],
false,
)
});

// Put `fill_value` onto the stack and get a pointer to it, and that pointer will be `pvalue`
let pvalue = ctx.builder.build_alloca(ndarray.ty.elem_type, "fill_value").unwrap();
ctx.builder.build_store(pvalue, fill_value).unwrap();

// Cast `pvalue` to `uint8_t*`
let pvalue =
ctx.builder.build_pointer_cast(pvalue, get_irrt_opaque_uint8_ptr_type(ctx), "").unwrap();

// Call the IRRT function
ctx.builder
.build_call(
function,
&[
ndarray.ptr.into(), // ndarray
pvalue.into(), // pvalue
],
"",
)
.unwrap();
}
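A hedged usage sketch: this is how a future `gen_ndarray_full` might drive the generic fill (illustration only; in this commit `gen_ndarray_full` itself is still `todo!()`, see the builtins diff below):

// let fill_value = args[1].1.clone().to_basic_value_enum(ctx, generator, elem_ty)?;
// call_nac3_ndarray_fill_generic(ctx, ndarray, fill_value);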

View File

@ -0,0 +1,26 @@
#[cfg(test)]
mod tests {
use std::{path::Path, process::Command};
#[test]
fn run_irrt_test() {
assert!(
cfg!(feature = "test"),
"Please do `cargo test -F test` to compile `irrt_test.out` and run test"
);
let irrt_test_out_path = Path::new(concat!(env!("OUT_DIR"), "/irrt_test.out"));
let output = Command::new(irrt_test_out_path.to_str().unwrap()).output().unwrap();
if !output.status.success() {
eprintln!("irrt_test failed with status {}:", output.status);
eprintln!("====== stdout ======");
eprintln!("{}", String::from_utf8(output.stdout).unwrap());
eprintln!("====== stderr ======");
eprintln!("{}", String::from_utf8(output.stderr).unwrap());
eprintln!("====================");
panic!("irrt_test failed");
}
}
}
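The runner above expects `build.rs` to have already produced `$OUT_DIR/irrt_test.out`. A minimal sketch of that counterpart, assuming the `clang-irrt-test` symlink from flake.nix and a source file named `irrt_test.cpp` (the file name and flags are assumptions, not the actual build.rs change):

use std::{env, path::Path, process::Command};

fn compile_irrt_test(irrt_dir: &Path, out_dir: &Path) {
    // Build scripts observe enabled cargo features through CARGO_FEATURE_*
    // environment variables, not through cfg!(feature = ...).
    if env::var("CARGO_FEATURE_TEST").is_err() {
        return;
    }
    let exe = out_dir.join("irrt_test.out");
    let status = Command::new("clang-irrt-test") // symlink added in flake.nix
        .args([
            "-x",
            "c++",
            irrt_dir.join("irrt_test.cpp").to_str().unwrap(), // assumed source name
            "-I",
            irrt_dir.to_str().unwrap(),
            "-Werror=return-type",
            "-o",
            exe.to_str().unwrap(),
        ])
        .status()
        .unwrap();
    assert!(status.success(), "failed to compile irrt_test.out");
}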

View File

@ -7,6 +7,7 @@ use crate::{
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
},
};
use classes::NpArrayType;
use crossbeam::channel::{unbounded, Receiver, Sender};
use inkwell::{
attributes::{Attribute, AttributeLoc},
@ -476,7 +477,11 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
ctx, module, generator, unifier, top_level, type_cache, dtype,
);
let ndarray_ty = NpArrayType {
size_type: generator.get_size_type(ctx),
elem_type: element_type,
};
ndarray_ty.get_struct_type(ctx).as_basic_type_enum()
}
_ => unreachable!(

File diff suppressed because it is too large

View File

@ -23,3 +23,4 @@ pub mod codegen;
pub mod symbol_resolver;
pub mod toplevel;
pub mod typecheck;
pub mod util;

View File

@ -1,5 +1,6 @@
use std::iter::once;

use crate::util::SizeVariant;
use helper::{debug_assert_prim_is_allowed, make_exception_fields, PrimDefDetails};
use indexmap::IndexMap;
use inkwell::{
@ -278,20 +279,11 @@ pub fn get_builtins(unifier: &mut Unifier, primitives: &PrimitiveStore) -> Built
.collect()
}

fn size_variant_to_int_type(variant: SizeVariant, primitives: &PrimitiveStore) -> Type {
match variant {
SizeVariant::Bits32 => primitives.int32,
SizeVariant::Bits64 => primitives.int64,
}
}

struct BuiltinBuilder<'a> {
@ -961,8 +953,9 @@ impl<'a> BuiltinBuilder<'a> {
resolver: None,
codegen_callback: Some(Arc::new(GenCall::new(Box::new(
|ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_copy(ctx, &obj, fun, &args, generator)
// .map(|val| Some(val.as_basic_value_enum()))
},
)))),
loc: None,
@ -978,8 +971,9 @@ impl<'a> BuiltinBuilder<'a> {
resolver: None,
codegen_callback: Some(Arc::new(GenCall::new(Box::new(
|ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_fill(ctx, &obj, fun, &args, generator)?;
// Ok(None)
},
)))),
loc: None,
@ -1059,7 +1053,7 @@ impl<'a> BuiltinBuilder<'a> {
);

// The size variant of the function determines the size of the returned int.
let int_sized = size_variant_to_int_type(size_variant, self.primitives);
let ndarray_int_sized =
make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));
@ -1084,7 +1078,7 @@ impl<'a> BuiltinBuilder<'a> {
let arg_ty = fun.0.args[0].ty;
let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;
let ret_elem_ty = size_variant_to_int_type(size_variant, &ctx.primitives);
Ok(Some(builtin_fns::call_round(generator, ctx, (arg_ty, arg), ret_elem_ty)?))
}),
)
@ -1125,7 +1119,7 @@ impl<'a> BuiltinBuilder<'a> {
make_ndarray_ty(self.unifier, self.primitives, Some(float), Some(common_ndim.ty));

// The size variant of the function determines the type of int returned
let int_sized = size_variant_to_int_type(size_variant, self.primitives);
let ndarray_int_sized =
make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));
@ -1148,7 +1142,7 @@ impl<'a> BuiltinBuilder<'a> {
let arg_ty = fun.0.args[0].ty;
let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;
let ret_elem_ty = size_variant_to_int_type(size_variant, &ctx.primitives);
let func = match kind {
Kind::Ceil => builtin_fns::call_ceil,
Kind::Floor => builtin_fns::call_floor,
@ -1202,7 +1196,7 @@ impl<'a> BuiltinBuilder<'a> {
let func = match prim {
PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => gen_ndarray_empty,
PrimDef::FunNpZeros => gen_ndarray_zeros,
PrimDef::FunNpOnes => todo!(), // gen_ndarray_ones,
_ => unreachable!(),
};
func(ctx, &obj, fun, &args, generator).map(|val| Some(val.as_basic_value_enum()))
@ -1251,8 +1245,9 @@ impl<'a> BuiltinBuilder<'a> {
resolver: None,
codegen_callback: Some(Arc::new(GenCall::new(Box::new(
|ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_array(ctx, &obj, fun, &args, generator)
// .map(|val| Some(val.as_basic_value_enum()))
},
)))),
loc: None,
@ -1270,8 +1265,9 @@ impl<'a> BuiltinBuilder<'a> {
// type variable
&[(self.list_int32, "shape"), (tv.ty, "fill_value")],
Box::new(move |ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_full(ctx, &obj, fun, &args, generator)
// .map(|val| Some(val.as_basic_value_enum()))
}),
)
} }
@ -1303,8 +1299,9 @@ impl<'a> BuiltinBuilder<'a> {
resolver: None,
codegen_callback: Some(Arc::new(GenCall::new(Box::new(
|ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_eye(ctx, &obj, fun, &args, generator)
// .map(|val| Some(val.as_basic_value_enum()))
},
)))),
loc: None,
@ -1317,8 +1314,9 @@ impl<'a> BuiltinBuilder<'a> {
self.ndarray_float_2d,
&[(int32, "n")],
Box::new(|ctx, obj, fun, args, generator| {
todo!()
// gen_ndarray_identity(ctx, &obj, fun, &args, generator)
// .map(|val| Some(val.as_basic_value_enum()))
}),
),
_ => unreachable!(),

nac3core/src/util.rs Normal file
View File

@ -0,0 +1,5 @@
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SizeVariant {
Bits32,
Bits64,
}
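Both halves of this refactor now share this enum; roughly (helper names from this diff):

// codegen/irrt: let variant = get_size_variant(llvm_usize);
// builtins:     let int_ty = size_variant_to_int_type(variant, primitives);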

View File

@ -0,0 +1,3 @@
def run() -> int32:
    hello = np_zeros((3, 4))
    return 0

View File

@ -449,6 +449,9 @@ fn main() {
.create_target_machine(llvm_options.opt_level)
.expect("couldn't create target machine");
// NOTE: DEBUG PRINT
main.print_to_file("standalone.ll").unwrap();
let pass_options = PassBuilderOptions::create();
pass_options.set_merge_functions(true);
let passes = format!("default<O{}>", opt_level as u32);

View File

@ -81,6 +81,7 @@ in rec {
''
mkdir -p $out/bin
ln -s ${llvm-nac3}/bin/clang.exe $out/bin/clang-irrt.exe
ln -s ${llvm-nac3}/bin/clang.exe $out/bin/clang-irrt-test.exe
ln -s ${llvm-nac3}/bin/llvm-as.exe $out/bin/llvm-as-irrt.exe
'';
nac3artiq = pkgs.rustPlatform.buildRustPackage {