Compare commits: master ... ndarray-st

9 Commits

Author | SHA1 | Date
---|---|---
lyken | 43e9a9539d |
lyken | 4209ad0dff |
lyken | 9d546f36bc |
lyken | 3528286679 |
lyken | eb048f7f6b |
lyken | e35dfc6453 |
lyken | f73ced560e |
lyken | a2cfc24091 |
lyken | 6233f84ee9 |
@ -0,0 +1,3 @@
#!/usr/bin/env bash
clang-irrt --target=wasm32 -x c++ -fno-discard-value-names -fno-exceptions -fno-rtti -O0 -emit-llvm -S -Wall -Wextra nac3core/irrt/irrt.cpp
clang -x c++ -fno-discard-value-names -fno-exceptions -fno-rtti -O0 -emit-llvm -S -Wall -Wextra nac3core/irrt/irrt_test.cpp
@ -13,6 +13,7 @@
|
|||
''
|
||||
mkdir -p $out/bin
|
||||
ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
|
||||
ln -s ${pkgs.llvmPackages_14.clang}/bin/clang $out/bin/clang-irrt-test
|
||||
ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
|
||||
'';
|
||||
nac3artiq = pkgs.python3Packages.toPythonModule (
|
||||
|
@ -23,6 +24,7 @@
|
|||
cargoLock = {
|
||||
lockFile = ./Cargo.lock;
|
||||
};
|
||||
cargoTestFlags = [ "--features" "test" ];
|
||||
passthru.cargoLock = cargoLock;
|
||||
nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
|
||||
buildInputs = [ pkgs.python3 llvm-nac3 ];
|
||||
|
@ -39,7 +41,7 @@
|
|||
'';
|
||||
installPhase =
|
||||
''
|
||||
PYTHON_SITEPACKAGES=$out/${pkgs.python3Packages.python.sitePackages}
|
||||
PYTHON_SITEPACKAGES=$out/${pkgs.python3Packages.python.sitePackages}
|
||||
mkdir -p $PYTHON_SITEPACKAGES
|
||||
cp target/x86_64-unknown-linux-gnu/release/libnac3artiq.so $PYTHON_SITEPACKAGES/nac3artiq.so
|
||||
|
||||
|
@ -161,7 +163,10 @@
|
|||
clippy
|
||||
pre-commit
|
||||
rustfmt
|
||||
rust-analyzer
|
||||
];
|
||||
# https://nixos.wiki/wiki/Rust#Shell.nix_example
|
||||
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
|
||||
};
|
||||
devShells.x86_64-linux.msys2 = pkgs.mkShell {
|
||||
name = "nac3-dev-shell-msys2";
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
[features]
|
||||
test = []
|
||||
|
||||
[package]
|
||||
name = "nac3core"
|
||||
version = "0.1.0"
|
||||
|
|
|
@ -7,8 +7,8 @@ use std::{
|
|||
process::{Command, Stdio},
|
||||
};
|
||||
|
||||
fn main() {
|
||||
const FILE: &str = "src/codegen/irrt/irrt.cpp";
|
||||
fn compile_irrt(irrt_dir: &Path, out_dir: &Path) {
|
||||
let irrt_cpp_path = irrt_dir.join("irrt.cpp");
|
||||
|
||||
/*
|
||||
* HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
|
||||
|
@ -16,7 +16,7 @@ fn main() {
|
|||
*/
|
||||
let flags: &[&str] = &[
|
||||
"--target=wasm32",
|
||||
FILE,
|
||||
irrt_cpp_path.to_str().unwrap(),
|
||||
"-x",
|
||||
"c++",
|
||||
"-fno-discard-value-names",
|
||||
|
@ -31,13 +31,14 @@ fn main() {
|
|||
"-S",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-Werror=return-type",
|
||||
"-I",
|
||||
irrt_dir.to_str().unwrap(),
|
||||
"-o",
|
||||
"-",
|
||||
];
|
||||
|
||||
println!("cargo:rerun-if-changed={FILE}");
|
||||
let out_dir = env::var("OUT_DIR").unwrap();
|
||||
let out_path = Path::new(&out_dir);
|
||||
println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
|
||||
|
||||
let output = Command::new("clang-irrt")
|
||||
.args(flags)
|
||||
|
@ -52,7 +53,11 @@ fn main() {
|
|||
let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n");
|
||||
let mut filtered_output = String::with_capacity(output.len());
|
||||
|
||||
let regex_filter = Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap();
|
||||
// (?ms:^define.*?\}$) to capture `define` blocks
|
||||
// (?m:^declare.*?$) to capture `declare` blocks
|
||||
// (?m:^%.+?=\s*type\s*\{.+?\}$) to capture `type` declarations
|
||||
let regex_filter =
|
||||
Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)|(?m:^%.+?=\s*type\s*\{.+?\}$)").unwrap();
|
||||
for f in regex_filter.captures_iter(&output) {
|
||||
assert_eq!(f.len(), 1);
|
||||
filtered_output.push_str(&f[0]);
|
||||
|
@ -65,18 +70,66 @@ fn main() {
|
|||
|
||||
println!("cargo:rerun-if-env-changed=DEBUG_DUMP_IRRT");
|
||||
if env::var("DEBUG_DUMP_IRRT").is_ok() {
|
||||
let mut file = File::create(out_path.join("irrt.ll")).unwrap();
|
||||
let mut file = File::create(out_dir.join("irrt.ll")).unwrap();
|
||||
file.write_all(output.as_bytes()).unwrap();
|
||||
let mut file = File::create(out_path.join("irrt-filtered.ll")).unwrap();
|
||||
let mut file = File::create(out_dir.join("irrt-filtered.ll")).unwrap();
|
||||
file.write_all(filtered_output.as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
let mut llvm_as = Command::new("llvm-as-irrt")
|
||||
.stdin(Stdio::piped())
|
||||
.arg("-o")
|
||||
.arg(out_path.join("irrt.bc"))
|
||||
.arg(out_dir.join("irrt.bc"))
|
||||
.spawn()
|
||||
.unwrap();
|
||||
llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
|
||||
assert!(llvm_as.wait().unwrap().success());
|
||||
}
|
||||
|
||||
fn compile_irrt_test(irrt_dir: &Path, out_dir: &Path) {
|
||||
let irrt_test_cpp_path = irrt_dir.join("irrt_test.cpp");
|
||||
let exe_path = out_dir.join("irrt_test.out");
|
||||
|
||||
let flags: &[&str] = &[
|
||||
irrt_test_cpp_path.to_str().unwrap(),
|
||||
"-x",
|
||||
"c++",
|
||||
"-I",
|
||||
irrt_dir.to_str().unwrap(),
|
||||
"-g",
|
||||
"-fno-discard-value-names",
|
||||
"-O0",
|
||||
"-Wall",
|
||||
"-Wextra",
|
||||
"-Werror=return-type",
|
||||
"-lm", // for `tgamma()`, `lgamma()`
|
||||
"-o",
|
||||
exe_path.to_str().unwrap(),
|
||||
];
|
||||
println!("{:?}", flags);
|
||||
|
||||
Command::new("clang-irrt-test")
|
||||
.args(flags)
|
||||
.output()
|
||||
.map(|o| {
|
||||
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
|
||||
o
|
||||
})
|
||||
.unwrap();
|
||||
println!("cargo:rerun-if-changed={}", out_dir.to_str().unwrap());
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let out_dir = env::var("OUT_DIR").unwrap();
|
||||
let out_dir = Path::new(&out_dir);
|
||||
|
||||
let irrt_dir = Path::new("./irrt");
|
||||
|
||||
compile_irrt(irrt_dir, out_dir);
|
||||
|
||||
// https://github.com/rust-lang/cargo/issues/2549
|
||||
// `cargo test -F test` to also build `irrt_test.cpp`
|
||||
if cfg!(feature = "test") {
|
||||
compile_irrt_test(irrt_dir, out_dir);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,5 @@
#include "irrt_everything.hpp"

/*
This file will be read by `clang-irrt` to conveniently produce LLVM IR for `nac3core/codegen`.
*/
@ -0,0 +1,215 @@
|
|||
#pragma once
|
||||
|
||||
#include "irrt_utils.hpp"
|
||||
#include "irrt_typedefs.hpp"
|
||||
|
||||
/*
|
||||
This header contains IRRT implementations
|
||||
that do not deserved to be categorized (e.g., into numpy, etc.)
|
||||
|
||||
Check out other *.hpp files before including them here!!
|
||||
*/
|
||||
|
||||
// The type of an index or a value describing the length of a range/slice is
|
||||
// always `int32_t`.
|
||||
typedef int32_t SliceIndex;
|
||||
|
||||
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
|
||||
// need to make sure `exp >= 0` before calling this function
|
||||
template <typename T>
|
||||
static T __nac3_int_exp_impl(T base, T exp) {
|
||||
T res = 1;
|
||||
/* repeated squaring method */
|
||||
do {
|
||||
if (exp & 1) {
|
||||
res *= base; /* for n odd */
|
||||
}
|
||||
exp >>= 1;
|
||||
base *= base;
|
||||
} while (exp);
|
||||
return res;
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
#define DEF_nac3_int_exp_(T) \
|
||||
T __nac3_int_exp_##T(T base, T exp) {\
|
||||
return __nac3_int_exp_impl(base, exp);\
|
||||
}
|
||||
|
||||
DEF_nac3_int_exp_(int32_t)
|
||||
DEF_nac3_int_exp_(int64_t)
|
||||
DEF_nac3_int_exp_(uint32_t)
|
||||
DEF_nac3_int_exp_(uint64_t)
|
||||
|
||||
SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
|
||||
if (i < 0) {
|
||||
i = len + i;
|
||||
}
|
||||
if (i < 0) {
|
||||
return 0;
|
||||
} else if (i > len) {
|
||||
return len;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
SliceIndex __nac3_range_slice_len(
|
||||
const SliceIndex start,
|
||||
const SliceIndex end,
|
||||
const SliceIndex step
|
||||
) {
|
||||
SliceIndex diff = end - start;
|
||||
if (diff > 0 && step > 0) {
|
||||
return ((diff - 1) / step) + 1;
|
||||
} else if (diff < 0 && step < 0) {
|
||||
return ((diff + 1) / step) + 1;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Handle list assignment and dropping part of the list when
|
||||
// both dest_step and src_step are +1.
|
||||
// - All indices must *not* be out-of-bounds or negative,
|
||||
// - The end index is *inclusive*,
|
||||
// - The lengths of the src and dest slices should already
|
||||
// be checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
|
||||
SliceIndex __nac3_list_slice_assign_var_size(
|
||||
SliceIndex dest_start,
|
||||
SliceIndex dest_end,
|
||||
SliceIndex dest_step,
|
||||
uint8_t *dest_arr,
|
||||
SliceIndex dest_arr_len,
|
||||
SliceIndex src_start,
|
||||
SliceIndex src_end,
|
||||
SliceIndex src_step,
|
||||
uint8_t *src_arr,
|
||||
SliceIndex src_arr_len,
|
||||
const SliceIndex size
|
||||
) {
|
||||
/* if dest_arr_len == 0, do nothing since we do not support extending list */
|
||||
if (dest_arr_len == 0) return dest_arr_len;
|
||||
/* if both steps are 1, memmove directly, handle dropping part of the list, and shrink the size */
|
||||
if (src_step == dest_step && dest_step == 1) {
|
||||
const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
|
||||
const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
|
||||
if (src_len > 0) {
|
||||
__builtin_memmove(
|
||||
dest_arr + dest_start * size,
|
||||
src_arr + src_start * size,
|
||||
src_len * size
|
||||
);
|
||||
}
|
||||
if (dest_len > 0) {
|
||||
/* dropping */
|
||||
__builtin_memmove(
|
||||
dest_arr + (dest_start + src_len) * size,
|
||||
dest_arr + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size
|
||||
);
|
||||
}
|
||||
/* shrink size */
|
||||
return dest_arr_len - (dest_len - src_len);
|
||||
}
|
||||
/* if the two ranges overlap, we need a temporary copy via alloca */
|
||||
uint8_t need_alloca =
|
||||
(dest_arr == src_arr)
|
||||
&& !(
|
||||
max(dest_start, dest_end) < min(src_start, src_end)
|
||||
|| max(src_start, src_end) < min(dest_start, dest_end)
|
||||
);
|
||||
if (need_alloca) {
|
||||
uint8_t *tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
|
||||
__builtin_memcpy(tmp, src_arr, src_arr_len * size);
|
||||
src_arr = tmp;
|
||||
}
|
||||
SliceIndex src_ind = src_start;
|
||||
SliceIndex dest_ind = dest_start;
|
||||
for (;
|
||||
(src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
|
||||
src_ind += src_step, dest_ind += dest_step
|
||||
) {
|
||||
/* for constant optimization */
|
||||
if (size == 1) {
|
||||
__builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
|
||||
} else if (size == 4) {
|
||||
__builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
|
||||
} else if (size == 8) {
|
||||
__builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
|
||||
} else {
|
||||
/* memcpy for var size, cannot overlap after previous alloca */
|
||||
__builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
|
||||
}
|
||||
}
|
||||
/* only when dest_step == 1 can we shrink the dest list */
|
||||
/* size should be ensured prior to calling this function */
|
||||
if (dest_step == 1 && dest_end >= dest_start) {
|
||||
__builtin_memmove(
|
||||
dest_arr + dest_ind * size,
|
||||
dest_arr + (dest_end + 1) * size,
|
||||
(dest_arr_len - dest_end - 1) * size
|
||||
);
|
||||
return dest_arr_len - (dest_end - dest_ind) - 1;
|
||||
}
|
||||
return dest_arr_len;
|
||||
}
|
||||
|
||||
int32_t __nac3_isinf(double x) {
|
||||
return __builtin_isinf(x);
|
||||
}
|
||||
|
||||
int32_t __nac3_isnan(double x) {
|
||||
return __builtin_isnan(x);
|
||||
}
|
||||
|
||||
double tgamma(double arg);
|
||||
|
||||
double __nac3_gamma(double z) {
|
||||
// Handling of special values
|
||||
// | x | Python gamma(x) | C tgamma(x) |
|
||||
// --- | ----------------- | --------------- | ----------- |
|
||||
// (1) | nan | nan | nan |
|
||||
// (2) | -inf | -inf | inf |
|
||||
// (3) | inf | inf | inf |
|
||||
// (4) | 0.0 | inf | inf |
|
||||
// (5) | {-1.0, -2.0, ...} | inf | nan |
|
||||
|
||||
// (1)-(3)
|
||||
if (__builtin_isinf(z) || __builtin_isnan(z)) {
|
||||
return z;
|
||||
}
|
||||
|
||||
double v = tgamma(z);
|
||||
|
||||
// (4)-(5)
|
||||
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
|
||||
}
|
||||
|
||||
double lgamma(double arg);
|
||||
|
||||
double __nac3_gammaln(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: gammaln(-inf) -> -inf
|
||||
// - libm : lgamma(-inf) -> inf
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return x;
|
||||
}
|
||||
|
||||
return lgamma(x);
|
||||
}
|
||||
|
||||
double j0(double x);
|
||||
|
||||
double __nac3_j0(double x) {
|
||||
// libm's handling of value overflows differs from scipy:
|
||||
// - scipy: j0(inf) -> nan
|
||||
// - libm : j0(inf) -> 0.0
|
||||
|
||||
if (__builtin_isinf(x)) {
|
||||
return __builtin_nan("");
|
||||
}
|
||||
|
||||
return j0(x);
|
||||
}
|
||||
}
|
|
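A minimal standalone sketch (not part of this changeset) of the slice-helper semantics above: `__nac3_slice_index_bound` clamps a possibly-negative index into `[0, len]` the way Python does, and `__nac3_range_slice_len` computes `len(range(start, end, step))`. The re-implementations below are assumptions for illustration only and use plain `int32_t` in place of `SliceIndex`:

// slice_semantics_sketch.cpp -- illustrative only, not part of this diff.
#include <cassert>
#include <cstdint>

// Mirrors __nac3_slice_index_bound: negative indices count from the end,
// then the result is clamped into [0, len].
static int32_t index_bound(int32_t i, int32_t len) {
    if (i < 0) i += len;
    if (i < 0) return 0;
    if (i > len) return len;
    return i;
}

// Mirrors __nac3_range_slice_len: the length of range(start, end, step).
static int32_t range_slice_len(int32_t start, int32_t end, int32_t step) {
    int32_t diff = end - start;
    if (diff > 0 && step > 0) return (diff - 1) / step + 1;
    if (diff < 0 && step < 0) return (diff + 1) / step + 1;
    return 0; // empty or ill-formed range
}

int main() {
    assert(index_bound(-1, 5) == 4);        // a[-1] maps to a[4]
    assert(index_bound(-9, 5) == 0);        // clamped to the front
    assert(index_bound(7, 5) == 5);         // clamped to the back
    assert(range_slice_len(0, 5, 2) == 3);  // like len(range(0, 5, 2))
    assert(range_slice_len(5, 0, -2) == 3); // like len(range(5, 0, -2))
    assert(range_slice_len(3, 3, 1) == 0);  // empty slice
    return 0;
}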
@ -0,0 +1,11 @@
#pragma once

#include "irrt_basic.hpp"
#include "irrt_numpy_ndarray.hpp"

/*
All IRRT implementations.

We don't have any pre-compiled objects, so we are writing all implementations in headers and
concatenating them with `#include` into one massive source file that contains all the IRRT stuff.
*/
@ -0,0 +1,196 @@
|
|||
#pragma once
|
||||
|
||||
#include "irrt_utils.hpp"
|
||||
#include "irrt_typedefs.hpp"
|
||||
|
||||
/*
|
||||
NDArray-related implementations.
|
||||
*/
|
||||
|
||||
// NDArray indices are always `uint32_t`.
|
||||
using NDIndex = uint32_t;
|
||||
|
||||
namespace {
|
||||
namespace ndarray_util {
|
||||
// Compute the strides of an ndarray given an ndarray `shape`
|
||||
// and assuming that the ndarray is *fully C-contiguous*.
|
||||
//
|
||||
// You might want to read up on https://ajcr.net/stride-guide-part-1/.
|
||||
template <typename SizeT>
|
||||
static void set_strides_by_shape(SizeT ndims, SizeT* dst_strides, const SizeT* shape) {
|
||||
SizeT stride_product = 1;
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
int dim_i = ndims - i - 1;
|
||||
dst_strides[dim_i] = stride_product;
|
||||
stride_product *= shape[dim_i];
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the size/# of elements of an ndarray given its shape
|
||||
template <typename SizeT>
|
||||
static SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) {
|
||||
SizeT size = 1;
|
||||
for (SizeT dim_i = 0; dim_i < ndims; dim_i++) size *= shape[dim_i];
|
||||
return size;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename SizeT>
|
||||
struct NDArrayIndicesIter {
|
||||
SizeT ndims;
|
||||
const SizeT *shape;
|
||||
SizeT *indices;
|
||||
|
||||
void set_indices_zero() {
|
||||
__builtin_memset(indices, 0, sizeof(SizeT) * ndims);
|
||||
}
|
||||
|
||||
void next() {
|
||||
for (SizeT i = 0; i < ndims; i++) {
|
||||
SizeT dim_i = ndims - i - 1;
|
||||
|
||||
indices[dim_i]++;
|
||||
if (indices[dim_i] < shape[dim_i]) {
|
||||
break;
|
||||
} else {
|
||||
indices[dim_i] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// The NDArray object. `SizeT` is the *signed* size type of this ndarray.
|
||||
//
|
||||
// NOTE: The order of fields is IMPORTANT. DON'T TOUCH IT
|
||||
//
|
||||
// Some resources you might find helpful:
|
||||
// - The official numpy implementations:
|
||||
// - https://github.com/numpy/numpy/blob/735a477f0bc2b5b84d0e72d92f224bde78d4e069/doc/source/reference/c-api/types-and-structures.rst
|
||||
// - On strides (about reshaping, slicing, C-contiguousness, etc.)
|
||||
// - https://ajcr.net/stride-guide-part-1/.
|
||||
// - https://ajcr.net/stride-guide-part-2/.
|
||||
// - https://ajcr.net/stride-guide-part-3/.
|
||||
template <typename SizeT>
|
||||
struct NDArray {
|
||||
// The underlying data this `ndarray` is pointing to.
|
||||
//
|
||||
// NOTE: Formally this should be of type `void *`, but clang
|
||||
// translates `void *` to `i8 *` when run with `-S -emit-llvm`,
|
||||
// so we will put `uint8_t *` here for clarity.
|
||||
uint8_t *data;
|
||||
|
||||
// The number of bytes of a single element in `data`.
|
||||
//
|
||||
// The `SizeT` is treated as `unsigned`.
|
||||
SizeT itemsize;
|
||||
|
||||
// The number of dimensions of this ndarray.
|
||||
//
|
||||
// The `SizeT` is treated as `unsigned`.
|
||||
SizeT ndims;
|
||||
|
||||
// Array shape, with length equal to `ndims`.
|
||||
//
|
||||
// The `SizeT` is treated as `unsigned`.
|
||||
//
|
||||
// NOTE: `shape` can contain 0.
|
||||
// (those appear when the user makes an out of bounds slice into an ndarray, e.g., `np.zeros((3, 3))[400:].shape == (0, 3)`)
|
||||
SizeT *shape;
|
||||
|
||||
// Array strides (stride value is in number of bytes, NOT number of elements), with length equal to `ndims`.
|
||||
//
|
||||
// The `SizeT` is treated as `signed`.
|
||||
//
|
||||
// NOTE: `strides` can have negative numbers.
|
||||
// (those appear when there is a slice with a negative step, e.g., `my_array[::-1]`)
|
||||
SizeT *strides;
|
||||
|
||||
// Calculate the size/# of elements of an `ndarray`.
|
||||
// This function corresponds to `np.size(<ndarray>)` or `ndarray.size`
|
||||
SizeT size() {
|
||||
return ndarray_util::calc_size_from_shape(ndims, shape);
|
||||
}
|
||||
|
||||
// Calculate the number of bytes of the content of this `ndarray` *in its view*.
|
||||
// This function corresponds to `ndarray.nbytes`
|
||||
SizeT nbytes() {
|
||||
return this->size() * itemsize;
|
||||
}
|
||||
|
||||
void set_value_at_pelement(uint8_t* pelement, uint8_t* pvalue) {
|
||||
__builtin_memcpy(pelement, pvalue, itemsize);
|
||||
}
|
||||
|
||||
uint8_t* get_pelement(SizeT *indices) {
|
||||
uint8_t* element = data;
|
||||
for (SizeT dim_i = 0; dim_i < ndims; dim_i++)
|
||||
element += indices[dim_i] * strides[dim_i] * itemsize;
|
||||
return element;
|
||||
}
|
||||
|
||||
// Is the given `indices` valid/in-bounds?
|
||||
bool in_bounds(SizeT *indices) {
|
||||
for (SizeT dim_i = 0; dim_i < ndims; dim_i++) {
|
||||
bool dim_ok = indices[dim_i] < shape[dim_i];
|
||||
if (!dim_ok) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Fill the ndarray with a value
|
||||
void fill_generic(uint8_t* pvalue) {
|
||||
NDArrayIndicesIter<SizeT> iter;
|
||||
iter.ndims = this->ndims;
|
||||
iter.shape = this->shape;
|
||||
iter.indices = (SizeT*) __builtin_alloca(sizeof(SizeT) * ndims);
|
||||
iter.set_indices_zero();
|
||||
|
||||
for (SizeT i = 0; i < this->size(); i++, iter.next()) {
|
||||
uint8_t* pelement = get_pelement(iter.indices);
|
||||
set_value_at_pelement(pelement, pvalue);
|
||||
}
|
||||
}
|
||||
|
||||
// Set the strides of the ndarray with `ndarray_util::set_strides_by_shape`
|
||||
void set_strides_by_shape() {
|
||||
ndarray_util::set_strides_by_shape(ndims, strides, shape);
|
||||
}
|
||||
|
||||
// https://numpy.org/doc/stable/reference/generated/numpy.eye.html
|
||||
void set_to_eye(SizeT k, uint8_t* zero_pvalue, uint8_t* one_pvalue) {
|
||||
__builtin_assume(ndims == 2);
|
||||
|
||||
// TODO: Better implementation
|
||||
|
||||
fill_generic(zero_pvalue);
|
||||
for (SizeT i = 0; i < min(shape[0], shape[1]); i++) {
|
||||
SizeT row = i;
|
||||
SizeT col = i + k;
|
||||
SizeT indices[2] = { row, col };
|
||||
|
||||
if (!in_bounds(indices)) continue;
|
||||
|
||||
uint8_t* pelement = get_pelement(indices);
|
||||
set_value_at_pelement(pelement, one_pvalue);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
extern "C" {
|
||||
uint32_t __nac3_ndarray_size(NDArray<int32_t>* ndarray) {
|
||||
return ndarray->size();
|
||||
}
|
||||
|
||||
uint64_t __nac3_ndarray_size64(NDArray<int64_t>* ndarray) {
|
||||
return ndarray->size();
|
||||
}
|
||||
|
||||
void __nac3_ndarray_fill_generic(NDArray<int32_t>* ndarray, uint8_t* pvalue) {
|
||||
ndarray->fill_generic(pvalue);
|
||||
}
|
||||
|
||||
void __nac3_ndarray_fill_generic64(NDArray<int64_t>* ndarray, uint8_t* pvalue) {
|
||||
ndarray->fill_generic(pvalue);
|
||||
}
|
||||
}
|
|
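A small standalone sketch (not part of this changeset) of the stride logic above: for a C-contiguous ndarray, `set_strides_by_shape` makes each stride the product of the trailing dimensions, and `get_pelement` turns an index tuple into a byte offset by summing `index * stride` terms and scaling by `itemsize`. The shape and expected strides mirror the values checked in `irrt_test.cpp`; everything else here is illustrative only:

// strides_sketch.cpp -- illustrative only, not part of this diff.
#include <cassert>
#include <cstdint>

int main() {
    const int32_t ndims = 4;
    int32_t shape[4] = {99, 3, 5, 7};
    int32_t strides[4];

    // Innermost dimension varies fastest: strides[i] = product of shape[i+1..].
    int32_t product = 1;
    for (int32_t i = ndims - 1; i >= 0; i--) {
        strides[i] = product;
        product *= shape[i];
    }
    assert(strides[0] == 105 && strides[1] == 35 && strides[2] == 7 && strides[3] == 1);

    // Byte offset of element (1, 2, 3, 4) for an itemsize of 8 bytes,
    // matching get_pelement(): sum(indices[i] * strides[i]) * itemsize.
    int32_t indices[4] = {1, 2, 3, 4};
    int32_t itemsize = 8;
    int64_t offset = 0;
    for (int32_t i = 0; i < ndims; i++) offset += (int64_t)indices[i] * strides[i];
    assert(offset == 1 * 105 + 2 * 35 + 3 * 7 + 4 * 1); // 200 elements into the buffer
    assert(offset * itemsize == 1600);                  // 1600 bytes from `data`
    return 0;
}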
@ -0,0 +1,189 @@
|
|||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
|
||||
// set `IRRT_DONT_TYPEDEF_INTS` because `cstdint` has it all
|
||||
#define IRRT_DONT_TYPEDEF_INTS
|
||||
#include "irrt_everything.hpp"
|
||||
|
||||
namespace {
|
||||
static void test_fail() {
|
||||
printf("[!] Test failed\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
static void __begin_test(const char* function_name, const char* file, int line) {
|
||||
printf("######### Running %s @ %s:%d\n", function_name, file, line);
|
||||
}
|
||||
|
||||
#define BEGIN_TEST() __begin_test(__FUNCTION__, __FILE__, __LINE__)
|
||||
|
||||
template <typename T>
|
||||
bool arrays_match(int len, T *as, T *bs) {
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (as[i] != bs[i]) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void debug_print_array(const char* format, int len, T* as) {
|
||||
printf("[");
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (i != 0) printf(", ");
|
||||
printf(format, as[i]);
|
||||
}
|
||||
printf("]");
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void assert_arrays_match(const char* label, const char* format, int len, T* expected, T* got) {
|
||||
if (!arrays_match(len, expected, got)) {
|
||||
printf("expected %s: ", label);
|
||||
debug_print_array(format, len, expected);
|
||||
printf("\n");
|
||||
printf("got %s: ", label);
|
||||
debug_print_array(format, len, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void assert_values_match(const char* label, const char* format, T expected, T got) {
|
||||
if (expected != got) {
|
||||
printf("expected %s: ", label);
|
||||
printf(format, expected);
|
||||
printf("\n");
|
||||
printf("got %s: ", label);
|
||||
printf(format, got);
|
||||
printf("\n");
|
||||
test_fail();
|
||||
}
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_normal() {
|
||||
// Test shapes with normal values
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 3, 5, 7 };
|
||||
debug_print_array("%d", 4, shape);
|
||||
assert_values_match("size", "%d", 210, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_calc_size_from_shape_has_zero() {
|
||||
// Test shapes with 0 in them
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 2, 0, 5, 7 };
|
||||
assert_values_match("size", "%d", 0, ndarray_util::calc_size_from_shape<int32_t>(4, shape));
|
||||
}
|
||||
|
||||
void test_set_strides_by_shape() {
|
||||
// Test `set_strides_by_shape()`
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[4] = { 99, 3, 5, 7 };
|
||||
int32_t strides[4] = { 0 };
|
||||
ndarray_util::set_strides_by_shape(4, strides, shape);
|
||||
|
||||
int32_t expected_strides[4] = { 105, 35, 7, 1 };
|
||||
assert_arrays_match("strides", "%u", 4u, expected_strides, strides);
|
||||
}
|
||||
|
||||
void test_ndarray_indices_iter_normal() {
|
||||
// Test NDArrayIndicesIter normal behavior
|
||||
BEGIN_TEST();
|
||||
|
||||
int32_t shape[3] = { 1, 2, 3 };
|
||||
int32_t indices[3] = { 0, 0, 0 };
|
||||
auto iter = NDArrayIndicesIter<int32_t> {
|
||||
.ndims = 3u,
|
||||
.shape = shape,
|
||||
.indices = indices
|
||||
};
|
||||
|
||||
assert_arrays_match("indices #0", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #1", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #2", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 2 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #3", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 0 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #4", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 1 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #5", "%u", 3u, iter.indices, (int32_t[3]) { 0, 1, 2 });
|
||||
iter.next();
|
||||
assert_arrays_match("indices #6", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 0 }); // Loops back
|
||||
iter.next();
|
||||
assert_arrays_match("indices #7", "%u", 3u, iter.indices, (int32_t[3]) { 0, 0, 1 });
|
||||
}
|
||||
|
||||
void test_ndarray_fill_generic() {
|
||||
// Test ndarray fill_generic
|
||||
BEGIN_TEST();
|
||||
|
||||
// Choose a type that's neither int32_t nor uint64_t (candidates of SizeT) to spice it up
|
||||
// Also make all the octets non-zero, to see if `memcpy` in `fill_generic` is working perfectly.
|
||||
uint16_t fill_value = 0xFACE;
|
||||
|
||||
uint16_t in_data[6] = { 100, 101, 102, 103, 104, 105 }; // Initialize `data` with values that differ from `fill_value`
|
||||
int32_t in_itemsize = sizeof(uint16_t);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 2, 3 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides,
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
ndarray.fill_generic((uint8_t*) &fill_value); // `fill_generic` here
|
||||
|
||||
uint16_t expected_data[6] = { fill_value, fill_value, fill_value, fill_value, fill_value, fill_value };
|
||||
assert_arrays_match("data", "0x%hX", 6, expected_data, in_data);
|
||||
}
|
||||
|
||||
void test_ndarray_set_to_eye() {
|
||||
double in_data[9] = { 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0 };
|
||||
int32_t in_itemsize = sizeof(double);
|
||||
const int32_t in_ndims = 2;
|
||||
int32_t in_shape[in_ndims] = { 3, 3 };
|
||||
int32_t in_strides[in_ndims] = {};
|
||||
NDArray<int32_t> ndarray = {
|
||||
.data = (uint8_t*) in_data,
|
||||
.itemsize = in_itemsize,
|
||||
.ndims = in_ndims,
|
||||
.shape = in_shape,
|
||||
.strides = in_strides,
|
||||
};
|
||||
ndarray.set_strides_by_shape();
|
||||
|
||||
double zero = 0.0;
|
||||
double one = 1.0;
|
||||
ndarray.set_to_eye(1, (uint8_t*) &zero, (uint8_t*) &one);
|
||||
|
||||
assert_values_match("in_data[0]", "%f", 0.0, in_data[0]);
|
||||
assert_values_match("in_data[1]", "%f", 1.0, in_data[1]);
|
||||
assert_values_match("in_data[2]", "%f", 0.0, in_data[2]);
|
||||
assert_values_match("in_data[3]", "%f", 0.0, in_data[3]);
|
||||
assert_values_match("in_data[4]", "%f", 0.0, in_data[4]);
|
||||
assert_values_match("in_data[5]", "%f", 1.0, in_data[5]);
|
||||
assert_values_match("in_data[6]", "%f", 0.0, in_data[6]);
|
||||
assert_values_match("in_data[7]", "%f", 0.0, in_data[7]);
|
||||
assert_values_match("in_data[8]", "%f", 0.0, in_data[8]);
|
||||
}
|
||||
}
|
||||
|
||||
int main() {
|
||||
test_calc_size_from_shape_normal();
|
||||
test_calc_size_from_shape_has_zero();
|
||||
test_set_strides_by_shape();
|
||||
test_ndarray_indices_iter_normal();
|
||||
test_ndarray_fill_generic();
|
||||
test_ndarray_set_to_eye();
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,12 @@
#pragma once

// This is made toggleable since `irrt_test.cpp` itself would include
// headers that already define the `intN_t` family.
#ifndef IRRT_DONT_TYPEDEF_INTS
typedef _BitInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t;
typedef _BitInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t;
typedef _BitInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t;
#endif
|
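A hedged sketch of how the `IRRT_DONT_TYPEDEF_INTS` toggle above appears intended to be used: the IRRT translation unit compiled by `clang-irrt` gets its fixed-width integer names from the `_BitInt` typedefs, while the host-compiled test binary defines the guard before including the IRRT headers so that `<cstdint>` stays the single source of those names. The file name and the commented-out include path below are hypothetical:

// typedef_toggle_sketch.cpp -- illustrative only, not part of this diff.
#include <cstdint>              // hosted build: fixed-width ints come from the standard library
#define IRRT_DONT_TYPEDEF_INTS  // must be defined before the IRRT headers are included
// #include "irrt_typedefs.hpp" // hypothetical path; with the guard set, its typedef block is skipped

int main() {
    // With the guard defined, int32_t/uint64_t are the <cstdint> definitions,
    // not the _BitInt(...) typedefs, so there is no redefinition conflict.
    static_assert(sizeof(int32_t) == 4 && sizeof(uint64_t) == 8, "sanity check");
    return 0;
}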
@ -0,0 +1,11 @@
#pragma once

template <typename T>
static T max(T a, T b) {
    return a > b ? a : b;
}

template <typename T>
static T min(T a, T b) {
    return a > b ? b : a;
}
File diff suppressed because it is too large
|
@ -1,8 +1,6 @@
|
|||
use crate::codegen::{
|
||||
irrt::{call_ndarray_calc_size, call_ndarray_flatten_index},
|
||||
llvm_intrinsics::call_int_umin,
|
||||
stmt::gen_for_callback_incrementing,
|
||||
CodeGenContext, CodeGenerator,
|
||||
llvm_intrinsics::call_int_umin, stmt::gen_for_callback_incrementing, CodeGenContext,
|
||||
CodeGenerator,
|
||||
};
|
||||
use inkwell::context::Context;
|
||||
use inkwell::types::{ArrayType, BasicType, StructType};
|
||||
|
@ -12,6 +10,7 @@ use inkwell::{
|
|||
values::{BasicValueEnum, IntValue, PointerValue},
|
||||
AddressSpace, IntPredicate,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
|
||||
/// An LLVM type that is used to represent a non-primitive type in NAC3.
|
||||
pub trait ProxyType<'ctx>: Into<Self::Base> {
|
||||
|
@ -1601,7 +1600,8 @@ impl<'ctx> ArrayLikeValue<'ctx> for NDArrayDataProxy<'ctx, '_> {
|
|||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
generator: &G,
|
||||
) -> IntValue<'ctx> {
|
||||
call_ndarray_calc_size(generator, ctx, &self.as_slice_value(ctx, generator), (None, None))
|
||||
todo!()
|
||||
// call_ndarray_calc_size(generator, ctx, &self.as_slice_value(ctx, generator), (None, None))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1675,17 +1675,19 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> ArrayLikeIndexer<'ctx, Index>
|
|||
indices_elem_ty.get_bit_width()
|
||||
);
|
||||
|
||||
let index = call_ndarray_flatten_index(generator, ctx, *self.0, indices);
|
||||
todo!()
|
||||
|
||||
unsafe {
|
||||
ctx.builder
|
||||
.build_in_bounds_gep(
|
||||
self.base_ptr(ctx, generator),
|
||||
&[index],
|
||||
name.unwrap_or_default(),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
// let index = call_ndarray_flatten_index(generator, ctx, *self.0, indices);
|
||||
|
||||
// unsafe {
|
||||
// ctx.builder
|
||||
// .build_in_bounds_gep(
|
||||
// self.base_ptr(ctx, generator),
|
||||
// &[index],
|
||||
// name.unwrap_or_default(),
|
||||
// )
|
||||
// .unwrap()
|
||||
// }
|
||||
}
|
||||
|
||||
fn ptr_offset<G: CodeGenerator + ?Sized>(
|
||||
|
@ -1761,3 +1763,307 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> UntypedArrayLikeMutator<'ctx,
|
|||
for NDArrayDataProxy<'ctx, '_>
|
||||
{
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct StructField<'ctx> {
|
||||
/// The GEP index of this struct field.
|
||||
pub gep_index: u32,
|
||||
/// Name of this struct field.
|
||||
///
|
||||
/// Used for generating names.
|
||||
pub name: &'static str,
|
||||
/// The type of this struct field.
|
||||
pub ty: BasicTypeEnum<'ctx>,
|
||||
}
|
||||
|
||||
pub struct StructFields<'ctx> {
|
||||
/// Name of the struct.
|
||||
///
|
||||
/// Used for generating names.
|
||||
pub name: &'static str,
|
||||
|
||||
/// All the [`StructField`]s of this struct.
|
||||
///
|
||||
/// **NOTE:** The index position of a [`StructField`]
|
||||
/// matches the element's [`StructField::gep_index`].
|
||||
pub fields: Vec<StructField<'ctx>>,
|
||||
}
|
||||
|
||||
struct StructFieldsBuilder<'ctx> {
|
||||
gep_index_counter: u32,
|
||||
/// Name of the struct to be built.
|
||||
name: &'static str,
|
||||
fields: Vec<StructField<'ctx>>,
|
||||
}
|
||||
|
||||
impl<'ctx> StructField<'ctx> {
|
||||
pub fn gep(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ptr: PointerValue<'ctx>,
|
||||
) -> PointerValue<'ctx> {
|
||||
ctx.builder.build_struct_gep(ptr, self.gep_index, self.name).unwrap()
|
||||
}
|
||||
|
||||
pub fn load(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ptr: PointerValue<'ctx>,
|
||||
) -> BasicValueEnum<'ctx> {
|
||||
ctx.builder.build_load(self.gep(ctx, ptr), self.name).unwrap()
|
||||
}
|
||||
|
||||
pub fn store<V>(&self, ctx: &CodeGenContext<'ctx, '_>, ptr: PointerValue<'ctx>, value: V)
|
||||
where
|
||||
V: BasicValue<'ctx>,
|
||||
{
|
||||
ctx.builder.build_store(self.gep(ctx, ptr), value).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
type IsInstanceError = String;
|
||||
type IsInstanceResult = Result<(), IsInstanceError>;
|
||||
|
||||
pub fn check_basic_types_match<'ctx, A, B>(expected: A, got: B) -> IsInstanceResult
|
||||
where
|
||||
A: BasicType<'ctx>,
|
||||
B: BasicType<'ctx>,
|
||||
{
|
||||
let expected = expected.as_basic_type_enum();
|
||||
let got = got.as_basic_type_enum();
|
||||
|
||||
// Put this logic here,
|
||||
// otherwise any mismatch would only get the generic fallback report
|
||||
match (expected, got) {
|
||||
(BasicTypeEnum::IntType(expected), BasicTypeEnum::IntType(got)) => {
|
||||
if expected.get_bit_width() != got.get_bit_width() {
|
||||
return Err(format!(
|
||||
"Expected IntType ({expected}-bit(s)), got IntType ({got}-bit(s))"
|
||||
));
|
||||
}
|
||||
}
|
||||
(expected, got) => {
|
||||
if expected != got {
|
||||
return Err(format!("Expected {expected}, got {got}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl<'ctx> StructFields<'ctx> {
|
||||
pub fn num_fields(&self) -> u32 {
|
||||
self.fields.len() as u32
|
||||
}
|
||||
|
||||
pub fn as_struct_type(&self, ctx: &'ctx Context) -> StructType<'ctx> {
|
||||
let llvm_fields = self.fields.iter().map(|field| field.ty).collect_vec();
|
||||
ctx.struct_type(llvm_fields.as_slice(), false)
|
||||
}
|
||||
|
||||
pub fn is_type(&self, scrutinee: StructType<'ctx>) -> IsInstanceResult {
|
||||
// Check scrutinee's number of struct fields
|
||||
if scrutinee.count_fields() != self.num_fields() {
|
||||
return Err(format!(
|
||||
"Expected {expected_count} field(s) in `{struct_name}` type, got {got_count}",
|
||||
struct_name = self.name,
|
||||
expected_count = self.num_fields(),
|
||||
got_count = scrutinee.count_fields(),
|
||||
));
|
||||
}
|
||||
|
||||
// Check the scrutinee's field types
|
||||
for field in self.fields.iter() {
|
||||
let expected_field_ty = field.ty;
|
||||
let got_field_ty = scrutinee.get_field_type_at_index(field.gep_index).unwrap();
|
||||
|
||||
if let Err(field_err) = check_basic_types_match(expected_field_ty, got_field_ty) {
|
||||
return Err(format!(
|
||||
"Field GEP index {gep_index} does not match the expected type of ({struct_name}::{field_name}): {field_err}",
|
||||
gep_index = field.gep_index,
|
||||
struct_name = self.name,
|
||||
field_name = field.name,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Done
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'ctx> StructFieldsBuilder<'ctx> {
|
||||
fn start(name: &'static str) -> Self {
|
||||
StructFieldsBuilder { gep_index_counter: 0, name, fields: Vec::new() }
|
||||
}
|
||||
|
||||
fn add_field(&mut self, name: &'static str, ty: BasicTypeEnum<'ctx>) -> StructField<'ctx> {
|
||||
let index = self.gep_index_counter;
|
||||
self.gep_index_counter += 1;
|
||||
StructField { gep_index: index, name, ty }
|
||||
}
|
||||
|
||||
fn end(self) -> StructFields<'ctx> {
|
||||
StructFields { name: self.name, fields: self.fields }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NpArrayType<'ctx> {
|
||||
pub size_type: IntType<'ctx>,
|
||||
pub elem_type: BasicTypeEnum<'ctx>,
|
||||
}
|
||||
|
||||
pub struct NpArrayStructFields<'ctx> {
|
||||
pub whole_struct: StructFields<'ctx>,
|
||||
pub data: StructField<'ctx>,
|
||||
pub itemsize: StructField<'ctx>,
|
||||
pub ndims: StructField<'ctx>,
|
||||
pub shape: StructField<'ctx>,
|
||||
pub strides: StructField<'ctx>,
|
||||
}
|
||||
|
||||
impl<'ctx> NpArrayType<'ctx> {
|
||||
pub fn new_opaque_elem(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
size_type: IntType<'ctx>,
|
||||
) -> NpArrayType<'ctx> {
|
||||
NpArrayType { size_type, elem_type: ctx.ctx.i8_type().as_basic_type_enum() }
|
||||
}
|
||||
|
||||
pub fn struct_type(&self, ctx: &CodeGenContext<'ctx, '_>) -> StructType<'ctx> {
|
||||
self.fields().whole_struct.as_struct_type(ctx.ctx)
|
||||
}
|
||||
|
||||
pub fn fields(&self) -> NpArrayStructFields<'ctx> {
|
||||
let mut builder = StructFieldsBuilder::start("NpArray");
|
||||
|
||||
let addrspace = AddressSpace::default();
|
||||
|
||||
let byte_type = self.size_type.get_context().i8_type();
|
||||
|
||||
// Make sure the struct matches PERFECTLY with that defined in `nac3core/irrt`.
|
||||
let data = builder.add_field("data", byte_type.ptr_type(addrspace).into());
|
||||
let itemsize = builder.add_field("itemsize", self.size_type.into());
|
||||
let ndims = builder.add_field("ndims", self.size_type.into());
|
||||
let shape = builder.add_field("shape", self.size_type.ptr_type(addrspace).into());
|
||||
let strides = builder.add_field("strides", self.size_type.ptr_type(addrspace).into());
|
||||
|
||||
NpArrayStructFields { whole_struct: builder.end(), data, itemsize, ndims, shape, strides }
|
||||
}
|
||||
|
||||
/// Allocate an `ndarray` on the stack, with the following notes:
|
||||
///
|
||||
/// - `ndarray.ndims` will be initialized to `in_ndims`.
|
||||
/// - `ndarray.itemsize` will be initialized to `self.elem_type.size_of()`.
|
||||
/// - `ndarray.shape` and `ndarray.strides` will be stack-allocated arrays of `in_ndims` elements,
|
||||
/// all with empty/uninitialized values.
|
||||
pub fn alloca(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
in_ndims: IntValue<'ctx>,
|
||||
name: &str,
|
||||
) -> NpArrayValue<'ctx> {
|
||||
let fields = self.fields();
|
||||
let ptr =
|
||||
ctx.builder.build_alloca(fields.whole_struct.as_struct_type(ctx.ctx), name).unwrap();
|
||||
|
||||
// Allocate `in_ndims` elements of `size_type` on the stack for `shape` and `strides`
|
||||
let allocated_shape =
|
||||
ctx.builder.build_array_alloca(fields.shape.ty, in_ndims, "allocated_shape").unwrap();
|
||||
let allocated_strides = ctx
|
||||
.builder
|
||||
.build_array_alloca(fields.strides.ty, in_ndims, "allocated_strides")
|
||||
.unwrap();
|
||||
|
||||
let value = NpArrayValue { ty: *self, ptr };
|
||||
value.store_ndims(ctx, in_ndims);
|
||||
value.store_itemsize(ctx, self.elem_type.size_of().unwrap());
|
||||
value.store_shape(ctx, allocated_shape);
|
||||
value.store_strides(ctx, allocated_strides);
|
||||
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct NpArrayValue<'ctx> {
|
||||
pub ty: NpArrayType<'ctx>,
|
||||
pub ptr: PointerValue<'ctx>,
|
||||
}
|
||||
|
||||
impl<'ctx> NpArrayValue<'ctx> {
|
||||
pub fn load_ndims(&self, ctx: &CodeGenContext<'ctx, '_>) -> IntValue<'ctx> {
|
||||
let field = self.ty.fields().ndims;
|
||||
field.load(ctx, self.ptr).into_int_value()
|
||||
}
|
||||
|
||||
pub fn store_ndims(&self, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>) {
|
||||
let field = self.ty.fields().ndims;
|
||||
field.store(ctx, self.ptr, value);
|
||||
}
|
||||
|
||||
pub fn load_itemsize(&self, ctx: &CodeGenContext<'ctx, '_>) -> IntValue<'ctx> {
|
||||
let field = self.ty.fields().itemsize;
|
||||
field.load(ctx, self.ptr).into_int_value()
|
||||
}
|
||||
|
||||
pub fn store_itemsize(&self, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>) {
|
||||
let field = self.ty.fields().itemsize;
|
||||
field.store(ctx, self.ptr, value);
|
||||
}
|
||||
|
||||
pub fn load_shape(&self, ctx: &CodeGenContext<'ctx, '_>) -> PointerValue<'ctx> {
|
||||
let field = self.ty.fields().shape;
|
||||
field.load(ctx, self.ptr).into_pointer_value()
|
||||
}
|
||||
|
||||
pub fn store_shape(&self, ctx: &CodeGenContext<'ctx, '_>, value: PointerValue<'ctx>) {
|
||||
let field = self.ty.fields().shape;
|
||||
field.store(ctx, self.ptr, value);
|
||||
}
|
||||
|
||||
pub fn load_strides(&self, ctx: &CodeGenContext<'ctx, '_>) -> PointerValue<'ctx> {
|
||||
let field = self.ty.fields().strides;
|
||||
field.load(ctx, self.ptr).into_pointer_value()
|
||||
}
|
||||
|
||||
pub fn store_strides(&self, ctx: &CodeGenContext<'ctx, '_>, value: PointerValue<'ctx>) {
|
||||
let field = self.ty.fields().strides;
|
||||
field.store(ctx, self.ptr, value);
|
||||
}
|
||||
|
||||
/// TODO: DOCUMENT ME -- NDIMS WOULD NEVER CHANGE!!!!!
|
||||
pub fn shape_slice(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let field = self.ty.fields().shape;
|
||||
field.gep(ctx, self.ptr);
|
||||
|
||||
let ndims = self.load_ndims(ctx);
|
||||
|
||||
TypedArrayLikeAdapter {
|
||||
adapted: ArraySliceValue(self.ptr, ndims, Some(field.name)),
|
||||
downcast_fn: Box::new(|_ctx, x| x.into_int_value()),
|
||||
upcast_fn: Box::new(|_ctx, x| x.as_basic_value_enum()),
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: DOCUMENT ME -- NDIMS WOULD NEVER CHANGE!!!!!
|
||||
pub fn strides_slice(
|
||||
&self,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let field = self.ty.fields().strides;
|
||||
field.gep(ctx, self.ptr);
|
||||
|
||||
let ndims = self.load_ndims(ctx);
|
||||
|
||||
TypedArrayLikeAdapter {
|
||||
adapted: ArraySliceValue(self.ptr, ndims, Some(field.name)),
|
||||
downcast_fn: Box::new(|_ctx, x| x.into_int_value()),
|
||||
upcast_fn: Box::new(|_ctx, x| x.as_basic_value_enum()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1362,100 +1362,101 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
|
|||
} else if ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|
||||
|| ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|
||||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
todo!()
|
||||
// let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let is_ndarray1 = ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
let is_ndarray2 = ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
// let is_ndarray1 = ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
// let is_ndarray2 = ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
|
||||
if is_ndarray1 && is_ndarray2 {
|
||||
let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty1);
|
||||
let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty2);
|
||||
// if is_ndarray1 && is_ndarray2 {
|
||||
// let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty1);
|
||||
// let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty2);
|
||||
|
||||
assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
|
||||
// assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
|
||||
|
||||
let left_val =
|
||||
NDArrayValue::from_ptr_val(left_val.into_pointer_value(), llvm_usize, None);
|
||||
let right_val =
|
||||
NDArrayValue::from_ptr_val(right_val.into_pointer_value(), llvm_usize, None);
|
||||
// let left_val =
|
||||
// NDArrayValue::from_ptr_val(left_val.into_pointer_value(), llvm_usize, None);
|
||||
// let right_val =
|
||||
// NDArrayValue::from_ptr_val(right_val.into_pointer_value(), llvm_usize, None);
|
||||
|
||||
let res = if op.base == Operator::MatMult {
|
||||
// MatMult is the only binop which is not an elementwise op
|
||||
numpy::ndarray_matmul_2d(
|
||||
generator,
|
||||
ctx,
|
||||
ndarray_dtype1,
|
||||
match op.variant {
|
||||
BinopVariant::Normal => None,
|
||||
BinopVariant::AugAssign => Some(left_val),
|
||||
},
|
||||
left_val,
|
||||
right_val,
|
||||
)?
|
||||
} else {
|
||||
numpy::ndarray_elementwise_binop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ndarray_dtype1,
|
||||
match op.variant {
|
||||
BinopVariant::Normal => None,
|
||||
BinopVariant::AugAssign => Some(left_val),
|
||||
},
|
||||
(left_val.as_base_value().into(), false),
|
||||
(right_val.as_base_value().into(), false),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
gen_binop_expr_with_values(
|
||||
generator,
|
||||
ctx,
|
||||
(&Some(ndarray_dtype1), lhs),
|
||||
op,
|
||||
(&Some(ndarray_dtype2), rhs),
|
||||
ctx.current_loc,
|
||||
)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(
|
||||
ctx,
|
||||
generator,
|
||||
ndarray_dtype1,
|
||||
)
|
||||
},
|
||||
)?
|
||||
};
|
||||
// let res = if op.base == Operator::MatMult {
|
||||
// // MatMult is the only binop which is not an elementwise op
|
||||
// numpy::ndarray_matmul_2d(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ndarray_dtype1,
|
||||
// match op.variant {
|
||||
// BinopVariant::Normal => None,
|
||||
// BinopVariant::AugAssign => Some(left_val),
|
||||
// },
|
||||
// left_val,
|
||||
// right_val,
|
||||
// )?
|
||||
// } else {
|
||||
// numpy::ndarray_elementwise_binop_impl(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ndarray_dtype1,
|
||||
// match op.variant {
|
||||
// BinopVariant::Normal => None,
|
||||
// BinopVariant::AugAssign => Some(left_val),
|
||||
// },
|
||||
// (left_val.as_base_value().into(), false),
|
||||
// (right_val.as_base_value().into(), false),
|
||||
// |generator, ctx, (lhs, rhs)| {
|
||||
// gen_binop_expr_with_values(
|
||||
// generator,
|
||||
// ctx,
|
||||
// (&Some(ndarray_dtype1), lhs),
|
||||
// op,
|
||||
// (&Some(ndarray_dtype2), rhs),
|
||||
// ctx.current_loc,
|
||||
// )?
|
||||
// .unwrap()
|
||||
// .to_basic_value_enum(
|
||||
// ctx,
|
||||
// generator,
|
||||
// ndarray_dtype1,
|
||||
// )
|
||||
// },
|
||||
// )?
|
||||
// };
|
||||
|
||||
Ok(Some(res.as_base_value().into()))
|
||||
} else {
|
||||
let (ndarray_dtype, _) =
|
||||
unpack_ndarray_var_tys(&mut ctx.unifier, if is_ndarray1 { ty1 } else { ty2 });
|
||||
let ndarray_val = NDArrayValue::from_ptr_val(
|
||||
if is_ndarray1 { left_val } else { right_val }.into_pointer_value(),
|
||||
llvm_usize,
|
||||
None,
|
||||
);
|
||||
let res = numpy::ndarray_elementwise_binop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ndarray_dtype,
|
||||
match op.variant {
|
||||
BinopVariant::Normal => None,
|
||||
BinopVariant::AugAssign => Some(ndarray_val),
|
||||
},
|
||||
(left_val, !is_ndarray1),
|
||||
(right_val, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
gen_binop_expr_with_values(
|
||||
generator,
|
||||
ctx,
|
||||
(&Some(ndarray_dtype), lhs),
|
||||
op,
|
||||
(&Some(ndarray_dtype), rhs),
|
||||
ctx.current_loc,
|
||||
)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ndarray_dtype)
|
||||
},
|
||||
)?;
|
||||
// Ok(Some(res.as_base_value().into()))
|
||||
// } else {
|
||||
// let (ndarray_dtype, _) =
|
||||
// unpack_ndarray_var_tys(&mut ctx.unifier, if is_ndarray1 { ty1 } else { ty2 });
|
||||
// let ndarray_val = NDArrayValue::from_ptr_val(
|
||||
// if is_ndarray1 { left_val } else { right_val }.into_pointer_value(),
|
||||
// llvm_usize,
|
||||
// None,
|
||||
// );
|
||||
// let res = numpy::ndarray_elementwise_binop_impl(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ndarray_dtype,
|
||||
// match op.variant {
|
||||
// BinopVariant::Normal => None,
|
||||
// BinopVariant::AugAssign => Some(ndarray_val),
|
||||
// },
|
||||
// (left_val, !is_ndarray1),
|
||||
// (right_val, !is_ndarray2),
|
||||
// |generator, ctx, (lhs, rhs)| {
|
||||
// gen_binop_expr_with_values(
|
||||
// generator,
|
||||
// ctx,
|
||||
// (&Some(ndarray_dtype), lhs),
|
||||
// op,
|
||||
// (&Some(ndarray_dtype), rhs),
|
||||
// ctx.current_loc,
|
||||
// )?
|
||||
// .unwrap()
|
||||
// .to_basic_value_enum(ctx, generator, ndarray_dtype)
|
||||
// },
|
||||
// )?;
|
||||
|
||||
Ok(Some(res.as_base_value().into()))
|
||||
}
|
||||
// Ok(Some(res.as_base_value().into()))
|
||||
// }
|
||||
} else {
|
||||
let left_ty_enum = ctx.unifier.get_ty_immutable(left_ty.unwrap());
|
||||
let TypeEnum::TObj { fields, obj_id, .. } = left_ty_enum.as_ref() else {
|
||||
|
@ -1612,40 +1613,41 @@ pub fn gen_unaryop_expr_with_values<'ctx, G: CodeGenerator>(
|
|||
_ => val.into(),
|
||||
}
|
||||
} else if ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
|
||||
todo!()
|
||||
// let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
// let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
|
||||
|
||||
let val = NDArrayValue::from_ptr_val(val.into_pointer_value(), llvm_usize, None);
|
||||
// let val = NDArrayValue::from_ptr_val(val.into_pointer_value(), llvm_usize, None);
|
||||
|
||||
// ndarray uses `~` rather than `not` to perform elementwise inversion, convert it before
|
||||
// passing it to the elementwise codegen function
|
||||
let op = if ndarray_dtype.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::Bool.id()) {
|
||||
if op == ast::Unaryop::Invert {
|
||||
ast::Unaryop::Not
|
||||
} else {
|
||||
unreachable!(
|
||||
"ufunc {} not supported for ndarray[bool, N]",
|
||||
op.op_info().method_name,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
op
|
||||
};
|
||||
// // ndarray uses `~` rather than `not` to perform elementwise inversion, convert it before
|
||||
// // passing it to the elementwise codegen function
|
||||
// let op = if ndarray_dtype.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::Bool.id()) {
|
||||
// if op == ast::Unaryop::Invert {
|
||||
// ast::Unaryop::Not
|
||||
// } else {
|
||||
// unreachable!(
|
||||
// "ufunc {} not supported for ndarray[bool, N]",
|
||||
// op.op_info().method_name,
|
||||
// )
|
||||
// }
|
||||
// } else {
|
||||
// op
|
||||
// };
|
||||
|
||||
let res = numpy::ndarray_elementwise_unaryop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ndarray_dtype,
|
||||
None,
|
||||
val,
|
||||
|generator, ctx, val| {
|
||||
gen_unaryop_expr_with_values(generator, ctx, op, (&Some(ndarray_dtype), val))?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(ctx, generator, ndarray_dtype)
|
||||
},
|
||||
)?;
|
||||
// let res = numpy::ndarray_elementwise_unaryop_impl(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ndarray_dtype,
|
||||
// None,
|
||||
// val,
|
||||
// |generator, ctx, val| {
|
||||
// gen_unaryop_expr_with_values(generator, ctx, op, (&Some(ndarray_dtype), val))?
|
||||
// .unwrap()
|
||||
// .to_basic_value_enum(ctx, generator, ndarray_dtype)
|
||||
// },
|
||||
// )?;
|
||||
|
||||
res.as_base_value().into()
|
||||
// res.as_base_value().into()
|
||||
} else {
|
||||
unimplemented!()
|
||||
}))
|
||||
|
@ -1688,85 +1690,86 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
|
|||
if left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|
||||
|| right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|
||||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
todo!()
|
||||
// let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let (Some(left_ty), lhs) = left else { unreachable!() };
|
||||
let (Some(right_ty), rhs) = comparators[0] else { unreachable!() };
|
||||
let op = ops[0];
|
||||
// let (Some(left_ty), lhs) = left else { unreachable!() };
|
||||
// let (Some(right_ty), rhs) = comparators[0] else { unreachable!() };
|
||||
// let op = ops[0];
|
||||
|
||||
let is_ndarray1 =
|
||||
left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
let is_ndarray2 =
|
||||
right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
// let is_ndarray1 =
|
||||
// left_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
// let is_ndarray2 =
|
||||
// right_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id());
|
||||
|
||||
return if is_ndarray1 && is_ndarray2 {
|
||||
let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, left_ty);
|
||||
let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, right_ty);
|
||||
// return if is_ndarray1 && is_ndarray2 {
|
||||
// let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, left_ty);
|
||||
// let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, right_ty);
|
||||
|
||||
assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
|
||||
// assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2));
|
||||
|
||||
let left_val =
|
||||
NDArrayValue::from_ptr_val(lhs.into_pointer_value(), llvm_usize, None);
|
||||
let res = numpy::ndarray_elementwise_binop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.bool,
|
||||
None,
|
||||
(left_val.as_base_value().into(), false),
|
||||
(rhs, false),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
let val = gen_cmpop_expr_with_values(
|
||||
generator,
|
||||
ctx,
|
||||
(Some(ndarray_dtype1), lhs),
|
||||
&[op],
|
||||
&[(Some(ndarray_dtype2), rhs)],
|
||||
)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(
|
||||
ctx,
|
||||
generator,
|
||||
ctx.primitives.bool,
|
||||
)?;
|
||||
// let left_val =
|
||||
// NDArrayValue::from_ptr_val(lhs.into_pointer_value(), llvm_usize, None);
|
||||
// let res = numpy::ndarray_elementwise_binop_impl(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ctx.primitives.bool,
|
||||
// None,
|
||||
// (left_val.as_base_value().into(), false),
|
||||
// (rhs, false),
|
||||
// |generator, ctx, (lhs, rhs)| {
|
||||
// let val = gen_cmpop_expr_with_values(
|
||||
// generator,
|
||||
// ctx,
|
||||
// (Some(ndarray_dtype1), lhs),
|
||||
// &[op],
|
||||
// &[(Some(ndarray_dtype2), rhs)],
|
||||
// )?
|
||||
// .unwrap()
|
||||
// .to_basic_value_enum(
|
||||
// ctx,
|
||||
// generator,
|
||||
// ctx.primitives.bool,
|
||||
// )?;
|
||||
|
||||
Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
|
||||
},
|
||||
)?;
|
||||
// Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
|
||||
// },
|
||||
// )?;
|
||||
|
||||
Ok(Some(res.as_base_value().into()))
|
||||
} else {
|
||||
let (ndarray_dtype, _) = unpack_ndarray_var_tys(
|
||||
&mut ctx.unifier,
|
||||
if is_ndarray1 { left_ty } else { right_ty },
|
||||
);
|
||||
let res = numpy::ndarray_elementwise_binop_impl(
|
||||
generator,
|
||||
ctx,
|
||||
ctx.primitives.bool,
|
||||
None,
|
||||
(lhs, !is_ndarray1),
|
||||
(rhs, !is_ndarray2),
|
||||
|generator, ctx, (lhs, rhs)| {
|
||||
let val = gen_cmpop_expr_with_values(
|
||||
generator,
|
||||
ctx,
|
||||
(Some(ndarray_dtype), lhs),
|
||||
&[op],
|
||||
&[(Some(ndarray_dtype), rhs)],
|
||||
)?
|
||||
.unwrap()
|
||||
.to_basic_value_enum(
|
||||
ctx,
|
||||
generator,
|
||||
ctx.primitives.bool,
|
||||
)?;
|
||||
// Ok(Some(res.as_base_value().into()))
|
||||
// } else {
|
||||
// let (ndarray_dtype, _) = unpack_ndarray_var_tys(
|
||||
// &mut ctx.unifier,
|
||||
// if is_ndarray1 { left_ty } else { right_ty },
|
||||
// );
|
||||
// let res = numpy::ndarray_elementwise_binop_impl(
|
||||
// generator,
|
||||
// ctx,
|
||||
// ctx.primitives.bool,
|
||||
// None,
|
||||
// (lhs, !is_ndarray1),
|
||||
// (rhs, !is_ndarray2),
|
||||
// |generator, ctx, (lhs, rhs)| {
|
||||
// let val = gen_cmpop_expr_with_values(
|
||||
// generator,
|
||||
// ctx,
|
||||
// (Some(ndarray_dtype), lhs),
|
||||
// &[op],
|
||||
// &[(Some(ndarray_dtype), rhs)],
|
||||
// )?
|
||||
// .unwrap()
|
||||
// .to_basic_value_enum(
|
||||
// ctx,
|
||||
// generator,
|
||||
// ctx.primitives.bool,
|
||||
// )?;
|
||||
|
||||
Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
|
||||
},
|
||||
)?;
|
||||
// Ok(generator.bool_to_i8(ctx, val.into_int_value()).into())
|
||||
// },
|
||||
// )?;
|
||||
|
||||
Ok(Some(res.as_base_value().into()))
|
||||
};
|
||||
// Ok(Some(res.as_base_value().into()))
|
||||
// };
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2102,310 +2105,312 @@ fn gen_ndarray_subscript_expr<'ctx, G: CodeGenerator>(
|
|||
v: NDArrayValue<'ctx>,
|
||||
slice: &Expr<Option<Type>>,
|
||||
) -> Result<Option<ValueEnum<'ctx>>, String> {
|
||||
let llvm_i1 = ctx.ctx.bool_type();
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
todo!()
|
||||
|
||||
let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else {
|
||||
unreachable!()
|
||||
};
|
||||
// let llvm_i1 = ctx.ctx.bool_type();
|
||||
// let llvm_i32 = ctx.ctx.i32_type();
|
||||
// let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let ndims = values
|
||||
.iter()
|
||||
.map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone()))
|
||||
.collect::<Result<Vec<_>, _>>()
|
||||
.map_err(|val| {
|
||||
format!(
|
||||
"Expected non-negative literal for ndarray.ndims, got {}",
|
||||
i128::try_from(val).unwrap()
|
||||
)
|
||||
})?;
|
||||
// let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else {
|
||||
// unreachable!()
|
||||
// };
|
||||
|
||||
assert!(!ndims.is_empty());
|
||||
// let ndims = values
|
||||
// .iter()
|
||||
// .map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone()))
|
||||
// .collect::<Result<Vec<_>, _>>()
|
||||
// .map_err(|val| {
|
||||
// format!(
|
||||
// "Expected non-negative literal for ndarray.ndims, got {}",
|
||||
// i128::try_from(val).unwrap()
|
||||
// )
|
||||
// })?;
|
||||
|
||||
// The number of dimensions subscripted by the index expression.
|
||||
// Slicing a ndarray will yield the same number of dimensions, whereas indexing into a
|
||||
// dimension will remove a dimension.
|
||||
let subscripted_dims = match &slice.node {
|
||||
ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| {
|
||||
if let ExprKind::Slice { .. } = &value_subexpr.node {
|
||||
acc
|
||||
} else {
|
||||
acc + 1
|
||||
}
|
||||
}),
|
||||
// assert!(!ndims.is_empty());
|
||||
|
||||
ExprKind::Slice { .. } => 0,
|
||||
_ => 1,
|
||||
};
|
||||
// // The number of dimensions subscripted by the index expression.
|
||||
// // Slicing a ndarray will yield the same number of dimensions, whereas indexing into a
|
||||
// // dimension will remove a dimension.
|
||||
// let subscripted_dims = match &slice.node {
|
||||
// ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| {
|
||||
// if let ExprKind::Slice { .. } = &value_subexpr.node {
|
||||
// acc
|
||||
// } else {
|
||||
// acc + 1
|
||||
// }
|
||||
// }),
|
||||
|
||||
let ndarray_ndims_ty = ctx.unifier.get_fresh_literal(
|
||||
ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(),
|
||||
None,
|
||||
);
|
||||
let ndarray_ty =
|
||||
make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty));
|
||||
let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type();
|
||||
let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type();
|
||||
let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum();
|
||||
// ExprKind::Slice { .. } => 0,
|
||||
// _ => 1,
|
||||
// };
|
||||
|
||||
// Check that len is non-zero
|
||||
let len = v.load_ndims(ctx);
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(),
|
||||
"0:IndexError",
|
||||
"too many indices for array: array is {0}-dimensional but 1 were indexed",
|
||||
[Some(len), None, None],
|
||||
slice.location,
|
||||
);
|
||||
// let ndarray_ndims_ty = ctx.unifier.get_fresh_literal(
|
||||
// ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(),
|
||||
// None,
|
||||
// );
|
||||
// let ndarray_ty =
|
||||
// make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty));
|
||||
// let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type();
|
||||
// let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type();
|
||||
// let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum();
|
||||
|
||||
// Normalizes a possibly-negative index to its corresponding positive index
|
||||
let normalize_index = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
index: IntValue<'ctx>,
|
||||
dim: u64| {
|
||||
gen_if_else_expr_callback(
|
||||
generator,
|
||||
ctx,
|
||||
|_, ctx| {
|
||||
Ok(ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "")
|
||||
.unwrap())
|
||||
},
|
||||
|_, _| Ok(Some(index)),
|
||||
|generator, ctx| {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
// // Check that len is non-zero
|
||||
// let len = v.load_ndims(ctx);
|
||||
// ctx.make_assert(
|
||||
// generator,
|
||||
// ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(),
|
||||
// "0:IndexError",
|
||||
// "too many indices for array: array is {0}-dimensional but 1 were indexed",
|
||||
// [Some(len), None, None],
|
||||
// slice.location,
|
||||
// );
|
||||
|
||||
let len = unsafe {
|
||||
v.dim_sizes().get_typed_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(dim, true),
|
||||
None,
|
||||
)
|
||||
};
|
||||
// // Normalizes a possibly-negative index to its corresponding positive index
|
||||
// let normalize_index = |generator: &mut G,
|
||||
// ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
// index: IntValue<'ctx>,
|
||||
// dim: u64| {
|
||||
// gen_if_else_expr_callback(
|
||||
// generator,
|
||||
// ctx,
|
||||
// |_, ctx| {
|
||||
// Ok(ctx
|
||||
// .builder
|
||||
// .build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "")
|
||||
// .unwrap())
|
||||
// },
|
||||
// |_, _| Ok(Some(index)),
|
||||
// |generator, ctx| {
|
||||
// let llvm_i32 = ctx.ctx.i32_type();
|
||||
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_int_add(
|
||||
len,
|
||||
ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(),
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
// let len = unsafe {
|
||||
// v.dim_sizes().get_typed_unchecked(
|
||||
// ctx,
|
||||
// generator,
|
||||
// &llvm_usize.const_int(dim, true),
|
||||
// None,
|
||||
// )
|
||||
// };
|
||||
|
||||
Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap()))
|
||||
},
|
||||
)
|
||||
.map(|v| v.map(BasicValueEnum::into_int_value))
|
||||
};
|
||||
// let index = ctx
|
||||
// .builder
|
||||
// .build_int_add(
|
||||
// len,
|
||||
// ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(),
|
||||
// "",
|
||||
// )
|
||||
// .unwrap();
|
||||
|
||||
// Converts a slice expression into a slice-range tuple
|
||||
let expr_to_slice = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
node: &ExprKind<Option<Type>>,
|
||||
dim: u64| {
|
||||
match node {
|
||||
ExprKind::Constant { value: Constant::Int(v), .. } => {
|
||||
let Some(index) =
|
||||
normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
// Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap()))
|
||||
// },
|
||||
// )
|
||||
// .map(|v| v.map(BasicValueEnum::into_int_value))
|
||||
// };
|
||||
|
||||
Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
}
|
||||
// // Converts a slice expression into a slice-range tuple
|
||||
// let expr_to_slice = |generator: &mut G,
|
||||
// ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
// node: &ExprKind<Option<Type>>,
|
||||
// dim: u64| {
|
||||
// match node {
|
||||
// ExprKind::Constant { value: Constant::Int(v), .. } => {
|
||||
// let Some(index) =
|
||||
// normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)?
|
||||
// else {
|
||||
// return Ok(None);
|
||||
// };
|
||||
|
||||
ExprKind::Slice { lower, upper, step } => {
|
||||
let dim_sz = unsafe {
|
||||
v.dim_sizes().get_typed_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(dim, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
// Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
// }
|
||||
|
||||
handle_slice_indices(lower, upper, step, ctx, generator, dim_sz)
|
||||
}
|
||||
// ExprKind::Slice { lower, upper, step } => {
|
||||
// let dim_sz = unsafe {
|
||||
// v.dim_sizes().get_typed_unchecked(
|
||||
// ctx,
|
||||
// generator,
|
||||
// &llvm_usize.const_int(dim, false),
|
||||
// None,
|
||||
// )
|
||||
// };
|
||||
|
||||
_ => {
|
||||
let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) };
|
||||
let index = index
|
||||
.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
|
||||
.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, dim)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
// handle_slice_indices(lower, upper, step, ctx, generator, dim_sz)
|
||||
// }
|
||||
|
||||
Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
}
|
||||
}
|
||||
};
|
||||
// _ => {
|
||||
// let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) };
|
||||
// let index = index
|
||||
// .to_basic_value_enum(ctx, generator, slice.custom.unwrap())?
|
||||
// .into_int_value();
|
||||
// let Some(index) = normalize_index(generator, ctx, index, dim)? else {
|
||||
// return Ok(None);
|
||||
// };
|
||||
|
||||
let make_indices_arr = |generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>|
|
||||
-> Result<_, String> {
|
||||
Ok(if let ExprKind::Tuple { elts, .. } = &slice.node {
|
||||
let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap());
|
||||
let index_addr = generator.gen_array_var_alloc(
|
||||
ctx,
|
||||
llvm_int_ty,
|
||||
llvm_usize.const_int(elts.len() as u64, false),
|
||||
None,
|
||||
)?;
|
||||
// Ok(Some((index, index, llvm_i32.const_int(1, true))))
|
||||
// }
|
||||
// }
|
||||
// };
|
||||
|
||||
for (i, elt) in elts.iter().enumerate() {
|
||||
let Some(index) = generator.gen_expr(ctx, elt)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
// let make_indices_arr = |generator: &mut G,
|
||||
// ctx: &mut CodeGenContext<'ctx, '_>|
|
||||
// -> Result<_, String> {
|
||||
// Ok(if let ExprKind::Tuple { elts, .. } = &slice.node {
|
||||
// let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap());
|
||||
// let index_addr = generator.gen_array_var_alloc(
|
||||
// ctx,
|
||||
// llvm_int_ty,
|
||||
// llvm_usize.const_int(elts.len() as u64, false),
|
||||
// None,
|
||||
// )?;
|
||||
|
||||
let index = index
|
||||
.to_basic_value_enum(ctx, generator, elt.custom.unwrap())?
|
||||
.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, 0)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
// for (i, elt) in elts.iter().enumerate() {
|
||||
// let Some(index) = generator.gen_expr(ctx, elt)? else {
|
||||
// return Ok(None);
|
||||
// };
|
||||
|
||||
let store_ptr = unsafe {
|
||||
index_addr.ptr_offset_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(i as u64, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
}
|
||||
// let index = index
|
||||
// .to_basic_value_enum(ctx, generator, elt.custom.unwrap())?
|
||||
// .into_int_value();
|
||||
// let Some(index) = normalize_index(generator, ctx, index, 0)? else {
|
||||
// return Ok(None);
|
||||
// };
|
||||
|
||||
Some(index_addr)
|
||||
} else if let Some(index) = generator.gen_expr(ctx, slice)? {
|
||||
let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap());
|
||||
let index_addr = generator.gen_array_var_alloc(
|
||||
ctx,
|
||||
llvm_int_ty,
|
||||
llvm_usize.const_int(1u64, false),
|
||||
None,
|
||||
)?;
|
||||
// let store_ptr = unsafe {
|
||||
// index_addr.ptr_offset_unchecked(
|
||||
// ctx,
|
||||
// generator,
|
||||
// &llvm_usize.const_int(i as u64, false),
|
||||
// None,
|
||||
// )
|
||||
// };
|
||||
// ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
// }
|
||||
|
||||
let index =
|
||||
index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value();
|
||||
let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) };
|
||||
// Some(index_addr)
|
||||
// } else if let Some(index) = generator.gen_expr(ctx, slice)? {
|
||||
// let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap());
|
||||
// let index_addr = generator.gen_array_var_alloc(
|
||||
// ctx,
|
||||
// llvm_int_ty,
|
||||
// llvm_usize.const_int(1u64, false),
|
||||
// None,
|
||||
// )?;
|
||||
|
||||
let store_ptr = unsafe {
|
||||
index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
// let index =
|
||||
// index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value();
|
||||
// let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) };
|
||||
|
||||
Some(index_addr)
|
||||
} else {
|
||||
None
|
||||
})
|
||||
};
|
||||
// let store_ptr = unsafe {
|
||||
// index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
// };
|
||||
// ctx.builder.build_store(store_ptr, index).unwrap();
|
||||
|
||||
Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 {
|
||||
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
// Some(index_addr)
|
||||
// } else {
|
||||
// None
|
||||
// })
|
||||
// };
|
||||
|
||||
v.data().get(ctx, generator, &index_addr, None).into()
|
||||
} else {
|
||||
match &slice.node {
|
||||
ExprKind::Tuple { elts, .. } => {
|
||||
let slices = elts
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64))
|
||||
.take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some))
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
if slices.len() < elts.len() {
|
||||
return Ok(None);
|
||||
}
|
||||
// Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 {
|
||||
// let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
|
||||
let slices = slices.into_iter().map(Option::unwrap).collect_vec();
|
||||
// v.data().get(ctx, generator, &index_addr, None).into()
|
||||
// } else {
|
||||
// match &slice.node {
|
||||
// ExprKind::Tuple { elts, .. } => {
|
||||
// let slices = elts
|
||||
// .iter()
|
||||
// .enumerate()
|
||||
// .map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64))
|
||||
// .take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some))
|
||||
// .collect::<Result<Vec<_>, _>>()?;
|
||||
// if slices.len() < elts.len() {
|
||||
// return Ok(None);
|
||||
// }
|
||||
|
||||
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into()
|
||||
}
|
||||
// let slices = slices.into_iter().map(Option::unwrap).collect_vec();
|
||||
|
||||
ExprKind::Slice { .. } => {
|
||||
let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
// numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into()
|
||||
// }
|
||||
|
||||
numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into()
|
||||
}
|
||||
// ExprKind::Slice { .. } => {
|
||||
// let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else {
|
||||
// return Ok(None);
|
||||
// };
|
||||
|
||||
_ => {
|
||||
// Accessing an element from a multi-dimensional `ndarray`
|
||||
// numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into()
|
||||
// }
|
||||
|
||||
let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
// _ => {
|
||||
// // Accessing an element from a multi-dimensional `ndarray`
|
||||
|
||||
// Create a new array, remove the top dimension from the dimension-size-list, and copy the
|
||||
// elements over
|
||||
let subscripted_ndarray =
|
||||
generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?;
|
||||
let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None);
|
||||
// let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) };
|
||||
|
||||
let num_dims = v.load_ndims(ctx);
|
||||
ndarray.store_ndims(
|
||||
ctx,
|
||||
generator,
|
||||
ctx.builder
|
||||
.build_int_sub(num_dims, llvm_usize.const_int(1, false), "")
|
||||
.unwrap(),
|
||||
);
|
||||
// // Create a new array, remove the top dimension from the dimension-size-list, and copy the
|
||||
// // elements over
|
||||
// let subscripted_ndarray =
|
||||
// generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?;
|
||||
// let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None);
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims);
|
||||
// let num_dims = v.load_ndims(ctx);
|
||||
// ndarray.store_ndims(
|
||||
// ctx,
|
||||
// generator,
|
||||
// ctx.builder
|
||||
// .build_int_sub(num_dims, llvm_usize.const_int(1, false), "")
|
||||
// .unwrap(),
|
||||
// );
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let v_dims_src_ptr = unsafe {
|
||||
v.dim_sizes().ptr_offset_unchecked(
|
||||
ctx,
|
||||
generator,
|
||||
&llvm_usize.const_int(1, false),
|
||||
None,
|
||||
)
|
||||
};
|
||||
call_memcpy_generic(
|
||||
ctx,
|
||||
ndarray.dim_sizes().base_ptr(ctx, generator),
|
||||
v_dims_src_ptr,
|
||||
ctx.builder
|
||||
.build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "")
|
||||
.map(Into::into)
|
||||
.unwrap(),
|
||||
llvm_i1.const_zero(),
|
||||
);
|
||||
// let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
// ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims);
|
||||
|
||||
let ndarray_num_elems = call_ndarray_calc_size(
|
||||
generator,
|
||||
ctx,
|
||||
&ndarray.dim_sizes().as_slice_value(ctx, generator),
|
||||
(None, None),
|
||||
);
|
||||
ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems);
|
||||
// let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
// let v_dims_src_ptr = unsafe {
|
||||
// v.dim_sizes().ptr_offset_unchecked(
|
||||
// ctx,
|
||||
// generator,
|
||||
// &llvm_usize.const_int(1, false),
|
||||
// None,
|
||||
// )
|
||||
// };
|
||||
// call_memcpy_generic(
|
||||
// ctx,
|
||||
// ndarray.dim_sizes().base_ptr(ctx, generator),
|
||||
// v_dims_src_ptr,
|
||||
// ctx.builder
|
||||
// .build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "")
|
||||
// .map(Into::into)
|
||||
// .unwrap(),
|
||||
// llvm_i1.const_zero(),
|
||||
// );
|
||||
|
||||
let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None);
|
||||
call_memcpy_generic(
|
||||
ctx,
|
||||
ndarray.data().base_ptr(ctx, generator),
|
||||
v_data_src_ptr,
|
||||
ctx.builder
|
||||
.build_int_mul(
|
||||
ndarray_num_elems,
|
||||
llvm_ndarray_data_t.size_of().unwrap(),
|
||||
"",
|
||||
)
|
||||
.map(Into::into)
|
||||
.unwrap(),
|
||||
llvm_i1.const_zero(),
|
||||
);
|
||||
// let ndarray_num_elems = call_ndarray_calc_size(
|
||||
// generator,
|
||||
// ctx,
|
||||
// &ndarray.dim_sizes().as_slice_value(ctx, generator),
|
||||
// (None, None),
|
||||
// );
|
||||
// ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems);
|
||||
|
||||
ndarray.as_base_value().into()
|
||||
}
|
||||
}
|
||||
}))
|
||||
// let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None);
|
||||
// call_memcpy_generic(
|
||||
// ctx,
|
||||
// ndarray.data().base_ptr(ctx, generator),
|
||||
// v_data_src_ptr,
|
||||
// ctx.builder
|
||||
// .build_int_mul(
|
||||
// ndarray_num_elems,
|
||||
// llvm_ndarray_data_t.size_of().unwrap(),
|
||||
// "",
|
||||
// )
|
||||
// .map(Into::into)
|
||||
// .unwrap(),
|
||||
// llvm_i1.const_zero(),
|
||||
// );
|
||||
|
||||
// ndarray.as_base_value().into()
|
||||
// }
|
||||
// }
|
||||
// }))
|
||||
}
|
||||
|
||||
/// See [`CodeGenerator::gen_expr`].
|
||||
|
|
|
@ -1,414 +0,0 @@
using int8_t = _BitInt(8);
using uint8_t = unsigned _BitInt(8);
using int32_t = _BitInt(32);
using uint32_t = unsigned _BitInt(32);
using int64_t = _BitInt(64);
using uint64_t = unsigned _BitInt(64);

// NDArray indices are always `uint32_t`.
using NDIndex = uint32_t;
// The type of an index or a value describing the length of a range/slice is always `int32_t`.
using SliceIndex = int32_t;

namespace {
template <typename T>
const T& max(const T& a, const T& b) {
    return a > b ? a : b;
}

template <typename T>
const T& min(const T& a, const T& b) {
    return a > b ? b : a;
}

// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function
template <typename T>
T __nac3_int_exp_impl(T base, T exp) {
    T res = 1;
    /* repeated squaring method */
    do {
        if (exp & 1) {
            res *= base; /* for n odd */
        }
        exp >>= 1;
        base *= base;
    } while (exp);
    return res;
}

template <typename SizeT>
SizeT __nac3_ndarray_calc_size_impl(
    const SizeT* list_data,
    SizeT list_len,
    SizeT begin_idx,
    SizeT end_idx
) {
    __builtin_assume(end_idx <= list_len);

    SizeT num_elems = 1;
    for (SizeT i = begin_idx; i < end_idx; ++i) {
        SizeT val = list_data[i];
        __builtin_assume(val > 0);
        num_elems *= val;
    }
    return num_elems;
}

template <typename SizeT>
void __nac3_ndarray_calc_nd_indices_impl(
    SizeT index,
    const SizeT* dims,
    SizeT num_dims,
    NDIndex* idxs
) {
    SizeT stride = 1;
    for (SizeT dim = 0; dim < num_dims; dim++) {
        SizeT i = num_dims - dim - 1;
        __builtin_assume(dims[i] > 0);
        idxs[i] = (index / stride) % dims[i];
        stride *= dims[i];
    }
}

template <typename SizeT>
SizeT __nac3_ndarray_flatten_index_impl(
    const SizeT* dims,
    SizeT num_dims,
    const NDIndex* indices,
    SizeT num_indices
) {
    SizeT idx = 0;
    SizeT stride = 1;
    for (SizeT i = 0; i < num_dims; ++i) {
        SizeT ri = num_dims - i - 1;
        if (ri < num_indices) {
            idx += stride * indices[ri];
        }

        __builtin_assume(dims[i] > 0);
        stride *= dims[ri];
    }
    return idx;
}

template <typename SizeT>
void __nac3_ndarray_calc_broadcast_impl(
    const SizeT* lhs_dims,
    SizeT lhs_ndims,
    const SizeT* rhs_dims,
    SizeT rhs_ndims,
    SizeT* out_dims
) {
    SizeT max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;

    for (SizeT i = 0; i < max_ndims; ++i) {
        const SizeT* lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : nullptr;
        const SizeT* rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : nullptr;
        SizeT* out_dim = &out_dims[max_ndims - i - 1];

        if (lhs_dim_sz == nullptr) {
            *out_dim = *rhs_dim_sz;
        } else if (rhs_dim_sz == nullptr) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == 1) {
            *out_dim = *rhs_dim_sz;
        } else if (*rhs_dim_sz == 1) {
            *out_dim = *lhs_dim_sz;
        } else if (*lhs_dim_sz == *rhs_dim_sz) {
            *out_dim = *lhs_dim_sz;
        } else {
            __builtin_unreachable();
        }
    }
}

template <typename SizeT>
void __nac3_ndarray_calc_broadcast_idx_impl(
    const SizeT* src_dims,
    SizeT src_ndims,
    const NDIndex* in_idx,
    NDIndex* out_idx
) {
    for (SizeT i = 0; i < src_ndims; ++i) {
        SizeT src_i = src_ndims - i - 1;
        out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
    }
}
} // namespace

extern "C" {
#define DEF_nac3_int_exp_(T) \
    T __nac3_int_exp_##T(T base, T exp) {\
        return __nac3_int_exp_impl(base, exp);\
    }

DEF_nac3_int_exp_(int32_t)
DEF_nac3_int_exp_(int64_t)
DEF_nac3_int_exp_(uint32_t)
DEF_nac3_int_exp_(uint64_t)

SliceIndex __nac3_slice_index_bound(SliceIndex i, const SliceIndex len) {
    if (i < 0) {
        i = len + i;
    }
    if (i < 0) {
        return 0;
    } else if (i > len) {
        return len;
    }
    return i;
}

SliceIndex __nac3_range_slice_len(
    const SliceIndex start,
    const SliceIndex end,
    const SliceIndex step
) {
    SliceIndex diff = end - start;
    if (diff > 0 && step > 0) {
        return ((diff - 1) / step) + 1;
    } else if (diff < 0 && step < 0) {
        return ((diff + 1) / step) + 1;
    } else {
        return 0;
    }
}

// Handle list assignment and dropping part of the list when
// both dest_step and src_step are +1.
// - All the index must *not* be out-of-bound or negative,
// - The end index is *inclusive*,
// - The length of src and dest slice size should already
//   be checked: if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest)
SliceIndex __nac3_list_slice_assign_var_size(
    SliceIndex dest_start,
    SliceIndex dest_end,
    SliceIndex dest_step,
    uint8_t* dest_arr,
    SliceIndex dest_arr_len,
    SliceIndex src_start,
    SliceIndex src_end,
    SliceIndex src_step,
    uint8_t* src_arr,
    SliceIndex src_arr_len,
    const SliceIndex size
) {
    /* if dest_arr_len == 0, do nothing since we do not support extending list */
    if (dest_arr_len == 0) return dest_arr_len;
    /* if both step is 1, memmove directly, handle the dropping of the list, and shrink size */
    if (src_step == dest_step && dest_step == 1) {
        const SliceIndex src_len = (src_end >= src_start) ? (src_end - src_start + 1) : 0;
        const SliceIndex dest_len = (dest_end >= dest_start) ? (dest_end - dest_start + 1) : 0;
        if (src_len > 0) {
            __builtin_memmove(
                dest_arr + dest_start * size,
                src_arr + src_start * size,
                src_len * size
            );
        }
        if (dest_len > 0) {
            /* dropping */
            __builtin_memmove(
                dest_arr + (dest_start + src_len) * size,
                dest_arr + (dest_end + 1) * size,
                (dest_arr_len - dest_end - 1) * size
            );
        }
        /* shrink size */
        return dest_arr_len - (dest_len - src_len);
    }
    /* if two range overlaps, need alloca */
    uint8_t need_alloca =
        (dest_arr == src_arr)
        && !(
            max(dest_start, dest_end) < min(src_start, src_end)
            || max(src_start, src_end) < min(dest_start, dest_end)
        );
    if (need_alloca) {
        uint8_t* tmp = reinterpret_cast<uint8_t *>(__builtin_alloca(src_arr_len * size));
        __builtin_memcpy(tmp, src_arr, src_arr_len * size);
        src_arr = tmp;
    }
    SliceIndex src_ind = src_start;
    SliceIndex dest_ind = dest_start;
    for (;
        (src_step > 0) ? (src_ind <= src_end) : (src_ind >= src_end);
        src_ind += src_step, dest_ind += dest_step
    ) {
        /* for constant optimization */
        if (size == 1) {
            __builtin_memcpy(dest_arr + dest_ind, src_arr + src_ind, 1);
        } else if (size == 4) {
            __builtin_memcpy(dest_arr + dest_ind * 4, src_arr + src_ind * 4, 4);
        } else if (size == 8) {
            __builtin_memcpy(dest_arr + dest_ind * 8, src_arr + src_ind * 8, 8);
        } else {
            /* memcpy for var size, cannot overlap after previous alloca */
            __builtin_memcpy(dest_arr + dest_ind * size, src_arr + src_ind * size, size);
        }
    }
    /* only dest_step == 1 can we shrink the dest list. */
    /* size should be ensured prior to calling this function */
    if (dest_step == 1 && dest_end >= dest_start) {
        __builtin_memmove(
            dest_arr + dest_ind * size,
            dest_arr + (dest_end + 1) * size,
            (dest_arr_len - dest_end - 1) * size
        );
        return dest_arr_len - (dest_end - dest_ind) - 1;
    }
    return dest_arr_len;
}

int32_t __nac3_isinf(double x) {
    return __builtin_isinf(x);
}

int32_t __nac3_isnan(double x) {
    return __builtin_isnan(x);
}

double tgamma(double arg);

double __nac3_gamma(double z) {
    // Handling for denormals
    //     | x                 | Python gamma(x) | C tgamma(x) |
    // --- | ----------------- | --------------- | ----------- |
    // (1) | nan               | nan             | nan         |
    // (2) | -inf              | -inf            | inf         |
    // (3) | inf               | inf             | inf         |
    // (4) | 0.0               | inf             | inf         |
    // (5) | {-1.0, -2.0, ...} | inf             | nan         |

    // (1)-(3)
    if (__builtin_isinf(z) || __builtin_isnan(z)) {
        return z;
    }

    double v = tgamma(z);

    // (4)-(5)
    return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}

double lgamma(double arg);

double __nac3_gammaln(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: gammaln(-inf) -> -inf
    // - libm : lgamma(-inf) -> inf

    if (__builtin_isinf(x)) {
        return x;
    }

    return lgamma(x);
}

double j0(double x);

double __nac3_j0(double x) {
    // libm's handling of value overflows differs from scipy:
    // - scipy: j0(inf) -> nan
    // - libm : j0(inf) -> 0.0

    if (__builtin_isinf(x)) {
        return __builtin_nan("");
    }

    return j0(x);
}

uint32_t __nac3_ndarray_calc_size(
    const uint32_t* list_data,
    uint32_t list_len,
    uint32_t begin_idx,
    uint32_t end_idx
) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

uint64_t __nac3_ndarray_calc_size64(
    const uint64_t* list_data,
    uint64_t list_len,
    uint64_t begin_idx,
    uint64_t end_idx
) {
    return __nac3_ndarray_calc_size_impl(list_data, list_len, begin_idx, end_idx);
}

void __nac3_ndarray_calc_nd_indices(
    uint32_t index,
    const uint32_t* dims,
    uint32_t num_dims,
    NDIndex* idxs
) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

void __nac3_ndarray_calc_nd_indices64(
    uint64_t index,
    const uint64_t* dims,
    uint64_t num_dims,
    NDIndex* idxs
) {
    __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs);
}

uint32_t __nac3_ndarray_flatten_index(
    const uint32_t* dims,
    uint32_t num_dims,
    const NDIndex* indices,
    uint32_t num_indices
) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

uint64_t __nac3_ndarray_flatten_index64(
    const uint64_t* dims,
    uint64_t num_dims,
    const NDIndex* indices,
    uint64_t num_indices
) {
    return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices);
}

void __nac3_ndarray_calc_broadcast(
    const uint32_t* lhs_dims,
    uint32_t lhs_ndims,
    const uint32_t* rhs_dims,
    uint32_t rhs_ndims,
    uint32_t* out_dims
) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast64(
    const uint64_t* lhs_dims,
    uint64_t lhs_ndims,
    const uint64_t* rhs_dims,
    uint64_t rhs_ndims,
    uint64_t* out_dims
) {
    return __nac3_ndarray_calc_broadcast_impl(lhs_dims, lhs_ndims, rhs_dims, rhs_ndims, out_dims);
}

void __nac3_ndarray_calc_broadcast_idx(
    const uint32_t* src_dims,
    uint32_t src_ndims,
    const NDIndex* in_idx,
    NDIndex* out_idx
) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}

void __nac3_ndarray_calc_broadcast_idx64(
    const uint64_t* src_dims,
    uint64_t src_ndims,
    const NDIndex* in_idx,
    NDIndex* out_idx
) {
    __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx);
}
} // extern "C"

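For reference, here is a small worked sketch of how the slice helpers above line up with Python's slice semantics. The expected values are derived from the definitions of __nac3_slice_index_bound and __nac3_range_slice_len shown in this file; the example itself is not part of the original source and assumes it is compiled in the same translation unit as those helpers.

    #include <cassert>

    void slice_helper_examples() {
        // For a sequence of length 10, Python's index -3 normalises to 7,
        // and out-of-range indices clamp to the ends instead of wrapping.
        assert(__nac3_slice_index_bound(-3, 10) == 7);
        assert(__nac3_slice_index_bound(-15, 10) == 0);
        assert(__nac3_slice_index_bound(15, 10) == 10);

        // Mirrors len(range(start, end, step)):
        assert(__nac3_range_slice_len(0, 10, 3) == 4);   // 0, 3, 6, 9
        assert(__nac3_range_slice_len(10, 0, -2) == 5);  // 10, 8, 6, 4, 2
        assert(__nac3_range_slice_len(0, 10, -1) == 0);  // empty: signs disagree
    }
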
@ -1,8 +1,14 @@
use crate::typecheck::typedef::Type;
use crate::{
    codegen::classes::{NDArrayType, NpArrayType},
    typecheck::typedef::Type,
    util::SizeVariant,
};

mod test;

use super::{
    classes::{
        ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue, NDArrayValue,
        ArrayLikeIndexer, ArrayLikeValue, ArraySliceValue, ListValue, NDArrayValue, NpArrayValue,
        TypedArrayLikeAdapter, UntypedArrayLikeAccessor,
    },
    llvm_intrinsics, CodeGenContext, CodeGenerator,

@ -14,8 +20,8 @@ use inkwell::{
    context::Context,
    memory_buffer::MemoryBuffer,
    module::Module,
    types::{BasicTypeEnum, IntType},
    values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue},
    types::{BasicType, BasicTypeEnum, FunctionType, IntType, PointerType},
    values::{BasicValueEnum, CallSiteValue, FloatValue, FunctionValue, IntValue, PointerValue},
    AddressSpace, IntPredicate,
};
use itertools::Either;

@ -563,367 +569,62 @@ pub fn call_j0<'ctx>(ctx: &CodeGenContext<'ctx, '_>, v: FloatValue<'ctx>) -> Flo
|
|||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_size`. Returns an [`IntValue`] representing the
|
||||
/// calculated total size.
|
||||
///
|
||||
/// * `dims` - An [`ArrayLikeIndexer`] containing the size of each dimension.
|
||||
/// * `range` - The dimension index to begin and end (exclusively) calculating the dimensions for,
|
||||
/// or [`None`] if starting from the first dimension and ending at the last dimension respectively.
|
||||
pub fn call_ndarray_calc_size<'ctx, G, Dims>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
dims: &Dims,
|
||||
(begin, end): (Option<IntValue<'ctx>>, Option<IntValue<'ctx>>),
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Dims: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
fn get_size_variant<'ctx>(ty: IntType<'ctx>) -> SizeVariant {
|
||||
match ty.get_bit_width() {
|
||||
32 => SizeVariant::Bits32,
|
||||
64 => SizeVariant::Bits64,
|
||||
_ => unreachable!("Unsupported int type bit width {}", ty.get_bit_width()),
|
||||
}
|
||||
}
|
||||
|
||||
let ndarray_calc_size_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_size",
|
||||
64 => "__nac3_ndarray_calc_size64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_size_fn_t = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_usize.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
let ndarray_calc_size_fn =
|
||||
ctx.module.get_function(ndarray_calc_size_fn_name).unwrap_or_else(|| {
|
||||
ctx.module.add_function(ndarray_calc_size_fn_name, ndarray_calc_size_fn_t, None)
|
||||
fn get_size_type_dependent_function<'ctx, BuildFuncTypeFn>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
size_type: IntType<'ctx>,
|
||||
base_name: &str,
|
||||
build_func_type: BuildFuncTypeFn,
|
||||
) -> FunctionValue<'ctx>
|
||||
where
|
||||
BuildFuncTypeFn: Fn() -> FunctionType<'ctx>,
|
||||
{
|
||||
let mut fn_name = base_name.to_owned();
|
||||
match get_size_variant(size_type) {
|
||||
SizeVariant::Bits32 => {
|
||||
// The original fn_name is the correct function name
|
||||
}
|
||||
SizeVariant::Bits64 => {
|
||||
// Append "64" at the end, this is the naming convention for 64-bit
|
||||
fn_name.push_str("64");
|
||||
}
|
||||
}
|
||||
|
||||
// Get (or declare then get if does not exist) the corresponding function
|
||||
ctx.module.get_function(&fn_name).unwrap_or_else(|| {
|
||||
let fn_type = build_func_type();
|
||||
ctx.module.add_function(&fn_name, fn_type, None)
|
||||
})
|
||||
}
|
||||
|
||||
fn get_ndarray_struct_ptr<'ctx>(ctx: &'ctx Context, size_type: IntType<'ctx>) -> PointerType<'ctx> {
|
||||
let i8_type = ctx.i8_type();
|
||||
|
||||
let ndarray_ty = NpArrayType { size_type, elem_type: i8_type.as_basic_type_enum() };
|
||||
let struct_ty = ndarray_ty.fields().whole_struct.as_struct_type(ctx);
|
||||
struct_ty.ptr_type(AddressSpace::default())
|
||||
}
|
||||
|
||||
pub fn call_nac3_ndarray_size<'ctx>(
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NpArrayValue<'ctx>,
|
||||
) -> IntValue<'ctx> {
|
||||
let size_type = ndarray.ty.size_type;
|
||||
let function = get_size_type_dependent_function(ctx, size_type, "__nac3_ndarray_size", || {
|
||||
size_type.fn_type(&[get_ndarray_struct_ptr(ctx.ctx, size_type).into()], false)
|
||||
});
|
||||
|
||||
let begin = begin.unwrap_or_else(|| llvm_usize.const_zero());
|
||||
let end = end.unwrap_or_else(|| dims.size(ctx, generator));
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_size_fn,
|
||||
&[
|
||||
dims.base_ptr(ctx, generator).into(),
|
||||
dims.size(ctx, generator).into(),
|
||||
begin.into(),
|
||||
end.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.build_call(function, &[ndarray.ptr.into()], "size")
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_nd_indices`. Returns a [`TypeArrayLikeAdpater`]
|
||||
/// containing `i32` indices of the flattened index.
|
||||
///
|
||||
/// * `index` - The index to compute the multidimensional index for.
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
pub fn call_ndarray_calc_nd_indices<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
index: IntValue<'ctx>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_void = ctx.ctx.void_type();
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_nd_indices_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_nd_indices",
|
||||
64 => "__nac3_ndarray_calc_nd_indices64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_nd_indices_fn =
|
||||
ctx.module.get_function(ndarray_calc_nd_indices_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_void.fn_type(
|
||||
&[llvm_usize.into(), llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_nd_indices_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.dim_sizes();
|
||||
|
||||
let indices = ctx.builder.build_array_alloca(llvm_i32, ndarray_num_dims, "").unwrap();
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_nd_indices_fn,
|
||||
&[
|
||||
index.into(),
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(indices, ndarray_num_dims, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
||||
|
||||
fn call_ndarray_flatten_index_impl<'ctx, G, Indices>(
|
||||
generator: &G,
|
||||
ctx: &CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Indices,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Indices: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
debug_assert_eq!(
|
||||
IntType::try_from(indices.element_type(ctx, generator))
|
||||
.map(IntType::get_bit_width)
|
||||
.unwrap_or_default(),
|
||||
llvm_i32.get_bit_width(),
|
||||
"Expected i32 value for argument `indices` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
debug_assert_eq!(
|
||||
indices.size(ctx, generator).get_type().get_bit_width(),
|
||||
llvm_usize.get_bit_width(),
|
||||
"Expected usize integer value for argument `indices_size` to `call_ndarray_flatten_index_impl`"
|
||||
);
|
||||
|
||||
let ndarray_flatten_index_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_flatten_index",
|
||||
64 => "__nac3_ndarray_flatten_index64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_flatten_index_fn =
|
||||
ctx.module.get_function(ndarray_flatten_index_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_usize.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_flatten_index_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let ndarray_num_dims = ndarray.load_ndims(ctx);
|
||||
let ndarray_dims = ndarray.dim_sizes();
|
||||
|
||||
let index = ctx
|
||||
.builder
|
||||
.build_call(
|
||||
ndarray_flatten_index_fn,
|
||||
&[
|
||||
ndarray_dims.base_ptr(ctx, generator).into(),
|
||||
ndarray_num_dims.into(),
|
||||
indices.base_ptr(ctx, generator).into(),
|
||||
indices.size(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.map(CallSiteValue::try_as_basic_value)
|
||||
.map(|v| v.map_left(BasicValueEnum::into_int_value))
|
||||
.map(Either::unwrap_left)
|
||||
.unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_flatten_index`. Returns the flattened index for the
|
||||
/// multidimensional index.
|
||||
///
|
||||
/// * `ndarray` - LLVM pointer to the `NDArray`. This value must be the LLVM representation of an
|
||||
/// `NDArray`.
|
||||
/// * `indices` - The multidimensional index to compute the flattened index for.
|
||||
pub fn call_ndarray_flatten_index<'ctx, G, Index>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
ndarray: NDArrayValue<'ctx>,
|
||||
indices: &Index,
|
||||
) -> IntValue<'ctx>
|
||||
where
|
||||
G: CodeGenerator + ?Sized,
|
||||
Index: ArrayLikeIndexer<'ctx>,
|
||||
{
|
||||
call_ndarray_flatten_index_impl(generator, ctx, ndarray, indices)
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast`. Returns a tuple containing the number of
|
||||
/// dimension and size of each dimension of the resultant `ndarray`.
|
||||
pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
lhs: NDArrayValue<'ctx>,
|
||||
rhs: NDArrayValue<'ctx>,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast",
|
||||
64 => "__nac3_ndarray_calc_broadcast64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
llvm_usize.into(),
|
||||
llvm_pusize.into(),
|
||||
],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let min_ndims = llvm_intrinsics::call_int_umin(ctx, lhs_ndims, rhs_ndims, None);
|
||||
|
||||
gen_for_callback_incrementing(
|
||||
generator,
|
||||
ctx,
|
||||
llvm_usize.const_zero(),
|
||||
(min_ndims, false),
|
||||
|generator, ctx, _, idx| {
|
||||
let idx = ctx.builder.build_int_sub(min_ndims, idx, "").unwrap();
|
||||
let (lhs_dim_sz, rhs_dim_sz) = unsafe {
|
||||
(
|
||||
lhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
rhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
|
||||
)
|
||||
};
|
||||
|
||||
let llvm_usize_const_one = llvm_usize.const_int(1, false);
|
||||
let lhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let rhs_eqz = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, rhs_dim_sz, llvm_usize_const_one, "")
|
||||
.unwrap();
|
||||
let lhs_or_rhs_eqz = ctx.builder.build_or(lhs_eqz, rhs_eqz, "").unwrap();
|
||||
|
||||
let lhs_eq_rhs = ctx
|
||||
.builder
|
||||
.build_int_compare(IntPredicate::EQ, lhs_dim_sz, rhs_dim_sz, "")
|
||||
.unwrap();
|
||||
|
||||
let is_compatible = ctx.builder.build_or(lhs_or_rhs_eqz, lhs_eq_rhs, "").unwrap();
|
||||
|
||||
ctx.make_assert(
|
||||
generator,
|
||||
is_compatible,
|
||||
"0:ValueError",
|
||||
"operands could not be broadcast together",
|
||||
[None, None, None],
|
||||
ctx.current_loc,
|
||||
);
|
||||
|
||||
Ok(())
|
||||
},
|
||||
llvm_usize.const_int(1, false),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let max_ndims = llvm_intrinsics::call_int_umax(ctx, lhs_ndims, rhs_ndims, None);
|
||||
let lhs_dims = lhs.dim_sizes().base_ptr(ctx, generator);
|
||||
let lhs_ndims = lhs.load_ndims(ctx);
|
||||
let rhs_dims = rhs.dim_sizes().base_ptr(ctx, generator);
|
||||
let rhs_ndims = rhs.load_ndims(ctx);
|
||||
let out_dims = ctx.builder.build_array_alloca(llvm_usize, max_ndims, "").unwrap();
|
||||
let out_dims = ArraySliceValue::from_ptr_val(out_dims, max_ndims, None);
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[
|
||||
lhs_dims.into(),
|
||||
lhs_ndims.into(),
|
||||
rhs_dims.into(),
|
||||
rhs_ndims.into(),
|
||||
out_dims.base_ptr(ctx, generator).into(),
|
||||
],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
out_dims,
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns an [`ArrayAllocaValue`]
|
||||
/// containing the indices used for accessing `array` corresponding to the index of the broadcasted
|
||||
/// array `broadcast_idx`.
|
||||
pub fn call_ndarray_calc_broadcast_index<
|
||||
'ctx,
|
||||
G: CodeGenerator + ?Sized,
|
||||
BroadcastIdx: UntypedArrayLikeAccessor<'ctx>,
|
||||
>(
|
||||
generator: &mut G,
|
||||
ctx: &mut CodeGenContext<'ctx, '_>,
|
||||
array: NDArrayValue<'ctx>,
|
||||
broadcast_idx: &BroadcastIdx,
|
||||
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
|
||||
let llvm_i32 = ctx.ctx.i32_type();
|
||||
let llvm_usize = generator.get_size_type(ctx.ctx);
|
||||
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
|
||||
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
|
||||
|
||||
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
|
||||
32 => "__nac3_ndarray_calc_broadcast_idx",
|
||||
64 => "__nac3_ndarray_calc_broadcast_idx64",
|
||||
bw => unreachable!("Unsupported size type bit width: {}", bw),
|
||||
};
|
||||
let ndarray_calc_broadcast_fn =
|
||||
ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
|
||||
let fn_type = llvm_usize.fn_type(
|
||||
&[llvm_pusize.into(), llvm_usize.into(), llvm_pi32.into(), llvm_pi32.into()],
|
||||
false,
|
||||
);
|
||||
|
||||
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
|
||||
});
|
||||
|
||||
let broadcast_size = broadcast_idx.size(ctx, generator);
|
||||
let out_idx = ctx.builder.build_array_alloca(llvm_i32, broadcast_size, "").unwrap();
|
||||
|
||||
let array_dims = array.dim_sizes().base_ptr(ctx, generator);
|
||||
let array_ndims = array.load_ndims(ctx);
|
||||
let broadcast_idx_ptr = unsafe {
|
||||
broadcast_idx.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None)
|
||||
};
|
||||
|
||||
ctx.builder
|
||||
.build_call(
|
||||
ndarray_calc_broadcast_fn,
|
||||
&[array_dims.into(), array_ndims.into(), broadcast_idx_ptr.into(), out_idx.into()],
|
||||
"",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
TypedArrayLikeAdapter::from(
|
||||
ArraySliceValue::from_ptr_val(out_idx, broadcast_size, None),
|
||||
Box::new(|_, v| v.into_int_value()),
|
||||
Box::new(|_, v| v.into()),
|
||||
)
|
||||
.try_as_basic_value()
|
||||
.unwrap_left()
|
||||
.into_int_value()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,26 @@
#[cfg(test)]
mod tests {
    use std::{path::Path, process::Command};

    #[test]
    fn run_irrt_test() {
        assert!(
            cfg!(feature = "test"),
            "Please do `cargo test -F test` to compile `irrt_test.out` and run test"
        );

        let irrt_test_out_path = Path::new(concat!(env!("OUT_DIR"), "/irrt_test.out"));
        let output = Command::new(irrt_test_out_path.to_str().unwrap()).output().unwrap();

        if !output.status.success() {
            eprintln!("irrt_test failed with status {}:", output.status);
            eprintln!("====== stdout ======");
            eprintln!("{}", String::from_utf8(output.stdout).unwrap());
            eprintln!("====== stderr ======");
            eprintln!("{}", String::from_utf8(output.stderr).unwrap());
            eprintln!("====================");

            panic!("irrt_test failed");
        }
    }
}
File diff suppressed because it is too large
@ -23,3 +23,4 @@ pub mod codegen;
pub mod symbol_resolver;
pub mod toplevel;
pub mod typecheck;
pub mod util;

@ -1,5 +1,6 @@
use std::iter::once;

use crate::util::SizeVariant;
use helper::{debug_assert_prim_is_allowed, make_exception_fields, PrimDefDetails};
use indexmap::IndexMap;
use inkwell::{

@ -278,21 +279,12 @@ pub fn get_builtins(unifier: &mut Unifier, primitives: &PrimitiveStore) -> Built
        .collect()
}

/// A helper enum used by [`BuiltinBuilder`]
#[derive(Clone, Copy)]
enum SizeVariant {
    Bits32,
    Bits64,
}

impl SizeVariant {
    fn of_int(self, primitives: &PrimitiveStore) -> Type {
        match self {
fn size_variant_to_int_type(variant: SizeVariant, primitives: &PrimitiveStore) -> Type {
    match variant {
        SizeVariant::Bits32 => primitives.int32,
        SizeVariant::Bits64 => primitives.int64,
    }
}
}

struct BuiltinBuilder<'a> {
    unifier: &'a mut Unifier,

@ -961,8 +953,9 @@ impl<'a> BuiltinBuilder<'a> {
    resolver: None,
    codegen_callback: Some(Arc::new(GenCall::new(Box::new(
        |ctx, obj, fun, args, generator| {
            gen_ndarray_copy(ctx, &obj, fun, &args, generator)
                .map(|val| Some(val.as_basic_value_enum()))
            todo!()
            // gen_ndarray_copy(ctx, &obj, fun, &args, generator)
            // .map(|val| Some(val.as_basic_value_enum()))
        },
    )))),
    loc: None,

@ -978,8 +971,9 @@ impl<'a> BuiltinBuilder<'a> {
    resolver: None,
    codegen_callback: Some(Arc::new(GenCall::new(Box::new(
        |ctx, obj, fun, args, generator| {
            gen_ndarray_fill(ctx, &obj, fun, &args, generator)?;
            Ok(None)
            todo!()
            // gen_ndarray_fill(ctx, &obj, fun, &args, generator)?;
            // Ok(None)
        },
    )))),
    loc: None,

@ -1059,7 +1053,7 @@ impl<'a> BuiltinBuilder<'a> {
    );

    // The size variant of the function determines the size of the returned int.
    let int_sized = size_variant.of_int(self.primitives);
    let int_sized = size_variant_to_int_type(size_variant, self.primitives);

    let ndarray_int_sized =
        make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));

@ -1084,7 +1078,7 @@ impl<'a> BuiltinBuilder<'a> {
    let arg_ty = fun.0.args[0].ty;
    let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;

    let ret_elem_ty = size_variant.of_int(&ctx.primitives);
    let ret_elem_ty = size_variant_to_int_type(size_variant, &ctx.primitives);
    Ok(Some(builtin_fns::call_round(generator, ctx, (arg_ty, arg), ret_elem_ty)?))
}),
)

@ -1125,7 +1119,7 @@ impl<'a> BuiltinBuilder<'a> {
    make_ndarray_ty(self.unifier, self.primitives, Some(float), Some(common_ndim.ty));

    // The size variant of the function determines the type of int returned
    let int_sized = size_variant.of_int(self.primitives);
    let int_sized = size_variant_to_int_type(size_variant, self.primitives);
    let ndarray_int_sized =
        make_ndarray_ty(self.unifier, self.primitives, Some(int_sized), Some(common_ndim.ty));

@ -1148,7 +1142,7 @@ impl<'a> BuiltinBuilder<'a> {
    let arg_ty = fun.0.args[0].ty;
    let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?;

    let ret_elem_ty = size_variant.of_int(&ctx.primitives);
    let ret_elem_ty = size_variant_to_int_type(size_variant, &ctx.primitives);
    let func = match kind {
        Kind::Ceil => builtin_fns::call_ceil,
        Kind::Floor => builtin_fns::call_floor,

@ -1199,13 +1193,14 @@ impl<'a> BuiltinBuilder<'a> {
    self.ndarray_float,
    &[(self.ndarray_factory_fn_shape_arg_tvar.ty, "shape")],
    Box::new(move |ctx, obj, fun, args, generator| {
        let func = match prim {
            PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => gen_ndarray_empty,
            PrimDef::FunNpZeros => gen_ndarray_zeros,
            PrimDef::FunNpOnes => gen_ndarray_ones,
            _ => unreachable!(),
        };
        func(ctx, &obj, fun, &args, generator).map(|val| Some(val.as_basic_value_enum()))
        todo!()
        // let func = match prim {
        // PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => gen_ndarray_empty,
        // PrimDef::FunNpZeros => gen_ndarray_zeros,
        // PrimDef::FunNpOnes => gen_ndarray_ones,
        // _ => unreachable!(),
        // };
        // func(ctx, &obj, fun, &args, generator).map(|val| Some(val.as_basic_value_enum()))
    }),
)
}

@ -1251,8 +1246,9 @@ impl<'a> BuiltinBuilder<'a> {
    resolver: None,
    codegen_callback: Some(Arc::new(GenCall::new(Box::new(
        |ctx, obj, fun, args, generator| {
            gen_ndarray_array(ctx, &obj, fun, &args, generator)
                .map(|val| Some(val.as_basic_value_enum()))
            todo!()
            // gen_ndarray_array(ctx, &obj, fun, &args, generator)
            // .map(|val| Some(val.as_basic_value_enum()))
        },
    )))),
    loc: None,

@ -1270,8 +1266,9 @@ impl<'a> BuiltinBuilder<'a> {
    // type variable
    &[(self.list_int32, "shape"), (tv.ty, "fill_value")],
    Box::new(move |ctx, obj, fun, args, generator| {
        gen_ndarray_full(ctx, &obj, fun, &args, generator)
            .map(|val| Some(val.as_basic_value_enum()))
        todo!()
        // gen_ndarray_full(ctx, &obj, fun, &args, generator)
        // .map(|val| Some(val.as_basic_value_enum()))
    }),
)
}

@ -1303,8 +1300,9 @@ impl<'a> BuiltinBuilder<'a> {
    resolver: None,
    codegen_callback: Some(Arc::new(GenCall::new(Box::new(
        |ctx, obj, fun, args, generator| {
            gen_ndarray_eye(ctx, &obj, fun, &args, generator)
                .map(|val| Some(val.as_basic_value_enum()))
            todo!()
            // gen_ndarray_eye(ctx, &obj, fun, &args, generator)
            // .map(|val| Some(val.as_basic_value_enum()))
        },
    )))),
    loc: None,

@ -1317,8 +1315,9 @@ impl<'a> BuiltinBuilder<'a> {
    self.ndarray_float_2d,
    &[(int32, "n")],
    Box::new(|ctx, obj, fun, args, generator| {
        gen_ndarray_identity(ctx, &obj, fun, &args, generator)
            .map(|val| Some(val.as_basic_value_enum()))
        todo!()
        // gen_ndarray_identity(ctx, &obj, fun, &args, generator)
        // .map(|val| Some(val.as_basic_value_enum()))
    }),
),
_ => unreachable!(),

@ -34,6 +34,7 @@ pub mod numpy;
pub mod type_annotation;
use composer::*;
use type_annotation::*;

#[cfg(test)]
mod test;

@ -0,0 +1,5 @@
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SizeVariant {
    Bits32,
    Bits64,
}

@ -81,6 +81,7 @@ in rec {
  ''
    mkdir -p $out/bin
    ln -s ${llvm-nac3}/bin/clang.exe $out/bin/clang-irrt.exe
    ln -s ${llvm-nac3}/bin/clang.exe $out/bin/clang-irrt-test.exe
    ln -s ${llvm-nac3}/bin/llvm-as.exe $out/bin/llvm-as-irrt.exe
  '';
  nac3artiq = pkgs.rustPlatform.buildRustPackage {