diff --git a/nac3core/irrt/irrt/core.hpp b/nac3core/irrt/irrt/core.hpp index 5e1bce92..73f3f894 100644 --- a/nac3core/irrt/irrt/core.hpp +++ b/nac3core/irrt/irrt/core.hpp @@ -4,7 +4,7 @@ #include // NDArray indices are always `uint32_t`. -using NDIndex = uint32_t; +using NDIndexInt = uint32_t; // The type of an index or a value describing the length of a // range/slice is always `int32_t`. using SliceIndex = int32_t; @@ -43,7 +43,7 @@ SizeT __nac3_ndarray_calc_size_impl(const SizeT* list_data, SizeT list_len, template void __nac3_ndarray_calc_nd_indices_impl(SizeT index, const SizeT* dims, - SizeT num_dims, NDIndex* idxs) { + SizeT num_dims, NDIndexInt* idxs) { SizeT stride = 1; for (SizeT dim = 0; dim < num_dims; dim++) { SizeT i = num_dims - dim - 1; @@ -55,7 +55,7 @@ void __nac3_ndarray_calc_nd_indices_impl(SizeT index, const SizeT* dims, template SizeT __nac3_ndarray_flatten_index_impl(const SizeT* dims, SizeT num_dims, - const NDIndex* indices, + const NDIndexInt* indices, SizeT num_indices) { SizeT idx = 0; SizeT stride = 1; @@ -104,8 +104,8 @@ void __nac3_ndarray_calc_broadcast_impl(const SizeT* lhs_dims, SizeT lhs_ndims, template void __nac3_ndarray_calc_broadcast_idx_impl(const SizeT* src_dims, SizeT src_ndims, - const NDIndex* in_idx, - NDIndex* out_idx) { + const NDIndexInt* in_idx, + NDIndexInt* out_idx) { for (SizeT i = 0; i < src_ndims; ++i) { SizeT src_i = src_ndims - i - 1; out_idx[src_i] = src_dims[src_i] == 1 ? 
0 : in_idx[src_i]; @@ -293,24 +293,24 @@ uint64_t __nac3_ndarray_calc_size64(const uint64_t* list_data, } void __nac3_ndarray_calc_nd_indices(uint32_t index, const uint32_t* dims, - uint32_t num_dims, NDIndex* idxs) { + uint32_t num_dims, NDIndexInt* idxs) { __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs); } void __nac3_ndarray_calc_nd_indices64(uint64_t index, const uint64_t* dims, - uint64_t num_dims, NDIndex* idxs) { + uint64_t num_dims, NDIndexInt* idxs) { __nac3_ndarray_calc_nd_indices_impl(index, dims, num_dims, idxs); } uint32_t __nac3_ndarray_flatten_index(const uint32_t* dims, uint32_t num_dims, - const NDIndex* indices, + const NDIndexInt* indices, uint32_t num_indices) { return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices); } uint64_t __nac3_ndarray_flatten_index64(const uint64_t* dims, uint64_t num_dims, - const NDIndex* indices, + const NDIndexInt* indices, uint64_t num_indices) { return __nac3_ndarray_flatten_index_impl(dims, num_dims, indices, num_indices); @@ -333,16 +333,16 @@ void __nac3_ndarray_calc_broadcast64(const uint64_t* lhs_dims, void __nac3_ndarray_calc_broadcast_idx(const uint32_t* src_dims, uint32_t src_ndims, - const NDIndex* in_idx, - NDIndex* out_idx) { + const NDIndexInt* in_idx, + NDIndexInt* out_idx) { __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx); } void __nac3_ndarray_calc_broadcast_idx64(const uint64_t* src_dims, uint64_t src_ndims, - const NDIndex* in_idx, - NDIndex* out_idx) { + const NDIndexInt* in_idx, + NDIndexInt* out_idx) { __nac3_ndarray_calc_broadcast_idx_impl(src_dims, src_ndims, in_idx, out_idx); } diff --git a/nac3core/irrt/irrt/ndarray/basic.hpp b/nac3core/irrt/irrt/ndarray/basic.hpp index cf25cce6..55ffb77b 100644 --- a/nac3core/irrt/irrt/ndarray/basic.hpp +++ b/nac3core/irrt/irrt/ndarray/basic.hpp @@ -50,9 +50,9 @@ SizeT calc_size_from_shape(SizeT ndims, const SizeT* shape) { template void set_indices_by_nth(SizeT ndims, const SizeT* shape, 
SizeT* indices, SizeT nth) { - for (int32_t i = 0; i < ndims; i++) { - int32_t axis = ndims - i - 1; - int32_t dim = shape[axis]; + for (SizeT i = 0; i < ndims; i++) { + SizeT axis = ndims - i - 1; + SizeT dim = shape[axis]; indices[axis] = nth % dim; nth /= dim; @@ -93,8 +93,9 @@ SizeT len(const NDArray* ndarray) { if (ndarray->ndims == 0) { raise_exception(SizeT, EXN_TYPE_ERROR, "len() of unsized object", NO_PARAM, NO_PARAM, NO_PARAM); + } else { + return ndarray->shape[0]; } - return ndarray->shape[0]; } /** @@ -156,6 +157,8 @@ uint8_t* get_pelement_by_indices(const NDArray* ndarray, return element; } +int counter = 0; + /** * @brief Return the pointer to the nth (0-based) element in a flattened view of `ndarray`. * @@ -163,9 +166,14 @@ uint8_t* get_pelement_by_indices(const NDArray* ndarray, */ template uint8_t* get_nth_pelement(const NDArray* ndarray, SizeT nth) { - SizeT* indices = (SizeT*)__builtin_alloca(sizeof(SizeT) * ndarray->ndims); - util::set_indices_by_nth(ndarray->ndims, ndarray->shape, indices, nth); - return get_pelement_by_indices(ndarray, indices); + uint8_t* element = ndarray->data; + for (SizeT i = 0; i < ndarray->ndims; i++) { + SizeT axis = ndarray->ndims - i - 1; + SizeT dim = ndarray->shape[axis]; + element += ndarray->strides[axis] * (nth % dim); + nth /= dim; + } + return element; } /** @@ -259,12 +267,13 @@ bool __nac3_ndarray_is_c_contiguous64(NDArray* ndarray) { return is_c_contiguous(ndarray); } -uint8_t* __nac3_ndarray_get_nth_pelement(const NDArray* ndarray, int32_t nth) { +uint8_t* __nac3_ndarray_get_nth_pelement(const NDArray* ndarray, + int32_t nth) { return get_nth_pelement(ndarray, nth); } uint8_t* __nac3_ndarray_get_nth_pelement64(const NDArray* ndarray, - int64_t nth) { + int64_t nth) { return get_nth_pelement(ndarray, nth); } diff --git a/nac3core/irrt/irrt/ndarray/broadcast.hpp b/nac3core/irrt/irrt/ndarray/broadcast.hpp new file mode 100644 index 00000000..ef53334b --- /dev/null +++ 
b/nac3core/irrt/irrt/ndarray/broadcast.hpp @@ -0,0 +1,157 @@ +#pragma once + +#include +#include +#include + +namespace { +template +struct ShapeEntry { + SizeT ndims; + SizeT* shape; +}; +} // namespace + +namespace { +namespace ndarray { +namespace broadcast { +namespace util { +/** + * @brief Return true if `src_shape` can broadcast to `dst_shape`. + * + * See https://numpy.org/doc/stable/user/basics.broadcasting.html + */ +template +bool can_broadcast_shape_to(SizeT target_ndims, const SizeT* target_shape, + SizeT src_ndims, const SizeT* src_shape) { + if (src_ndims > target_ndims) { + return false; + } + + for (SizeT i = 0; i < src_ndims; i++) { + SizeT target_dim = target_shape[target_ndims - i - 1]; + SizeT src_dim = src_shape[src_ndims - i - 1]; + if (!(src_dim == 1 || target_dim == src_dim)) { + return false; + } + } + return true; +} + +/** + * @brief Performs `np.broadcast_shapes` + */ +template +void broadcast_shapes(SizeT num_shapes, const ShapeEntry* shapes, + SizeT dst_ndims, SizeT* dst_shape) { + // `dst_ndims` must be `max([shape.ndims for shape in shapes])`, but the caller has to calculate it/provide it + // for this function since they should already know in order to allocate `dst_shape` in the first place. + // `dst_shape` must be pre-allocated. 
+ // `dst_shape` does not have to be initialized + for (SizeT dst_axis = 0; dst_axis < dst_ndims; dst_axis++) { + dst_shape[dst_axis] = 1; + } + + for (SizeT i = 0; i < num_shapes; i++) { + ShapeEntry entry = shapes[i]; + + for (SizeT j = 0; j < entry.ndims; j++) { + SizeT entry_axis = entry.ndims - j - 1; + SizeT dst_axis = dst_ndims - j - 1; + + SizeT entry_dim = entry.shape[entry_axis]; + SizeT dst_dim = dst_shape[dst_axis]; + + if (dst_dim == 1) { + dst_shape[dst_axis] = entry_dim; + } else if (entry_dim == 1 || entry_dim == dst_dim) { + // Do nothing + } else { + raise_exception(SizeT, EXN_VALUE_ERROR, + "shape mismatch: objects cannot be broadcast " + "to a single shape.", + NO_PARAM, NO_PARAM, NO_PARAM); + } + } + } +} +} // namespace util + +/** + * @brief Perform `np.broadcast_to(, )` and appropriate assertions. + * + * This function attempts to broadcast `src_ndarray` to a new shape defined by `dst_ndarray.shape`, + * and return the result by modifying `dst_ndarray`. + * + * # Notes on `dst_ndarray` + * The caller is responsible for allocating space for the resulting ndarray. + * Here is what this function expects from `dst_ndarray` when called: + * - `dst_ndarray->data` does not have to be initialized. + * - `dst_ndarray->itemsize` does not have to be initialized. + * - `dst_ndarray->ndims` must be initialized, determining the length of `dst_ndarray->shape` + * - `dst_ndarray->shape` must be allocated, and must contain the desired target broadcast shape. + * - `dst_ndarray->strides` must be allocated, through it can contain uninitialized values. + * When this function call ends: + * - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`) + * - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize` + * - `dst_ndarray->ndims` is unchanged. + * - `dst_ndarray->shape` is unchanged. + * - `dst_ndarray->strides` is updated accordingly by how ndarray broadcast_to works. 
+ */ +template +void broadcast_to(const NDArray* src_ndarray, + NDArray* dst_ndarray) { + if (!ndarray::broadcast::util::can_broadcast_shape_to( + dst_ndarray->ndims, dst_ndarray->shape, src_ndarray->ndims, + src_ndarray->shape)) { + raise_exception(SizeT, EXN_VALUE_ERROR, + "operands could not be broadcast together", + dst_ndarray->shape[0], src_ndarray->shape[0], NO_PARAM); + } + + dst_ndarray->data = src_ndarray->data; + dst_ndarray->itemsize = src_ndarray->itemsize; + + for (SizeT i = 0; i < dst_ndarray->ndims; i++) { + SizeT src_axis = src_ndarray->ndims - i - 1; + SizeT dst_axis = dst_ndarray->ndims - i - 1; + if (src_axis < 0 || (src_ndarray->shape[src_axis] == 1 && + dst_ndarray->shape[dst_axis] != 1)) { + // Freeze the steps in-place + dst_ndarray->strides[dst_axis] = 0; + } else { + dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis]; + } + } +} +} // namespace broadcast +} // namespace ndarray +} // namespace + +extern "C" { +using namespace ndarray::broadcast; + +void __nac3_ndarray_broadcast_to(NDArray* src_ndarray, + NDArray* dst_ndarray) { + broadcast_to(src_ndarray, dst_ndarray); +} + +void __nac3_ndarray_broadcast_to64(NDArray* src_ndarray, + NDArray* dst_ndarray) { + broadcast_to(src_ndarray, dst_ndarray); +} + +void __nac3_ndarray_broadcast_shapes(int32_t num_shapes, + const ShapeEntry* shapes, + int32_t dst_ndims, int32_t* dst_shape) { + ndarray::broadcast::util::broadcast_shapes(num_shapes, shapes, dst_ndims, + dst_shape); +} + +void __nac3_ndarray_broadcast_shapes64(int64_t num_shapes, + const ShapeEntry* shapes, + int64_t dst_ndims, int64_t* dst_shape) { + ndarray::broadcast::util::broadcast_shapes(num_shapes, shapes, dst_ndims, + dst_shape); +} +} \ No newline at end of file diff --git a/nac3core/irrt/irrt/ndarray/indexing.hpp b/nac3core/irrt/irrt/ndarray/indexing.hpp new file mode 100644 index 00000000..c7328242 --- /dev/null +++ b/nac3core/irrt/irrt/ndarray/indexing.hpp @@ -0,0 +1,228 @@ +#pragma once + +#include +#include 
+#include +#include +#include + +namespace { +typedef uint8_t NDIndexType; + +/** + * @brief A single element index + * + * `data` points to a `SliceIndex`. + */ + +const NDIndexType ND_INDEX_TYPE_SINGLE_ELEMENT = 0; +/** + * @brief A slice index + * + * `data` points to a `UserRange`. + */ +const NDIndexType ND_INDEX_TYPE_SLICE = 1; + +/** + * @brief `np.newaxis` / `None` + * + * `data` is unused. + */ +const NDIndexType ND_INDEX_TYPE_NEWAXIS = 2; + +/** + * @brief `Ellipsis` / `...` + * + * `data` is unused. + */ +const NDIndexType ND_INDEX_TYPE_ELLIPSIS = 3; + +/** + * @brief An index used in ndarray indexing + */ +struct NDIndex { + /** + * @brief Enum tag to specify the type of index. + * + * Please see comments of each enum constant. + */ + NDIndexType type; + + /** + * @brief The accompanying data associated with `type`. + * + * Please see comments of each enum constant. + */ + uint8_t* data; +}; +} // namespace + +namespace { +namespace ndarray { +namespace indexing { +namespace util { + +/** + * @brief Return the expected rank of the resulting ndarray + * created by indexing an ndarray of rank `ndims` using `indexes`. + * + * An IndexError is raised if the indexes are invalid. + */ +template +SizeT validate_and_deduce_ndims_after_indexing(SizeT ndims, SizeT num_indexes, + const NDIndex* indexes) { + if (num_indexes > ndims) { + raise_exception(SizeT, EXN_INDEX_ERROR, + "too many indices for array: array is {0}-dimensional, " + "but {1} were indexed", + ndims, num_indexes, NO_PARAM); + } + + // There may be ellipsis `...` in `indexes`. There can only be 0 or 1 ellipsis. + SizeT num_ellipsis = 0; + + for (SizeT i = 0; i < num_indexes; i++) { + if (indexes[i].type == ND_INDEX_TYPE_SINGLE_ELEMENT) { + ndims--; + } else if (indexes[i].type == ND_INDEX_TYPE_SLICE) { + // Nothing + } else if (indexes[i].type == ND_INDEX_TYPE_NEWAXIS) { + ndims++; + } else if (indexes[i].type == ND_INDEX_TYPE_ELLIPSIS) { + // `...` doesn't do anything to `ndims`. 
+ num_ellipsis++; + if (num_ellipsis > 1) { + raise_exception( + SizeT, EXN_INDEX_ERROR, + "an index can only have a single ellipsis ('...')", + NO_PARAM, NO_PARAM, NO_PARAM); + } + } else { + __builtin_unreachable(); + } + } + return ndims; +} +} // namespace util + +/** + * @brief Perform ndarray "basic indexing" (https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing) + * + * This is function very similar to performing `dst_ndarray = src_ndarray[indexes]` in Python (where the variables + * can all be found in the parameter of this function). + * + * In other words, this function takes in an ndarray (`src_ndarray`), index it with `indexes`, and return the + * indexed array (by writing the result to `dst_ndarray`). + * + * This function also does proper assertions on `indexes`. + * + * # Notes on `dst_ndarray` + * The caller is responsible for allocating space for the resulting ndarray. + * Here is what this function expects from `dst_ndarray` when called: + * - `dst_ndarray->data` does not have to be initialized. + * - `dst_ndarray->itemsize` does not have to be initialized. + * - `dst_ndarray->ndims` must be initialized, and it must be equal to the expected `ndims` of the `dst_ndarray` after + * indexing `src_ndarray` with `indexes`. + * - `dst_ndarray->shape` must be allocated, through it can contain uninitialized values. + * - `dst_ndarray->strides` must be allocated, through it can contain uninitialized values. + * When this function call ends: + * - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`) + * - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize` + * - `dst_ndarray->ndims` is unchanged. + * - `dst_ndarray->shape` is updated according to how `src_ndarray` is indexed. + * - `dst_ndarray->strides` is updated accordingly by how ndarray indexing works. + * + * @param indexes Indexes to index `src_ndarray`, ordered in the same way you would write them in Python. 
+ * @param src_ndarray The NDArray to be indexed. + * @param dst_ndarray The resulting NDArray after indexing. Further details in the comments above, + */ +template +void index(SizeT num_indexes, const NDIndex* indexes, + const NDArray* src_ndarray, NDArray* dst_ndarray) { + SizeT expected_dst_ndarray_ndims = + util::validate_and_deduce_ndims_after_indexing(src_ndarray->ndims, + num_indexes, indexes); + + dst_ndarray->data = src_ndarray->data; + dst_ndarray->itemsize = src_ndarray->itemsize; + + // Reference code: https://github.com/wadetb/tinynumpy/blob/0d23d22e07062ffab2afa287374c7b366eebdda1/tinynumpy/tinynumpy.py#L652 + SizeT src_axis = 0; + SizeT dst_axis = 0; + + for (SliceIndex i = 0; i < num_indexes; i++) { + const NDIndex* index = &indexes[i]; + if (index->type == ND_INDEX_TYPE_SINGLE_ELEMENT) { + SliceIndex input = *((SliceIndex*)index->data); + SliceIndex k = slice::resolve_index_in_length( + src_ndarray->shape[src_axis], input); + + if (k == slice::OUT_OF_BOUNDS) { + raise_exception(SizeT, EXN_INDEX_ERROR, + "index {0} is out of bounds for axis {1} " + "with size {2}", + input, src_axis, src_ndarray->shape[src_axis]); + } + + dst_ndarray->data += k * src_ndarray->strides[src_axis]; + + src_axis++; + } else if (index->type == ND_INDEX_TYPE_SLICE) { + UserSlice* input = (UserSlice*)index->data; + + Slice slice; + input->indices_checked(src_ndarray->shape[src_axis], &slice); + + dst_ndarray->data += + (SizeT)slice.start * src_ndarray->strides[src_axis]; + dst_ndarray->strides[dst_axis] = + ((SizeT)slice.step) * src_ndarray->strides[src_axis]; + dst_ndarray->shape[dst_axis] = (SizeT)slice.len(); + + dst_axis++; + src_axis++; + } else if (index->type == ND_INDEX_TYPE_NEWAXIS) { + dst_ndarray->strides[dst_axis] = 0; + dst_ndarray->shape[dst_axis] = 1; + + dst_axis++; + } else if (index->type == ND_INDEX_TYPE_ELLIPSIS) { + // The number of ':' entries this '...' implies. 
+ SizeT ellipsis_size = src_ndarray->ndims - (num_indexes - 1); + + for (SizeT j = 0; j < ellipsis_size; j++) { + dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis]; + dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis]; + + dst_axis++; + src_axis++; + } + } else { + __builtin_unreachable(); + } + } + + for (; dst_axis < dst_ndarray->ndims; dst_axis++, src_axis++) { + dst_ndarray->shape[dst_axis] = src_ndarray->shape[src_axis]; + dst_ndarray->strides[dst_axis] = src_ndarray->strides[src_axis]; + } +} +} // namespace indexing +} // namespace ndarray +} // namespace + +extern "C" { +using namespace ndarray::indexing; + +void __nac3_ndarray_index(int32_t num_indexes, NDIndex* indexes, + NDArray* src_ndarray, + NDArray* dst_ndarray) { + index(num_indexes, indexes, src_ndarray, dst_ndarray); +} + +void __nac3_ndarray_index64(int64_t num_indexes, NDIndex* indexes, + NDArray* src_ndarray, + NDArray* dst_ndarray) { + index(num_indexes, indexes, src_ndarray, dst_ndarray); +} +} \ No newline at end of file diff --git a/nac3core/irrt/irrt/ndarray/product.hpp b/nac3core/irrt/irrt/ndarray/product.hpp new file mode 100644 index 00000000..9c0aee50 --- /dev/null +++ b/nac3core/irrt/irrt/ndarray/product.hpp @@ -0,0 +1,26 @@ + +namespace { +namespace ndarray { +namespace matmul { +namespace util { + +template +void broadcast_shape(SizeT a_ndims, SizeT* a_shape, SizeT b_ndims, + SizeT* b_shape, SizeT* dst_shape) { + __builtin_assume(!(a_ndims == 1 && b_ndims == 1)); + __builtin_assume(a_ndims >= 1); + __builtin_assume(b_ndims >= 1); +} +} // namespace util + +template +void matmul_at_least_2d(NDArray* a_ndarray, NDArray* b_ndarray, + NDArray* dst_ndarray) { + __builtin_assume(sizeof(T) == a_ndarray->itemsize); + __builtin_assume(sizeof(T) == b_ndarray->itemsize); + + // See https://numpy.org/doc/stable/reference/generated/numpy.matmul.html#numpy-matmul +} +} // namespace matmul +} // namespace ndarray +} // namespace \ No newline at end of file diff --git 
a/nac3core/irrt/irrt/ndarray/reshape.hpp b/nac3core/irrt/irrt/ndarray/reshape.hpp new file mode 100644 index 00000000..1e32c324 --- /dev/null +++ b/nac3core/irrt/irrt/ndarray/reshape.hpp @@ -0,0 +1,111 @@ +#pragma once + +#include +#include + +namespace { +namespace ndarray { +namespace reshape { +namespace util { + +/** + * @brief Perform assertions on and resolve unknown dimensions in `new_shape` in `np.reshape(, new_shape)` + * + * If `new_shape` indeed contains unknown dimensions (specified with `-1`, just like numpy), `new_shape` will be + * modified to contain the resolved dimension. + * + * To perform assertions on and resolve unknown dimensions in `new_shape`, we don't need the actual + * `` object itself, but only the `.size` of the ``. + * + * @param size The `.size` of `` + * @param new_ndims Number of elements in `new_shape` + * @param new_shape Target shape to reshape to + */ +template +void resolve_and_check_new_shape(SizeT size, SizeT new_ndims, + SizeT* new_shape) { + // Is there a -1 in `new_shape`? + bool neg1_exists = false; + // Location of -1, only initialized if `neg1_exists` is true + SizeT neg1_axis_i; + // The computed ndarray size of `new_shape` + SizeT new_size = 1; + + for (SizeT axis_i = 0; axis_i < new_ndims; axis_i++) { + SizeT dim = new_shape[axis_i]; + if (dim < 0) { + if (dim == -1) { + if (neg1_exists) { + // Multiple `-1` found. Throw an error. + raise_exception(SizeT, EXN_VALUE_ERROR, + "can only specify one unknown dimension", + NO_PARAM, NO_PARAM, NO_PARAM); + } else { + neg1_exists = true; + neg1_axis_i = axis_i; + } + } else { + // TODO: What? In `np.reshape` any negative dimensions is + // treated like its `-1`. + // + // Try running `np.zeros((3, 4)).reshape((-999, 2))` + // + // It is not documented by numpy. + // Throw an error for now... 
+ + raise_exception( + SizeT, EXN_VALUE_ERROR, + "Found non -1 negative dimension {0} on axis {1}", dim, + axis_i, NO_PARAM); + } + } else { + new_size *= dim; + } + } + + bool can_reshape; + if (neg1_exists) { + // Let `x` be the unknown dimension + // solve `x * = ` + if (new_size == 0 && size == 0) { + // `x` has infinitely many solutions + can_reshape = false; + } else if (new_size == 0 && size != 0) { + // `x` has no solutions + can_reshape = false; + } else if (size % new_size != 0) { + // `x` has no integer solutions + can_reshape = false; + } else { + can_reshape = true; + new_shape[neg1_axis_i] = size / new_size; // Resolve dimension + } + } else { + can_reshape = (new_size == size); + } + + if (!can_reshape) { + raise_exception(SizeT, EXN_VALUE_ERROR, + "cannot reshape array of size {0} into given shape", + size, NO_PARAM, NO_PARAM); + } +} +} // namespace util +} // namespace reshape +} // namespace ndarray +} // namespace + +extern "C" { +void __nac3_ndarray_resolve_and_check_new_shape(int32_t size, int32_t new_ndims, + int32_t* new_shape) { + ndarray::reshape::util::resolve_and_check_new_shape(size, new_ndims, + new_shape); +} + +void __nac3_ndarray_resolve_and_check_new_shape64(int64_t size, + int64_t new_ndims, + int64_t* new_shape) { + ndarray::reshape::util::resolve_and_check_new_shape(size, new_ndims, + new_shape); +} +} diff --git a/nac3core/irrt/irrt/ndarray/transpose.hpp b/nac3core/irrt/irrt/ndarray/transpose.hpp new file mode 100644 index 00000000..50a18260 --- /dev/null +++ b/nac3core/irrt/irrt/ndarray/transpose.hpp @@ -0,0 +1,148 @@ +#pragma once + +#include +#include +#include + +/* + * Notes on `np.transpose(, )` + * + * TODO: `axes`, if specified, can actually contain negative indices, + * but it is not documented in numpy. + * + * Supporting it for now. + */ + +namespace { +namespace ndarray { +namespace transpose { +namespace util { + +/** + * @brief Do assertions on `` in `np.transpose(, )`. 
+ * + * Note that `np.transpose`'s `` argument is optional. If the argument + * is specified but the user, use this function to do assertions on it. + * + * @param ndims The number of dimensions of `` + * @param num_axes Number of elements in `` as specified by the user. + * This should be equal to `ndims`. If not, a "ValueError: axes don't match array" is thrown. + * @param axes The user specified ``. + */ +template +void assert_transpose_axes(SizeT ndims, SizeT num_axes, const SizeT* axes) { + if (ndims != num_axes) { + raise_exception(SizeT, EXN_VALUE_ERROR, "axes don't match array", + NO_PARAM, NO_PARAM, NO_PARAM); + } + + // TODO: Optimize this + bool* axe_specified = (bool*)__builtin_alloca(sizeof(bool) * ndims); + for (SizeT i = 0; i < ndims; i++) axe_specified[i] = false; + + for (SizeT i = 0; i < ndims; i++) { + SizeT axis = slice::resolve_index_in_length(ndims, axes[i]); + if (axis == slice::OUT_OF_BOUNDS) { + // TODO: numpy actually throws a `numpy.exceptions.AxisError` + raise_exception( + SizeT, EXN_VALUE_ERROR, + "axis {0} is out of bounds for array of dimension {1}", axis, + ndims, NO_PARAM); + } + + if (axe_specified[axis]) { + raise_exception(SizeT, EXN_VALUE_ERROR, + "repeated axis in transpose", NO_PARAM, NO_PARAM, + NO_PARAM); + } + + axe_specified[axis] = true; + } +} +} // namespace util + +/** + * @brief Create a transpose view of `src_ndarray` and perform proper assertions. + * + * This function is very similar to doing `dst_ndarray = np.transpose(src_ndarray, )`. + * If `` is supposed to be `None`, caller can pass in a `nullptr` to ``. + * + * The transpose view created is returned by modifying `dst_ndarray`. + * + * The caller is responsible for setting up `dst_ndarray` before calling this function. + * Here is what this function expects from `dst_ndarray` when called: + * - `dst_ndarray->data` does not have to be initialized. + * - `dst_ndarray->itemsize` does not have to be initialized. 
+ * - `dst_ndarray->ndims` must be initialized, must be equal to `src_ndarray->ndims`. + * - `dst_ndarray->shape` must be allocated, through it can contain uninitialized values. + * - `dst_ndarray->strides` must be allocated, through it can contain uninitialized values. + * When this function call ends: + * - `dst_ndarray->data` is set to `src_ndarray->data` (`dst_ndarray` is just a view to `src_ndarray`) + * - `dst_ndarray->itemsize` is set to `src_ndarray->itemsize` + * - `dst_ndarray->ndims` is unchanged + * - `dst_ndarray->shape` is updated according to how `np.transpose` works + * - `dst_ndarray->strides` is updated according to how `np.transpose` works + * + * @param src_ndarray The NDArray to build a transpose view on + * @param dst_ndarray The resulting NDArray after transpose. Further details in the comments above, + * @param num_axes Number of elements in axes. Unused if `axes` is nullptr. + * @param axes Axes permutation. Set it to `nullptr` if `` is `None`. + */ +template +void transpose(const NDArray* src_ndarray, NDArray* dst_ndarray, + SizeT num_axes, const SizeT* axes) { + __builtin_assume(src_ndarray->ndims == dst_ndarray->ndims); + const auto ndims = src_ndarray->ndims; + + if (axes != nullptr) util::assert_transpose_axes(ndims, num_axes, axes); + + dst_ndarray->data = src_ndarray->data; + dst_ndarray->itemsize = src_ndarray->itemsize; + + // Check out https://ajcr.net/stride-guide-part-2/ to see how `np.transpose` works behind the scenes. + if (axes == nullptr) { + // `np.transpose(, axes=None)` + + /* + * Minor note: `np.transpose(, axes=None)` is equivalent to + * `np.transpose(, axes=[N-1, N-2, ..., 0])` - basically it + * is reversing the order of strides and shape. + * + * This is a fast implementation to handle this special (but very common) case. 
+ */ + + for (SizeT axis = 0; axis < ndims; axis++) { + dst_ndarray->shape[axis] = src_ndarray->shape[ndims - axis - 1]; + dst_ndarray->strides[axis] = src_ndarray->strides[ndims - axis - 1]; + } + } else { + // `np.transpose(, )` + + // Permute strides and shape according to `axes`, while resolving negative indices in `axes` + for (SizeT axis = 0; axis < ndims; axis++) { + // `i` cannot be OUT_OF_BOUNDS because of assertions + SizeT i = slice::resolve_index_in_length(ndims, axes[axis]); + + dst_ndarray->shape[axis] = src_ndarray->shape[i]; + dst_ndarray->strides[axis] = src_ndarray->strides[i]; + } + } +} +} // namespace transpose +} // namespace ndarray +} // namespace + +extern "C" { +using namespace ndarray::transpose; +void __nac3_ndarray_transpose(const NDArray* src_ndarray, + NDArray* dst_ndarray, int32_t num_axes, + const int32_t* axes) { + transpose(src_ndarray, dst_ndarray, num_axes, axes); +} + +void __nac3_ndarray_transpose64(const NDArray* src_ndarray, + NDArray* dst_ndarray, int64_t num_axes, + const int64_t* axes) { + transpose(src_ndarray, dst_ndarray, num_axes, axes); +} +} \ No newline at end of file diff --git a/nac3core/irrt/irrt/slice.hpp b/nac3core/irrt/irrt/slice.hpp new file mode 100644 index 00000000..1ed2d8c4 --- /dev/null +++ b/nac3core/irrt/irrt/slice.hpp @@ -0,0 +1,165 @@ +#pragma once + +#include +#include +#include + +#include "exception.hpp" + +// The type of an index or a value describing the length of a +// range/slice is always `int32_t`. +using SliceIndex = int32_t; + +namespace { + +/** + * @brief A Python-like slice with resolved indices. + * + * "Resolved indices" means that `start` and `stop` must be positive and are + * bound to a known length. + */ +struct Slice { + SliceIndex start; + SliceIndex stop; + SliceIndex step; + + /** + * @brief Calculate and return the length / the number of the slice. + * + * If this were a Python range, this function would be `len(range(start, stop, step))`. 
+ */ + SliceIndex len() { + SliceIndex diff = stop - start; + if (diff > 0 && step > 0) { + return ((diff - 1) / step) + 1; + } else if (diff < 0 && step < 0) { + return ((diff + 1) / step) + 1; + } else { + return 0; + } + } +}; + +namespace slice { +/** + * @brief Resolve a slice index under a given length like Python indexing. + * + * In Python, if you have a `list` of length 100, `list[-1]` resolves to + * `list[99]`, so `resolve_index_in_length_clamped(100, -1)` returns `99`. + * + * If `length` is 0, 0 is returned for any value of `index`. + * + * If `index` is out of bounds, clamps the returned value between `0` and + * `length - 1` (inclusive). + * + */ +SliceIndex resolve_index_in_length_clamped(SliceIndex length, + SliceIndex index) { + if (index < 0) { + return max(length + index, 0); + } else { + return min(length, index); + } +} + +const SliceIndex OUT_OF_BOUNDS = -1; + +/** + * @brief Like `resolve_index_in_length_clamped`, but returns `OUT_OF_BOUNDS` + * if `index` is out of bounds. + */ +SliceIndex resolve_index_in_length(SliceIndex length, SliceIndex index) { + SliceIndex resolved = index < 0 ? length + index : index; + if (0 <= resolved && resolved < length) { + return resolved; + } else { + return OUT_OF_BOUNDS; + } +} +} // namespace slice + +/** + * @brief A Python-like slice with **unresolved** indices. + */ +struct UserSlice { + bool start_defined; + SliceIndex start; + + bool stop_defined; + SliceIndex stop; + + bool step_defined; + SliceIndex step; + + UserSlice() { this->reset(); } + + void reset() { + this->start_defined = false; + this->stop_defined = false; + this->step_defined = false; + } + + void set_start(SliceIndex start) { + this->start_defined = true; + this->start = start; + } + + void set_stop(SliceIndex stop) { + this->stop_defined = true; + this->stop = stop; + } + + void set_step(SliceIndex step) { + this->step_defined = true; + this->step = step; + } + + /** + * @brief Resolve this slice. 
+ * + * In Python, this would be `slice(start, stop, step).indices(length)`. + * + * @return A `Slice` with the resolved indices. + */ + Slice indices(SliceIndex length) { + Slice result; + + result.step = step_defined ? step : 1; + bool step_is_negative = result.step < 0; + + if (start_defined) { + result.start = + slice::resolve_index_in_length_clamped(length, start); + } else { + result.start = step_is_negative ? length - 1 : 0; + } + + if (stop_defined) { + result.stop = slice::resolve_index_in_length_clamped(length, stop); + } else { + result.stop = step_is_negative ? -1 : length; + } + + return result; + } + + /** + * @brief Like `.indices()` but with assertions. + */ + template + void indices_checked(SliceIndex length, Slice* result) { + if (length < 0) { + raise_exception(SizeT, EXN_VALUE_ERROR, + "length should not be negative, got {0}", length, + NO_PARAM, NO_PARAM); + } + + if (this->step_defined && this->step == 0) { + raise_exception(SizeT, EXN_VALUE_ERROR, "slice step cannot be zero", + NO_PARAM, NO_PARAM, NO_PARAM); + } + + *result = this->indices(length); + } +}; +} // namespace \ No newline at end of file diff --git a/nac3core/irrt/irrt_everything.hpp b/nac3core/irrt/irrt_everything.hpp index 13e0168d..707f1af7 100644 --- a/nac3core/irrt/irrt_everything.hpp +++ b/nac3core/irrt/irrt_everything.hpp @@ -4,5 +4,9 @@ #include #include #include +#include #include +#include +#include +#include #include diff --git a/nac3core/irrt/irrt_test.cpp b/nac3core/irrt/irrt_test.cpp index 9183eba0..4b8f528c 100644 --- a/nac3core/irrt/irrt_test.cpp +++ b/nac3core/irrt/irrt_test.cpp @@ -6,15 +6,20 @@ #include #include -// Special macro to inform `#include ` that we -// are testing. +// Special macro to inform `#include ` that we are testing. #define IRRT_TESTING +// Note that failure unit tests are not supported. 
+ #include #include +#include +#include int main() { test::core::run(); test::ndarray_basic::run(); + test::ndarray_indexing::run(); + test::ndarray_broadcast::run(); return 0; } \ No newline at end of file diff --git a/nac3core/irrt/test/test_core.hpp b/nac3core/irrt/test/test_core.hpp index 488c6152..50bf111a 100644 --- a/nac3core/irrt/test/test_core.hpp +++ b/nac3core/irrt/test/test_core.hpp @@ -7,8 +7,8 @@ namespace core { void test_int_exp() { BEGIN_TEST(); - assert_values_match(125, __nac3_int_exp_impl(5, 3)); - assert_values_match(3125, __nac3_int_exp_impl(5, 5)); + assert_values_match(125L, __nac3_int_exp_impl(5, 3)); + assert_values_match(3125L, __nac3_int_exp_impl(5, 5)); } void run() { test_int_exp(); } diff --git a/nac3core/irrt/test/test_ndarray_basic.hpp b/nac3core/irrt/test/test_ndarray_basic.hpp index 1bbdab26..a28456ae 100644 --- a/nac3core/irrt/test/test_ndarray_basic.hpp +++ b/nac3core/irrt/test/test_ndarray_basic.hpp @@ -8,18 +8,18 @@ void test_calc_size_from_shape_normal() { // Test shapes with normal values BEGIN_TEST(); - int32_t shape[4] = {2, 3, 5, 7}; + int64_t shape[4] = {2, 3, 5, 7}; assert_values_match( - 210, ndarray::basic::util::calc_size_from_shape(4, shape)); + 210L, ndarray::basic::util::calc_size_from_shape(4, shape)); } void test_calc_size_from_shape_has_zero() { // Test shapes with 0 in them BEGIN_TEST(); - int32_t shape[4] = {2, 0, 5, 7}; + int64_t shape[4] = {2, 0, 5, 7}; assert_values_match( - 0, ndarray::basic::util::calc_size_from_shape(4, shape)); + 0L, ndarray::basic::util::calc_size_from_shape(4, shape)); } void run() { diff --git a/nac3core/irrt/test/test_ndarray_broadcast.hpp b/nac3core/irrt/test/test_ndarray_broadcast.hpp new file mode 100644 index 00000000..5910e28c --- /dev/null +++ b/nac3core/irrt/test/test_ndarray_broadcast.hpp @@ -0,0 +1,127 @@ +#pragma once + +#include + +namespace test { +namespace ndarray_broadcast { +void test_can_broadcast_shape() { + BEGIN_TEST(); + + assert_values_match(true, + 
ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){3}, 5, (int32_t[]){1, 1, 1, 1, 3})); + assert_values_match(false, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){3}, 2, (int32_t[]){3, 1})); + assert_values_match(true, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){3}, 1, (int32_t[]){3})); + assert_values_match(false, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){1}, 1, (int32_t[]){3})); + assert_values_match(true, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){1}, 1, (int32_t[]){1})); + assert_values_match( + true, ndarray::broadcast::util::can_broadcast_shape_to( + 3, (int32_t[]){256, 256, 3}, 3, (int32_t[]){256, 1, 3})); + assert_values_match(true, + ndarray::broadcast::util::can_broadcast_shape_to( + 3, (int32_t[]){256, 256, 3}, 1, (int32_t[]){3})); + assert_values_match(false, + ndarray::broadcast::util::can_broadcast_shape_to( + 3, (int32_t[]){256, 256, 3}, 1, (int32_t[]){2})); + assert_values_match(true, + ndarray::broadcast::util::can_broadcast_shape_to( + 3, (int32_t[]){256, 256, 3}, 1, (int32_t[]){1})); + + // In cases when the shapes contain zero(es) + assert_values_match(true, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){0}, 1, (int32_t[]){1})); + assert_values_match(false, ndarray::broadcast::util::can_broadcast_shape_to( + 1, (int32_t[]){0}, 1, (int32_t[]){2})); + assert_values_match(true, + ndarray::broadcast::util::can_broadcast_shape_to( + 4, (int32_t[]){0, 4, 0, 0}, 1, (int32_t[]){1})); + assert_values_match( + true, ndarray::broadcast::util::can_broadcast_shape_to( + 4, (int32_t[]){0, 4, 0, 0}, 4, (int32_t[]){1, 1, 1, 1})); + assert_values_match( + true, ndarray::broadcast::util::can_broadcast_shape_to( + 4, (int32_t[]){0, 4, 0, 0}, 4, (int32_t[]){1, 4, 1, 1})); + assert_values_match(false, ndarray::broadcast::util::can_broadcast_shape_to( + 2, (int32_t[]){4, 3}, 2, (int32_t[]){0, 3})); + assert_values_match(false, 
ndarray::broadcast::util::can_broadcast_shape_to( + 2, (int32_t[]){4, 3}, 2, (int32_t[]){0, 0})); +} + +void test_ndarray_broadcast() { + /* + # array = np.array([[19.9, 29.9, 39.9, 49.9]], dtype=np.float64) + # >>> [[19.9 29.9 39.9 49.9]] + # + # array = np.broadcast_to(array, (2, 3, 4)) + # >>> [[[19.9 29.9 39.9 49.9] + # >>> [19.9 29.9 39.9 49.9] + # >>> [19.9 29.9 39.9 49.9]] + # >>> [[19.9 29.9 39.9 49.9] + # >>> [19.9 29.9 39.9 49.9] + # >>> [19.9 29.9 39.9 49.9]]] + # + # assery array.strides == (0, 0, 8) + + */ + BEGIN_TEST(); + + double in_data[4] = {19.9, 29.9, 39.9, 49.9}; + const int32_t in_ndims = 2; + int32_t in_shape[in_ndims] = {1, 4}; + int32_t in_strides[in_ndims] = {}; + NDArray ndarray = {.data = (uint8_t*)in_data, + .itemsize = sizeof(double), + .ndims = in_ndims, + .shape = in_shape, + .strides = in_strides}; + ndarray::basic::set_strides_by_shape(&ndarray); + + const int32_t dst_ndims = 3; + int32_t dst_shape[dst_ndims] = {2, 3, 4}; + int32_t dst_strides[dst_ndims] = {}; + NDArray dst_ndarray = { + .ndims = dst_ndims, .shape = dst_shape, .strides = dst_strides}; + + ndarray::broadcast::broadcast_to(&ndarray, &dst_ndarray); + + assert_arrays_match(dst_ndims, ((int32_t[]){0, 0, 8}), dst_ndarray.strides); + + assert_values_match(19.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 0, 0})))); + assert_values_match(29.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 0, 1})))); + assert_values_match(39.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 0, 2})))); + assert_values_match(49.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 0, 3})))); + assert_values_match(19.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 1, 0})))); + assert_values_match(29.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 1, 1})))); + 
assert_values_match(39.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 1, 2})))); + assert_values_match(49.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){0, 1, 3})))); + assert_values_match(49.9, + *((double*)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, ((int32_t[]){1, 2, 3})))); +} + +void run() { + test_can_broadcast_shape(); + test_ndarray_broadcast(); +} +} // namespace ndarray_broadcast +} // namespace test \ No newline at end of file diff --git a/nac3core/irrt/test/test_ndarray_indexing.hpp b/nac3core/irrt/test/test_ndarray_indexing.hpp new file mode 100644 index 00000000..03f00337 --- /dev/null +++ b/nac3core/irrt/test/test_ndarray_indexing.hpp @@ -0,0 +1,165 @@ +#pragma once + +#include + +namespace test { +namespace ndarray_indexing { +void test_normal_1() { + /* + Reference Python code: + ```python + ndarray = np.arange(12, dtype=np.float64).reshape((3, 4)); + # array([[ 0., 1., 2., 3.], + # [ 4., 5., 6., 7.], + # [ 8., 9., 10., 11.]]) + + dst_ndarray = ndarray[-2:, 1::2] + # array([[ 5., 7.], + # [ 9., 11.]]) + + assert dst_ndarray.shape == (2, 2) + assert dst_ndarray.strides == (32, 16) + assert dst_ndarray[0, 0] == 5.0 + assert dst_ndarray[0, 1] == 7.0 + assert dst_ndarray[1, 0] == 9.0 + assert dst_ndarray[1, 1] == 11.0 + ``` + */ + BEGIN_TEST(); + + // Prepare src_ndarray + double src_data[12] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, + 6.0, 7.0, 8.0, 9.0, 10.0, 11.0}; + int64_t src_itemsize = sizeof(double); + const int64_t src_ndims = 2; + int64_t src_shape[src_ndims] = {3, 4}; + int64_t src_strides[src_ndims] = {}; + NDArray src_ndarray = {.data = (uint8_t *)src_data, + .itemsize = src_itemsize, + .ndims = src_ndims, + .shape = src_shape, + .strides = src_strides}; + ndarray::basic::set_strides_by_shape(&src_ndarray); + + // Prepare dst_ndarray + const int64_t dst_ndims = 2; + int64_t dst_shape[dst_ndims] = {999, 999}; // Empty values + int64_t dst_strides[dst_ndims] 
= {999, 999}; // Empty values + NDArray dst_ndarray = {.data = nullptr, + .ndims = dst_ndims, + .shape = dst_shape, + .strides = dst_strides}; + + // Create the subscripts in `ndarray[-2::, 1::2]` + UserSlice subscript_1; + subscript_1.set_start(-2); + + UserSlice subscript_2; + subscript_2.set_start(1); + subscript_2.set_step(2); + + const int64_t num_indexes = 2; + NDIndex indexes[num_indexes] = { + {.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_1}, + {.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_2}}; + + ndarray::indexing::index(num_indexes, indexes, &src_ndarray, &dst_ndarray); + + int64_t expected_shape[dst_ndims] = {2, 2}; + int64_t expected_strides[dst_ndims] = {32, 16}; + + assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape); + assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides); + + // dst_ndarray[0, 0] + assert_values_match(5.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){0, 0}))); + // dst_ndarray[0, 1] + assert_values_match(7.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){0, 1}))); + // dst_ndarray[1, 0] + assert_values_match(9.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){1, 0}))); + // dst_ndarray[1, 1] + assert_values_match(11.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){1, 1}))); +} + +void test_normal_2() { + /* + ```python + ndarray = np.arange(12, dtype=np.float64).reshape((3, 4)) + # array([[ 0., 1., 2., 3.], + # [ 4., 5., 6., 7.], + # [ 8., 9., 10., 11.]]) + + dst_ndarray = ndarray[2, ::-2] + # array([11., 9.]) + + assert dst_ndarray.shape == (2,) + assert dst_ndarray.strides == (-16,) + assert dst_ndarray[0] == 11.0 + assert dst_ndarray[1] == 9.0 + ``` + */ + BEGIN_TEST(); + + // Prepare src_ndarray + double src_data[12] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, + 6.0, 7.0, 8.0, 9.0, 10.0, 11.0}; + 
int64_t src_itemsize = sizeof(double); + const int64_t src_ndims = 2; + int64_t src_shape[src_ndims] = {3, 4}; + int64_t src_strides[src_ndims] = {}; + NDArray src_ndarray = {.data = (uint8_t *)src_data, + .itemsize = src_itemsize, + .ndims = src_ndims, + .shape = src_shape, + .strides = src_strides}; + ndarray::basic::set_strides_by_shape(&src_ndarray); + + // Prepare dst_ndarray + const int64_t dst_ndims = 1; + int64_t dst_shape[dst_ndims] = {999}; // Empty values + int64_t dst_strides[dst_ndims] = {999}; // Empty values + NDArray dst_ndarray = {.data = nullptr, + .ndims = dst_ndims, + .shape = dst_shape, + .strides = dst_strides}; + + // Create the subscripts in `ndarray[2, ::-2]` + int64_t subscript_1 = 2; + + UserSlice subscript_2; + subscript_2.set_step(-2); + + const int64_t num_indexes = 2; + NDIndex indexes[num_indexes] = { + {.type = ND_INDEX_TYPE_SINGLE_ELEMENT, .data = (uint8_t *)&subscript_1}, + {.type = ND_INDEX_TYPE_SLICE, .data = (uint8_t *)&subscript_2}}; + + ndarray::indexing::index(num_indexes, indexes, &src_ndarray, &dst_ndarray); + + int64_t expected_shape[dst_ndims] = {2}; + int64_t expected_strides[dst_ndims] = {-16}; + assert_arrays_match(dst_ndims, expected_shape, dst_ndarray.shape); + assert_arrays_match(dst_ndims, expected_strides, dst_ndarray.strides); + + assert_values_match(11.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){0}))); + assert_values_match(9.0, + *((double *)ndarray::basic::get_pelement_by_indices( + &dst_ndarray, (int64_t[dst_ndims]){1}))); +} + +void run() { + test_normal_1(); + test_normal_2(); +} +} // namespace ndarray_indexing +} // namespace test \ No newline at end of file diff --git a/nac3core/irrt/test/util.hpp b/nac3core/irrt/test/util.hpp index 33e2e45e..0f22b885 100644 --- a/nac3core/irrt/test/util.hpp +++ b/nac3core/irrt/test/util.hpp @@ -6,6 +6,11 @@ template void print_value(const T& value); +template <> +void print_value(const bool& value) { + printf("%s", 
value ? "true" : "false"); +} + template <> void print_value(const int8_t& value) { printf("%d", value); @@ -16,6 +21,11 @@ void print_value(const int32_t& value) { printf("%d", value); } +template <> +void print_value(const int64_t& value) { + printf("%d", value); +} + template <> void print_value(const uint8_t& value) { printf("%u", value); @@ -26,6 +36,11 @@ void print_value(const uint32_t& value) { printf("%u", value); } +template <> +void print_value(const uint64_t& value) { + printf("%d", value); +} + template <> void print_value(const float& value) { printf("%f", value); diff --git a/nac3core/src/codegen/builtin_fns.rs b/nac3core/src/codegen/builtin_fns.rs index 311fd358..abdb835f 100644 --- a/nac3core/src/codegen/builtin_fns.rs +++ b/nac3core/src/codegen/builtin_fns.rs @@ -1,14 +1,10 @@ use inkwell::types::BasicTypeEnum; use inkwell::values::{BasicValue, BasicValueEnum, PointerValue}; -use inkwell::{FloatPredicate, IntPredicate, OptimizationLevel}; use itertools::Itertools; -use crate::codegen::classes::{ - NDArrayValue, ProxyValue, UntypedArrayLikeAccessor, UntypedArrayLikeMutator, -}; +use crate::codegen::classes::{NDArrayValue, ProxyValue, UntypedArrayLikeAccessor}; use crate::codegen::numpy::ndarray_elementwise_unaryop_impl; -use crate::codegen::stmt::gen_for_callback_incrementing; -use crate::codegen::{extern_fns, irrt, llvm_intrinsics, numpy, CodeGenContext, CodeGenerator}; +use crate::codegen::{extern_fns, llvm_intrinsics, numpy, CodeGenContext, CodeGenerator}; use crate::toplevel::helper::PrimDef; use crate::toplevel::numpy::unpack_ndarray_var_tys; use crate::typecheck::typedef::Type; @@ -23,990 +19,6 @@ fn unsupported_type(ctx: &CodeGenContext<'_, '_>, fn_name: &str, tys: &[Type]) - ) } -/// Invokes the `int32` builtin function. 
-pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - let llvm_i32 = ctx.ctx.i32_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.bool)); - - ctx.builder.build_int_z_extend(n, llvm_i32, "zext").map(Into::into).unwrap() - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 32 => { - debug_assert!([ctx.primitives.int32, ctx.primitives.uint32,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - n.into() - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 64 => { - debug_assert!([ctx.primitives.int64, ctx.primitives.uint64,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - ctx.builder.build_int_truncate(n, llvm_i32, "trunc").map(Into::into).unwrap() - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let to_int64 = - ctx.builder.build_float_to_signed_int(n, ctx.ctx.i64_type(), "").unwrap(); - ctx.builder.build_int_truncate(to_int64, llvm_i32, "conv").map(Into::into).unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.int32, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_int32(generator, ctx, (elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, "int32", &[n_ty]), - }) -} - -/// Invokes the `int64` builtin function. 
-pub fn call_int64<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - let llvm_i64 = ctx.ctx.i64_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8 | 32) => { - debug_assert!([ctx.primitives.bool, ctx.primitives.int32, ctx.primitives.uint32,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - if ctx.unifier.unioned(n_ty, ctx.primitives.int32) { - ctx.builder.build_int_s_extend(n, llvm_i64, "sext").map(Into::into).unwrap() - } else { - ctx.builder.build_int_z_extend(n, llvm_i64, "zext").map(Into::into).unwrap() - } - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 64 => { - debug_assert!([ctx.primitives.int64, ctx.primitives.uint64,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - n.into() - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - ctx.builder - .build_float_to_signed_int(n, ctx.ctx.i64_type(), "fptosi") - .map(Into::into) - .unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.int64, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_int64(generator, ctx, (elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, "int64", &[n_ty]), - }) -} - -/// Invokes the `uint32` builtin function. 
-pub fn call_uint32<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - let llvm_i32 = ctx.ctx.i32_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.bool)); - - ctx.builder.build_int_z_extend(n, llvm_i32, "zext").map(Into::into).unwrap() - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 32 => { - debug_assert!([ctx.primitives.int32, ctx.primitives.uint32,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - n.into() - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 64 => { - debug_assert!( - ctx.unifier.unioned(n_ty, ctx.primitives.int64) - || ctx.unifier.unioned(n_ty, ctx.primitives.uint64) - ); - - ctx.builder.build_int_truncate(n, llvm_i32, "trunc").map(Into::into).unwrap() - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let n_gez = ctx - .builder - .build_float_compare(FloatPredicate::OGE, n, n.get_type().const_zero(), "") - .unwrap(); - - let to_int32 = ctx.builder.build_float_to_signed_int(n, llvm_i32, "").unwrap(); - let to_uint64 = - ctx.builder.build_float_to_unsigned_int(n, ctx.ctx.i64_type(), "").unwrap(); - - ctx.builder - .build_select( - n_gez, - ctx.builder.build_int_truncate(to_uint64, llvm_i32, "").unwrap(), - to_int32, - "conv", - ) - .unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.uint32, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_uint32(generator, ctx, 
(elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, "uint32", &[n_ty]), - }) -} - -/// Invokes the `uint64` builtin function. -pub fn call_uint64<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - let llvm_i64 = ctx.ctx.i64_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8 | 32) => { - debug_assert!([ctx.primitives.bool, ctx.primitives.int32, ctx.primitives.uint32,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - if ctx.unifier.unioned(n_ty, ctx.primitives.int32) { - ctx.builder.build_int_s_extend(n, llvm_i64, "sext").map(Into::into).unwrap() - } else { - ctx.builder.build_int_z_extend(n, llvm_i64, "zext").map(Into::into).unwrap() - } - } - - BasicValueEnum::IntValue(n) if n.get_type().get_bit_width() == 64 => { - debug_assert!([ctx.primitives.int64, ctx.primitives.uint64,] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - n.into() - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let val_gez = ctx - .builder - .build_float_compare(FloatPredicate::OGE, n, n.get_type().const_zero(), "") - .unwrap(); - - let to_int64 = ctx.builder.build_float_to_signed_int(n, llvm_i64, "").unwrap(); - let to_uint64 = ctx.builder.build_float_to_unsigned_int(n, llvm_i64, "").unwrap(); - - ctx.builder.build_select(val_gez, to_uint64, to_int64, "conv").unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.uint64, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| 
call_uint64(generator, ctx, (elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, "uint64", &[n_ty]), - }) -} - -/// Invokes the `float` builtin function. -pub fn call_float<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - let llvm_f64 = ctx.ctx.f64_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8 | 32 | 64) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - if [ctx.primitives.bool, ctx.primitives.int32, ctx.primitives.int64] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty)) - { - ctx.builder - .build_signed_int_to_float(n, llvm_f64, "sitofp") - .map(Into::into) - .unwrap() - } else { - ctx.builder - .build_unsigned_int_to_float(n, llvm_f64, "uitofp") - .map(Into::into) - .unwrap() - } - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - n.into() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.float, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_float(generator, ctx, (elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, "float", &[n_ty]), - }) -} - -/// Invokes the `round` builtin function. 
-pub fn call_round<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), - ret_elem_ty: Type, -) -> Result, String> { - const FN_NAME: &str = "round"; - - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty).into_int_type(); - - Ok(match n { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let val = llvm_intrinsics::call_float_round(ctx, n, None); - ctx.builder - .build_float_to_signed_int(val, llvm_ret_elem_ty, FN_NAME) - .map(Into::into) - .unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ret_elem_ty, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_round(generator, ctx, (elem_ty, val), ret_elem_ty), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, FN_NAME, &[n_ty]), - }) -} - -/// Invokes the `np_round` builtin function. 
-pub fn call_numpy_round<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - const FN_NAME: &str = "np_round"; - - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - llvm_intrinsics::call_float_rint(ctx, n, None).into() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.float, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_numpy_round(generator, ctx, (elem_ty, val)), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, FN_NAME, &[n_ty]), - }) -} - -/// Invokes the `bool` builtin function. 
-pub fn call_bool<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - const FN_NAME: &str = "bool"; - - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - - Ok(match n { - BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.bool)); - - n.into() - } - - BasicValueEnum::IntValue(n) => { - debug_assert!([ - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ] - .iter() - .any(|ty| ctx.unifier.unioned(n_ty, *ty))); - - ctx.builder - .build_int_compare(IntPredicate::NE, n, n.get_type().const_zero(), FN_NAME) - .map(Into::into) - .unwrap() - } - - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - ctx.builder - .build_float_compare(FloatPredicate::UNE, n, n.get_type().const_zero(), FN_NAME) - .map(Into::into) - .unwrap() - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ctx.primitives.bool, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| { - let elem = call_bool(generator, ctx, (elem_ty, val))?; - - Ok(generator.bool_to_i8(ctx, elem.into_int_value()).into()) - }, - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, FN_NAME, &[n_ty]), - }) -} - -/// Invokes the `floor` builtin function. 
-pub fn call_floor<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), - ret_elem_ty: Type, -) -> Result, String> { - const FN_NAME: &str = "floor"; - - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty); - - Ok(match n { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let val = llvm_intrinsics::call_float_floor(ctx, n, None); - if let BasicTypeEnum::IntType(llvm_ret_elem_ty) = llvm_ret_elem_ty { - ctx.builder - .build_float_to_signed_int(val, llvm_ret_elem_ty, FN_NAME) - .map(Into::into) - .unwrap() - } else { - val.into() - } - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ret_elem_ty, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_floor(generator, ctx, (elem_ty, val), ret_elem_ty), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, FN_NAME, &[n_ty]), - }) -} - -/// Invokes the `ceil` builtin function. 
-pub fn call_ceil<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), - ret_elem_ty: Type, -) -> Result, String> { - const FN_NAME: &str = "ceil"; - - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (n_ty, n) = n; - let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty); - - Ok(match n { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let val = llvm_intrinsics::call_float_ceil(ctx, n, None); - if let BasicTypeEnum::IntType(llvm_ret_elem_ty) = llvm_ret_elem_ty { - ctx.builder - .build_float_to_signed_int(val, llvm_ret_elem_ty, FN_NAME) - .map(Into::into) - .unwrap() - } else { - val.into() - } - } - - BasicValueEnum::PointerValue(n) - if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty); - - let ndarray = ndarray_elementwise_unaryop_impl( - generator, - ctx, - ret_elem_ty, - None, - NDArrayValue::from_ptr_val(n, llvm_usize, None), - |generator, ctx, val| call_ceil(generator, ctx, (elem_ty, val), ret_elem_ty), - )?; - - ndarray.as_base_value().into() - } - - _ => unsupported_type(ctx, FN_NAME, &[n_ty]), - }) -} - -/// Invokes the `min` builtin function. 
-pub fn call_min<'ctx>( - ctx: &mut CodeGenContext<'ctx, '_>, - m: (Type, BasicValueEnum<'ctx>), - n: (Type, BasicValueEnum<'ctx>), -) -> BasicValueEnum<'ctx> { - const FN_NAME: &str = "min"; - - let (m_ty, m) = m; - let (n_ty, n) = n; - - let common_ty = if ctx.unifier.unioned(m_ty, n_ty) { - m_ty - } else { - unsupported_type(ctx, FN_NAME, &[m_ty, n_ty]) - }; - - match (m, n) { - (BasicValueEnum::IntValue(m), BasicValueEnum::IntValue(n)) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty, *ty))); - - if [ctx.primitives.int32, ctx.primitives.int64] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty, *ty)) - { - llvm_intrinsics::call_int_smin(ctx, m, n, Some(FN_NAME)).into() - } else { - llvm_intrinsics::call_int_umin(ctx, m, n, Some(FN_NAME)).into() - } - } - - (BasicValueEnum::FloatValue(m), BasicValueEnum::FloatValue(n)) => { - debug_assert!(ctx.unifier.unioned(common_ty, ctx.primitives.float)); - - llvm_intrinsics::call_float_minnum(ctx, m, n, Some(FN_NAME)).into() - } - - _ => unsupported_type(ctx, FN_NAME, &[m_ty, n_ty]), - } -} - -/// Invokes the `np_minimum` builtin function. 
-pub fn call_numpy_minimum<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - x1: (Type, BasicValueEnum<'ctx>), - x2: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - const FN_NAME: &str = "np_minimum"; - - let (x1_ty, x1) = x1; - let (x2_ty, x2) = x2; - - let common_ty = if ctx.unifier.unioned(x1_ty, x2_ty) { Some(x1_ty) } else { None }; - - Ok(match (x1, x2) { - (BasicValueEnum::IntValue(x1), BasicValueEnum::IntValue(x2)) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ctx.primitives.float, - ] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty.unwrap(), *ty))); - - call_min(ctx, (x1_ty, x1.into()), (x2_ty, x2.into())) - } - - (BasicValueEnum::FloatValue(x1), BasicValueEnum::FloatValue(x2)) => { - debug_assert!(ctx.unifier.unioned(common_ty.unwrap(), ctx.primitives.float)); - - call_min(ctx, (x1_ty, x1.into()), (x2_ty, x2.into())) - } - - (x1, x2) - if [&x1_ty, &x2_ty].into_iter().any(|ty| { - ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) - }) => - { - let is_ndarray1 = - x1_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); - let is_ndarray2 = - x2_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); - - let dtype = if is_ndarray1 && is_ndarray2 { - let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty); - let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty); - - debug_assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2)); - - ndarray_dtype1 - } else if is_ndarray1 { - unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty).0 - } else if is_ndarray2 { - unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0 - } else { - unreachable!() - }; - - let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty }; - let x2_scalar_ty = if is_ndarray2 { dtype } else { x2_ty }; - - numpy::ndarray_elementwise_binop_impl( - generator, - 
ctx, - dtype, - None, - (x1, !is_ndarray1), - (x2, !is_ndarray2), - |generator, ctx, (lhs, rhs)| { - call_numpy_minimum(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs)) - }, - )? - .as_base_value() - .into() - } - - _ => unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]), - }) -} - -/// Invokes the `max` builtin function. -pub fn call_max<'ctx>( - ctx: &mut CodeGenContext<'ctx, '_>, - m: (Type, BasicValueEnum<'ctx>), - n: (Type, BasicValueEnum<'ctx>), -) -> BasicValueEnum<'ctx> { - const FN_NAME: &str = "max"; - - let (m_ty, m) = m; - let (n_ty, n) = n; - - let common_ty = if ctx.unifier.unioned(m_ty, n_ty) { - m_ty - } else { - unsupported_type(ctx, FN_NAME, &[m_ty, n_ty]) - }; - - match (m, n) { - (BasicValueEnum::IntValue(m), BasicValueEnum::IntValue(n)) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty, *ty))); - - if [ctx.primitives.int32, ctx.primitives.int64] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty, *ty)) - { - llvm_intrinsics::call_int_smax(ctx, m, n, Some(FN_NAME)).into() - } else { - llvm_intrinsics::call_int_umax(ctx, m, n, Some(FN_NAME)).into() - } - } - - (BasicValueEnum::FloatValue(m), BasicValueEnum::FloatValue(n)) => { - debug_assert!(ctx.unifier.unioned(common_ty, ctx.primitives.float)); - - llvm_intrinsics::call_float_maxnum(ctx, m, n, Some(FN_NAME)).into() - } - - _ => unsupported_type(ctx, FN_NAME, &[m_ty, n_ty]), - } -} - -/// Invokes the `np_max`, `np_min`, `np_argmax`, `np_argmin` functions -/// * `fn_name`: Can be one of `"np_argmin"`, `"np_argmax"`, `"np_max"`, `"np_min"` -pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - a: (Type, BasicValueEnum<'ctx>), - fn_name: &str, -) -> Result, String> { - debug_assert!(["np_argmin", "np_argmax", "np_max", "np_min"].iter().any(|f| *f == fn_name)); - - let llvm_int64 
= ctx.ctx.i64_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let (a_ty, a) = a; - Ok(match a { - BasicValueEnum::IntValue(_) | BasicValueEnum::FloatValue(_) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ctx.primitives.float, - ] - .iter() - .any(|ty| ctx.unifier.unioned(a_ty, *ty))); - - match fn_name { - "np_argmin" | "np_argmax" => llvm_int64.const_zero().into(), - "np_max" | "np_min" => a, - _ => unreachable!(), - } - } - BasicValueEnum::PointerValue(n) - if a_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) => - { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, a_ty); - let llvm_ndarray_ty = ctx.get_llvm_type(generator, elem_ty); - - let n = NDArrayValue::from_ptr_val(n, llvm_usize, None); - let n_sz = irrt::call_ndarray_calc_size(generator, ctx, &n.dim_sizes(), (None, None)); - if ctx.registry.llvm_options.opt_level == OptimizationLevel::None { - let n_sz_eqz = ctx - .builder - .build_int_compare(IntPredicate::NE, n_sz, n_sz.get_type().const_zero(), "") - .unwrap(); - - ctx.make_assert( - generator, - n_sz_eqz, - "0:ValueError", - format!("zero-size array to reduction operation {fn_name}").as_str(), - [None, None, None], - ctx.current_loc, - ); - } - - let accumulator_addr = generator.gen_var_alloc(ctx, llvm_ndarray_ty, None)?; - let res_idx = generator.gen_var_alloc(ctx, llvm_int64.into(), None)?; - - unsafe { - let identity = - n.data().get_unchecked(ctx, generator, &llvm_usize.const_zero(), None); - ctx.builder.build_store(accumulator_addr, identity).unwrap(); - ctx.builder.build_store(res_idx, llvm_int64.const_zero()).unwrap(); - } - - gen_for_callback_incrementing( - generator, - ctx, - None, - llvm_int64.const_int(1, false), - (n_sz, false), - |generator, ctx, _, idx| { - let elem = unsafe { n.data().get_unchecked(ctx, generator, &idx, None) }; - let accumulator = ctx.builder.build_load(accumulator_addr, 
"").unwrap(); - let cur_idx = ctx.builder.build_load(res_idx, "").unwrap(); - - let result = match fn_name { - "np_argmin" | "np_min" => { - call_min(ctx, (elem_ty, accumulator), (elem_ty, elem)) - } - "np_argmax" | "np_max" => { - call_max(ctx, (elem_ty, accumulator), (elem_ty, elem)) - } - _ => unreachable!(), - }; - - let updated_idx = match (accumulator, result) { - (BasicValueEnum::IntValue(m), BasicValueEnum::IntValue(n)) => ctx - .builder - .build_select( - ctx.builder.build_int_compare(IntPredicate::NE, m, n, "").unwrap(), - idx.into(), - cur_idx, - "", - ) - .unwrap(), - (BasicValueEnum::FloatValue(m), BasicValueEnum::FloatValue(n)) => ctx - .builder - .build_select( - ctx.builder - .build_float_compare(FloatPredicate::ONE, m, n, "") - .unwrap(), - idx.into(), - cur_idx, - "", - ) - .unwrap(), - _ => unsupported_type(ctx, fn_name, &[elem_ty, elem_ty]), - }; - ctx.builder.build_store(res_idx, updated_idx).unwrap(); - ctx.builder.build_store(accumulator_addr, result).unwrap(); - - Ok(()) - }, - llvm_int64.const_int(1, false), - )?; - - match fn_name { - "np_argmin" | "np_argmax" => ctx.builder.build_load(res_idx, "").unwrap(), - "np_max" | "np_min" => ctx.builder.build_load(accumulator_addr, "").unwrap(), - _ => unreachable!(), - } - } - - _ => unsupported_type(ctx, fn_name, &[a_ty]), - }) -} - -/// Invokes the `np_maximum` builtin function. 
-pub fn call_numpy_maximum<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - x1: (Type, BasicValueEnum<'ctx>), - x2: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - const FN_NAME: &str = "np_maximum"; - - let (x1_ty, x1) = x1; - let (x2_ty, x2) = x2; - - let common_ty = if ctx.unifier.unioned(x1_ty, x2_ty) { Some(x1_ty) } else { None }; - - Ok(match (x1, x2) { - (BasicValueEnum::IntValue(x1), BasicValueEnum::IntValue(x2)) => { - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ctx.primitives.float, - ] - .iter() - .any(|ty| ctx.unifier.unioned(common_ty.unwrap(), *ty))); - - call_max(ctx, (x1_ty, x1.into()), (x2_ty, x2.into())) - } - - (BasicValueEnum::FloatValue(x1), BasicValueEnum::FloatValue(x2)) => { - debug_assert!(ctx.unifier.unioned(common_ty.unwrap(), ctx.primitives.float)); - - call_max(ctx, (x1_ty, x1.into()), (x2_ty, x2.into())) - } - - (x1, x2) - if [&x1_ty, &x2_ty].into_iter().any(|ty| { - ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) - }) => - { - let is_ndarray1 = - x1_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); - let is_ndarray2 = - x2_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()); - - let dtype = if is_ndarray1 && is_ndarray2 { - let (ndarray_dtype1, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty); - let (ndarray_dtype2, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty); - - debug_assert!(ctx.unifier.unioned(ndarray_dtype1, ndarray_dtype2)); - - ndarray_dtype1 - } else if is_ndarray1 { - unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty).0 - } else if is_ndarray2 { - unpack_ndarray_var_tys(&mut ctx.unifier, x2_ty).0 - } else { - unreachable!() - }; - - let x1_scalar_ty = if is_ndarray1 { dtype } else { x1_ty }; - let x2_scalar_ty = if is_ndarray2 { dtype } else { x2_ty }; - - numpy::ndarray_elementwise_binop_impl( - generator, - 
ctx, - dtype, - None, - (x1, !is_ndarray1), - (x2, !is_ndarray2), - |generator, ctx, (lhs, rhs)| { - call_numpy_maximum(generator, ctx, (x1_scalar_ty, lhs), (x2_scalar_ty, rhs)) - }, - )? - .as_base_value() - .into() - } - - _ => unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]), - }) -} - /// Helper function to create a built-in elementwise unary numpy function that takes in either an ndarray or a scalar. /// /// * `(arg_ty, arg_val)`: The [`Type`] and llvm value of the input argument. @@ -1068,318 +80,6 @@ where Ok(result) } -pub fn call_abs<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - n: (Type, BasicValueEnum<'ctx>), -) -> Result, String> { - const FN_NAME: &str = "abs"; - helper_call_numpy_unary_elementwise( - generator, - ctx, - n, - FN_NAME, - &|_ctx, elem_ty| elem_ty, - &|_generator, ctx, val_ty, val| match val { - BasicValueEnum::IntValue(n) => Some({ - debug_assert!([ - ctx.primitives.bool, - ctx.primitives.int32, - ctx.primitives.uint32, - ctx.primitives.int64, - ctx.primitives.uint64, - ] - .iter() - .any(|ty| ctx.unifier.unioned(val_ty, *ty))); - - if [ctx.primitives.int32, ctx.primitives.int64] - .iter() - .any(|ty| ctx.unifier.unioned(val_ty, *ty)) - { - llvm_intrinsics::call_int_abs( - ctx, - n, - ctx.ctx.bool_type().const_zero(), - Some(FN_NAME), - ) - .into() - } else { - n.into() - } - }), - - BasicValueEnum::FloatValue(n) => Some({ - debug_assert!(ctx.unifier.unioned(val_ty, ctx.primitives.float)); - - llvm_intrinsics::call_float_fabs(ctx, n, Some(FN_NAME)).into() - }), - - _ => None, - }, - ) -} - -/// Macro to conveniently generate numpy functions with [`helper_call_numpy_unary_elementwise`]. -/// -/// Arguments: -/// * `$name:ident`: The identifier of the rust function to be generated. 
-/// * `$fn_name:literal`: To be passed to the `fn_name` parameter of [`helper_call_numpy_unary_elementwise`] -/// * `$get_ret_elem_type:expr`: To be passed to the `get_ret_elem_type` parameter of [`helper_call_numpy_unary_elementwise`]. -/// But there is no need to make it a reference. -/// * `$on_scalar:expr`: To be passed to the `on_scalar` parameter of [`helper_call_numpy_unary_elementwise`]. -/// But there is no need to make it a reference. -macro_rules! create_helper_call_numpy_unary_elementwise { - ($name:ident, $fn_name:literal, $get_ret_elem_type:expr, $on_scalar:expr) => { - #[allow(clippy::redundant_closure_call)] - pub fn $name<'ctx, G: CodeGenerator + ?Sized>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - arg: (Type, BasicValueEnum<'ctx>), - ) -> Result, String> { - helper_call_numpy_unary_elementwise( - generator, - ctx, - arg, - $fn_name, - &$get_ret_elem_type, - &$on_scalar, - ) - } - }; -} - -/// A specialized version of [`create_helper_call_numpy_unary_elementwise`] to generate functions that takes in float and returns boolean (as an `i8`) elementwise. -/// -/// Arguments: -/// * `$name:ident`: The identifier of the rust function to be generated. -/// * `$fn_name:literal`: To be passed to the `fn_name` parameter of [`helper_call_numpy_unary_elementwise`]. -/// * `$on_scalar:expr`: The closure (see below for its type) that acts on float scalar values and returns -/// the boolean results of LLVM type `i1`. The returned `i1` value will be converted into an `i8`. -/// -/// ```ignore -/// // Type of `$on_scalar:expr` -/// fn on_scalar<'ctx, G: CodeGenerator + ?Sized>( -/// generator: &mut G, -/// ctx: &mut CodeGenContext<'ctx, '_>, -/// arg: FloatValue<'ctx> -/// ) -> IntValue<'ctx> // of LLVM type `i1` -/// ``` -macro_rules! 
create_helper_call_numpy_unary_elementwise_float_to_bool { - ($name:ident, $fn_name:literal, $on_scalar:expr) => { - create_helper_call_numpy_unary_elementwise!( - $name, - $fn_name, - |ctx, _| ctx.primitives.bool, - |generator, ctx, n_ty, val| { - match val { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float)); - - let ret = $on_scalar(generator, ctx, n); - Some(generator.bool_to_i8(ctx, ret).into()) - } - _ => None, - } - } - ); - }; -} - -/// A specialized version of [`create_helper_call_numpy_unary_elementwise`] to generate functions that takes in float and returns float elementwise. -/// -/// Arguments: -/// * `$name:ident`: The identifier of the rust function to be generated. -/// * `$fn_name:literal`: To be passed to the `fn_name` parameter of [`helper_call_numpy_unary_elementwise`]. -/// * `$on_scalar:expr`: The closure (see below for its type) that acts on float scalar values and returns float results. -/// -/// ```ignore -/// // Type of `$on_scalar:expr` -/// fn on_scalar<'ctx, G: CodeGenerator + ?Sized>( -/// generator: &mut G, -/// ctx: &mut CodeGenContext<'ctx, '_>, -/// arg: FloatValue<'ctx> -/// ) -> FloatValue<'ctx> -/// ``` -macro_rules! 
create_helper_call_numpy_unary_elementwise_float_to_float { - ($name:ident, $fn_name:literal, $elem_call:expr) => { - create_helper_call_numpy_unary_elementwise!( - $name, - $fn_name, - |ctx, _| ctx.primitives.float, - |_generator, ctx, val_ty, val| { - match val { - BasicValueEnum::FloatValue(n) => { - debug_assert!(ctx.unifier.unioned(val_ty, ctx.primitives.float)); - - Some($elem_call(ctx, n, Option::<&str>::None).into()) - } - _ => None, - } - } - ); - }; -} - -create_helper_call_numpy_unary_elementwise_float_to_bool!( - call_numpy_isnan, - "np_isnan", - irrt::call_isnan -); -create_helper_call_numpy_unary_elementwise_float_to_bool!( - call_numpy_isinf, - "np_isinf", - irrt::call_isinf -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_sin, - "np_sin", - llvm_intrinsics::call_float_sin -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_cos, - "np_cos", - llvm_intrinsics::call_float_cos -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_tan, - "np_tan", - extern_fns::call_tan -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arcsin, - "np_arcsin", - extern_fns::call_asin -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arccos, - "np_arccos", - extern_fns::call_acos -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arctan, - "np_arctan", - extern_fns::call_atan -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_sinh, - "np_sinh", - extern_fns::call_sinh -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_cosh, - "np_cosh", - extern_fns::call_cosh -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_tanh, - "np_tanh", - extern_fns::call_tanh -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arcsinh, - "np_arcsinh", - extern_fns::call_asinh -); 
-create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arccosh, - "np_arccosh", - extern_fns::call_acosh -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_arctanh, - "np_arctanh", - extern_fns::call_atanh -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_exp, - "np_exp", - llvm_intrinsics::call_float_exp -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_exp2, - "np_exp2", - llvm_intrinsics::call_float_exp2 -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_expm1, - "np_expm1", - extern_fns::call_expm1 -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_log, - "np_log", - llvm_intrinsics::call_float_log -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_log2, - "np_log2", - llvm_intrinsics::call_float_log2 -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_log10, - "np_log10", - llvm_intrinsics::call_float_log10 -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_sqrt, - "np_sqrt", - llvm_intrinsics::call_float_sqrt -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_cbrt, - "np_cbrt", - extern_fns::call_cbrt -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_fabs, - "np_fabs", - llvm_intrinsics::call_float_fabs -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_numpy_rint, - "np_rint", - llvm_intrinsics::call_float_rint -); - -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_erf, - "sp_spec_erf", - extern_fns::call_erf -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_erfc, - "sp_spec_erfc", - extern_fns::call_erfc -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_gamma, - "sp_spec_gamma", - |ctx, val, _| irrt::call_gamma(ctx, val) -); 
-create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_gammaln, - "sp_spec_gammaln", - |ctx, val, _| irrt::call_gammaln(ctx, val) -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_j0, - "sp_spec_j0", - |ctx, val, _| irrt::call_j0(ctx, val) -); -create_helper_call_numpy_unary_elementwise_float_to_float!( - call_scipy_special_j1, - "sp_spec_j1", - extern_fns::call_j1 -); - /// Invokes the `np_arctan2` builtin function. pub fn call_numpy_arctan2<'ctx, G: CodeGenerator + ?Sized>( generator: &mut G, @@ -2156,60 +856,64 @@ pub fn call_np_linalg_matrix_power<'ctx, G: CodeGenerator + ?Sized>( x1: (Type, BasicValueEnum<'ctx>), x2: (Type, BasicValueEnum<'ctx>), ) -> Result, String> { - const FN_NAME: &str = "np_linalg_matrix_power"; - let (x1_ty, x1) = x1; - let (x2_ty, x2) = x2; - let x2 = call_float(generator, ctx, (x2_ty, x2)).unwrap(); + todo!(); - let llvm_usize = generator.get_size_type(ctx.ctx); - if let (BasicValueEnum::PointerValue(n1), BasicValueEnum::FloatValue(n2)) = (x1, x2) { - let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty); - let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty); + /* + const FN_NAME: &str = "np_linalg_matrix_power"; + let (x1_ty, x1) = x1; + let (x2_ty, x2) = x2; + let x2 = call_float(generator, ctx, (x2_ty, x2)).unwrap(); - let BasicTypeEnum::FloatType(_) = n1_elem_ty else { - unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]); - }; + let llvm_usize = generator.get_size_type(ctx.ctx); + if let (BasicValueEnum::PointerValue(n1), BasicValueEnum::FloatValue(n2)) = (x1, x2) { + let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty); + let n1_elem_ty = ctx.get_llvm_type(generator, elem_ty); - let n1 = NDArrayValue::from_ptr_val(n1, llvm_usize, None); - // Changing second parameter to a `NDArray` for uniformity in function call - let n2_array = numpy::create_ndarray_const_shape( - generator, - ctx, - elem_ty, - &[llvm_usize.const_int(1, false)], - ) - 
.unwrap(); - unsafe { - n2_array.data().set_unchecked( - ctx, + let BasicTypeEnum::FloatType(_) = n1_elem_ty else { + unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]); + }; + + let n1 = NDArrayValue::from_ptr_val(n1, llvm_usize, None); + // Changing second parameter to a `NDArray` for uniformity in function call + let n2_array = numpy::create_ndarray_const_shape( generator, - &llvm_usize.const_zero(), - n2.as_basic_value_enum(), - ); - }; - let n2_array = n2_array.as_base_value().as_basic_value_enum(); + ctx, + elem_ty, + &[llvm_usize.const_int(1, false)], + ) + .unwrap(); + unsafe { + n2_array.data().set_unchecked( + ctx, + generator, + &llvm_usize.const_zero(), + n2.as_basic_value_enum(), + ); + }; + let n2_array = n2_array.as_base_value().as_basic_value_enum(); - let outdim0 = unsafe { - n1.dim_sizes() - .get_unchecked(ctx, generator, &llvm_usize.const_zero(), None) - .into_int_value() - }; - let outdim1 = unsafe { - n1.dim_sizes() - .get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None) - .into_int_value() - }; + let outdim0 = unsafe { + n1.dim_sizes() + .get_unchecked(ctx, generator, &llvm_usize.const_zero(), None) + .into_int_value() + }; + let outdim1 = unsafe { + n1.dim_sizes() + .get_unchecked(ctx, generator, &llvm_usize.const_int(1, false), None) + .into_int_value() + }; - let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[outdim0, outdim1]) - .unwrap() - .as_base_value() - .as_basic_value_enum(); + let out = numpy::create_ndarray_const_shape(generator, ctx, elem_ty, &[outdim0, outdim1]) + .unwrap() + .as_base_value() + .as_basic_value_enum(); - extern_fns::call_np_linalg_matrix_power(ctx, x1, n2_array, out, None); - Ok(out) - } else { - unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]) - } + extern_fns::call_np_linalg_matrix_power(ctx, x1, n2_array, out, None); + Ok(out) + } else { + unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]) + } + */ } /// Invokes the `np_linalg_det` linalg function diff --git 
a/nac3core/src/codegen/expr.rs b/nac3core/src/codegen/expr.rs index bdadb621..0480e96b 100644 --- a/nac3core/src/codegen/expr.rs +++ b/nac3core/src/codegen/expr.rs @@ -2,7 +2,7 @@ use crate::{ codegen::{ classes::{ ArrayLikeIndexer, ArrayLikeValue, ListType, ListValue, NDArrayValue, ProxyType, - ProxyValue, RangeValue, TypedArrayLikeAccessor, UntypedArrayLikeAccessor, + ProxyValue, RangeValue, UntypedArrayLikeAccessor, }, concrete_type::{ConcreteFuncArg, ConcreteTypeEnum, ConcreteTypeStore}, gen_in_range_check, get_llvm_abi_type, get_llvm_type, get_va_count_arg_name, @@ -16,14 +16,11 @@ use crate::{ gen_for_callback_incrementing, gen_if_callback, gen_if_else_expr_callback, gen_raise, gen_var, }, + structure::ndarray::NDArrayObject, CodeGenContext, CodeGenTask, CodeGenerator, }, symbol_resolver::{SymbolValue, ValueEnum}, - toplevel::{ - helper::PrimDef, - numpy::{make_ndarray_ty, unpack_ndarray_var_tys}, - DefinitionId, TopLevelDef, - }, + toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, DefinitionId, TopLevelDef}, typecheck::{ magic_methods::{Binop, BinopVariant, HasOpInfo}, typedef::{FunSignature, FuncArg, Type, TypeEnum, TypeVarId, Unifier, VarMap}, @@ -43,7 +40,7 @@ use nac3parser::ast::{ use std::iter::{repeat, repeat_with}; use std::{collections::HashMap, convert::TryInto, iter::once, iter::zip}; -use super::structure::cslice::CSlice; +use super::structure::{cslice::CSlice, ndarray::indexing::util::gen_ndarray_subscript_ndindexes}; use super::{ model::*, structure::exception::{Exception, ExceptionId}, @@ -562,7 +559,6 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { G: CodeGenerator + ?Sized, { self.const_strings.get(string).copied().unwrap_or_else(|| { - let type_context = generator.type_context(self.ctx); let sizet_model = IntModel(SizeT); let pbyte_model = PtrModel(IntModel(Byte)); let cslice_model = StructModel(CSlice); @@ -570,9 +566,9 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { let base = self.builder.build_global_string_ptr(string, 
"constant_string").unwrap(); let base = pbyte_model.believe_value(base.as_pointer_value()); - let len = sizet_model.constant(type_context, self.ctx, string.len() as u64); + let len = sizet_model.constant(generator, self.ctx, string.len() as u64); - let cslice = cslice_model.create_const(type_context, self, base, len); + let cslice = cslice_model.create_const(generator, self.ctx, base, len); self.const_strings.insert(string.to_owned(), cslice); @@ -588,12 +584,11 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { params: [Option>; 3], loc: Location, ) { - let type_context = generator.type_context(self.ctx); let exn_model = StructModel(Exception); let exn_id_model = IntModel(ExceptionId::default()); let exn_id = - exn_id_model.constant(type_context, self.ctx, self.resolver.get_string_id(name) as u64); + exn_id_model.constant(generator, self.ctx, self.resolver.get_string_id(name) as u64); let exn = self.exception_val.unwrap_or_else(|| { let exn = exn_model.var_alloca(generator, self, Some("exn")).unwrap(); *self.exception_val.insert(exn) @@ -619,15 +614,11 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { params: [Option>; 3], loc: Location, ) { - let type_context = generator.type_context(self.ctx); let param_model = IntModel(Int64); + let params = + params.map(|p| p.map(|p| param_model.check_value(generator, self.ctx, p).unwrap())); let err_msg = self.gen_string(generator, err_msg); - - let ctx = self.ctx; - let params = - params.map(|p| p.map(|p| param_model.check_value(type_context, ctx, p).unwrap())); - self.make_assert_impl(generator, cond, err_name, err_msg, params, loc); } @@ -640,7 +631,6 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { params: [Option>; 3], loc: Location, ) { - let type_context = generator.type_context(self.ctx); let bool_model = IntModel(Bool); // We assume that the condition is most probably true, so the normal path is the most @@ -648,7 +638,7 @@ impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { let cond = call_expect( self, generator.bool_to_i1(self, cond), - 
bool_model.const_true(type_context, self.ctx).value, + bool_model.const_true(generator, self.ctx).value, Some("expect"), ); @@ -2257,338 +2247,6 @@ pub fn gen_cmpop_expr<'ctx, G: CodeGenerator>( ) } -/// Generates code for a subscript expression on an `ndarray`. -/// -/// * `ty` - The `Type` of the `NDArray` elements. -/// * `ndims` - The `Type` of the `NDArray` number-of-dimensions `Literal`. -/// * `v` - The `NDArray` value. -/// * `slice` - The slice expression used to subscript into the `ndarray`. -fn gen_ndarray_subscript_expr<'ctx, G: CodeGenerator>( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - ty: Type, - ndims: Type, - v: NDArrayValue<'ctx>, - slice: &Expr>, -) -> Result>, String> { - let llvm_i1 = ctx.ctx.bool_type(); - let llvm_i32 = ctx.ctx.i32_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let TypeEnum::TLiteral { values, .. } = &*ctx.unifier.get_ty_immutable(ndims) else { - unreachable!() - }; - - let ndims = values - .iter() - .map(|ndim| u64::try_from(ndim.clone()).map_err(|()| ndim.clone())) - .collect::, _>>() - .map_err(|val| { - format!( - "Expected non-negative literal for ndarray.ndims, got {}", - i128::try_from(val).unwrap() - ) - })?; - - assert!(!ndims.is_empty()); - - // The number of dimensions subscripted by the index expression. - // Slicing a ndarray will yield the same number of dimensions, whereas indexing into a - // dimension will remove a dimension. - let subscripted_dims = match &slice.node { - ExprKind::Tuple { elts, .. } => elts.iter().fold(0, |acc, value_subexpr| { - if let ExprKind::Slice { .. } = &value_subexpr.node { - acc - } else { - acc + 1 - } - }), - - ExprKind::Slice { .. 
} => 0, - _ => 1, - }; - - let ndarray_ndims_ty = ctx.unifier.get_fresh_literal( - ndims.iter().map(|v| SymbolValue::U64(v - subscripted_dims)).collect(), - None, - ); - let ndarray_ty = - make_ndarray_ty(&mut ctx.unifier, &ctx.primitives, Some(ty), Some(ndarray_ndims_ty)); - let llvm_pndarray_t = ctx.get_llvm_type(generator, ndarray_ty).into_pointer_type(); - let llvm_ndarray_t = llvm_pndarray_t.get_element_type().into_struct_type(); - let llvm_ndarray_data_t = ctx.get_llvm_type(generator, ty).as_basic_type_enum(); - let sizeof_elem = llvm_ndarray_data_t.size_of().unwrap(); - - // Check that len is non-zero - let len = v.load_ndims(ctx); - ctx.make_assert( - generator, - ctx.builder.build_int_compare(IntPredicate::SGT, len, llvm_usize.const_zero(), "").unwrap(), - "0:IndexError", - "too many indices for array: array is {0}-dimensional but 1 were indexed", - [Some(len), None, None], - slice.location, - ); - - // Normalizes a possibly-negative index to its corresponding positive index - let normalize_index = |generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - index: IntValue<'ctx>, - dim: u64| { - gen_if_else_expr_callback( - generator, - ctx, - |_, ctx| { - Ok(ctx - .builder - .build_int_compare(IntPredicate::SGE, index, index.get_type().const_zero(), "") - .unwrap()) - }, - |_, _| Ok(Some(index)), - |generator, ctx| { - let llvm_i32 = ctx.ctx.i32_type(); - - let len = unsafe { - v.dim_sizes().get_typed_unchecked( - ctx, - generator, - &llvm_usize.const_int(dim, true), - None, - ) - }; - - let index = ctx - .builder - .build_int_add( - len, - ctx.builder.build_int_s_extend(index, llvm_usize, "").unwrap(), - "", - ) - .unwrap(); - - Ok(Some(ctx.builder.build_int_truncate(index, llvm_i32, "").unwrap())) - }, - ) - .map(|v| v.map(BasicValueEnum::into_int_value)) - }; - - // Converts a slice expression into a slice-range tuple - let expr_to_slice = |generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - node: &ExprKind>, - dim: u64| { - match node { - 
ExprKind::Constant { value: Constant::Int(v), .. } => { - let Some(index) = - normalize_index(generator, ctx, llvm_i32.const_int(*v as u64, true), dim)? - else { - return Ok(None); - }; - - Ok(Some((index, index, llvm_i32.const_int(1, true)))) - } - - ExprKind::Slice { lower, upper, step } => { - let dim_sz = unsafe { - v.dim_sizes().get_typed_unchecked( - ctx, - generator, - &llvm_usize.const_int(dim, false), - None, - ) - }; - - handle_slice_indices(lower, upper, step, ctx, generator, dim_sz) - } - - _ => { - let Some(index) = generator.gen_expr(ctx, slice)? else { return Ok(None) }; - let index = index - .to_basic_value_enum(ctx, generator, slice.custom.unwrap())? - .into_int_value(); - let Some(index) = normalize_index(generator, ctx, index, dim)? else { - return Ok(None); - }; - - Ok(Some((index, index, llvm_i32.const_int(1, true)))) - } - } - }; - - let make_indices_arr = |generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>| - -> Result<_, String> { - Ok(if let ExprKind::Tuple { elts, .. } = &slice.node { - let llvm_int_ty = ctx.get_llvm_type(generator, elts[0].custom.unwrap()); - let index_addr = generator.gen_array_var_alloc( - ctx, - llvm_int_ty, - llvm_usize.const_int(elts.len() as u64, false), - None, - )?; - - for (i, elt) in elts.iter().enumerate() { - let Some(index) = generator.gen_expr(ctx, elt)? else { - return Ok(None); - }; - - let index = index - .to_basic_value_enum(ctx, generator, elt.custom.unwrap())? - .into_int_value(); - let Some(index) = normalize_index(generator, ctx, index, 0)? else { - return Ok(None); - }; - - let store_ptr = unsafe { - index_addr.ptr_offset_unchecked( - ctx, - generator, - &llvm_usize.const_int(i as u64, false), - None, - ) - }; - ctx.builder.build_store(store_ptr, index).unwrap(); - } - - Some(index_addr) - } else if let Some(index) = generator.gen_expr(ctx, slice)? 
{ - let llvm_int_ty = ctx.get_llvm_type(generator, slice.custom.unwrap()); - let index_addr = generator.gen_array_var_alloc( - ctx, - llvm_int_ty, - llvm_usize.const_int(1u64, false), - None, - )?; - - let index = - index.to_basic_value_enum(ctx, generator, slice.custom.unwrap())?.into_int_value(); - let Some(index) = normalize_index(generator, ctx, index, 0)? else { return Ok(None) }; - - let store_ptr = unsafe { - index_addr.ptr_offset_unchecked(ctx, generator, &llvm_usize.const_zero(), None) - }; - ctx.builder.build_store(store_ptr, index).unwrap(); - - Some(index_addr) - } else { - None - }) - }; - - Ok(Some(if ndims.len() == 1 && ndims[0] - subscripted_dims == 0 { - let Some(index_addr) = make_indices_arr(generator, ctx)? else { return Ok(None) }; - - v.data().get(ctx, generator, &index_addr, None).into() - } else { - match &slice.node { - ExprKind::Tuple { elts, .. } => { - let slices = elts - .iter() - .enumerate() - .map(|(dim, elt)| expr_to_slice(generator, ctx, &elt.node, dim as u64)) - .take_while_inclusive(|slice| slice.as_ref().is_ok_and(Option::is_some)) - .collect::, _>>()?; - if slices.len() < elts.len() { - return Ok(None); - } - - let slices = slices.into_iter().map(Option::unwrap).collect_vec(); - - numpy::ndarray_sliced_copy(generator, ctx, ty, v, &slices)?.as_base_value().into() - } - - ExprKind::Slice { .. } => { - let Some(slice) = expr_to_slice(generator, ctx, &slice.node, 0)? else { - return Ok(None); - }; - - numpy::ndarray_sliced_copy(generator, ctx, ty, v, &[slice])?.as_base_value().into() - } - - _ => { - // Accessing an element from a multi-dimensional `ndarray` - - let Some(index_addr) = make_indices_arr(generator, ctx)? 
else { return Ok(None) }; - - // Create a new array, remove the top dimension from the dimension-size-list, and copy the - // elements over - let subscripted_ndarray = - generator.gen_var_alloc(ctx, llvm_ndarray_t.into(), None)?; - let ndarray = NDArrayValue::from_ptr_val(subscripted_ndarray, llvm_usize, None); - - let num_dims = v.load_ndims(ctx); - ndarray.store_ndims( - ctx, - generator, - ctx.builder - .build_int_sub(num_dims, llvm_usize.const_int(1, false), "") - .unwrap(), - ); - - let ndarray_num_dims = ndarray.load_ndims(ctx); - ndarray.create_dim_sizes(ctx, llvm_usize, ndarray_num_dims); - - let ndarray_num_dims = ctx - .builder - .build_int_z_extend_or_bit_cast( - ndarray.load_ndims(ctx), - llvm_usize.size_of().get_type(), - "", - ) - .unwrap(); - let v_dims_src_ptr = unsafe { - v.dim_sizes().ptr_offset_unchecked( - ctx, - generator, - &llvm_usize.const_int(1, false), - None, - ) - }; - call_memcpy_generic( - ctx, - ndarray.dim_sizes().base_ptr(ctx, generator), - v_dims_src_ptr, - ctx.builder - .build_int_mul(ndarray_num_dims, llvm_usize.size_of(), "") - .map(Into::into) - .unwrap(), - llvm_i1.const_zero(), - ); - - let ndarray_num_elems = call_ndarray_calc_size( - generator, - ctx, - &ndarray.dim_sizes().as_slice_value(ctx, generator), - (None, None), - ); - let ndarray_num_elems = ctx - .builder - .build_int_z_extend_or_bit_cast(ndarray_num_elems, sizeof_elem.get_type(), "") - .unwrap(); - ndarray.create_data(ctx, llvm_ndarray_data_t, ndarray_num_elems); - - let v_data_src_ptr = v.data().ptr_offset(ctx, generator, &index_addr, None); - call_memcpy_generic( - ctx, - ndarray.data().base_ptr(ctx, generator), - v_data_src_ptr, - ctx.builder - .build_int_mul( - ndarray_num_elems, - llvm_ndarray_data_t.size_of().unwrap(), - "", - ) - .map(Into::into) - .unwrap(), - llvm_i1.const_zero(), - ); - - ndarray.as_base_value().into() - } - } - })) -} - /// See [`CodeGenerator::gen_expr`]. 
pub fn gen_expr<'ctx, G: CodeGenerator>( generator: &mut G, @@ -3228,18 +2886,20 @@ pub fn gen_expr<'ctx, G: CodeGenerator>( v.data().get(ctx, generator, &index, None).into() } } - TypeEnum::TObj { obj_id, params, .. } if *obj_id == PrimDef::NDArray.id() => { - let (ty, ndims) = params.iter().map(|(_, ty)| ty).collect_tuple().unwrap(); - - let v = if let Some(v) = generator.gen_expr(ctx, value)? { - v.to_basic_value_enum(ctx, generator, value.custom.unwrap())? - .into_pointer_value() - } else { + TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => { + let Some(ndarray) = generator.gen_expr(ctx, value)? else { return Ok(None); }; - let v = NDArrayValue::from_ptr_val(v, usize, None); - return gen_ndarray_subscript_expr(generator, ctx, *ty, *ndims, v, slice); + let ndarray_ty = value.custom.unwrap(); + let ndarray = ndarray.to_basic_value_enum(ctx, generator, ndarray_ty)?; + let ndarray = + NDArrayObject::from_value_and_type(generator, ctx, ndarray, ndarray_ty); + + let indexes = gen_ndarray_subscript_ndindexes(generator, ctx, slice)?; + let result = ndarray.index_or_scalar(generator, ctx, &indexes, "index_result"); + let result = result.to_basic_value_enum(); + return Ok(Some(ValueEnum::Dynamic(result))); } TypeEnum::TTuple { .. } => { let index: u32 = diff --git a/nac3core/src/codegen/irrt/mod.rs b/nac3core/src/codegen/irrt/mod.rs index 8ed02f87..980c2767 100644 --- a/nac3core/src/codegen/irrt/mod.rs +++ b/nac3core/src/codegen/irrt/mod.rs @@ -5,6 +5,8 @@ mod test; pub mod util; use super::model::*; +use super::structure::ndarray::broadcast::ShapeEntry; +use super::structure::ndarray::indexing::NDIndex; use super::structure::ndarray::NpArray; use super::{ classes::{ @@ -427,15 +429,13 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>( // TODO: Temporary fix. 
Rewrite `list_slice_assignment` later // Exception params should have been i64 { - let type_context = generator.type_context(ctx.ctx); let param_model = IntModel(Int64); let src_slice_len = - param_model.s_extend_or_bit_cast(type_context, ctx, src_slice_len, "src_slice_len"); + param_model.s_extend_or_bit_cast(generator, ctx, src_slice_len, "src_slice_len"); let dest_slice_len = - param_model.s_extend_or_bit_cast(type_context, ctx, dest_slice_len, "dest_slice_len"); - let dest_idx_2 = - param_model.s_extend_or_bit_cast(type_context, ctx, dest_idx.2, "dest_idx_2"); + param_model.s_extend_or_bit_cast(generator, ctx, dest_slice_len, "dest_slice_len"); + let dest_idx_2 = param_model.s_extend_or_bit_cast(generator, ctx, dest_idx.2, "dest_idx_2"); ctx.make_assert( generator, @@ -897,7 +897,7 @@ pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>( } /// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns an [`ArrayAllocaValue`] -/// containing the indices used for accessing `array` corresponding to the index of the broadcasted +/// containing the indices used for accessing `array` corresponding to the index of the broadcast /// array `broadcast_idx`. pub fn call_ndarray_calc_broadcast_index< 'ctx, @@ -953,13 +953,12 @@ pub fn call_ndarray_calc_broadcast_index< ) } -pub fn call_nac3_throw_dummy_error<'ctx>(tyctx: TypeContext<'ctx>, ctx: &CodeGenContext<'ctx, '_>) { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_throw_dummy_error"), - ) - .returning_void(); +pub fn call_nac3_throw_dummy_error( + generator: &mut G, + ctx: &CodeGenContext<'_, '_>, +) { + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_throw_dummy_error"); + CallFunction::begin(generator, ctx, &name).returning_void(); } /// Initialize all global `EXN_*` exception IDs in IRRT with the [`SymbolResolver`]. 
@@ -989,116 +988,176 @@ pub fn setup_irrt_exceptions<'ctx>( } } -pub fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, ndims: Int<'ctx, SizeT>, shape: Ptr<'ctx, IntModel>, ) { - CallFunction::begin( - tyctx, + let name = get_sizet_dependent_function_name( + generator, ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_util_assert_shape_no_negative"), - ) - .arg("ndims", ndims) - .arg("shape", shape) - .returning_void(); + "__nac3_ndarray_util_assert_shape_no_negative", + ); + CallFunction::begin(generator, ctx, &name) + .arg("ndims", ndims) + .arg("shape", shape) + .returning_void(); } -pub fn call_nac3_ndarray_size<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_size<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, pndarray: Ptr<'ctx, StructModel>, ) -> Int<'ctx, SizeT> { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_size"), - ) - .arg("ndarray", pndarray) - .returning_auto("size") + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_size"); + CallFunction::begin(generator, ctx, &name).arg("ndarray", pndarray).returning_auto("size") } -pub fn call_nac3_ndarray_nbytes<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_nbytes<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, pndarray: Ptr<'ctx, StructModel>, ) -> Int<'ctx, SizeT> { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_nbytes"), - ) - .arg("ndarray", pndarray) - .returning_auto("nbytes") + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_nbytes"); + CallFunction::begin(generator, ctx, &name).arg("ndarray", pndarray).returning_auto("nbytes") } -pub fn 
call_nac3_ndarray_len<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_len<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, pndarray: Ptr<'ctx, StructModel>, ) -> Int<'ctx, SizeT> { - CallFunction::begin(tyctx, ctx, &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_len")) - .arg("ndarray", pndarray) - .returning_auto("len") + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_len"); + CallFunction::begin(generator, ctx, &name).arg("ndarray", pndarray).returning_auto("len") } -pub fn call_nac3_ndarray_is_c_contiguous<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_is_c_contiguous<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, ndarray_ptr: Ptr<'ctx, StructModel>, ) -> Int<'ctx, Bool> { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_is_c_contiguous"), - ) - .arg("ndarray", ndarray_ptr) - .returning_auto("is_c_contiguous") + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_is_c_contiguous"); + CallFunction::begin(generator, ctx, &name) + .arg("ndarray", ndarray_ptr) + .returning_auto("is_c_contiguous") } -pub fn call_nac3_ndarray_get_nth_pelement<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_get_nth_pelement<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, pndarray: Ptr<'ctx, StructModel>, index: Int<'ctx, SizeT>, ) -> Ptr<'ctx, IntModel> { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_get_nth_pelement"), - ) - .arg("ndarray", pndarray) - .arg("index", index) - .returning_auto("pelement") + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_get_nth_pelement"); + CallFunction::begin(generator, ctx, &name) + .arg("ndarray", pndarray) + .arg("index", index) + .returning_auto("pelement") } -pub fn 
call_nac3_ndarray_set_strides_by_shape<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_set_strides_by_shape<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, pdnarray: Ptr<'ctx, StructModel>, ) { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_set_strides_by_shape"), - ) - .arg("ndarray", pdnarray) - .returning_void(); + let name = + get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_set_strides_by_shape"); + CallFunction::begin(generator, ctx, &name).arg("ndarray", pdnarray).returning_void(); } -pub fn call_nac3_ndarray_copy_data<'ctx>( - tyctx: TypeContext<'ctx>, +pub fn call_nac3_ndarray_copy_data<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, ctx: &mut CodeGenContext<'ctx, '_>, src_ndarray: Ptr<'ctx, StructModel>, dst_ndarray: Ptr<'ctx, StructModel>, ) { - CallFunction::begin( - tyctx, - ctx, - &get_sizet_dependent_function_name(tyctx, "__nac3_ndarray_copy_data"), - ) - .arg("src_ndarray", src_ndarray) - .arg("dst_ndarray", dst_ndarray) - .returning_void(); + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_copy_data"); + CallFunction::begin(generator, ctx, &name) + .arg("src_ndarray", src_ndarray) + .arg("dst_ndarray", dst_ndarray) + .returning_void(); +} + +pub fn call_nac3_ndarray_index<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + num_indexes: Int<'ctx, SizeT>, + indexes: Ptr<'ctx, StructModel>, + src_ndarray: Ptr<'ctx, StructModel>, + dst_ndarray: Ptr<'ctx, StructModel>, +) { + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_index"); + CallFunction::begin(generator, ctx, &name) + .arg("num_indexes", num_indexes) + .arg("indexes", indexes) + .arg("src_ndarray", src_ndarray) + .arg("dst_ndarray", dst_ndarray) + .returning_void(); +} + +pub fn call_nac3_ndarray_broadcast_to<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + 
ctx: &mut CodeGenContext<'ctx, '_>, + src_ndarray: Ptr<'ctx, StructModel>, + dst_ndarray: Ptr<'ctx, StructModel>, +) { + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_broadcast_to"); + CallFunction::begin(generator, ctx, &name) + .arg("src_ndarray", src_ndarray) + .arg("dst_ndarray", dst_ndarray) + .returning_void(); +} + +pub fn call_nac3_ndarray_broadcast_shapes<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + num_shape_entries: Int<'ctx, SizeT>, + shape_entries: Ptr<'ctx, StructModel>, + dst_ndims: Int<'ctx, SizeT>, + dst_shape: Ptr<'ctx, IntModel>, +) { + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_broadcast_shapes"); + CallFunction::begin(generator, ctx, &name) + .arg("num_shapes", num_shape_entries) + .arg("shapes", shape_entries) + .arg("dst_ndims", dst_ndims) + .arg("dst_shape", dst_shape) + .returning_void(); +} + +pub fn call_nac3_ndarray_resolve_and_check_new_shape<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + size: Int<'ctx, SizeT>, + new_ndims: Int<'ctx, SizeT>, + new_shape: Ptr<'ctx, IntModel>, +) { + let name = get_sizet_dependent_function_name( + generator, + ctx, + "__nac3_ndarray_resolve_and_check_new_shape", + ); + CallFunction::begin(generator, ctx, &name) + .arg("size", size) + .arg("new_ndims", new_ndims) + .arg("new_shape", new_shape) + .returning_void(); +} + +pub fn call_nac3_ndarray_transpose<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src_ndarray: Ptr<'ctx, StructModel>, + dst_ndarray: Ptr<'ctx, StructModel>, + num_axes: Int<'ctx, SizeT>, + axes: Ptr<'ctx, IntModel>, +) { + let name = get_sizet_dependent_function_name(generator, ctx, "__nac3_ndarray_transpose"); + CallFunction::begin(generator, ctx, &name) + .arg("src_ndarray", src_ndarray) + .arg("dst_ndarray", dst_ndarray) + .arg("num_axes", num_axes) + .arg("axes", axes) + 
.returning_void(); } diff --git a/nac3core/src/codegen/irrt/util.rs b/nac3core/src/codegen/irrt/util.rs index 1f2776f8..124b5ef1 100644 --- a/nac3core/src/codegen/irrt/util.rs +++ b/nac3core/src/codegen/irrt/util.rs @@ -1,11 +1,15 @@ -use crate::codegen::model::*; +use crate::codegen::{CodeGenContext, CodeGenerator}; // When [`TypeContext::size_type`] is 32-bits, the function name is "{fn_name}". // When [`TypeContext::size_type`] is 64-bits, the function name is "{fn_name}64". #[must_use] -pub fn get_sizet_dependent_function_name(tyctx: TypeContext<'_>, name: &str) -> String { +pub fn get_sizet_dependent_function_name( + generator: &mut G, + ctx: &CodeGenContext<'_, '_>, + name: &str, +) -> String { let mut name = name.to_owned(); - match tyctx.size_type.get_bit_width() { + match generator.get_size_type(ctx.ctx).get_bit_width() { 32 => {} 64 => name.push_str("64"), bit_width => { @@ -16,7 +20,7 @@ pub fn get_sizet_dependent_function_name(tyctx: TypeContext<'_>, name: &str) -> } pub mod function { - use crate::codegen::{model::*, CodeGenContext}; + use crate::codegen::{model::*, CodeGenContext, CodeGenerator}; use inkwell::{ types::{BasicMetadataTypeEnum, BasicType, FunctionType}, values::{AnyValue, BasicMetadataValueEnum, BasicValue, BasicValueEnum, CallSiteValue}, @@ -30,8 +34,8 @@ pub mod function { } /// Helper structure to reduce IRRT Inkwell function call boilerplate - pub struct CallFunction<'ctx, 'a, 'b, 'c> { - tyctx: TypeContext<'ctx>, + pub struct CallFunction<'ctx, 'a, 'b, 'c, 'd, G: CodeGenerator + ?Sized> { + generator: &'d mut G, ctx: &'b CodeGenContext<'ctx, 'a>, /// Function name name: &'c str, @@ -39,13 +43,13 @@ pub mod function { args: Vec>, } - impl<'ctx, 'a, 'b, 'c> CallFunction<'ctx, 'a, 'b, 'c> { + impl<'ctx, 'a, 'b, 'c, 'd, G: CodeGenerator + ?Sized> CallFunction<'ctx, 'a, 'b, 'c, 'd, G> { pub fn begin( - tyctx: TypeContext<'ctx>, + generator: &'d mut G, ctx: &'b CodeGenContext<'ctx, 'a>, name: &'c str, ) -> Self { - CallFunction { tyctx, 
ctx, name, args: Vec::new() } + CallFunction { generator, ctx, name, args: Vec::new() } } /// Push a call argument to the function call. @@ -55,7 +59,7 @@ pub mod function { #[must_use] pub fn arg>(mut self, _name: &str, arg: Instance<'ctx, M>) -> Self { let arg = Arg { - ty: arg.model.get_type(self.tyctx, self.ctx.ctx).as_basic_type_enum().into(), + ty: arg.model.get_type(self.generator, self.ctx.ctx).as_basic_type_enum().into(), val: arg.value.as_basic_value_enum().into(), }; self.args.push(arg); @@ -65,11 +69,11 @@ pub mod function { /// Call the function and expect the function to return a value of type of `return_model`. #[must_use] pub fn returning>(self, name: &str, return_model: M) -> Instance<'ctx, M> { - let ret_ty = return_model.get_type(self.tyctx, self.ctx.ctx); + let ret_ty = return_model.get_type(self.generator, self.ctx.ctx); let ret = self.get_function(|tys| ret_ty.fn_type(tys, false), name); let ret = BasicValueEnum::try_from(ret.as_any_value_enum()).unwrap(); // Must work - let ret = return_model.check_value(self.tyctx, self.ctx.ctx, ret).unwrap(); // Must work + let ret = return_model.check_value(self.generator, self.ctx.ctx, ret).unwrap(); // Must work ret } diff --git a/nac3core/src/codegen/mod.rs b/nac3core/src/codegen/mod.rs index 5d5a5ba7..3cd0daa5 100644 --- a/nac3core/src/codegen/mod.rs +++ b/nac3core/src/codegen/mod.rs @@ -1,7 +1,7 @@ use crate::{ - codegen::classes::{ListType, NDArrayType, ProxyType, RangeType}, + codegen::classes::{ListType, ProxyType, RangeType}, symbol_resolver::{StaticValue, SymbolResolver}, - toplevel::{helper::PrimDef, numpy::unpack_ndarray_var_tys, TopLevelContext, TopLevelDef}, + toplevel::{helper::PrimDef, TopLevelContext, TopLevelDef}, typecheck::{ type_inferencer::{CodeLocation, PrimitiveStore}, typedef::{CallId, FuncArg, Type, TypeEnum, Unifier}, @@ -494,10 +494,8 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>( } TObj { obj_id, .. 
} if *obj_id == PrimDef::NDArray.id() => { - let tyctx = generator.type_context(ctx); - let pndarray_model = PtrModel(StructModel(NpArray)); - pndarray_model.get_type(tyctx, ctx).as_basic_type_enum() + pndarray_model.get_type(generator, ctx).as_basic_type_enum() } _ => unreachable!( @@ -703,7 +701,6 @@ pub fn gen_func_impl< ..primitives }; - let type_context = generator.type_context(context); let cslice_model = StructModel(CSlice); let pexn_model = PtrModel(StructModel(Exception)); @@ -714,9 +711,9 @@ pub fn gen_func_impl< (primitives.uint64, context.i64_type().into()), (primitives.float, context.f64_type().into()), (primitives.bool, context.i8_type().into()), - (primitives.str, cslice_model.get_type(type_context, context).into()), + (primitives.str, cslice_model.get_type(generator, context).into()), (primitives.range, RangeType::new(context).as_base_type().into()), - (primitives.exception, pexn_model.get_type(type_context, context).into()), + (primitives.exception, pexn_model.get_type(generator, context).into()), ] .iter() .copied() diff --git a/nac3core/src/codegen/model/any.rs b/nac3core/src/codegen/model/any.rs index 9b7c9562..7ad7d8f5 100644 --- a/nac3core/src/codegen/model/any.rs +++ b/nac3core/src/codegen/model/any.rs @@ -4,6 +4,8 @@ use inkwell::{ values::BasicValueEnum, }; +use crate::codegen::CodeGenerator; + use super::*; #[derive(Debug, Clone, Copy)] @@ -14,13 +16,17 @@ impl<'ctx> Model<'ctx> for AnyModel<'ctx> { type Value = BasicValueEnum<'ctx>; type Type = BasicTypeEnum<'ctx>; - fn get_type(&self, _tyctx: TypeContext<'ctx>, _ctx: &'ctx Context) -> Self::Type { + fn get_type( + &self, + _generator: &G, + _ctx: &'ctx Context, + ) -> Self::Type { self.0 } - fn check_type>( + fn check_type, G: CodeGenerator + ?Sized>( &self, - _tyctx: TypeContext<'ctx>, + _generator: &mut G, _ctx: &'ctx Context, ty: T, ) -> Result<(), ModelError> { diff --git a/nac3core/src/codegen/model/core.rs b/nac3core/src/codegen/model/core.rs index deb5f036..8e47cf02 100644 --- 
a/nac3core/src/codegen/model/core.rs +++ b/nac3core/src/codegen/model/core.rs @@ -5,21 +5,6 @@ use inkwell::{context::Context, types::*, values::*}; use super::*; use crate::codegen::{CodeGenContext, CodeGenerator}; -#[derive(Clone, Copy)] -pub struct TypeContext<'ctx> { - pub size_type: IntType<'ctx>, -} - -pub trait HasTypeContext { - fn type_context<'ctx>(&self, ctx: &'ctx Context) -> TypeContext<'ctx>; -} - -impl HasTypeContext for T { - fn type_context<'ctx>(&self, ctx: &'ctx Context) -> TypeContext<'ctx> { - TypeContext { size_type: self.get_size_type(ctx) } - } -} - #[derive(Debug, Clone)] pub struct ModelError(pub String); @@ -36,12 +21,16 @@ pub trait Model<'ctx>: fmt::Debug + Clone + Copy { type Type: BasicType<'ctx>; /// Return the [`BasicType`] of this model. - fn get_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Self::Type; + fn get_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> Self::Type; /// Check if a [`BasicType`] is the same type of this model. - fn check_type>( + fn check_type, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ty: T, ) -> Result<(), ModelError>; @@ -55,15 +44,15 @@ pub trait Model<'ctx>: fmt::Debug + Clone + Copy { /// Check if a [`BasicValue`]'s type is equivalent to the type of this model. /// Wrap it into an [`Instance`] if it is. 
- fn check_value>( + fn check_value, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, value: V, ) -> Result, ModelError> { let value = value.as_basic_value_enum(); - self.check_type(tyctx, ctx, value.get_type()) - .map_err(|err| err.under_context("the value {value:?}"))?; + self.check_type(generator, ctx, value.get_type()) + .map_err(|err| err.under_context(format!("the value {value:?}").as_str()))?; let Ok(value) = Self::Value::try_from(value) else { unreachable!("check_type() has bad implementation") @@ -72,27 +61,28 @@ pub trait Model<'ctx>: fmt::Debug + Clone + Copy { } // Allocate a value on the stack and return its pointer. - fn alloca( + fn alloca( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, name: &str, ) -> Ptr<'ctx, Self> { let pmodel = PtrModel(*self); - let p = ctx.builder.build_alloca(self.get_type(tyctx, ctx.ctx), name).unwrap(); + let p = ctx.builder.build_alloca(self.get_type(generator, ctx.ctx), name).unwrap(); pmodel.believe_value(p) } // Allocate an array on the stack and return its pointer. 
- fn array_alloca( + fn array_alloca( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, len: IntValue<'ctx>, name: &str, ) -> Ptr<'ctx, Self> { let pmodel = PtrModel(*self); - let p = ctx.builder.build_array_alloca(self.get_type(tyctx, ctx.ctx), len, name).unwrap(); + let p = + ctx.builder.build_array_alloca(self.get_type(generator, ctx.ctx), len, name).unwrap(); pmodel.believe_value(p) } @@ -102,14 +92,9 @@ pub trait Model<'ctx>: fmt::Debug + Clone + Copy { ctx: &mut CodeGenContext<'ctx, '_>, name: Option<&str>, ) -> Result, String> { - let tyctx = generator.type_context(ctx.ctx); - let pmodel = PtrModel(*self); - let p = generator.gen_var_alloc( - ctx, - self.get_type(tyctx, ctx.ctx).as_basic_type_enum(), - name, - )?; + let ty = self.get_type(generator, ctx.ctx).as_basic_type_enum(); + let p = generator.gen_var_alloc(ctx, ty, name)?; Ok(pmodel.believe_value(p)) } @@ -120,16 +105,10 @@ pub trait Model<'ctx>: fmt::Debug + Clone + Copy { len: IntValue<'ctx>, name: Option<&'ctx str>, ) -> Result, String> { - let tyctx = generator.type_context(ctx.ctx); - // TODO: Remove ArraySliceValue let pmodel = PtrModel(*self); - let p = generator.gen_array_var_alloc( - ctx, - self.get_type(tyctx, ctx.ctx).as_basic_type_enum(), - len, - name, - )?; + let ty = self.get_type(generator, ctx.ctx).as_basic_type_enum(); + let p = generator.gen_array_var_alloc(ctx, ty, len, name)?; Ok(pmodel.believe_value(PointerValue::from(p))) } } diff --git a/nac3core/src/codegen/model/int.rs b/nac3core/src/codegen/model/int.rs index e0f38c73..9f0d9f5f 100644 --- a/nac3core/src/codegen/model/int.rs +++ b/nac3core/src/codegen/model/int.rs @@ -7,7 +7,11 @@ use crate::codegen::{CodeGenContext, CodeGenerator}; use super::*; pub trait IntKind<'ctx>: fmt::Debug + Clone + Copy { - fn get_int_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> IntType<'ctx>; + fn get_int_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx>; } 
#[derive(Debug, Clone, Copy, Default)] @@ -22,32 +26,52 @@ pub struct Int64; pub struct SizeT; impl<'ctx> IntKind<'ctx> for Bool { - fn get_int_type(&self, _tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> IntType<'ctx> { + fn get_int_type( + &self, + _generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx> { ctx.bool_type() } } impl<'ctx> IntKind<'ctx> for Byte { - fn get_int_type(&self, _tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> IntType<'ctx> { + fn get_int_type( + &self, + _generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx> { ctx.i8_type() } } impl<'ctx> IntKind<'ctx> for Int32 { - fn get_int_type(&self, _tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> IntType<'ctx> { + fn get_int_type( + &self, + _generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx> { ctx.i32_type() } } impl<'ctx> IntKind<'ctx> for Int64 { - fn get_int_type(&self, _tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> IntType<'ctx> { + fn get_int_type( + &self, + _generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx> { ctx.i64_type() } } impl<'ctx> IntKind<'ctx> for SizeT { - fn get_int_type(&self, tyctx: TypeContext<'ctx>, _ctx: &'ctx Context) -> IntType<'ctx> { - tyctx.size_type + fn get_int_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> IntType<'ctx> { + generator.get_size_type(ctx) } } @@ -55,7 +79,11 @@ impl<'ctx> IntKind<'ctx> for SizeT { pub struct AnyInt<'ctx>(pub IntType<'ctx>); impl<'ctx> IntKind<'ctx> for AnyInt<'ctx> { - fn get_int_type(&self, _tyctx: TypeContext<'ctx>, _ctx: &'ctx Context) -> IntType<'ctx> { + fn get_int_type( + &self, + _generator: &G, + _ctx: &'ctx Context, + ) -> IntType<'ctx> { self.0 } } @@ -69,13 +97,17 @@ impl<'ctx, N: IntKind<'ctx>> Model<'ctx> for IntModel { type Type = IntType<'ctx>; #[must_use] - fn get_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Self::Type { - self.0.get_int_type(tyctx, ctx) + fn get_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> Self::Type { + 
self.0.get_int_type(generator, ctx) } - fn check_type>( + fn check_type, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ty: T, ) -> Result<(), ModelError> { @@ -84,7 +116,7 @@ impl<'ctx, N: IntKind<'ctx>> Model<'ctx> for IntModel { return Err(ModelError(format!("Expecting IntType, but got {ty:?}"))); }; - let exp_ty = self.0.get_int_type(tyctx, ctx); + let exp_ty = self.0.get_int_type(generator, ctx); if ty.get_bit_width() != exp_ty.get_bit_width() { return Err(ModelError(format!( "Expecting IntType to have {} bit(s), but got {} bit(s)", @@ -98,90 +130,107 @@ impl<'ctx, N: IntKind<'ctx>> Model<'ctx> for IntModel { } impl<'ctx, N: IntKind<'ctx>> IntModel { - pub fn constant( + pub fn constant( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, value: u64, ) -> Int<'ctx, N> { - let value = self.get_type(tyctx, ctx).const_int(value, false); + let value = self.get_type(generator, ctx).const_int(value, false); self.believe_value(value) } - pub fn const_0(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Int<'ctx, N> { - self.constant(tyctx, ctx, 0) - } - - pub fn const_1(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Int<'ctx, N> { - self.constant(tyctx, ctx, 1) - } - - pub fn s_extend_or_bit_cast( + pub fn const_0( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, + ctx: &'ctx Context, + ) -> Int<'ctx, N> { + self.constant(generator, ctx, 0) + } + + pub fn const_1( + &self, + generator: &mut G, + ctx: &'ctx Context, + ) -> Int<'ctx, N> { + self.constant(generator, ctx, 1) + } + + pub fn const_all_1s( + &self, + generator: &mut G, + ctx: &'ctx Context, + ) -> Int<'ctx, N> { + let value = self.get_type(generator, ctx).const_all_ones(); + self.believe_value(value) + } + + pub fn s_extend_or_bit_cast( + &self, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>, name: &str, ) -> Int<'ctx, N> { let value = ctx .builder - 
.build_int_s_extend_or_bit_cast(value, self.get_type(tyctx, ctx.ctx), name) + .build_int_s_extend_or_bit_cast(value, self.get_type(generator, ctx.ctx), name) .unwrap(); self.believe_value(value) } - pub fn truncate( + pub fn truncate( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, value: IntValue<'ctx>, name: &str, ) -> Int<'ctx, N> { let value = - ctx.builder.build_int_truncate(value, self.get_type(tyctx, ctx.ctx), name).unwrap(); + ctx.builder.build_int_truncate(value, self.get_type(generator, ctx.ctx), name).unwrap(); self.believe_value(value) } } impl IntModel { #[must_use] - pub fn const_false<'ctx>( + pub fn const_false<'ctx, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ) -> Int<'ctx, Bool> { - self.constant(tyctx, ctx, 0) + self.constant(generator, ctx, 0) } #[must_use] - pub fn const_true<'ctx>( + pub fn const_true<'ctx, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ) -> Int<'ctx, Bool> { - self.constant(tyctx, ctx, 1) + self.constant(generator, ctx, 1) } } impl<'ctx, N: IntKind<'ctx>> Int<'ctx, N> { - pub fn s_extend_or_bit_cast>( + pub fn s_extend_or_bit_cast, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, to_int_kind: NewN, name: &str, ) -> Int<'ctx, NewN> { - IntModel(to_int_kind).s_extend_or_bit_cast(tyctx, ctx, self.value, name) + IntModel(to_int_kind).s_extend_or_bit_cast(generator, ctx, self.value, name) } - pub fn truncate>( + pub fn truncate, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, to_int_kind: NewN, name: &str, ) -> Int<'ctx, NewN> { - IntModel(to_int_kind).truncate(tyctx, ctx, self.value, name) + IntModel(to_int_kind).truncate(generator, ctx, self.value, name) } #[must_use] diff --git a/nac3core/src/codegen/model/mod.rs 
b/nac3core/src/codegen/model/mod.rs index c030f7cb..b73a1de9 100644 --- a/nac3core/src/codegen/model/mod.rs +++ b/nac3core/src/codegen/model/mod.rs @@ -3,6 +3,7 @@ mod core; mod int; mod ptr; mod structure; +pub mod util; pub use any::*; pub use core::*; diff --git a/nac3core/src/codegen/model/ptr.rs b/nac3core/src/codegen/model/ptr.rs index febac3c7..fb2d48a2 100644 --- a/nac3core/src/codegen/model/ptr.rs +++ b/nac3core/src/codegen/model/ptr.rs @@ -5,7 +5,7 @@ use inkwell::{ AddressSpace, }; -use crate::codegen::CodeGenContext; +use crate::codegen::{CodeGenContext, CodeGenerator}; use super::*; @@ -17,13 +17,17 @@ impl<'ctx, Element: Model<'ctx>> Model<'ctx> for PtrModel { type Value = PointerValue<'ctx>; type Type = PointerType<'ctx>; - fn get_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Self::Type { - self.0.get_type(tyctx, ctx).ptr_type(AddressSpace::default()) + fn get_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> Self::Type { + self.0.get_type(generator, ctx).ptr_type(AddressSpace::default()) } - fn check_type>( + fn check_type, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ty: T, ) -> Result<(), ModelError> { @@ -41,7 +45,9 @@ impl<'ctx, Element: Model<'ctx>> Model<'ctx> for PtrModel { // TODO: inkwell `get_element_type()` will be deprecated. // Remove the check for `get_element_type()` when the time comes. - self.0.check_type(tyctx, ctx, elem_ty).map_err(|err| err.under_context("a PointerType"))?; + self.0 + .check_type(generator, ctx, elem_ty) + .map_err(|err| err.under_context("a PointerType"))?; Ok(()) } @@ -49,20 +55,25 @@ impl<'ctx, Element: Model<'ctx>> Model<'ctx> for PtrModel { impl<'ctx, Element: Model<'ctx>> PtrModel { /// Return a ***constant*** nullptr. 
- pub fn nullptr(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Ptr<'ctx, Element> { - let ptr = self.get_type(tyctx, ctx).const_null(); + pub fn nullptr( + &self, + generator: &mut G, + ctx: &'ctx Context, + ) -> Ptr<'ctx, Element> { + let ptr = self.get_type(generator, ctx).const_null(); self.believe_value(ptr) } /// Cast a pointer into this model with [`inkwell::builder::Builder::build_pointer_cast`] - pub fn pointer_cast( + pub fn pointer_cast( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, ptr: PointerValue<'ctx>, name: &str, ) -> Ptr<'ctx, Element> { - let ptr = ctx.builder.build_pointer_cast(ptr, self.get_type(tyctx, ctx.ctx), name).unwrap(); + let ptr = + ctx.builder.build_pointer_cast(ptr, self.get_type(generator, ctx.ctx), name).unwrap(); self.believe_value(ptr) } } @@ -70,38 +81,38 @@ impl<'ctx, Element: Model<'ctx>> PtrModel { impl<'ctx, Element: Model<'ctx>> Ptr<'ctx, Element> { /// Offset the pointer by [`inkwell::builder::Builder::build_in_bounds_gep`]. #[must_use] - pub fn offset( + pub fn offset( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, offset: IntValue<'ctx>, name: &str, ) -> Ptr<'ctx, Element> { let new_ptr = unsafe { ctx.builder.build_in_bounds_gep(self.value, &[offset], name).unwrap() }; - self.model.check_value(tyctx, ctx.ctx, new_ptr).unwrap() + self.model.check_value(generator, ctx.ctx, new_ptr).unwrap() } // Load the `i`-th element (0-based) on the array with [`inkwell::builder::Builder::build_in_bounds_gep`]. - pub fn ix( + pub fn ix( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, i: IntValue<'ctx>, name: &str, ) -> Instance<'ctx, Element> { - self.offset(tyctx, ctx, i, name).load(tyctx, ctx, name) + self.offset(generator, ctx, i, name).load(generator, ctx, name) } /// Load the value with [`inkwell::builder::Builder::build_load`]. 
- pub fn load( + pub fn load( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, name: &str, ) -> Instance<'ctx, Element> { let value = ctx.builder.build_load(self.value, name).unwrap(); - self.model.0.check_value(tyctx, ctx.ctx, value).unwrap() // If unwrap() panics, there is a logic error. + self.model.0.check_value(generator, ctx.ctx, value).unwrap() // If unwrap() panics, there is a logic error. } /// Store a value with [`inkwell::builder::Builder::build_store`]. @@ -110,14 +121,14 @@ impl<'ctx, Element: Model<'ctx>> Ptr<'ctx, Element> { } /// Return a casted pointer of element type `NewElement` with [`inkwell::builder::Builder::build_pointer_cast`]. - pub fn transmute>( + pub fn transmute, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, new_model: NewElement, name: &str, ) -> Ptr<'ctx, NewElement> { - PtrModel(new_model).pointer_cast(tyctx, ctx, self.value, name) + PtrModel(new_model).pointer_cast(generator, ctx, self.value, name) } /// Check if the pointer is null with [`inkwell::builder::Builder::build_is_null`]. 
diff --git a/nac3core/src/codegen/model/structure.rs b/nac3core/src/codegen/model/structure.rs index 63e1040f..cd4b9995 100644 --- a/nac3core/src/codegen/model/structure.rs +++ b/nac3core/src/codegen/model/structure.rs @@ -6,7 +6,7 @@ use inkwell::{ values::StructValue, }; -use crate::codegen::CodeGenContext; +use crate::codegen::{CodeGenContext, CodeGenerator}; use super::*; @@ -42,30 +42,32 @@ impl<'ctx> FieldTraversal<'ctx> for GepFieldTraversal { } } -struct TypeFieldTraversal<'ctx> { - tyctx: TypeContext<'ctx>, +struct TypeFieldTraversal<'ctx, 'a, G: CodeGenerator + ?Sized> { + generator: &'a G, ctx: &'ctx Context, field_types: Vec>, } -impl<'ctx> FieldTraversal<'ctx> for TypeFieldTraversal<'ctx> { +impl<'ctx, 'a, G: CodeGenerator + ?Sized> FieldTraversal<'ctx> for TypeFieldTraversal<'ctx, 'a, G> { type Out = (); fn add>(&mut self, _name: &'static str, model: M) -> Self::Out { - let t = model.get_type(self.tyctx, self.ctx).as_basic_type_enum(); + let t = model.get_type(self.generator, self.ctx).as_basic_type_enum(); self.field_types.push(t); } } -struct CheckTypeFieldTraversal<'ctx> { - tyctx: TypeContext<'ctx>, +struct CheckTypeFieldTraversal<'ctx, 'a, G: CodeGenerator + ?Sized> { + generator: &'a mut G, ctx: &'ctx Context, index: u32, scrutinee: StructType<'ctx>, errors: Vec, } -impl<'ctx> FieldTraversal<'ctx> for CheckTypeFieldTraversal<'ctx> { +impl<'ctx, 'a, G: CodeGenerator + ?Sized> FieldTraversal<'ctx> + for CheckTypeFieldTraversal<'ctx, 'a, G> +{ type Out = (); fn add>(&mut self, name: &'static str, model: M) -> Self::Out { @@ -73,8 +75,8 @@ impl<'ctx> FieldTraversal<'ctx> for CheckTypeFieldTraversal<'ctx> { self.index += 1; if let Some(t) = self.scrutinee.get_field_type_at_index(i) { - if let Err(err) = model.check_type(self.tyctx, self.ctx, t) { - self.errors.push(err.under_context(format!("At field #{i} '{name}'").as_str())); + if let Err(err) = model.check_type(self.generator, self.ctx, t) { + self.errors.push(err.under_context(format!("field #{i} 
'{name}'").as_str())); } } // Otherwise, it will be caught } @@ -89,8 +91,12 @@ pub trait StructKind<'ctx>: fmt::Debug + Clone + Copy { self.traverse_fields(&mut GepFieldTraversal { gep_index_counter: 0 }) } - fn get_struct_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> StructType<'ctx> { - let mut traversal = TypeFieldTraversal { tyctx, ctx, field_types: Vec::new() }; + fn get_struct_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> StructType<'ctx> { + let mut traversal = TypeFieldTraversal { generator, ctx, field_types: Vec::new() }; self.traverse_fields(&mut traversal); ctx.struct_type(&traversal.field_types, false) @@ -105,13 +111,17 @@ impl<'ctx, S: StructKind<'ctx>> Model<'ctx> for StructModel { type Value = StructValue<'ctx>; type Type = StructType<'ctx>; - fn get_type(&self, tyctx: TypeContext<'ctx>, ctx: &'ctx Context) -> Self::Type { - self.0.get_struct_type(tyctx, ctx) + fn get_type( + &self, + generator: &G, + ctx: &'ctx Context, + ) -> Self::Type { + self.0.get_struct_type(generator, ctx) } - fn check_type>( + fn check_type, G: CodeGenerator + ?Sized>( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &'ctx Context, ty: T, ) -> Result<(), ModelError> { @@ -121,7 +131,7 @@ impl<'ctx, S: StructKind<'ctx>> Model<'ctx> for StructModel { }; let mut traversal = - CheckTypeFieldTraversal { tyctx, ctx, index: 0, errors: Vec::new(), scrutinee: ty }; + CheckTypeFieldTraversal { generator, ctx, index: 0, errors: Vec::new(), scrutinee: ty }; self.0.traverse_fields(&mut traversal); let exp_num_fields = traversal.index; @@ -168,9 +178,9 @@ impl<'ctx, S: StructKind<'ctx>> Ptr<'ctx, StructModel> { } /// Convenience function equivalent to `.gep(...).load(...)`. 
- pub fn get( + pub fn get( &self, - tyctx: TypeContext<'ctx>, + generator: &mut G, ctx: &CodeGenContext<'ctx, '_>, get_field: GetField, name: &str, @@ -179,7 +189,7 @@ impl<'ctx, S: StructKind<'ctx>> Ptr<'ctx, StructModel> { M: Model<'ctx>, GetField: FnOnce(S::Fields) -> GepField, { - self.gep(ctx, get_field).load(tyctx, ctx, name) + self.gep(ctx, get_field).load(generator, ctx, name) } /// Convenience function equivalent to `.gep(...).store(...)`. @@ -192,6 +202,6 @@ impl<'ctx, S: StructKind<'ctx>> Ptr<'ctx, StructModel> { M: Model<'ctx>, GetField: FnOnce(S::Fields) -> GepField, { - self.gep(ctx, get_field).store(ctx, value) + self.gep(ctx, get_field).store(ctx, value); } } diff --git a/nac3core/src/codegen/model/util.rs b/nac3core/src/codegen/model/util.rs new file mode 100644 index 00000000..2b09a259 --- /dev/null +++ b/nac3core/src/codegen/model/util.rs @@ -0,0 +1,91 @@ +use inkwell::{types::BasicType, values::IntValue}; + +/// `llvm.memcpy` but under the [`Model`] abstraction +use crate::codegen::{ + llvm_intrinsics::call_memcpy_generic, + stmt::{gen_for_callback_incrementing, BreakContinueHooks}, + CodeGenContext, CodeGenerator, +}; + +use super::*; + +/// Convenience function. +/// +/// Like [`call_memcpy_generic`] but with model abstractions and `is_volatile` set to `false`. +pub fn call_memcpy_model<'ctx, Item: Model<'ctx> + Default, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &CodeGenContext<'ctx, '_>, + dst_array: Ptr<'ctx, Item>, + src_array: Ptr<'ctx, Item>, + num_items: IntValue<'ctx>, +) { + let itemsize = Item::default().get_type(generator, ctx.ctx).size_of().unwrap(); + let totalsize = ctx.builder.build_int_mul(itemsize, num_items, "totalsize").unwrap(); // TODO: Int types may not match. + let is_volatile = ctx.ctx.bool_type().const_zero(); + call_memcpy_generic(ctx, dst_array.value, src_array.value, totalsize, is_volatile); +} + +/// Like [`gen_for_callback_incrementing`] with [`Model`] abstractions. 
+/// The [`IntKind`] is automatically inferred. +pub fn gen_for_model_auto<'ctx, 'a, G, F, I>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + start: Int<'ctx, I>, + stop: Int<'ctx, I>, + step: Int<'ctx, I>, + body: F, +) -> Result<(), String> +where + G: CodeGenerator + ?Sized, + F: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + BreakContinueHooks<'ctx>, + Int<'ctx, I>, + ) -> Result<(), String>, + I: IntKind<'ctx> + Default, +{ + let int_model = IntModel(I::default()); + + gen_for_callback_incrementing( + generator, + ctx, + None, + start.value, + (stop.value, false), + |g, ctx, hooks, i| { + let i = int_model.believe_value(i); + body(g, ctx, hooks, i) + }, + step.value, + ) +} + +/// Like [`gen_if_callback`] with [`Model`] abstractions and without the `else` block. +pub fn gen_if_model<'ctx, 'a, G, ThenFn>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + cond: Int<'ctx, Bool>, + then: ThenFn, +) -> Result<(), String> +where + G: CodeGenerator + ?Sized, + ThenFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result<(), String>, +{ + let current_bb = ctx.builder.get_insert_block().unwrap(); + let then_bb = ctx.ctx.insert_basic_block_after(current_bb, "if.then"); + let end_bb = ctx.ctx.insert_basic_block_after(then_bb, "if.end"); + + // Inserting into `current_bb`. + ctx.builder.build_conditional_branch(cond.value, then_bb, end_bb).unwrap(); + + // Inserting into `then_bb` + ctx.builder.position_at_end(then_bb); + then(generator, ctx)?; + ctx.builder.build_unconditional_branch(end_bb).unwrap(); + + // Reposition to `end_bb` for continuation. 
+ ctx.builder.position_at_end(end_bb); + + Ok(()) +} diff --git a/nac3core/src/codegen/numpy_new.rs b/nac3core/src/codegen/numpy_new.rs new file mode 100644 index 00000000..0f3df368 --- /dev/null +++ b/nac3core/src/codegen/numpy_new.rs @@ -0,0 +1,483 @@ +// TODO: Replace numpy.rs + +use inkwell::values::{BasicValue, BasicValueEnum}; +use nac3parser::ast::StrRef; + +use crate::{ + codegen::{ + structure::{ + ndarray::{ + scalar::split_scalar_or_ndarray, shape_util::parse_numpy_int_sequence, + NDArrayObject, + }, + tuple::TupleObject, + }, + }, + symbol_resolver::ValueEnum, + toplevel::{ + numpy::{extract_ndims, unpack_ndarray_var_tys}, + DefinitionId, + }, + typecheck::typedef::{FunSignature, Type}, +}; + +use super::{ + irrt::call_nac3_ndarray_util_assert_shape_no_negative, model::*, CodeGenContext, CodeGenerator, +}; + +/// Get the zero value in `np.zeros()` of a `dtype`. +fn ndarray_zero_value<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + dtype: Type, +) -> BasicValueEnum<'ctx> { + if [ctx.primitives.int32, ctx.primitives.uint32] + .iter() + .any(|ty| ctx.unifier.unioned(dtype, *ty)) + { + ctx.ctx.i32_type().const_zero().into() + } else if [ctx.primitives.int64, ctx.primitives.uint64] + .iter() + .any(|ty| ctx.unifier.unioned(dtype, *ty)) + { + ctx.ctx.i64_type().const_zero().into() + } else if ctx.unifier.unioned(dtype, ctx.primitives.float) { + ctx.ctx.f64_type().const_zero().into() + } else if ctx.unifier.unioned(dtype, ctx.primitives.bool) { + ctx.ctx.bool_type().const_zero().into() + } else if ctx.unifier.unioned(dtype, ctx.primitives.str) { + ctx.gen_string(generator, "").value.into() + } else { + unreachable!() + } +} + +/// Get the one value in `np.ones()` of a `dtype`. 
+fn ndarray_one_value<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + elem_ty: Type, +) -> BasicValueEnum<'ctx> { + if [ctx.primitives.int32, ctx.primitives.uint32] + .iter() + .any(|ty| ctx.unifier.unioned(elem_ty, *ty)) + { + let is_signed = ctx.unifier.unioned(elem_ty, ctx.primitives.int32); + ctx.ctx.i32_type().const_int(1, is_signed).into() + } else if [ctx.primitives.int64, ctx.primitives.uint64] + .iter() + .any(|ty| ctx.unifier.unioned(elem_ty, *ty)) + { + let is_signed = ctx.unifier.unioned(elem_ty, ctx.primitives.int64); + ctx.ctx.i64_type().const_int(1, is_signed).into() + } else if ctx.unifier.unioned(elem_ty, ctx.primitives.float) { + ctx.ctx.f64_type().const_float(1.0).into() + } else if ctx.unifier.unioned(elem_ty, ctx.primitives.bool) { + ctx.ctx.bool_type().const_int(1, false).into() + } else if ctx.unifier.unioned(elem_ty, ctx.primitives.str) { + ctx.gen_string(generator, "1").value.into() + } else { + unreachable!() + } +} + +/// Helper function to create an ndarray with uninitialized values. +/// +/// * `ndarray_ty` - The [`Type`] of the ndarray +/// * `shape` - The user input shape argument +/// * `shape_ty` - The [`Type`] of the shape argument +/// +/// This function does data validation on the `shape` input. 
+fn create_empty_ndarray<'ctx, G>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ndarray_ty: Type, + shape: BasicValueEnum<'ctx>, + shape_ty: Type, +) -> NDArrayObject<'ctx> +where + G: CodeGenerator + ?Sized, +{ + let (_, shape) = parse_numpy_int_sequence(generator, ctx, shape, shape_ty); + + let ndarray = + NDArrayObject::alloca_uninitialized_of_type(generator, ctx, ndarray_ty, "ndarray"); + + // Validate `shape` + let ndims = ndarray.get_ndims(generator, ctx.ctx); + call_nac3_ndarray_util_assert_shape_no_negative(generator, ctx, ndims, shape); + + // Setup `ndarray` with `shape` + ndarray.copy_shape_from_array(generator, ctx, shape); + ndarray.create_data(generator, ctx); // `shape` has to be set + + ndarray +} + +/// Generates LLVM IR for `np.empty`. +pub fn gen_ndarray_empty<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse arguments + let shape_ty = fun.0.args[0].ty; + let shape = args[0].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Implementation + let ndarray_ty = fun.0.ret; + let ndarray = create_empty_ndarray(generator, ctx, ndarray_ty, shape, shape_ty); + + Ok(ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.zeros`. 
+pub fn gen_ndarray_zeros<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse arguments + let shape_ty = fun.0.args[0].ty; + let shape = args[0].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Implementation + let ndarray_ty = fun.0.ret; + let ndarray = create_empty_ndarray(generator, ctx, ndarray_ty, shape, shape_ty); + + let fill_value = ndarray_zero_value(generator, ctx, ndarray.dtype); + ndarray.fill(generator, ctx, fill_value); + + Ok(ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.ones`. +pub fn gen_ndarray_ones<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse arguments + let shape_ty = fun.0.args[0].ty; + let shape = args[0].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Implementation + let ndarray_ty = fun.0.ret; + let ndarray = create_empty_ndarray(generator, ctx, ndarray_ty, shape, shape_ty); + + let fill_value = ndarray_one_value(generator, ctx, ndarray.dtype); + ndarray.fill(generator, ctx, fill_value); + + Ok(ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.full`. 
+pub fn gen_ndarray_full<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 2); + + // Parse argument #1 shape + let shape_ty = fun.0.args[0].ty; + let shape = args[0].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Parse argument #2 fill_value + let fill_value_ty = fun.0.args[1].ty; + let fill_value = args[1].1.clone().to_basic_value_enum(ctx, generator, fill_value_ty)?; + + // Implementation + let ndarray_ty = fun.0.ret; + let ndarray = create_empty_ndarray(generator, ctx, ndarray_ty, shape, shape_ty); + + ndarray.fill(generator, ctx, fill_value); + + Ok(ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.broadcast_to`. +pub fn gen_ndarray_broadcast_to<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 2); + + // Parse argument #1 input + let input_ty = fun.0.args[0].ty; + let input = args[0].1.clone().to_basic_value_enum(ctx, generator, input_ty)?; + + // Parse argument #2 shape + let shape_ty = fun.0.args[1].ty; + let shape = args[1].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Define models + let sizet_model = IntModel(SizeT); + + // Extract broadcast_ndims, this is the only way to get the + // ndims of the ndarray result statically. 
+ let (_, broadcast_ndims_ty) = unpack_ndarray_var_tys(&mut ctx.unifier, fun.0.ret); + let broadcast_ndims = extract_ndims(&ctx.unifier, broadcast_ndims_ty); + + // Process `input` + let in_ndarray = + split_scalar_or_ndarray(generator, ctx, input, input_ty).as_ndarray(generator, ctx); + + // Process `shape` + let (_, broadcast_shape) = parse_numpy_int_sequence(generator, ctx, shape, shape_ty); + // NOTE: shape.size should equal to `broadcasted_ndims`. + let broadcast_ndims_llvm = sizet_model.constant(generator, ctx.ctx, broadcast_ndims); + call_nac3_ndarray_util_assert_shape_no_negative( + generator, + ctx, + broadcast_ndims_llvm, + broadcast_shape, + ); + + // Create broadcast view + let broadcast_ndarray = + in_ndarray.broadcast_to(generator, ctx, broadcast_ndims, broadcast_shape); + + Ok(broadcast_ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.reshape`. +pub fn gen_ndarray_reshape<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 2); + + // Parse argument #1 input + let input_ty = fun.0.args[0].ty; + let input = args[0].1.clone().to_basic_value_enum(ctx, generator, input_ty)?; + + // Parse argument #2 shape + let shape_ty = fun.0.args[1].ty; + let shape = args[1].1.clone().to_basic_value_enum(ctx, generator, shape_ty)?; + + // Extract reshaped_ndims + let (_, reshaped_ndims_ty) = unpack_ndarray_var_tys(&mut ctx.unifier, fun.0.ret); + let reshaped_ndims = extract_ndims(&ctx.unifier, reshaped_ndims_ty); + + // Process `input` + let in_ndarray = + split_scalar_or_ndarray(generator, ctx, input, input_ty).as_ndarray(generator, ctx); + + // Process the shape input from user and resolve negative indices. + // The resulting `new_shape`'s size should be equal to reshaped_ndims. + // This is ensured by the typechecker. 
+ let (_, new_shape) = parse_numpy_int_sequence(generator, ctx, shape, shape_ty); + let reshaped_ndarray = in_ndarray.reshape_or_copy(generator, ctx, reshaped_ndims, new_shape); + + Ok(reshaped_ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.arange`. +pub fn gen_ndarray_arange<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse argument #1 len + let input_ty = fun.0.args[0].ty; + let input = args[0].1.clone().to_basic_value_enum(ctx, generator, input_ty)?.into_int_value(); + + // Define models + let sizet_model = IntModel(SizeT); + + // Process input + let input = sizet_model.s_extend_or_bit_cast(generator, ctx, input, "input_dim"); + + // Allocate the resulting ndarray + let ndarray = NDArrayObject::alloca_uninitialized( + generator, + ctx, + ctx.primitives.float, + 1, // ndims = 1 + "arange_ndarray", + ); + + // `ndarray.shape[0] = input` + let zero = sizet_model.const_0(generator, ctx.ctx); + ndarray + .value + .get(generator, ctx, |f| f.shape, "shape") + .offset(generator, ctx, zero.value, "dim") + .store(ctx, input); + + // Create data and set elements + ndarray.create_data(generator, ctx); + ndarray.foreach_pointer(generator, ctx, |_generator, ctx, _hooks, i, pelement| { + let val = + ctx.builder.build_unsigned_int_to_float(i.value, ctx.ctx.f64_type(), "val").unwrap(); + ctx.builder.build_store(pelement, val).unwrap(); + Ok(()) + })?; + + Ok(ndarray.value.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.size`. 
+pub fn gen_ndarray_size<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + let ndarray_ty = fun.0.args[0].ty; + let ndarray = args[0].1.clone().to_basic_value_enum(ctx, generator, ndarray_ty)?; + + let ndarray = NDArrayObject::from_value_and_type(generator, ctx, ndarray, ndarray_ty); + + let size = ndarray.size(generator, ctx).truncate(generator, ctx, Int32, "size"); + Ok(size.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.shape`. +pub fn gen_ndarray_shape<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse argument #1 ndarray + let ndarray_ty = fun.0.args[0].ty; + let ndarray = args[0].1.clone().to_basic_value_enum(ctx, generator, ndarray_ty)?; + + // Define models + let sizet_model = IntModel(SizeT); + + // Process ndarray + let ndarray = NDArrayObject::from_value_and_type(generator, ctx, ndarray, ndarray_ty); + + let mut items = Vec::with_capacity(ndarray.ndims as usize); + + for i in 0..ndarray.ndims { + let i = sizet_model.constant(generator, ctx.ctx, i); + let dim = + ndarray.value.get(generator, ctx, |f| f.shape, "").ix(generator, ctx, i.value, "dim"); + let dim = dim.truncate(generator, ctx, Int32, "dim"); // TODO: keep using SizeT + + items.push((dim.value.as_basic_value_enum(), ctx.primitives.int32)); + } + + let shape = TupleObject::create(generator, ctx, items, "shape"); + Ok(shape.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `.strides`. 
+pub fn gen_ndarray_strides<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + // TODO: This function looks exactly like `gen_ndarray_shape`, code duplication? + + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse argument #1 ndarray + let ndarray_ty = fun.0.args[0].ty; + let ndarray = args[0].1.clone().to_basic_value_enum(ctx, generator, ndarray_ty)?; + + // Define models + let sizet_model = IntModel(SizeT); + + // Process ndarray + let ndarray = NDArrayObject::from_value_and_type(generator, ctx, ndarray, ndarray_ty); + + let mut items = Vec::with_capacity(ndarray.ndims as usize); + + for i in 0..ndarray.ndims { + let i = sizet_model.constant(generator, ctx.ctx, i); + let dim = + ndarray.value.get(generator, ctx, |f| f.strides, "").ix(generator, ctx, i.value, "dim"); + let dim = dim.truncate(generator, ctx, Int32, "dim"); // TODO: keep using SizeT + + items.push((dim.value.as_basic_value_enum(), ctx.primitives.int32)); + } + + let strides = TupleObject::create(generator, ctx, items, "strides"); + Ok(strides.value.as_basic_value_enum()) +} + +/// Generates LLVM IR for `np.transpose`. +pub fn gen_ndarray_transpose<'ctx>( + ctx: &mut CodeGenContext<'ctx, '_>, + obj: &Option<(Type, ValueEnum<'ctx>)>, + fun: (&FunSignature, DefinitionId), + args: &[(Option, ValueEnum<'ctx>)], + generator: &mut dyn CodeGenerator, +) -> Result, String> { + // TODO: The implementation will be changed once default values start working again. + // Read the comment on this function in BuiltinBuilder. 
+ + // TODO: Change axes values to `SizeT` + + assert!(obj.is_none()); + assert_eq!(args.len(), 1); + + // Parse argument #1 ndarray + let ndarray_ty = fun.0.args[0].ty; + let ndarray = args[0].1.clone().to_basic_value_enum(ctx, generator, ndarray_ty)?; + + // Implementation + let ndarray = NDArrayObject::from_value_and_type(generator, ctx, ndarray, ndarray_ty); + + let has_axes = args.len() >= 2; + let transposed_ndarray = if has_axes { + // Parse argument #2 axes + let in_axes_ty = fun.0.args[1].ty; + let in_axes = args[1].1.clone().to_basic_value_enum(ctx, generator, in_axes_ty)?; + + let (_, axes) = parse_numpy_int_sequence(generator, ctx, in_axes, in_axes_ty); + + ndarray.transpose(generator, ctx, Some(axes)) + } else { + ndarray.transpose(generator, ctx, None) + }; + + Ok(transposed_ndarray.value.value.as_basic_value_enum()) +} diff --git a/nac3core/src/codegen/stmt.rs b/nac3core/src/codegen/stmt.rs index 321178a0..e96abb5f 100644 --- a/nac3core/src/codegen/stmt.rs +++ b/nac3core/src/codegen/stmt.rs @@ -7,6 +7,9 @@ use super::{ structure::exception::Exception, CodeGenContext, CodeGenerator, Int32, IntModel, Ptr, StructModel, }; +use crate::codegen::structure::ndarray::indexing::util::gen_ndarray_subscript_ndindexes; +use crate::codegen::structure::ndarray::scalar::split_scalar_or_ndarray; +use crate::codegen::structure::ndarray::NDArrayObject; use crate::{ codegen::{ classes::{ArrayLikeIndexer, ArraySliceValue, ListValue, RangeValue}, @@ -404,7 +407,43 @@ pub fn gen_setitem<'ctx, G: CodeGenerator>( if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() => { // Handle NDArray item assignment - todo!("ndarray subscript assignment is not yet implemented"); + // Process target + let target = generator + .gen_expr(ctx, target)? 
+ .unwrap() + .to_basic_value_enum(ctx, generator, target_ty)?; + let target = NDArrayObject::from_value_and_type(generator, ctx, target, target_ty); + + // Process key + let key = gen_ndarray_subscript_ndindexes(generator, ctx, key)?; + + // Process value + let value = value.to_basic_value_enum(ctx, generator, value_ty)?; + + /* + Reference code: + ```python + target = target[key] + value = np.asarray(value) + + shape = np.broadcast_shape((target, value)) + + target = np.broadcast_to(target, shape) + value = np.broadcast_to(value, shape) + + ...and finally copy 1-1 from value to target. + ``` + */ + let target = target.index(generator, ctx, &key, "assign_target_ndarray"); + let value = + split_scalar_or_ndarray(generator, ctx, value, value_ty).as_ndarray(generator, ctx); + + let broadcast_result = NDArrayObject::broadcast_all(generator, ctx, &[target, value]); + + let target = broadcast_result.ndarrays[0]; + let value = broadcast_result.ndarrays[1]; + + target.copy_data_from(generator, ctx, value); } _ => { panic!("encountered unknown target type: {}", ctx.unifier.stringify(target_ty)); @@ -641,8 +680,12 @@ where I: Clone, InitFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>) -> Result, CondFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, I) -> Result, String>, - BodyFn: - FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, BreakContinueHooks, I) -> Result<(), String>, + BodyFn: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + BreakContinueHooks<'ctx>, + I, + ) -> Result<(), String>, UpdateFn: FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, I) -> Result<(), String>, { let label = label.unwrap_or("for"); @@ -722,7 +765,7 @@ where BodyFn: FnOnce( &mut G, &mut CodeGenContext<'ctx, 'a>, - BreakContinueHooks, + BreakContinueHooks<'ctx>, IntValue<'ctx>, ) -> Result<(), String>, { @@ -1266,20 +1309,19 @@ pub fn gen_raise<'ctx, G: CodeGenerator + ?Sized>( loc: Location, ) { if let Some(pexn) = exception { - let type_context = generator.type_context(ctx.ctx); let i32_model = 
IntModel(Int32); let cslice_model = StructModel(CSlice); // Get and store filename let filename = loc.file.0; let filename = ctx.gen_string(generator, &String::from(filename)).value; - let filename = cslice_model.check_value(type_context, ctx.ctx, filename).unwrap(); + let filename = cslice_model.check_value(generator, ctx.ctx, filename).unwrap(); pexn.set(ctx, |f| f.filename, filename); - let row = i32_model.constant(type_context, ctx.ctx, loc.row as u64); + let row = i32_model.constant(generator, ctx.ctx, loc.row as u64); pexn.set(ctx, |f| f.line, row); - let column = i32_model.constant(type_context, ctx.ctx, loc.column as u64); + let column = i32_model.constant(generator, ctx.ctx, loc.column as u64); pexn.set(ctx, |f| f.column, column); let current_fn = ctx.builder.get_insert_block().unwrap().get_parent().unwrap(); @@ -1755,9 +1797,8 @@ pub fn gen_stmt( return Ok(()); }; - let type_context = generator.type_context(ctx.ctx); let pexn_model = PtrModel(StructModel(Exception)); - let exn = pexn_model.check_value(type_context, ctx.ctx, exc).unwrap(); + let exn = pexn_model.check_value(generator, ctx.ctx, exc).unwrap(); gen_raise(generator, ctx, Some(exn), stmt.location); } else { @@ -1765,7 +1806,6 @@ pub fn gen_stmt( } } StmtKind::Assert { test, msg, .. 
} => { - let type_context = generator.type_context(ctx.ctx); let byte_model = IntModel(Byte); let cslice_model = StructModel(CSlice); @@ -1773,7 +1813,7 @@ pub fn gen_stmt( return Ok(()); }; let test = test.to_basic_value_enum(ctx, generator, ctx.primitives.bool)?; - let test = byte_model.check_value(type_context, ctx.ctx, test).unwrap(); // Python `bool` is represented as `i8` in nac3core + let test = byte_model.check_value(generator, ctx.ctx, test).unwrap(); // Python `bool` is represented as `i8` in nac3core // Check `msg` let err_msg = match msg { @@ -1783,7 +1823,7 @@ pub fn gen_stmt( }; let msg = msg.to_basic_value_enum(ctx, generator, ctx.primitives.str)?; - cslice_model.check_value(type_context, ctx.ctx, msg).unwrap() + cslice_model.check_value(generator, ctx.ctx, msg).unwrap() } None => ctx.gen_string(generator, ""), }; diff --git a/nac3core/src/codegen/structure/cslice.rs b/nac3core/src/codegen/structure/cslice.rs index 481d1fe8..87a8d1e7 100644 --- a/nac3core/src/codegen/structure/cslice.rs +++ b/nac3core/src/codegen/structure/cslice.rs @@ -1,4 +1,6 @@ -use crate::codegen::{model::*, CodeGenContext}; +use inkwell::context::Context; + +use crate::codegen::{model::*, CodeGenerator}; /// Fields of [`CSlice<'ctx>`]. pub struct CSliceFields<'ctx, F: FieldTraversal<'ctx>> { @@ -27,16 +29,16 @@ impl StructModel { /// Create a [`CSlice`]. /// /// `base` and `len` must be LLVM global constants. 
- pub fn create_const<'ctx>( + pub fn create_const<'ctx, G: CodeGenerator + ?Sized>( &self, - type_context: TypeContext<'ctx>, - ctx: &CodeGenContext<'ctx, '_>, + generator: &mut G, + ctx: &'ctx Context, base: Ptr<'ctx, IntModel>, len: Int<'ctx, SizeT>, ) -> Struct<'ctx, CSlice> { let value = self .0 - .get_struct_type(type_context, ctx.ctx) + .get_struct_type(generator, ctx) .const_named_struct(&[base.value.into(), len.value.into()]); self.believe_value(value) } diff --git a/nac3core/src/codegen/structure/list.rs b/nac3core/src/codegen/structure/list.rs new file mode 100644 index 00000000..a3d232db --- /dev/null +++ b/nac3core/src/codegen/structure/list.rs @@ -0,0 +1,70 @@ +use inkwell::values::BasicValue; + +use crate::{ + codegen::{model::*, CodeGenContext, CodeGenerator}, + typecheck::typedef::{iter_type_vars, Type, TypeEnum}, +}; + +/// Fields of [`List`] +pub struct ListFields<'ctx, F: FieldTraversal<'ctx>, Item: Model<'ctx>, Size: IntKind<'ctx>> { + /// Array pointer to content + pub items: F::Out>, + /// Number of items in the array + pub len: F::Out>, +} + +/// A list in NAC3. +#[derive(Debug, Clone, Copy, Default)] +pub struct List { + /// Model of the list items + pub item: Item, + /// Model of type of integer storing the number of items on the list + pub len: Len, +} + +impl<'ctx, Item: Model<'ctx>, Size: IntKind<'ctx>> StructKind<'ctx> for List { + type Fields> = ListFields<'ctx, F, Item, Size>; + + fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { + Self::Fields { + items: traversal.add("data", PtrModel(self.item)), + len: traversal.add("len", IntModel(self.len)), + } + } +} + +/// A NAC3 Python List object. +pub struct ListObject<'ctx> { + /// Typechecker type of the list items + pub item_type: Type, + pub value: Ptr<'ctx, StructModel, SizeT>>>, +} + +impl<'ctx> ListObject<'ctx> { + /// Create a [`ListObject`] from an LLVM value and its typechecker [`Type`]. 
+ pub fn from_value_and_type, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + list_val: V, + list_type: Type, + ) -> Self { + // Check typechecker type and extract `item_type` + let item_type = match &*ctx.unifier.get_ty(list_type) { + TypeEnum::TObj { obj_id, params, .. } + if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() => + { + iter_type_vars(params).next().unwrap().ty // Extract `item_type` + } + _ => { + panic!("Expecting type to be a list, but got {}", ctx.unifier.stringify(list_type)) + } + }; + + let item_model = AnyModel(ctx.get_llvm_type(generator, item_type)); + let plist_model = PtrModel(StructModel(List { item: item_model, len: SizeT })); + + // Create object + let value = plist_model.check_value(generator, ctx.ctx, list_val).unwrap(); + ListObject { item_type, value } + } +} diff --git a/nac3core/src/codegen/structure/mod.rs b/nac3core/src/codegen/structure/mod.rs index 51c9cab0..a81ded03 100644 --- a/nac3core/src/codegen/structure/mod.rs +++ b/nac3core/src/codegen/structure/mod.rs @@ -1,3 +1,5 @@ pub mod cslice; pub mod exception; +pub mod list; pub mod ndarray; +pub mod tuple; diff --git a/nac3core/src/codegen/structure/ndarray.rs b/nac3core/src/codegen/structure/ndarray.rs deleted file mode 100644 index c870bb45..00000000 --- a/nac3core/src/codegen/structure/ndarray.rs +++ /dev/null @@ -1,224 +0,0 @@ -use irrt::{ - call_nac3_ndarray_copy_data, call_nac3_ndarray_get_nth_pelement, - call_nac3_ndarray_is_c_contiguous, call_nac3_ndarray_len, call_nac3_ndarray_nbytes, - call_nac3_ndarray_set_strides_by_shape, call_nac3_ndarray_size, -}; - -use crate::{codegen::*, symbol_resolver::SymbolValue}; - -pub struct NpArrayFields<'ctx, F: FieldTraversal<'ctx>> { - pub data: F::Out>>, - pub itemsize: F::Out>, - pub ndims: F::Out>, - pub shape: F::Out>>, - pub strides: F::Out>>, -} - -// TODO: Rename to `NDArray` when the old NDArray is removed. 
-#[derive(Debug, Clone, Copy, Default)] -pub struct NpArray; - -impl<'ctx> StructKind<'ctx> for NpArray { - type Fields> = NpArrayFields<'ctx, F>; - - fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { - Self::Fields { - data: traversal.add_auto("data"), - itemsize: traversal.add_auto("itemsize"), - ndims: traversal.add_auto("ndims"), - shape: traversal.add_auto("shape"), - strides: traversal.add_auto("strides"), - } - } -} - -/// Extract an ndarray's `ndims` [type][`Type`] in `u64`. Panic if not possible. -/// The `ndims` must only contain 1 value. -#[must_use] -pub fn extract_ndims(unifier: &Unifier, ndims_ty: Type) -> u64 { - let ndims_ty_enum = unifier.get_ty_immutable(ndims_ty); - let TypeEnum::TLiteral { values, .. } = &*ndims_ty_enum else { - panic!("ndims_ty should be a TLiteral"); - }; - - assert_eq!(values.len(), 1, "ndims_ty TLiteral should only contain 1 value"); - - let ndims = values[0].clone(); - u64::try_from(ndims).unwrap() -} - -/// Return an ndarray's `ndims` as a typechecker [`Type`] from its `u64` value. -pub fn create_ndims(unifier: &mut Unifier, ndims: u64) -> Type { - unifier.get_fresh_literal(vec![SymbolValue::U64(ndims)], None) -} - -#[derive(Debug, Clone, Copy)] -pub struct NDArrayObject<'ctx> { - pub dtype: Type, - pub ndims: Type, - pub value: Ptr<'ctx, StructModel>, -} - -impl<'ctx> NDArrayObject<'ctx> { - /// Allocate an ndarray on the stack given its `ndims` and `dtype`. - /// - /// `shape` and `strides` will be automatically allocated on the stack. - /// - /// The returned ndarray's content will be: - /// - `data`: set to `nullptr`. - /// - `itemsize`: set to the `sizeof()` of `dtype`. - /// - `ndims`: set to the value of `ndims`. - /// - `shape`: allocated with an array of length `ndims` with uninitialized values. - /// - `strides`: allocated with an array of length `ndims` with uninitialized values. 
- pub fn alloca_uninitialized( - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - dtype: Type, - ndims: Type, - name: &str, - ) -> Self { - let tyctx = generator.type_context(ctx.ctx); - let sizet_model = IntModel(SizeT); - let ndarray_model = StructModel(NpArray); - let ndarray_data_model = PtrModel(IntModel(Byte)); - - let pndarray = ndarray_model.alloca(tyctx, ctx, name); - - let data = ndarray_data_model.nullptr(tyctx, ctx.ctx); - - let itemsize = ctx.get_llvm_type(generator, dtype).size_of().unwrap(); - let itemsize = sizet_model.s_extend_or_bit_cast(tyctx, ctx, itemsize, "itemsize"); - - let ndims_val = extract_ndims(&ctx.unifier, ndims); - let ndims_val = sizet_model.constant(tyctx, ctx.ctx, ndims_val); - - let shape = sizet_model.array_alloca(tyctx, ctx, ndims_val.value, "shape"); - let strides = sizet_model.array_alloca(tyctx, ctx, ndims_val.value, "strides"); - - pndarray.set(ctx, |f| f.data, data); - pndarray.set(ctx, |f| f.itemsize, itemsize); - pndarray.set(ctx, |f| f.ndims, ndims_val); - pndarray.set(ctx, |f| f.shape, shape); - pndarray.set(ctx, |f| f.strides, strides); - - NDArrayObject { dtype, ndims, value: pndarray } - } - - /// Get this ndarray's `ndims` as an LLVM constant. - pub fn get_ndims( - &self, - tyctx: TypeContext<'ctx>, - ctx: &CodeGenContext<'ctx, '_>, - ) -> Int<'ctx, SizeT> { - let sizet_model = IntModel(SizeT); - - let ndims_val = extract_ndims(&ctx.unifier, self.ndims); - sizet_model.constant(tyctx, ctx.ctx, ndims_val) - } - - /// Return true if this ndarray is unsized. - #[must_use] - pub fn is_unsized(&self, unifier: &Unifier) -> bool { - extract_ndims(unifier, self.ndims) == 0 - } - - /// Initialize an ndarray's `data` by allocating a buffer on the stack. - /// The allocated data buffer is considered to be *owned* by the ndarray. - /// - /// `strides` of the ndarray will also be updated with `set_strides_by_shape`. - /// - /// `shape` and `itemsize` of the ndarray ***must*** be initialized first. 
- pub fn create_data( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) { - let byte_model = IntModel(Byte); - - let data = byte_model.array_alloca(tyctx, ctx, self.get_ndims(tyctx, ctx).value, "data"); - self.value.set(ctx, |f| f.data, data); - - self.update_strides_by_shape(tyctx, ctx); - } - - /// Get the `np.size()` of this ndarray. - pub fn size( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) -> Int<'ctx, SizeT> { - call_nac3_ndarray_size(tyctx, ctx, self.value) - } - - /// Get the `ndarray.nbytes` of this ndarray. - pub fn nbytes( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) -> Int<'ctx, SizeT> { - call_nac3_ndarray_nbytes(tyctx, ctx, self.value) - } - - /// Get the `len()` of this ndarray. - pub fn len( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) -> Int<'ctx, SizeT> { - call_nac3_ndarray_len(tyctx, ctx, self.value) - } - - /// Check if this ndarray is C-contiguous. - /// - /// See NumPy's `flags["C_CONTIGUOUS"]`: - pub fn is_c_contiguous( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) -> Int<'ctx, Bool> { - call_nac3_ndarray_is_c_contiguous(tyctx, ctx, self.value) - } - - /// Get the pointer to the n-th (0-based) element. - /// - /// The returned pointer has the element type of the LLVM type of this ndarray's `dtype`. - pub fn get_nth_pelement( - &self, - generator: &mut G, - ctx: &mut CodeGenContext<'ctx, '_>, - nth: Int<'ctx, SizeT>, - name: &str, - ) -> PointerValue<'ctx> { - let tyctx = generator.type_context(ctx.ctx); - let elem_ty = ctx.get_llvm_type(generator, self.dtype); - - let p = call_nac3_ndarray_get_nth_pelement(tyctx, ctx, self.value, nth); - ctx.builder - .build_pointer_cast(p.value, elem_ty.ptr_type(AddressSpace::default()), name) - .unwrap() - } - - /// Call [`call_nac3_ndarray_set_strides_by_shape`] on this ndarray to update `strides`. 
- /// - /// Please refer to the IRRT implementation to see its purpose. - pub fn update_strides_by_shape( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - ) { - call_nac3_ndarray_set_strides_by_shape(tyctx, ctx, self.value); - } - - /// Copy data from another ndarray. - /// - /// Panics if the `dtype`s of ndarrays are different. - pub fn copy_data_from( - &self, - tyctx: TypeContext<'ctx>, - ctx: &mut CodeGenContext<'ctx, '_>, - src: NDArrayObject<'ctx>, - ) { - assert!(ctx.unifier.unioned(self.dtype, src.dtype), "self and src dtype should match"); - call_nac3_ndarray_copy_data(tyctx, ctx, src.value, self.value); - } -} diff --git a/nac3core/src/codegen/structure/ndarray/broadcast.rs b/nac3core/src/codegen/structure/ndarray/broadcast.rs new file mode 100644 index 00000000..a9384709 --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/broadcast.rs @@ -0,0 +1,134 @@ +use itertools::Itertools; + +use crate::{ + codegen::{ + irrt::{call_nac3_ndarray_broadcast_shapes, call_nac3_ndarray_broadcast_to}, + model::*, + CodeGenContext, CodeGenerator, + }, + toplevel::numpy::get_broadcast_all_ndims, +}; + +use super::NDArrayObject; + +/// Fields of [`ShapeEntry`] +pub struct ShapeEntryFields<'ctx, F: FieldTraversal<'ctx>> { + pub ndims: F::Out>, + pub shape: F::Out>>, +} + +/// An IRRT structure used in broadcasting. +#[derive(Debug, Clone, Copy, Default)] +pub struct ShapeEntry; + +impl<'ctx> StructKind<'ctx> for ShapeEntry { + type Fields> = ShapeEntryFields<'ctx, F>; + + fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { + Self::Fields { ndims: traversal.add_auto("ndims"), shape: traversal.add_auto("shape") } + } +} + +impl<'ctx> NDArrayObject<'ctx> { + /// Create a broadcast view on this ndarray with a target shape. + /// + /// * `target_ndims` - The ndims type after broadcasting to the given shape. + /// The caller has to figure this out for this function. 
+ /// * `target_shape` - An array pointer pointing to the target shape. + #[must_use] + pub fn broadcast_to( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + target_ndims: u64, + target_shape: Ptr<'ctx, IntModel>, + ) -> Self { + let broadcast_ndarray = NDArrayObject::alloca_uninitialized( + generator, + ctx, + self.dtype, + target_ndims, + "broadcast_ndarray_to_dst", + ); + broadcast_ndarray.copy_shape_from_array(generator, ctx, target_shape); + + call_nac3_ndarray_broadcast_to(generator, ctx, self.value, broadcast_ndarray.value); + broadcast_ndarray + } +} +/// A result produced by [`NDArrayObject::broadcast_all`] +#[derive(Debug, Clone)] +pub struct BroadcastAllResult<'ctx> { + /// The statically known `ndims` of the broadcast result. + pub ndims: u64, + /// The broadcasting shape. + pub shape: Ptr<'ctx, IntModel>, + /// Broadcasted views on the inputs. + /// + /// All of them will have `shape` [`BroadcastAllResult::shape`] and + /// `ndims` [`BroadcastAllResult::ndims`]. The length of the vector + /// is the same as the input. + pub ndarrays: Vec>, +} + +impl<'ctx> NDArrayObject<'ctx> { + // TODO: DOCUMENT: Behaves like `np.broadcast()`, except returns results differently.
+ pub fn broadcast_all( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ndarrays: &[Self], + ) -> BroadcastAllResult<'ctx> { + assert!(!ndarrays.is_empty()); + + let sizet_model = IntModel(SizeT); + let shape_model = StructModel(ShapeEntry); + + let broadcast_ndims = get_broadcast_all_ndims(ndarrays.iter().map(|ndarray| ndarray.ndims)); + + // Prepare input shape entries + let num_shape_entries = + sizet_model.constant(generator, ctx.ctx, u64::try_from(ndarrays.len()).unwrap()); + let shape_entries = + shape_model.array_alloca(generator, ctx, num_shape_entries.value, "shape_entries"); + for (i, ndarray) in ndarrays.iter().enumerate() { + let i = sizet_model.constant(generator, ctx.ctx, i as u64).value; + + let shape_entry = shape_entries.offset(generator, ctx, i, "shape_entry"); + + let this_ndims = ndarray.value.get(generator, ctx, |f| f.ndims, "this_ndims"); + shape_entry.set(ctx, |f| f.ndims, this_ndims); + + let this_shape = ndarray.value.get(generator, ctx, |f| f.shape, "this_shape"); + shape_entry.set(ctx, |f| f.shape, this_shape); + } + + // Prepare destination + let broadcast_ndims_llvm = sizet_model.constant(generator, ctx.ctx, broadcast_ndims); + let broadcast_shape = + sizet_model.array_alloca(generator, ctx, broadcast_ndims_llvm.value, "dst_shape"); + + // Compute the target broadcast shape `dst_shape` for all ndarrays. + call_nac3_ndarray_broadcast_shapes( + generator, + ctx, + num_shape_entries, + shape_entries, + broadcast_ndims_llvm, + broadcast_shape, + ); + + // Now that we know about the broadcasting shape, broadcast all the inputs. + + // Broadcast all the inputs to shape `dst_shape`. 
+ let broadcast_ndarrays: Vec<_> = ndarrays + .iter() + .map(|ndarray| ndarray.broadcast_to(generator, ctx, broadcast_ndims, broadcast_shape)) + .collect_vec(); + + BroadcastAllResult { + ndims: broadcast_ndims, + shape: broadcast_shape, + ndarrays: broadcast_ndarrays, + } + } +} diff --git a/nac3core/src/codegen/structure/ndarray/functions.rs b/nac3core/src/codegen/structure/ndarray/functions.rs new file mode 100644 index 00000000..1bd812e6 --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/functions.rs @@ -0,0 +1,562 @@ +use inkwell::{ + values::{BasicValue, FloatValue, IntValue}, + FloatPredicate, IntPredicate, +}; +use itertools::Itertools; + +use crate::{ + codegen::{ + llvm_intrinsics, + model::{ + util::{gen_for_model_auto, gen_if_model}, + *, + }, + CodeGenContext, CodeGenerator, + }, + typecheck::typedef::Type, +}; + +use super::{scalar::ScalarObject, NDArrayObject}; + +/// Convenience function to crash the program when types of arguments are not supported. +/// Used to be debugged with a stacktrace. +fn unsupported_type(ctx: &CodeGenContext<'_, '_>, tys: I) -> ! 
+where + I: IntoIterator, +{ + unreachable!( + "unsupported types found '{}'", + tys.into_iter().map(|ty| format!("'{}'", ctx.unifier.stringify(ty))).join(", "), + ) +} + +#[derive(Debug, Clone, Copy)] +pub enum FloorOrCeil { + Floor, + Ceil, +} + +#[derive(Debug, Clone, Copy)] +pub enum MinOrMax { + Min, + Max, +} + +fn signed_ints(ctx: &CodeGenContext<'_, '_>) -> Vec { + vec![ctx.primitives.int32, ctx.primitives.int64] +} + +fn unsigned_ints(ctx: &CodeGenContext<'_, '_>) -> Vec { + vec![ctx.primitives.uint32, ctx.primitives.uint64] +} + +fn ints(ctx: &CodeGenContext<'_, '_>) -> Vec { + vec![ctx.primitives.int32, ctx.primitives.int64, ctx.primitives.uint32, ctx.primitives.uint64] +} + +fn int_like(ctx: &CodeGenContext<'_, '_>) -> Vec { + vec![ + ctx.primitives.bool, + ctx.primitives.int32, + ctx.primitives.int64, + ctx.primitives.uint32, + ctx.primitives.uint64, + ] +} + +fn cast_to_int_conversion<'ctx, 'a, G, HandleFloatFn>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + scalar: ScalarObject<'ctx>, + ret_int_dtype: Type, + handle_float: HandleFloatFn, +) -> ScalarObject<'ctx> +where + G: CodeGenerator + ?Sized, + HandleFloatFn: + FnOnce(&mut G, &mut CodeGenContext<'ctx, 'a>, FloatValue<'ctx>) -> IntValue<'ctx>, +{ + let ret_int_dtype_llvm = ctx.get_llvm_type(generator, ret_int_dtype).into_int_type(); + + let result = if ctx.unifier.unioned(scalar.dtype, ctx.primitives.float) { + // Special handling for floats + let n = scalar.value.into_float_value(); + handle_float(generator, ctx, n) + } else if ctx.unifier.unioned_any(scalar.dtype, int_like(ctx)) { + let n = scalar.value.into_int_value(); + + if n.get_type().get_bit_width() <= ret_int_dtype_llvm.get_bit_width() { + ctx.builder.build_int_z_extend(n, ret_int_dtype_llvm, "zext").unwrap() + } else { + ctx.builder.build_int_truncate(n, ret_int_dtype_llvm, "trunc").unwrap() + } + } else { + unsupported_type(ctx, [scalar.dtype]); + }; + + assert_eq!(ret_int_dtype_llvm.get_bit_width(), 
result.get_type().get_bit_width()); // Sanity check + ScalarObject { value: result.into(), dtype: ret_int_dtype } +} + +impl<'ctx> ScalarObject<'ctx> { + /// Convenience function. Assume this scalar has typechecker type float64, get its underlying LLVM value. + /// + /// Panic if the type is wrong. + pub fn into_float64(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> FloatValue<'ctx> { + if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + self.value.into_float_value() // self.value must be a FloatValue + } else { + panic!("not a float type") + } + } + + /// Convenience function. Assume this scalar has typechecker type int32, get its underlying LLVM value. + /// + /// Panic if the type is wrong. + pub fn into_int32(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> IntValue<'ctx> { + if ctx.unifier.unioned(self.dtype, ctx.primitives.int32) { + let value = self.value.into_int_value(); + debug_assert_eq!(value.get_type().get_bit_width(), 32); // Sanity check + value + } else { + panic!("not a float type") + } + } + + /// Compare two scalars. Only int-to-int and float-to-float comparisons are allowed. + /// Panic otherwise. 
+ pub fn compare( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + lhs: ScalarObject<'ctx>, + rhs: ScalarObject<'ctx>, + int_predicate: IntPredicate, + float_predicate: FloatPredicate, + name: &str, + ) -> Int<'ctx, Bool> { + if !ctx.unifier.unioned(lhs.dtype, rhs.dtype) { + unsupported_type(ctx, [lhs.dtype, rhs.dtype]); + } + + let bool_model = IntModel(Bool); + + let common_ty = lhs.dtype; + let result = if ctx.unifier.unioned(common_ty, ctx.primitives.float) { + let lhs = lhs.value.into_float_value(); + let rhs = rhs.value.into_float_value(); + ctx.builder.build_float_compare(float_predicate, lhs, rhs, name).unwrap() + } else if ctx.unifier.unioned_any(common_ty, int_like(ctx)) { + let lhs = lhs.value.into_int_value(); + let rhs = rhs.value.into_int_value(); + ctx.builder.build_int_compare(int_predicate, lhs, rhs, name).unwrap() + } else { + unsupported_type(ctx, [lhs.dtype, rhs.dtype]); + }; + + bool_model.check_value(generator, ctx.ctx, result).unwrap() + } + + /// Invoke NAC3's builtin `int32()`. + #[must_use] + pub fn cast_to_int32( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Self { + cast_to_int_conversion( + generator, + ctx, + *self, + ctx.primitives.int32, + |_generator, ctx, input| { + let n = + ctx.builder.build_float_to_signed_int(input, ctx.ctx.i64_type(), "").unwrap(); + ctx.builder.build_int_truncate(n, ctx.ctx.i32_type(), "conv").unwrap() + }, + ) + } + + /// Invoke NAC3's builtin `int64()`. + #[must_use] + pub fn cast_to_int64( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Self { + cast_to_int_conversion( + generator, + ctx, + *self, + ctx.primitives.int64, + |_generator, ctx, input| { + ctx.builder.build_float_to_signed_int(input, ctx.ctx.i64_type(), "").unwrap() + }, + ) + } + + /// Invoke NAC3's builtin `uint32()`. 
+ #[must_use] + pub fn cast_to_uint32( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Self { + cast_to_int_conversion( + generator, + ctx, + *self, + ctx.primitives.uint32, + |_generator, ctx, n| { + let n_gez = ctx + .builder + .build_float_compare(FloatPredicate::OGE, n, n.get_type().const_zero(), "") + .unwrap(); + + let to_int32 = + ctx.builder.build_float_to_signed_int(n, ctx.ctx.i32_type(), "").unwrap(); + let to_uint64 = + ctx.builder.build_float_to_unsigned_int(n, ctx.ctx.i64_type(), "").unwrap(); + + ctx.builder + .build_select( + n_gez, + ctx.builder.build_int_truncate(to_uint64, ctx.ctx.i32_type(), "").unwrap(), + to_int32, + "conv", + ) + .unwrap() + .into_int_value() + }, + ) + } + + /// Invoke NAC3's builtin `uint64()`. + #[must_use] + pub fn cast_to_uint64( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Self { + cast_to_int_conversion( + generator, + ctx, + *self, + ctx.primitives.uint64, + |_generator, ctx, n| { + let val_gez = ctx + .builder + .build_float_compare(FloatPredicate::OGE, n, n.get_type().const_zero(), "") + .unwrap(); + + let to_int64 = + ctx.builder.build_float_to_signed_int(n, ctx.ctx.i64_type(), "").unwrap(); + let to_uint64 = + ctx.builder.build_float_to_unsigned_int(n, ctx.ctx.i64_type(), "").unwrap(); + ctx.builder + .build_select(val_gez, to_uint64, to_int64, "conv") + .unwrap() + .into_int_value() + }, + ) + } + + /// Invoke NAC3's builtin `bool()`. + #[must_use] + pub fn cast_to_bool(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> Self { + // TODO: Why is the original code being so lax about i1 and i8 for the returned int type? 
+ let result = if ctx.unifier.unioned(self.dtype, ctx.primitives.bool) { + self.value.into_int_value() + } else if ctx.unifier.unioned_any(self.dtype, ints(ctx)) { + let n = self.value.into_int_value(); + ctx.builder + .build_int_compare(inkwell::IntPredicate::NE, n, n.get_type().const_zero(), "bool") + .unwrap() + } else if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let n = self.value.into_float_value(); + ctx.builder + .build_float_compare(FloatPredicate::UNE, n, n.get_type().const_zero(), "bool") + .unwrap() + } else { + unsupported_type(ctx, [self.dtype]) + }; + + ScalarObject { dtype: ctx.primitives.bool, value: result.as_basic_value_enum() } + } + + /// Invoke NAC3's builtin `float()`. + #[must_use] + pub fn cast_to_float(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> Self { + let llvm_f64 = ctx.ctx.f64_type(); + + let result: FloatValue<'_> = if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + self.value.into_float_value() + } else if ctx + .unifier + .unioned_any(self.dtype, [signed_ints(ctx).as_slice(), &[ctx.primitives.bool]].concat()) + { + let n = self.value.into_int_value(); + ctx.builder.build_signed_int_to_float(n, llvm_f64, "sitofp").unwrap() + } else if ctx.unifier.unioned_any(self.dtype, unsigned_ints(ctx)) { + let n = self.value.into_int_value(); + ctx.builder.build_unsigned_int_to_float(n, llvm_f64, "uitofp").unwrap() + } else { + unsupported_type(ctx, [self.dtype]); + }; + + ScalarObject { value: result.as_basic_value_enum(), dtype: ctx.primitives.float } + } + + /// Invoke NAC3's builtin `round()`. 
+ #[must_use] + pub fn round( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ret_int_dtype: Type, + ) -> Self { + let ret_int_dtype_llvm = ctx.get_llvm_type(generator, ret_int_dtype).into_int_type(); + + let result = if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let n = self.value.into_float_value(); + let n = llvm_intrinsics::call_float_round(ctx, n, None); + ctx.builder.build_float_to_signed_int(n, ret_int_dtype_llvm, "round").unwrap() + } else { + unsupported_type(ctx, [self.dtype, ret_int_dtype]) + }; + ScalarObject { dtype: ret_int_dtype, value: result.as_basic_value_enum() } + } + + /// Invoke NAC3's builtin `np_round()`. + /// + /// NOTE: `np.round()` has different behaviors than `round()` in terms of their result + /// on "tie" cases and return type. + #[must_use] + pub fn np_round(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> Self { + let result = if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let n = self.value.into_float_value(); + llvm_intrinsics::call_float_rint(ctx, n, None) + } else { + unsupported_type(ctx, [self.dtype]) + }; + ScalarObject { dtype: ctx.primitives.float, value: result.as_basic_value_enum() } + } + + /// Invoke NAC3's builtin `min()` or `max()`. 
+ pub fn min_or_max( + ctx: &mut CodeGenContext<'ctx, '_>, + kind: MinOrMax, + a: Self, + b: Self, + ) -> Self { + if !ctx.unifier.unioned(a.dtype, b.dtype) { + unsupported_type(ctx, [a.dtype, b.dtype]) + } + + let common_dtype = a.dtype; + + if ctx.unifier.unioned(common_dtype, ctx.primitives.float) { + let function = match kind { + MinOrMax::Min => llvm_intrinsics::call_float_minnum, + MinOrMax::Max => llvm_intrinsics::call_float_maxnum, + }; + let result = + function(ctx, a.value.into_float_value(), b.value.into_float_value(), None); + ScalarObject { value: result.as_basic_value_enum(), dtype: ctx.primitives.float } + } else if ctx.unifier.unioned_any( + common_dtype, + [unsigned_ints(ctx).as_slice(), &[ctx.primitives.bool]].concat(), + ) { + // Treating bool as an unsigned int since that is convenient + let function = match kind { + MinOrMax::Min => llvm_intrinsics::call_int_umin, + MinOrMax::Max => llvm_intrinsics::call_int_umax, + }; + let result = function(ctx, a.value.into_int_value(), b.value.into_int_value(), None); + ScalarObject { value: result.as_basic_value_enum(), dtype: common_dtype } + } else { + unsupported_type(ctx, [common_dtype]) + } + } + + /// Invoke NAC3's builtin `floor()` or `ceil()`. + /// + /// * `ret_int_dtype` - The type of int to return.
+ /// + /// Takes in a float/int and returns an int of type `ret_int_dtype` + #[must_use] + pub fn floor_or_ceil( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + kind: FloorOrCeil, + ret_int_dtype: Type, + ) -> Self { + let ret_int_dtype_llvm = ctx.get_llvm_type(generator, ret_int_dtype).into_int_type(); + + if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let function = match kind { + FloorOrCeil::Floor => llvm_intrinsics::call_float_floor, + FloorOrCeil::Ceil => llvm_intrinsics::call_float_ceil, + }; + let n = self.value.into_float_value(); + let n = function(ctx, n, None); + + let n = ctx.builder.build_float_to_signed_int(n, ret_int_dtype_llvm, "").unwrap(); + ScalarObject { dtype: ret_int_dtype, value: n.as_basic_value_enum() } + } else { + unsupported_type(ctx, [self.dtype]) + } + } + + /// Invoke NAC3's builtin `np_floor()`/ `np_ceil()`. + /// + /// Takes in a float/int and returns a float64 result. + #[must_use] + pub fn np_floor_or_ceil(&self, ctx: &mut CodeGenContext<'ctx, '_>, kind: FloorOrCeil) -> Self { + if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let function = match kind { + FloorOrCeil::Floor => llvm_intrinsics::call_float_floor, + FloorOrCeil::Ceil => llvm_intrinsics::call_float_ceil, + }; + let n = self.value.into_float_value(); + let n = function(ctx, n, None); + ScalarObject { dtype: ctx.primitives.float, value: n.as_basic_value_enum() } + } else { + unsupported_type(ctx, [self.dtype]) + } + } + + /// Invoke NAC3's builtin `abs()`. 
+ pub fn abs(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> Self { + if ctx.unifier.unioned(self.dtype, ctx.primitives.float) { + let n = self.value.into_float_value(); + let n = llvm_intrinsics::call_float_fabs(ctx, n, Some("abs")); + ScalarObject { value: n.into(), dtype: ctx.primitives.float } + } else if ctx.unifier.unioned_any(self.dtype, ints(ctx)) { + let n = self.value.into_int_value(); + + let is_poisoned = ctx.ctx.bool_type().const_zero(); // is_poisoned = false + let n = llvm_intrinsics::call_int_abs(ctx, n, is_poisoned, Some("abs")); + + ScalarObject { value: n.into(), dtype: self.dtype } + } else { + unsupported_type(ctx, [self.dtype]) + } + } +} + +impl<'ctx> NDArrayObject<'ctx> { + /// Helper function to implement NAC3's builtin `np_min()`, `np_max()`, `np_argmin()`, and `np_argmax()`. + /// + /// Generate LLVM IR to find the extremum and index of the **first** extremum value. + /// + /// Care has also been taken to make the error messages match that of NumPy. + fn min_max_argmin_argmax_helper( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + kind: MinOrMax, + on_empty_err_msg: &str, + ) -> (ScalarObject<'ctx>, Int<'ctx, SizeT>) { + let sizet_model = IntModel(SizeT); + let dtype_llvm = ctx.get_llvm_type(generator, self.dtype); + + // If the ndarray is empty, throw an error. 
+ let is_empty = self.is_empty(generator, ctx); + ctx.make_assert( + generator, + is_empty.value, + "0:ValueError", + on_empty_err_msg, + [None, None, None], + ctx.current_loc, + ); + + // Setup and initialize the extremum to be the first element in the ndarray + let pextremum_index = sizet_model.alloca(generator, ctx, "extremum_index"); + let pextremum = ctx.builder.build_alloca(dtype_llvm, "extremum").unwrap(); + + let zero = sizet_model.const_0(generator, ctx.ctx); + pextremum_index.store(ctx, zero); + + let first_scalar = self.get_nth(generator, ctx, zero); + ctx.builder.build_store(pextremum, first_scalar.value).unwrap(); + + // Find extremum + let start = sizet_model.const_1(generator, ctx.ctx); // Start on 1 + let stop = self.size(generator, ctx); + let step = sizet_model.const_1(generator, ctx.ctx); + gen_for_model_auto(generator, ctx, start, stop, step, |generator, ctx, _hooks, i| { + // Worth reading on "Notes" in + // on how `NaN` values have to be handled. + + let scalar = self.get_nth(generator, ctx, i); + + let old_extremum = ctx.builder.build_load(pextremum, "current_extremum").unwrap(); + let old_extremum = ScalarObject { dtype: self.dtype, value: old_extremum }; + + let new_extremum = ScalarObject::min_or_max(ctx, kind, old_extremum, scalar); + + // Check if new_extremum is more extreme than old_extremum. + let update_index = ScalarObject::compare( + generator, + ctx, + new_extremum, + old_extremum, + IntPredicate::NE, + FloatPredicate::ONE, + "", + ); + + gen_if_model(generator, ctx, update_index, |_generator, ctx| { + pextremum_index.store(ctx, i); + Ok(()) + }) + .unwrap(); + Ok(()) + }) + .unwrap(); + + // Finally return the extremum and extremum index. 
+ let extremum_index = pextremum_index.load(generator, ctx, "extremum_index"); + + let extremum = ctx.builder.build_load(pextremum, "extremum_value").unwrap(); + let extremum = ScalarObject { dtype: self.dtype, value: extremum }; + + (extremum, extremum_index) + } + + /// Invoke NAC3's builtin `np_min()` or `np_max()`. + pub fn min_or_max( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + kind: MinOrMax, + ) -> ScalarObject<'ctx> { + let on_empty_err_msg = format!( + "zero-size array to reduction operation {} which has no identity", + match kind { + MinOrMax::Min => "minimum", + MinOrMax::Max => "maximum", + } + ); + self.min_max_argmin_argmax_helper(generator, ctx, kind, &on_empty_err_msg).0 + } + + /// Invoke NAC3's builtin `np_argmin()` or `np_argmax()`. + pub fn argmin_or_argmax( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + kind: MinOrMax, + ) -> Int<'ctx, SizeT> { + let on_empty_err_msg = format!( + "attempt to get {} of an empty sequence", + match kind { + MinOrMax::Min => "argmin", + MinOrMax::Max => "argmax", + } + ); + self.min_max_argmin_argmax_helper(generator, ctx, kind, &on_empty_err_msg).1 + } +} diff --git a/nac3core/src/codegen/structure/ndarray/indexing.rs b/nac3core/src/codegen/structure/ndarray/indexing.rs new file mode 100644 index 00000000..d88cf81e --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/indexing.rs @@ -0,0 +1,353 @@ +use crate::codegen::{irrt::call_nac3_ndarray_index, model::*, CodeGenContext, CodeGenerator}; + +use super::{scalar::ScalarOrNDArray, NDArrayObject}; + +pub type NDIndexType = Byte; + +/// Fields of [`NDIndex`] +#[derive(Debug, Clone, Copy)] +pub struct NDIndexFields<'ctx, F: FieldTraversal<'ctx>> { + pub type_: F::Out>, // Defined to be uint8_t in IRRT + pub data: F::Out>>, +} + +/// An IRRT representation of an ndarray subscript index.
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct NDIndex; + +impl<'ctx> StructKind<'ctx> for NDIndex { + type Fields> = NDIndexFields<'ctx, F>; + + fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { + Self::Fields { type_: traversal.add_auto("type"), data: traversal.add_auto("data") } + } +} + +/// Fields of [`UserSlice`] +#[derive(Debug, Clone)] +pub struct UserSliceFields<'ctx, F: FieldTraversal<'ctx>> { + pub start_defined: F::Out>, + pub start: F::Out>, + pub stop_defined: F::Out>, + pub stop: F::Out>, + pub step_defined: F::Out>, + pub step: F::Out>, +} + +/// An IRRT representation of a user slice. +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +pub struct UserSlice; + +impl<'ctx> StructKind<'ctx> for UserSlice { + type Fields> = UserSliceFields<'ctx, F>; + + fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { + Self::Fields { + start_defined: traversal.add_auto("start_defined"), + start: traversal.add_auto("start"), + stop_defined: traversal.add_auto("stop_defined"), + stop: traversal.add_auto("stop"), + step_defined: traversal.add_auto("step_defined"), + step: traversal.add_auto("step"), + } + } +} + +/// A convenience structure to prepare a [`UserSlice`]. +#[derive(Debug, Clone)] +pub struct RustUserSlice<'ctx> { + pub start: Option>, + pub stop: Option>, + pub step: Option>, +} + +impl<'ctx> RustUserSlice<'ctx> { + /// Write the contents to an LLVM [`UserSlice`]. + pub fn write_to_user_slice( + &self, + generator: &mut G, + ctx: &CodeGenContext<'ctx, '_>, + dst_slice_ptr: Ptr<'ctx, StructModel>, + ) { + let bool_model = IntModel(Bool); + + let false_ = bool_model.constant(generator, ctx.ctx, 0); + let true_ = bool_model.constant(generator, ctx.ctx, 1); + + // TODO: Code duplication. Probably okay...? 
+ + match self.start { + Some(start) => { + dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, true_); + dst_slice_ptr.gep(ctx, |f| f.start).store(ctx, start); + } + None => dst_slice_ptr.gep(ctx, |f| f.start_defined).store(ctx, false_), + } + + match self.stop { + Some(stop) => { + dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, true_); + dst_slice_ptr.gep(ctx, |f| f.stop).store(ctx, stop); + } + None => dst_slice_ptr.gep(ctx, |f| f.stop_defined).store(ctx, false_), + } + + match self.step { + Some(step) => { + dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, true_); + dst_slice_ptr.gep(ctx, |f| f.step).store(ctx, step); + } + None => dst_slice_ptr.gep(ctx, |f| f.step_defined).store(ctx, false_), + } + } +} + +// A convenience enum variant to store the content and type of an NDIndex in high level. +#[derive(Debug, Clone)] +pub enum RustNDIndex<'ctx> { + SingleElement(Int<'ctx, Int32>), // TODO: To be SizeT + Slice(RustUserSlice<'ctx>), + NewAxis, + Ellipsis, +} + +impl<'ctx> RustNDIndex<'ctx> { + /// Get the value to set `NDIndex::type` for this variant. + fn get_type_id(&self) -> u64 { + // Defined in IRRT, must be in sync + match self { + RustNDIndex::SingleElement(_) => 0, + RustNDIndex::Slice(_) => 1, + RustNDIndex::NewAxis => 2, + RustNDIndex::Ellipsis => 3, + } + } + + /// Write the contents to an LLVM [`NDIndex`]. 
+ fn write_to_ndindex( + &self, + generator: &mut G, + ctx: &CodeGenContext<'ctx, '_>, + dst_ndindex_ptr: Ptr<'ctx, StructModel>, + ) { + let ndindex_type_model = IntModel(NDIndexType::default()); + let i32_model = IntModel(Int32); + let user_slice_model = StructModel(UserSlice); + + // Set `dst_ndindex_ptr->type` + dst_ndindex_ptr + .gep(ctx, |f| f.type_) + .store(ctx, ndindex_type_model.constant(generator, ctx.ctx, self.get_type_id())); + + // Set `dst_ndindex_ptr->data` + match self { + RustNDIndex::SingleElement(in_index) => { + let index_ptr = i32_model.alloca(generator, ctx, "index"); + index_ptr.store(ctx, *in_index); + + dst_ndindex_ptr + .gep(ctx, |f| f.data) + .store(ctx, index_ptr.transmute(generator, ctx, IntModel(Byte), "")); + } + RustNDIndex::Slice(in_rust_slice) => { + let user_slice_ptr = user_slice_model.alloca(generator, ctx, "user_slice"); + in_rust_slice.write_to_user_slice(generator, ctx, user_slice_ptr); + + dst_ndindex_ptr + .gep(ctx, |f| f.data) + .store(ctx, user_slice_ptr.transmute(generator, ctx, IntModel(Byte), "")); + } + RustNDIndex::NewAxis => {} + RustNDIndex::Ellipsis => {} + } + } + + /// Allocate an array of `NDIndex`es on the stack and return its stack pointer. 
+ pub fn alloca_ndindexes( + generator: &mut G, + ctx: &CodeGenContext<'ctx, '_>, + in_ndindexes: &[RustNDIndex<'ctx>], + ) -> (Int<'ctx, SizeT>, Ptr<'ctx, StructModel>) { + let sizet_model = IntModel(SizeT); + let ndindex_model = StructModel(NDIndex); + + let num_ndindexes = sizet_model.constant(generator, ctx.ctx, in_ndindexes.len() as u64); + let ndindexes = + ndindex_model.array_alloca(generator, ctx, num_ndindexes.value, "ndindexes"); + for (i, in_ndindex) in in_ndindexes.iter().enumerate() { + let i = sizet_model.constant(generator, ctx.ctx, i as u64); + let pndindex = ndindexes.offset(generator, ctx, i.value, ""); + in_ndindex.write_to_ndindex(generator, ctx, pndindex); + } + + (num_ndindexes, ndindexes) + } +} + +impl<'ctx> NDArrayObject<'ctx> { + /// Get the ndims [`Type`] after indexing with a given slice. + #[must_use] + pub fn deduce_ndims_after_indexing_with(&self, indexes: &[RustNDIndex<'ctx>]) -> u64 { + let mut ndims = self.ndims; + for index in indexes { + match index { + RustNDIndex::SingleElement(_) => { + ndims -= 1; // Single elements decrements ndims + } + RustNDIndex::Slice(_) => {} + RustNDIndex::NewAxis => { + ndims += 1; // `np.newaxis` / `none` adds a new axis + } + RustNDIndex::Ellipsis => {} + } + } + ndims + } + + /// Index into the ndarray, and return a newly-allocated view on this ndarray. + /// + /// This function behaves like NumPy's ndarray indexing, but if the indexes index + /// into a single element, an unsized ndarray is returned. 
+ #[must_use] + pub fn index( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + indexes: &[RustNDIndex<'ctx>], + name: &str, + ) -> Self { + let dst_ndims = self.deduce_ndims_after_indexing_with(indexes); + let dst_ndarray = + NDArrayObject::alloca_uninitialized(generator, ctx, self.dtype, dst_ndims, name); + + let (num_indexes, indexes) = RustNDIndex::alloca_ndindexes(generator, ctx, indexes); + call_nac3_ndarray_index( + generator, + ctx, + num_indexes, + indexes, + self.value, + dst_ndarray.value, + ); + + dst_ndarray + } + + /// Like [`NDArrayObject::index`] but returns a scalar if the indexes index + /// into a single element. + #[must_use] + pub fn index_or_scalar( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + indexes: &[RustNDIndex<'ctx>], + name: &str, + ) -> ScalarOrNDArray<'ctx> { + let sizet_model = IntModel(SizeT); + let zero = sizet_model.const_0(generator, ctx.ctx); + + let subndarray = self.index(generator, ctx, indexes, name); + if subndarray.is_unsized() { + // NOTE: `np.size(self) == 0` here is never possible. 
+ ScalarOrNDArray::Scalar(subndarray.get_nth(generator, ctx, zero)) + } else { + ScalarOrNDArray::NDArray(subndarray) + } + } +} + +pub mod util { + use itertools::Itertools; + use nac3parser::ast::{Expr, ExprKind}; + + use crate::{ + codegen::{model::*, CodeGenContext, CodeGenerator}, + typecheck::typedef::Type, + }; + + use super::{RustNDIndex, RustUserSlice}; + + /// Generate LLVM code to transform an ndarray subscript expression to + /// its list of [`RustNDIndex`] + /// + /// i.e., + /// ```python + /// my_ndarray[::3, 1, :2:] + /// ^^^^^^^^^^^ Then these into a three `RustNDIndex`es + /// ``` + pub fn gen_ndarray_subscript_ndindexes<'ctx, G: CodeGenerator>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + subscript: &Expr>, + ) -> Result>, String> { + // TODO: Support https://numpy.org/doc/stable/user/basics.indexing.html#dimensional-indexing-tools + let i32_model = IntModel(Int32); + + // Annoying notes about `slice` + // - `my_array[5]` + // - slice is a `Constant` + // - `my_array[:5]` + // - slice is a `Slice` + // - `my_array[:]` + // - slice is a `Slice`, but lower upper step would all be `Option::None` + // - `my_array[:, :]` + // - slice is now a `Tuple` of two `Slice`-s + // + // In summary: + // - when there is a comma "," within [], `slice` will be a `Tuple` of the entries. + // - when there is not comma "," within [] (i.e., just a single entry), `slice` will be that entry itself. + // + // So we first "flatten" out the slice expression + let index_exprs = match &subscript.node { + ExprKind::Tuple { elts, .. } => elts.iter().collect_vec(), + _ => vec![subscript], + }; + + // Process all index expressions + let mut rust_ndindexes: Vec = Vec::with_capacity(index_exprs.len()); // Not using iterators here because `?` is used here. 
+ for index_expr in index_exprs { + // NOTE: Currently nac3core's slices do not have an object representation, + // so the code/implementation looks awkward - we have to do pattern matching on the expression + let ndindex = + if let ExprKind::Slice { lower: start, upper: stop, step } = &index_expr.node { + // Helper function here to reduce code duplication + type ValueExpr = Option>>>; + let mut help = |value_expr: &ValueExpr| -> Result<_, String> { + Ok(match value_expr { + None => None, + Some(value_expr) => { + let value_expr = generator + .gen_expr(ctx, value_expr)? + .unwrap() + .to_basic_value_enum(ctx, generator, ctx.primitives.int32)?; + + let value_expr = + i32_model.check_value(generator, ctx.ctx, value_expr).unwrap(); + + Some(value_expr) + } + }) + }; + + let start = help(start)?; + let stop = help(stop)?; + let step = help(step)?; + + RustNDIndex::Slice(RustUserSlice { start, stop, step }) + } else { + // Anything else that is not a slice (might be illegal values), + // For nac3core, this should be e.g., an int32 constant, an int32 variable, otherwise it's an error + let index = generator.gen_expr(ctx, index_expr)?.unwrap().to_basic_value_enum( + ctx, + generator, + ctx.primitives.int32, + )?; + let index = i32_model.check_value(generator, ctx.ctx, index).unwrap(); + + RustNDIndex::SingleElement(index) + }; + rust_ndindexes.push(ndindex); + } + Ok(rust_ndindexes) + } +} diff --git a/nac3core/src/codegen/structure/ndarray/mapping.rs b/nac3core/src/codegen/structure/ndarray/mapping.rs new file mode 100644 index 00000000..e30806ba --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/mapping.rs @@ -0,0 +1,158 @@ +use inkwell::values::BasicValueEnum; +use itertools::Itertools; +use util::gen_for_model_auto; + +use crate::{ + codegen::{ + model::*, + structure::ndarray::{scalar::ScalarObject, NDArrayObject}, + CodeGenContext, CodeGenerator, + }, + typecheck::typedef::Type, +}; + +use super::scalar::ScalarOrNDArray; + +impl<'ctx> NDArrayObject<'ctx> {
/// TODO: Document me. Has complex behavior. + /// and explain why `ret_dtype` has to be specified beforehand. + pub fn broadcasting_starmap<'a, G, MappingFn>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + ndarrays: &[Self], + ret_dtype: Type, + mapping: MappingFn, + ) -> Result + where + G: CodeGenerator + ?Sized, + MappingFn: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + Int<'ctx, SizeT>, + &[ScalarObject<'ctx>], + ) -> Result, String>, + { + let sizet_model = IntModel(SizeT); + + // Broadcast inputs + let broadcast_result = NDArrayObject::broadcast_all(generator, ctx, ndarrays); + + // Allocate the resulting ndarray + let mapped_ndarray = NDArrayObject::alloca_uninitialized( + generator, + ctx, + ret_dtype, + broadcast_result.ndims, + "mapped_ndarray", + ); + mapped_ndarray.copy_shape_from_array(generator, ctx, broadcast_result.shape); + mapped_ndarray.create_data(generator, ctx); + + // Map element-wise and store results into `mapped_ndarray`. + let start = sizet_model.const_0(generator, ctx.ctx); + let stop = broadcast_result.ndarrays[0].size(generator, ctx); // They all should have the same `np.size`. 
+ let step = sizet_model.const_1(generator, ctx.ctx); + gen_for_model_auto(generator, ctx, start, stop, step, move |generator, ctx, _hooks, i| { + let elements = + ndarrays.iter().map(|ndarray| ndarray.get_nth(generator, ctx, i)).collect_vec(); + + let ret = mapping(generator, ctx, i, &elements)?; + + let pret = mapped_ndarray.get_nth_pointer(generator, ctx, i, "pret"); + ctx.builder.build_store(pret, ret).unwrap(); + Ok(()) + })?; + + Ok(mapped_ndarray) + } + + pub fn map<'a, G, Mapping>( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + ret_dtype: Type, + mapping: Mapping, + ) -> Result + where + G: CodeGenerator + ?Sized, + Mapping: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + Int<'ctx, SizeT>, + ScalarObject<'ctx>, + ) -> Result, String>, + { + NDArrayObject::broadcasting_starmap( + generator, + ctx, + &[*self], + ret_dtype, + |generator, ctx, i, scalars| mapping(generator, ctx, i, scalars[0]), + ) + } +} + +impl<'ctx> ScalarOrNDArray<'ctx> { + /// TODO: Document me. Has complex behavior. + /// and explain why `ret_dtype` has to be specified beforehand. + pub fn broadcasting_starmap<'a, G, MappingFn>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + inputs: &[Self], + ret_dtype: Type, + mapping: MappingFn, + ) -> Result + where + G: CodeGenerator + ?Sized, + MappingFn: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + Int<'ctx, SizeT>, + &[ScalarObject<'ctx>], + ) -> Result, String>, + { + let sizet_model = IntModel(SizeT); + + // Check if all inputs are ScalarObjects + let all_scalars: Option> = + inputs.iter().map(ScalarObject::try_from).try_collect().ok(); + + if let Some(scalars) = all_scalars { + let i = sizet_model.const_0(generator, ctx.ctx); // Pass 0 as the index + let scalar = + ScalarObject { value: mapping(generator, ctx, i, &scalars)?, dtype: ret_dtype }; + Ok(ScalarOrNDArray::Scalar(scalar)) + } else { + // Promote all input to ndarrays and map through them. 
+ let inputs = inputs.iter().map(|input| input.as_ndarray(generator, ctx)).collect_vec(); + let ndarray = + NDArrayObject::broadcasting_starmap(generator, ctx, &inputs, ret_dtype, mapping)?; + Ok(ScalarOrNDArray::NDArray(ndarray)) + } + } + + pub fn map<'a, G, Mapping>( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + ret_dtype: Type, + mapping: Mapping, + ) -> Result + where + G: CodeGenerator + ?Sized, + Mapping: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + Int<'ctx, SizeT>, + ScalarObject<'ctx>, + ) -> Result, String>, + { + ScalarOrNDArray::broadcasting_starmap( + generator, + ctx, + &[*self], + ret_dtype, + |generator, ctx, i, scalars| mapping(generator, ctx, i, scalars[0]), + ) + } +} diff --git a/nac3core/src/codegen/structure/ndarray/mod.rs b/nac3core/src/codegen/structure/ndarray/mod.rs new file mode 100644 index 00000000..47d827e9 --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/mod.rs @@ -0,0 +1,543 @@ +pub mod broadcast; +pub mod functions; +pub mod indexing; +pub mod mapping; +pub mod product; +pub mod scalar; +pub mod shape_util; + +use crate::{ + codegen::{ + irrt::{ + call_nac3_ndarray_copy_data, call_nac3_ndarray_get_nth_pelement, + call_nac3_ndarray_is_c_contiguous, call_nac3_ndarray_len, call_nac3_ndarray_nbytes, + call_nac3_ndarray_resolve_and_check_new_shape, call_nac3_ndarray_set_strides_by_shape, + call_nac3_ndarray_size, call_nac3_ndarray_transpose, + }, + model::*, + stmt::BreakContinueHooks, + CodeGenContext, CodeGenerator, + }, + toplevel::numpy::{extract_ndims, unpack_ndarray_var_tys}, + typecheck::typedef::Type, +}; +use inkwell::{ + context::Context, + types::BasicType, + values::{BasicValue, BasicValueEnum, PointerValue}, + AddressSpace, IntPredicate, +}; +use scalar::ScalarObject; +use util::{call_memcpy_model, gen_for_model_auto}; + +pub struct NpArrayFields<'ctx, F: FieldTraversal<'ctx>> { + pub data: F::Out>>, + pub itemsize: F::Out>, + pub ndims: F::Out>, + pub shape: F::Out>>, + pub 
strides: F::Out>>, +} + +// TODO: Rename to `NDArray` when the old NDArray is removed. +#[derive(Debug, Clone, Copy, Default)] +pub struct NpArray; + +impl<'ctx> StructKind<'ctx> for NpArray { + type Fields> = NpArrayFields<'ctx, F>; + + fn traverse_fields>(&self, traversal: &mut F) -> Self::Fields { + Self::Fields { + data: traversal.add_auto("data"), + itemsize: traversal.add_auto("itemsize"), + ndims: traversal.add_auto("ndims"), + shape: traversal.add_auto("shape"), + strides: traversal.add_auto("strides"), + } + } +} + +/// A NAC3 Python ndarray object. +#[derive(Debug, Clone, Copy)] +pub struct NDArrayObject<'ctx> { + pub dtype: Type, + pub ndims: u64, + pub value: Ptr<'ctx, StructModel>, +} + +impl<'ctx> NDArrayObject<'ctx> { + /// Create an [`NDArrayObject`] from an LLVM value and its typechecker [`Type`]. + pub fn from_value_and_type, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + value: V, + ty: Type, + ) -> Self { + let pndarray_model = PtrModel(StructModel(NpArray)); + + let (dtype, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, ty); + let ndims = extract_ndims(&ctx.unifier, ndims); + let value = pndarray_model.check_value(generator, ctx.ctx, value).unwrap(); + NDArrayObject { dtype, ndims, value } + } + + /// Get the `np.size()` of this ndarray. + pub fn size( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Int<'ctx, SizeT> { + call_nac3_ndarray_size(generator, ctx, self.value) + } + + /// Get the `ndarray.nbytes` of this ndarray. + pub fn nbytes( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Int<'ctx, SizeT> { + call_nac3_ndarray_nbytes(generator, ctx, self.value) + } + + /// Get the `len()` of this ndarray. + pub fn len( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Int<'ctx, SizeT> { + call_nac3_ndarray_len(generator, ctx, self.value) + } + + /// Check if this ndarray is C-contiguous. 
+ /// + /// See NumPy's `flags["C_CONTIGUOUS"]`: + pub fn is_c_contiguous( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Int<'ctx, Bool> { + call_nac3_ndarray_is_c_contiguous(generator, ctx, self.value) + } + + /// Get the pointer to the n-th (0-based) element. + /// + /// The returned pointer has the element type of the LLVM type of this ndarray's `dtype`. + pub fn get_nth_pointer( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + nth: Int<'ctx, SizeT>, + name: &str, + ) -> PointerValue<'ctx> { + let elem_ty = ctx.get_llvm_type(generator, self.dtype); + + let p = call_nac3_ndarray_get_nth_pelement(generator, ctx, self.value, nth); + ctx.builder + .build_pointer_cast(p.value, elem_ty.ptr_type(AddressSpace::default()), name) + .unwrap() + } + + /// Get the n-th (0-based) scalar. + pub fn get_nth( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + nth: Int<'ctx, SizeT>, + ) -> ScalarObject<'ctx> { + let p = self.get_nth_pointer(generator, ctx, nth, "value"); + let value = ctx.builder.build_load(p, "value").unwrap(); + ScalarObject { dtype: self.dtype, value } + } + + /// Call [`call_nac3_ndarray_set_strides_by_shape`] on this ndarray to update `strides`. + /// + /// Please refer to the IRRT implementation to see its purpose. + pub fn update_strides_by_shape( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) { + call_nac3_ndarray_set_strides_by_shape(generator, ctx, self.value); + } + + /// Copy data from another ndarray. + /// + /// This ndarray and `src` is that their `np.size()` should be the same. Their shapes + /// do not matter. The copying order is determined by how their flattened views look. + /// + /// Panics if the `dtype`s of ndarrays are different. 
+ pub fn copy_data_from( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src: NDArrayObject<'ctx>, + ) { + assert!(ctx.unifier.unioned(self.dtype, src.dtype), "self and src dtype should match"); + call_nac3_ndarray_copy_data(generator, ctx, src.value, self.value); + } + + /// Allocate an ndarray on the stack given its `ndims` and `dtype`. + /// + /// `shape` and `strides` will be automatically allocated on the stack. + /// + /// The returned ndarray's content will be: + /// - `data`: set to `nullptr`. + /// - `itemsize`: set to the `sizeof()` of `dtype`. + /// - `ndims`: set to the value of `ndims`. + /// - `shape`: allocated with an array of length `ndims` with uninitialized values. + /// - `strides`: allocated with an array of length `ndims` with uninitialized values. + pub fn alloca_uninitialized( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + dtype: Type, + ndims: u64, + name: &str, + ) -> Self { + let sizet_model = IntModel(SizeT); + let ndarray_model = StructModel(NpArray); + let ndarray_data_model = PtrModel(IntModel(Byte)); + + let pndarray = ndarray_model.alloca(generator, ctx, name); + + let data = ndarray_data_model.nullptr(generator, ctx.ctx); + pndarray.set(ctx, |f| f.data, data); + + let itemsize = ctx.get_llvm_type(generator, dtype).size_of().unwrap(); + let itemsize = + sizet_model.s_extend_or_bit_cast(generator, ctx, itemsize, "alloca_itemsize"); + pndarray.set(ctx, |f| f.itemsize, itemsize); + + let ndims_val = sizet_model.constant(generator, ctx.ctx, ndims); + pndarray.set(ctx, |f| f.ndims, ndims_val); + + let shape = sizet_model.array_alloca(generator, ctx, ndims_val.value, "alloca_shape"); + pndarray.set(ctx, |f| f.shape, shape); + + let strides = sizet_model.array_alloca(generator, ctx, ndims_val.value, "alloca_strides"); + pndarray.set(ctx, |f| f.strides, strides); + + NDArrayObject { dtype, ndims, value: pndarray } + } + + /// Convenience function. 
+ /// Like [`NDArrayObject::alloca_uninitialized`] but directly takes the typechecker type of the ndarray. + pub fn alloca_uninitialized_of_type( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ndarray_ty: Type, + name: &str, + ) -> Self { + let (dtype, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, ndarray_ty); + let ndims = extract_ndims(&ctx.unifier, ndims); + Self::alloca_uninitialized(generator, ctx, dtype, ndims, name) + } + + /// Clone this ndaarray - Allocate a new ndarray with the same shape as this ndarray and copy the contents + /// over. + /// + /// The new ndarray will own its data and will be C-contiguous. + pub fn make_clone( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + name: &str, + ) -> Self { + let clone = + NDArrayObject::alloca_uninitialized(generator, ctx, self.dtype, self.ndims, name); + + let shape = self.value.gep(ctx, |f| f.shape).load(generator, ctx, "shape"); + clone.copy_shape_from_array(generator, ctx, shape); + clone.create_data(generator, ctx); + clone.copy_data_from(generator, ctx, *self); + clone + } + + /// Get this ndarray's `ndims` as an LLVM constant. + pub fn get_ndims( + &self, + generator: &mut G, + ctx: &'ctx Context, + ) -> Int<'ctx, SizeT> { + let sizet_model = IntModel(SizeT); + sizet_model.constant(generator, ctx, self.ndims) + } + + /// Get if this ndarray's `np.size` is `0` - containing no content. + pub fn is_empty( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Int<'ctx, Bool> { + let sizet_model = IntModel(SizeT); + + let size = self.size(generator, ctx); + size.compare(ctx, IntPredicate::EQ, sizet_model.const_0(generator, ctx.ctx), "is_empty") + } + + /// Return true if this ndarray is unsized - `ndims == 0` and only contains a scalar. + /// + /// This is a staticially known property of ndarrays. This is why it is returning + /// a Rust boolean instead of a [`BasicValue`]. 
+ #[must_use] + pub fn is_unsized(&self) -> bool { + self.ndims == 0 + } + + /// Initialize an ndarray's `data` by allocating a buffer on the stack. + /// The allocated data buffer is considered to be *owned* by the ndarray. + /// + /// `strides` of the ndarray will also be updated with `set_strides_by_shape`. + /// + /// `shape` and `itemsize` of the ndarray ***must*** be initialized first. + pub fn create_data( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) { + let byte_model = IntModel(Byte); + + let nbytes = self.nbytes(generator, ctx); + + let data = byte_model.array_alloca(generator, ctx, nbytes.value, "data"); + self.value.set(ctx, |f| f.data, data); + + self.update_strides_by_shape(generator, ctx); + } + + /// Copy shape dimensions from an array. + pub fn copy_shape_from_array( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src_shape: Ptr<'ctx, IntModel>, + ) { + let dst_shape = self.value.get(generator, ctx, |f| f.shape, "dst_shape"); + let num_items = self.get_ndims(generator, ctx.ctx).value; + call_memcpy_model(generator, ctx, dst_shape, src_shape, num_items); + } + + /// Copy shape dimensions from an ndarray. + /// Panics if `ndims` mismatches. + pub fn copy_shape_from_ndarray( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src_ndarray: NDArrayObject<'ctx>, + ) { + assert_eq!(self.ndims, src_ndarray.ndims); + let src_shape = src_ndarray.value.get(generator, ctx, |f| f.shape, "src_shape"); + self.copy_shape_from_array(generator, ctx, src_shape); + } + + /// Copy strides dimensions from an array. 
+ pub fn copy_strides_from_array( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src_strides: Ptr<'ctx, IntModel>, + ) { + let dst_strides = self.value.get(generator, ctx, |f| f.strides, "dst_strides"); + let num_items = self.get_ndims(generator, ctx.ctx).value; + call_memcpy_model(generator, ctx, dst_strides, src_strides, num_items); + } + + /// Copy strides dimensions from an ndarray. + /// Panics if `ndims` mismatches. + pub fn copy_strides_from_ndarray( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + src_ndarray: NDArrayObject<'ctx>, + ) { + assert_eq!(self.ndims, src_ndarray.ndims); + let src_strides = src_ndarray.value.get(generator, ctx, |f| f.strides, "src_strides"); + self.copy_strides_from_array(generator, ctx, src_strides); + } + + /// Iterate through every element pointer in the ndarray in its flatten view. + /// + /// `body` also access to [`BreakContinueHooks`] to short-circuit and an element's + /// index. The given element pointer also has been casted to the LLVM type of this ndarray's `dtype`. + pub fn foreach_pointer<'a, G, F>( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + body: F, + ) -> Result<(), String> + where + G: CodeGenerator + ?Sized, + F: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + BreakContinueHooks<'ctx>, + Int<'ctx, SizeT>, + PointerValue<'ctx>, + ) -> Result<(), String>, + { + let sizet_model = IntModel(SizeT); + + let start = sizet_model.const_0(generator, ctx.ctx); + let stop = self.size(generator, ctx); + let step = sizet_model.const_1(generator, ctx.ctx); + + gen_for_model_auto(generator, ctx, start, stop, step, |generator, ctx, hooks, i| { + let pelement = self.get_nth_pointer(generator, ctx, i, "element"); + body(generator, ctx, hooks, i, pelement) + }) + } + + /// Iterate through every scalar in this ndarray. 
+ pub fn foreach<'a, G, F>( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, 'a>, + body: F, + ) -> Result<(), String> + where + G: CodeGenerator + ?Sized, + F: FnOnce( + &mut G, + &mut CodeGenContext<'ctx, 'a>, + BreakContinueHooks<'ctx>, + Int<'ctx, SizeT>, + ScalarObject<'ctx>, + ) -> Result<(), String>, + { + self.foreach_pointer(generator, ctx, |generator, ctx, hooks, i, p| { + let value = ctx.builder.build_load(p, "value").unwrap(); + let scalar = ScalarObject { dtype: self.dtype, value }; + body(generator, ctx, hooks, i, scalar) + }) + } + + /// Fill the ndarray with a value. + /// + /// `fill_value` must have the same LLVM type as the `dtype` of this ndarray. + pub fn fill( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + fill_value: BasicValueEnum<'ctx>, + ) { + self.foreach_pointer(generator, ctx, |_generator, ctx, _hooks, _i, pelement| { + ctx.builder.build_store(pelement, fill_value).unwrap(); + Ok(()) + }) + .unwrap(); + } + + /// Create a reshaped view on this ndarray like `np.reshape()`. + /// + /// If there is a `-1` in `new_shape`, it will be resolved; `new_shape` would **NOT** be modified as a result. + /// + /// If reshape without copying is impossible, this function will allocate a new ndarray and copy contents. + /// + /// * `new_ndims` - The number of dimensions of `new_shape` as a [`Type`]. + /// * `new_shape` - The target shape to do `np.reshape()`. + #[must_use] + pub fn reshape_or_copy( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + new_ndims: u64, + new_shape: Ptr<'ctx, IntModel>, + ) -> Self { + // TODO: The current criterion for whether to do a full copy or not is by checking `is_c_contiguous`, + // but this is not optimal. Look into how numpy does it. 
+ + let current_bb = ctx.builder.get_insert_block().unwrap(); + let then_bb = ctx.ctx.insert_basic_block_after(current_bb, "then_bb"); + let else_bb = ctx.ctx.insert_basic_block_after(then_bb, "else_bb"); + let end_bb = ctx.ctx.insert_basic_block_after(else_bb, "end_bb"); + + let dst_ndarray = NDArrayObject::alloca_uninitialized( + generator, + ctx, + self.dtype, + new_ndims, + "reshaped_ndarray", + ); + dst_ndarray.copy_shape_from_array(generator, ctx, new_shape); + + let size = self.size(generator, ctx); + let new_ndims = dst_ndarray.get_ndims(generator, ctx.ctx); + call_nac3_ndarray_resolve_and_check_new_shape(generator, ctx, size, new_ndims, new_shape); + + let is_c_contiguous = self.is_c_contiguous(generator, ctx); + ctx.builder.build_conditional_branch(is_c_contiguous.value, then_bb, else_bb).unwrap(); + + // Inserting into then_bb: reshape is possible without copying + ctx.builder.position_at_end(then_bb); + dst_ndarray.update_strides_by_shape(generator, ctx); + dst_ndarray.value.set(ctx, |f| f.data, self.value.get(generator, ctx, |f| f.data, "data")); + ctx.builder.build_unconditional_branch(end_bb).unwrap(); + + // Inserting into else_bb: reshape is impossible without copying + ctx.builder.position_at_end(else_bb); + dst_ndarray.create_data(generator, ctx); + dst_ndarray.copy_data_from(generator, ctx, *self); + ctx.builder.build_unconditional_branch(end_bb).unwrap(); + + // Reposition for continuation + ctx.builder.position_at_end(end_bb); + + dst_ndarray + } + + /// Create a flattened view of this ndarray, like `np.ravel()`. + /// + /// Uses [`NDArrayObject::reshape_or_copy`] under-the-hood so ndarray may or may not be copied. 
+ pub fn ravel_or_copy( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> Self { + // Define models + let sizet_model = IntModel(SizeT); + + let num0 = sizet_model.const_0(generator, ctx.ctx); + let num1 = sizet_model.const_1(generator, ctx.ctx); + let num_neg1 = sizet_model.const_all_1s(generator, ctx.ctx); + + // Create `[-1]` and pass to `reshape_or_copy`. + let new_shape = sizet_model.array_alloca(generator, ctx, num1.value, "new_shape"); + new_shape.offset(generator, ctx, num0.value, "").store(ctx, num_neg1); + + self.reshape_or_copy(generator, ctx, 1, new_shape) + } + + /// Create a transposed view on this ndarray like `np.transpose(, = None)`. + /// * `axes` - If specified, should be an array of the permutation (negative indices are **allowed**). + pub fn transpose( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + axes: Option>>, + ) -> Self { + // Define models + let sizet_model = IntModel(SizeT); + + let transposed_ndarray = NDArrayObject::alloca_uninitialized( + generator, + ctx, + self.dtype, + self.ndims, + "transposed_ndarray", + ); + + let num_axes = self.get_ndims(generator, ctx.ctx); + + // `axes = nullptr` if `axes` is unspecified. 
+ let axes = axes.unwrap_or_else(|| PtrModel(sizet_model).nullptr(generator, ctx.ctx)); + + call_nac3_ndarray_transpose( + generator, + ctx, + self.value, + transposed_ndarray.value, + num_axes, + axes, + ); + + transposed_ndarray + } +} diff --git a/nac3core/src/codegen/structure/ndarray/product.rs b/nac3core/src/codegen/structure/ndarray/product.rs new file mode 100644 index 00000000..96d337ec --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/product.rs @@ -0,0 +1,20 @@ +use crate::codegen::{CodeGenContext, CodeGenerator}; + +use super::NDArrayObject; + +impl<'ctx> NDArrayObject<'ctx> { + /// TODO: Document me + pub fn matmul_at_least_2d( + generator: &G, + ctx: &mut CodeGenContext<'ctx, '_>, + a: Self, + b: Self, + ) -> Self { + assert!(a.ndims >= 2); + assert!(b.ndims >= 2); + + + + todo!() + } +} diff --git a/nac3core/src/codegen/structure/ndarray/scalar.rs b/nac3core/src/codegen/structure/ndarray/scalar.rs new file mode 100644 index 00000000..1a03a223 --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/scalar.rs @@ -0,0 +1,143 @@ +use inkwell::values::{BasicValue, BasicValueEnum}; + +use crate::{ + codegen::{model::*, CodeGenContext, CodeGenerator}, + typecheck::typedef::{Type, TypeEnum}, +}; + +use super::NDArrayObject; + +/// An LLVM numpy scalar with its [`Type`]. +/// +/// Intended to be used with [`ScalarOrNDArray`]. +/// +/// A scalar does not have to be an actual number. It could be arbitrary objects. +#[derive(Debug, Clone, Copy)] +pub struct ScalarObject<'ctx> { + pub dtype: Type, + pub value: BasicValueEnum<'ctx>, +} + +impl<'ctx> ScalarObject<'ctx> { + /// Promote this scalar to an unsized ndarray (like doing `np.asarray`). + /// + /// The scalar value is allocated onto the stack, and the ndarray's `data` will point to that + /// allocated value. 
+ pub fn as_ndarray( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> NDArrayObject<'ctx> { + let pbyte_model = PtrModel(IntModel(Byte)); + + // We have to put the value on the stack to get a data pointer. + let data = ctx.builder.build_alloca(self.value.get_type(), "as_ndarray_scalar").unwrap(); + ctx.builder.build_store(data, self.value).unwrap(); + let data = pbyte_model.pointer_cast(generator, ctx, data, "data"); + + let ndarray = + NDArrayObject::alloca_uninitialized(generator, ctx, self.dtype, 0, "scalar_ndarray"); + ndarray.value.set(ctx, |f| f.data, data); + ndarray + } +} + +/// A convenience enum for implementing scalar/ndarray agnostic utilities. +#[derive(Debug, Clone, Copy)] +pub enum ScalarOrNDArray<'ctx> { + Scalar(ScalarObject<'ctx>), + NDArray(NDArrayObject<'ctx>), +} + +impl<'ctx> ScalarOrNDArray<'ctx> { + /// Get the underlying [`BasicValueEnum<'ctx>`] of this [`ScalarOrNDArray`]. + #[must_use] + pub fn to_basic_value_enum(self) -> BasicValueEnum<'ctx> { + match self { + ScalarOrNDArray::Scalar(scalar) => scalar.value, + ScalarOrNDArray::NDArray(ndarray) => ndarray.value.value.as_basic_value_enum(), + } + } + + #[must_use] + pub fn into_scalar(&self) -> ScalarObject<'ctx> { + match self { + ScalarOrNDArray::NDArray(_ndarray) => panic!("Got NDArray"), + ScalarOrNDArray::Scalar(scalar) => *scalar, + } + } + + #[must_use] + pub fn into_ndarray(&self) -> NDArrayObject<'ctx> { + match self { + ScalarOrNDArray::NDArray(ndarray) => *ndarray, + ScalarOrNDArray::Scalar(_scalar) => panic!("Got Scalar"), + } + } + + /// Convert this [`ScalarOrNDArray`] to an ndarray - behaves like `np.asarray`. + /// - If this is an ndarray, the ndarray is returned. + /// - If this is a scalar, an unsized ndarray view is created on it. 
+ pub fn as_ndarray( + &self, + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + ) -> NDArrayObject<'ctx> { + match self { + ScalarOrNDArray::NDArray(ndarray) => *ndarray, + ScalarOrNDArray::Scalar(scalar) => scalar.as_ndarray(generator, ctx), + } + } + + #[must_use] + pub fn dtype(&self) -> Type { + match self { + ScalarOrNDArray::Scalar(scalar) => scalar.dtype, + ScalarOrNDArray::NDArray(ndarray) => ndarray.dtype, + } + } +} + +impl<'ctx> TryFrom<&ScalarOrNDArray<'ctx>> for ScalarObject<'ctx> { + type Error = (); + + fn try_from(value: &ScalarOrNDArray<'ctx>) -> Result { + match value { + ScalarOrNDArray::Scalar(scalar) => Ok(*scalar), + ScalarOrNDArray::NDArray(_ndarray) => Err(()), + } + } +} + +impl<'ctx> TryFrom<&ScalarOrNDArray<'ctx>> for NDArrayObject<'ctx> { + type Error = (); + + fn try_from(value: &ScalarOrNDArray<'ctx>) -> Result { + match value { + ScalarOrNDArray::Scalar(_scalar) => Err(()), + ScalarOrNDArray::NDArray(ndarray) => Ok(*ndarray), + } + } +} + +/// Split an [`BasicValueEnum<'ctx>`] into a [`ScalarOrNDArray`] depending +/// on its [`Type`]. +pub fn split_scalar_or_ndarray<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + input: BasicValueEnum<'ctx>, + input_ty: Type, +) -> ScalarOrNDArray<'ctx> { + match &*ctx.unifier.get_ty(input_ty) { + TypeEnum::TObj { obj_id, .. 
} + if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() => + { + let ndarray = NDArrayObject::from_value_and_type(generator, ctx, input, input_ty); + ScalarOrNDArray::NDArray(ndarray) + } + _ => { + let scalar = ScalarObject { dtype: input_ty, value: input }; + ScalarOrNDArray::Scalar(scalar) + } + } +} diff --git a/nac3core/src/codegen/structure/ndarray/shape_util.rs b/nac3core/src/codegen/structure/ndarray/shape_util.rs new file mode 100644 index 00000000..b5521898 --- /dev/null +++ b/nac3core/src/codegen/structure/ndarray/shape_util.rs @@ -0,0 +1,112 @@ +use inkwell::values::BasicValueEnum; +use util::gen_for_model_auto; + +use crate::{ + codegen::{model::*, structure::list::ListObject, CodeGenContext, CodeGenerator}, + typecheck::typedef::{Type, TypeEnum}, +}; + +/// Parse a NumPy-like "int sequence" input and return the int sequence as an array and its length. +/// +/// * `sequence` - The `sequence` parameter. +/// * `sequence_ty` - The typechecker type of `sequence` +/// +/// The `sequence` argument type may only be one of the following: +/// 1. A list of `int32`; e.g., `np.empty([600, 800, 3])` +/// 2. A tuple of `int32`; e.g., `np.empty((600, 800, 3))` +/// 3. A scalar `int32`; e.g., `np.empty(3)`, this is functionally equivalent to `np.empty([3])` +/// +/// All `int32` values will be sign-extended to `SizeT`. +pub fn parse_numpy_int_sequence<'ctx, G: CodeGenerator + ?Sized>( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + input_sequence: BasicValueEnum<'ctx>, + input_sequence_ty: Type, +) -> (Int<'ctx, SizeT>, Ptr<'ctx, IntModel>) { + let sizet_model = IntModel(SizeT); + + let zero = sizet_model.const_0(generator, ctx.ctx); + let one = sizet_model.const_1(generator, ctx.ctx); + + // The result `list` to return. + match &*ctx.unifier.get_ty(input_sequence_ty) { + TypeEnum::TObj { obj_id, .. } + if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() => + { + // 1. 
A list of `int32`; e.g., `np.empty([600, 800, 3])` + + // Check `input_sequence` + let input_sequence = + ListObject::from_value_and_type(generator, ctx, input_sequence, input_sequence_ty); + + let len = input_sequence.value.gep(ctx, |f| f.len).load(generator, ctx, "len"); + let result = sizet_model.array_alloca(generator, ctx, len.value, "int_sequence"); + + // Load all the `int32`s from the input_sequence, cast them to `SizeT`, and store them into `result` + gen_for_model_auto(generator, ctx, zero, len, one, |generator, ctx, _hooks, i| { + // Load the i-th int32 in the input sequence + let int = input_sequence + .value + .get(generator, ctx, |f| f.items, "int") + .ix(generator, ctx, i.value, "int") + .value + .into_int_value(); + + // Cast to SizeT + let int = sizet_model.s_extend_or_bit_cast(generator, ctx, int, "int"); + + // Store + result.offset(generator, ctx, i.value, "int").store(ctx, int); + + Ok(()) + }) + .unwrap(); + + (len, result) + } + TypeEnum::TTuple { ty: tuple_types, .. } => { + // 2. A tuple of ints; e.g., `np.empty((600, 800, 3))` + let input_sequence = input_sequence.into_struct_value(); // A tuple is a struct + + let len_int = tuple_types.len(); + + let len = sizet_model.constant(generator, ctx.ctx, len_int as u64); + let result = sizet_model.array_alloca(generator, ctx, len.value, "int_sequence"); + + for i in 0..len_int { + // Get the i-th element off of the tuple and load it into `result`. + let int = ctx + .builder + .build_extract_value(input_sequence, i as u32, "int") + .unwrap() + .into_int_value(); + let int = sizet_model.s_extend_or_bit_cast(generator, ctx, int, "int"); + + let offset = sizet_model.constant(generator, ctx.ctx, i as u64); + result.offset(generator, ctx, offset.value, "int").store(ctx, int); + } + + (len, result) + } + TypeEnum::TObj { obj_id, .. } + if *obj_id == ctx.primitives.int32.obj_id(&ctx.unifier).unwrap() => + { + // 3. 
A scalar int; e.g., `np.empty(3)`, this is functionally equivalent to `np.empty([3])` + let input_int = input_sequence.into_int_value(); + + let len = sizet_model.const_1(generator, ctx.ctx); + let result = sizet_model.array_alloca(generator, ctx, len.value, "int_sequence"); + + let int = sizet_model.s_extend_or_bit_cast(generator, ctx, input_int, "int"); + + // Storing into result[0] + result.store(ctx, int); + + (len, result) + } + _ => panic!( + "encountered unknown sequence type: {}", + ctx.unifier.stringify(input_sequence_ty) + ), + } +} diff --git a/nac3core/src/codegen/structure/tuple.rs b/nac3core/src/codegen/structure/tuple.rs new file mode 100644 index 00000000..241fef14 --- /dev/null +++ b/nac3core/src/codegen/structure/tuple.rs @@ -0,0 +1,39 @@ +use inkwell::values::{BasicValueEnum, StructValue}; +use itertools::Itertools; + +use crate::{ + codegen::{CodeGenContext, CodeGenerator}, + typecheck::typedef::Type, +}; + +pub struct TupleObject<'ctx> { + pub tys: Vec, + pub value: StructValue<'ctx>, +} + +impl<'ctx> TupleObject<'ctx> { + pub fn create( + generator: &mut G, + ctx: &mut CodeGenContext<'ctx, '_>, + items: I, + name: &str, + ) -> Self + where + I: IntoIterator, Type)>, + { + let (vals, tys): (Vec<_>, Vec<_>) = items.into_iter().unzip(); + + // let tuple_ty = ctx.unifier.add_ty(TypeEnum::TTuple { ty: tys }); + let llvm_tys = tys.iter().map(|ty| ctx.get_llvm_type(generator, *ty)).collect_vec(); + let llvm_tuple_ty = ctx.ctx.struct_type(&llvm_tys, false); + let pllvm_tuple = ctx.builder.build_alloca(llvm_tuple_ty, "tuple").unwrap(); + for (i, val) in vals.into_iter().enumerate() { + // Store the dim value into the tuple + let pval = ctx.builder.build_struct_gep(pllvm_tuple, i as u32, "value").unwrap(); + ctx.builder.build_store(pval, val).unwrap(); + } + + let value = ctx.builder.build_load(pllvm_tuple, name).unwrap().into_struct_value(); + TupleObject { tys, value } + } +} diff --git a/nac3core/src/toplevel/builtins.rs 
b/nac3core/src/toplevel/builtins.rs index be8687ea..36e66b15 100644 --- a/nac3core/src/toplevel/builtins.rs +++ b/nac3core/src/toplevel/builtins.rs @@ -13,15 +13,27 @@ use strum::IntoEnumIterator; use crate::{ codegen::{ - builtin_fns, - classes::{ArrayLikeValue, NDArrayValue, ProxyValue, RangeValue, TypedArrayLikeAccessor}, + builtin_fns::{self}, + classes::{ProxyValue, RangeValue}, expr::destructure_range, - irrt::*, + extern_fns, + irrt::{self, *}, + llvm_intrinsics, + model::Int32, numpy::*, + numpy_new::{self, gen_ndarray_transpose}, stmt::exn_constructor, + structure::ndarray::{ + functions::{FloorOrCeil, MinOrMax}, + scalar::{split_scalar_or_ndarray, ScalarObject, ScalarOrNDArray}, + NDArrayObject, + }, }, symbol_resolver::SymbolValue, - toplevel::{helper::PrimDef, numpy::make_ndarray_ty}, + toplevel::{ + helper::PrimDef, + numpy::{create_ndims, make_ndarray_ty}, + }, typecheck::typedef::{into_var_map, iter_type_vars, TypeVar, VarMap}, }; @@ -511,7 +523,16 @@ impl<'a> BuiltinBuilder<'a> { PrimDef::FunNpArray | PrimDef::FunNpFull | PrimDef::FunNpEye - | PrimDef::FunNpIdentity => self.build_ndarray_other_factory_function(prim), + | PrimDef::FunNpIdentity + | PrimDef::FunNpArange => self.build_ndarray_other_factory_function(prim), + + PrimDef::FunNpBroadcastTo | PrimDef::FunNpReshape | PrimDef::FunNpTranspose => { + self.build_ndarray_view_function(prim) + } + + PrimDef::FunNpSize | PrimDef::FunNpShape | PrimDef::FunNpStrides => { + self.build_ndarray_property_getter_function(prim) + } PrimDef::FunStr => self.build_str_function(), @@ -578,10 +599,6 @@ impl<'a> BuiltinBuilder<'a> { | PrimDef::FunNpHypot | PrimDef::FunNpNextAfter => self.build_np_2ary_function(prim), - PrimDef::FunNpTranspose | PrimDef::FunNpReshape => { - self.build_np_sp_ndarray_function(prim) - } - PrimDef::FunNpDot | PrimDef::FunNpLinalgCholesky | PrimDef::FunNpLinalgQr @@ -1072,16 +1089,34 @@ impl<'a> BuiltinBuilder<'a> { let arg_ty = fun.0.args[0].ty; let arg = 
args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - let func = match prim { - PrimDef::FunInt32 => builtin_fns::call_int32, - PrimDef::FunInt64 => builtin_fns::call_int64, - PrimDef::FunUInt32 => builtin_fns::call_uint32, - PrimDef::FunUInt64 => builtin_fns::call_uint64, - PrimDef::FunFloat => builtin_fns::call_float, - PrimDef::FunBool => builtin_fns::call_bool, + let ret_dtype = match prim { + PrimDef::FunInt32 => ctx.primitives.int32, + PrimDef::FunInt64 => ctx.primitives.int64, + PrimDef::FunUInt32 => ctx.primitives.uint32, + PrimDef::FunUInt64 => ctx.primitives.uint64, + PrimDef::FunFloat => ctx.primitives.float, + PrimDef::FunBool => ctx.primitives.bool, _ => unreachable!(), }; - Ok(Some(func(generator, ctx, (arg_ty, arg))?)) + + let result = split_scalar_or_ndarray(generator, ctx, arg, arg_ty).map( + generator, + ctx, + ret_dtype, + |generator, ctx, _i, scalar| { + let result = match prim { + PrimDef::FunInt32 => scalar.cast_to_int32(generator, ctx), + PrimDef::FunInt64 => scalar.cast_to_int64(generator, ctx), + PrimDef::FunUInt32 => scalar.cast_to_uint32(generator, ctx), + PrimDef::FunUInt64 => scalar.cast_to_uint64(generator, ctx), + PrimDef::FunFloat => scalar.cast_to_float(ctx), + PrimDef::FunBool => scalar.cast_to_bool(ctx), + _ => unreachable!(), + }; + Ok(result.value) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }, )))), loc: None, @@ -1132,20 +1167,23 @@ impl<'a> BuiltinBuilder<'a> { let arg_ty = fun.0.args[0].ty; let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - let ret_elem_ty = size_variant.of_int(&ctx.primitives); - Ok(Some(builtin_fns::call_round(generator, ctx, (arg_ty, arg), ret_elem_ty)?)) + let ret_int_dtype = size_variant.of_int(&ctx.primitives); + + let result = split_scalar_or_ndarray(generator, ctx, arg, arg_ty).map( + generator, + ctx, + ret_int_dtype, + |generator, ctx, _i, scalar| { + Ok(scalar.round(generator, ctx, ret_int_dtype).value) + }, + )?; + 
Ok(Some(result.to_basic_value_enum())) }), ) } /// Build the functions `ceil()` and `floor()` and their 64 bit variants. fn build_ceil_floor_function(&mut self, prim: PrimDef) -> TopLevelDef { - #[derive(Clone, Copy)] - enum Kind { - Floor, - Ceil, - } - debug_assert_prim_is_allowed( prim, &[PrimDef::FunFloor, PrimDef::FunFloor64, PrimDef::FunCeil, PrimDef::FunCeil64], @@ -1153,10 +1191,10 @@ impl<'a> BuiltinBuilder<'a> { let (size_variant, kind) = { match prim { - PrimDef::FunFloor => (SizeVariant::Bits32, Kind::Floor), - PrimDef::FunFloor64 => (SizeVariant::Bits64, Kind::Floor), - PrimDef::FunCeil => (SizeVariant::Bits32, Kind::Ceil), - PrimDef::FunCeil64 => (SizeVariant::Bits64, Kind::Ceil), + PrimDef::FunFloor => (SizeVariant::Bits32, FloorOrCeil::Floor), + PrimDef::FunFloor64 => (SizeVariant::Bits64, FloorOrCeil::Floor), + PrimDef::FunCeil => (SizeVariant::Bits32, FloorOrCeil::Ceil), + PrimDef::FunCeil64 => (SizeVariant::Bits64, FloorOrCeil::Ceil), _ => unreachable!(), } }; @@ -1196,12 +1234,15 @@ impl<'a> BuiltinBuilder<'a> { let arg_ty = fun.0.args[0].ty; let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - let ret_elem_ty = size_variant.of_int(&ctx.primitives); - let func = match kind { - Kind::Ceil => builtin_fns::call_ceil, - Kind::Floor => builtin_fns::call_floor, - }; - Ok(Some(func(generator, ctx, (arg_ty, arg), ret_elem_ty)?)) + let result = split_scalar_or_ndarray(generator, ctx, arg, arg_ty).map( + generator, + ctx, + int_sized, + |generator, ctx, _i, scalar| { + Ok(scalar.floor_or_ceil(generator, ctx, kind, int_sized).value) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }), ) } @@ -1248,9 +1289,9 @@ impl<'a> BuiltinBuilder<'a> { &[(self.ndarray_factory_fn_shape_arg_tvar.ty, "shape")], Box::new(move |ctx, obj, fun, args, generator| { let func = match prim { - PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => gen_ndarray_empty, - PrimDef::FunNpZeros => gen_ndarray_zeros, - PrimDef::FunNpOnes => gen_ndarray_ones, + 
PrimDef::FunNpNDArray | PrimDef::FunNpEmpty => numpy_new::gen_ndarray_empty, + PrimDef::FunNpZeros => numpy_new::gen_ndarray_zeros, + PrimDef::FunNpOnes => numpy_new::gen_ndarray_ones, _ => unreachable!(), }; func(ctx, &obj, fun, &args, generator).map(|val| Some(val.as_basic_value_enum())) @@ -1264,7 +1305,13 @@ impl<'a> BuiltinBuilder<'a> { fn build_ndarray_other_factory_function(&mut self, prim: PrimDef) -> TopLevelDef { debug_assert_prim_is_allowed( prim, - &[PrimDef::FunNpArray, PrimDef::FunNpFull, PrimDef::FunNpEye, PrimDef::FunNpIdentity], + &[ + PrimDef::FunNpArray, + PrimDef::FunNpFull, + PrimDef::FunNpEye, + PrimDef::FunNpIdentity, + PrimDef::FunNpArange, + ], ); let PrimitiveStore { int32, bool, ndarray, .. } = *self.primitives; @@ -1325,7 +1372,7 @@ impl<'a> BuiltinBuilder<'a> { // type variable &[(self.list_int32, "shape"), (tv.ty, "fill_value")], Box::new(move |ctx, obj, fun, args, generator| { - gen_ndarray_full(ctx, &obj, fun, &args, generator) + numpy_new::gen_ndarray_full(ctx, &obj, fun, &args, generator) .map(|val| Some(val.as_basic_value_enum())) }), ) @@ -1383,6 +1430,152 @@ impl<'a> BuiltinBuilder<'a> { .map(|val| Some(val.as_basic_value_enum())) }), ), + PrimDef::FunNpArange => { + // TODO: Support `np.arange(start, stop, step)` + let ndims1 = create_ndims(self.unifier, 1); + let ndarray_float_1d = make_ndarray_ty( + self.unifier, + self.primitives, + Some(self.primitives.float), + Some(ndims1), + ); + create_fn_by_codegen( + self.unifier, + &VarMap::new(), + prim.name(), + ndarray_float_1d, + &[(int32, "n")], + Box::new(|ctx, obj, fun, args, generator| { + numpy_new::gen_ndarray_arange(ctx, &obj, fun, &args, generator) + .map(|val| Some(val.as_basic_value_enum())) + }), + ) + } + _ => unreachable!(), + } + } + + fn build_ndarray_view_function(&mut self, prim: PrimDef) -> TopLevelDef { + debug_assert_prim_is_allowed( + prim, + &[PrimDef::FunNpBroadcastTo, PrimDef::FunNpReshape, PrimDef::FunNpTranspose], + ); + + match prim { + 
PrimDef::FunNpBroadcastTo | PrimDef::FunNpReshape => { + // `array_ty` can be ndarrays and arbitrary scalars and objects. + let array_tvar = self.unifier.get_dummy_var(); + + // The return type is handled by special folding in the type inferencer, + // since the returned `ndims` depends on input shape. + let return_tvar = self.unifier.get_dummy_var(); + + create_fn_by_codegen( + self.unifier, + &into_var_map([array_tvar, return_tvar]), + prim.name(), + return_tvar.ty, + &[ + (array_tvar.ty, "array"), + (self.ndarray_factory_fn_shape_arg_tvar.ty, "shape"), + ], + Box::new(move |ctx, obj, fun, args, generator| { + let f = match prim { + PrimDef::FunNpBroadcastTo => numpy_new::gen_ndarray_broadcast_to, + PrimDef::FunNpReshape => numpy_new::gen_ndarray_reshape, + _ => unreachable!(), + }; + f(ctx, &obj, fun, &args, generator).map(Some) + }), + ) + } + PrimDef::FunNpTranspose => { + // TODO: Allow tuple inputs. + // TODO: Support scalar inputs (difficult) + + // TODO: Default values don't work for some reason. + // `axes` should have been `Option[List[int32]]` with default `None`. + // Workaround with some bogus types and values for now. 
+ + let axes_ty = self.list_int32; + + TopLevelDef::Function { + name: prim.name().into(), + simple_name: prim.simple_name().into(), + signature: self.unifier.add_ty(TypeEnum::TFunc(FunSignature { + args: vec![ + FuncArg { + name: "a".into(), + ty: self.primitives.ndarray, + default_value: None, + is_vararg: false, + }, + FuncArg { + name: "axes".into(), + ty: axes_ty, + default_value: Some(SymbolValue::OptionNone), // Bogus + is_vararg: false, + }, + ], + ret: self.primitives.ndarray, + vars: VarMap::new(), + })), + var_id: Vec::default(), + instance_to_symbol: HashMap::default(), + instance_to_stmt: HashMap::default(), + resolver: None, + codegen_callback: Some(Arc::new(GenCall::new(Box::new( + |ctx, obj, fun, args, generator| { + gen_ndarray_transpose(ctx, &obj, fun, &args, generator).map(Some) + }, + )))), + loc: None, + } + } + _ => unreachable!(), + } + } + + fn build_ndarray_property_getter_function(&mut self, prim: PrimDef) -> TopLevelDef { + debug_assert_prim_is_allowed( + prim, + &[PrimDef::FunNpSize, PrimDef::FunNpShape, PrimDef::FunNpStrides], + ); + + match prim { + PrimDef::FunNpSize => { + // TODO: Make the return type usize + create_fn_by_codegen( + self.unifier, + &VarMap::new(), + prim.name(), + self.primitives.int32, + &[(self.primitives.ndarray, "a")], + Box::new(|ctx, obj, fun, args, generator| { + numpy_new::gen_ndarray_size(ctx, &obj, fun, &args, generator).map(Some) + }), + ) + } + PrimDef::FunNpShape | PrimDef::FunNpStrides => { + // The return type is a tuple of variable length depending on the ndims + // of the input ndarray. 
+ let ret_ty = self.unifier.get_dummy_var().ty; + create_fn_by_codegen( + self.unifier, + &VarMap::new(), + prim.name(), + ret_ty, + &[(self.primitives.ndarray, "a")], + Box::new(move |ctx, obj, fun, args, generator| { + let f = match prim { + PrimDef::FunNpShape => numpy_new::gen_ndarray_shape, + PrimDef::FunNpStrides => numpy_new::gen_ndarray_strides, + _ => unreachable!(), + }; + f(ctx, &obj, fun, &args, generator).map(Some) + }), + ) + } _ => unreachable!(), } } @@ -1434,12 +1627,22 @@ impl<'a> BuiltinBuilder<'a> { let arg_ty = fun.0.args[0].ty; let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - let func = match prim { - PrimDef::FunNpCeil => builtin_fns::call_ceil, - PrimDef::FunNpFloor => builtin_fns::call_floor, + let kind = match prim { + PrimDef::FunNpFloor => FloorOrCeil::Floor, + PrimDef::FunNpCeil => FloorOrCeil::Ceil, _ => unreachable!(), }; - Ok(Some(func(generator, ctx, (arg_ty, arg), ctx.primitives.float)?)) + + let result = split_scalar_or_ndarray(generator, ctx, arg, arg_ty).map( + generator, + ctx, + ctx.primitives.float, + move |_generator, ctx, _i, scalar| { + let result = scalar.np_floor_or_ceil(ctx, kind); + Ok(result.value) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }), ) } @@ -1457,7 +1660,17 @@ impl<'a> BuiltinBuilder<'a> { Box::new(|ctx, _, fun, args, generator| { let arg_ty = fun.0.args[0].ty; let arg = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - Ok(Some(builtin_fns::call_numpy_round(generator, ctx, (arg_ty, arg))?)) + + let result = split_scalar_or_ndarray(generator, ctx, arg, arg_ty).map( + generator, + ctx, + ctx.primitives.float, + |_generator, ctx, _i, scalar| { + let result = scalar.np_round(ctx); + Ok(result.value) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }), ) } @@ -1534,51 +1747,11 @@ impl<'a> BuiltinBuilder<'a> { } } TypeEnum::TObj { obj_id, .. 
} if *obj_id == PrimDef::NDArray.id() => { - let llvm_i32 = ctx.ctx.i32_type(); - let llvm_usize = generator.get_size_type(ctx.ctx); - - let arg = NDArrayValue::from_ptr_val( - arg.into_pointer_value(), - llvm_usize, - None, - ); - - let ndims = arg.dim_sizes().size(ctx, generator); - ctx.make_assert( - generator, - ctx.builder - .build_int_compare( - IntPredicate::NE, - ndims, - llvm_usize.const_zero(), - "", - ) - .unwrap(), - "0:TypeError", - &format!("{name}() of unsized object", name = prim.name()), - [None, None, None], - ctx.current_loc, - ); - - let len = unsafe { - arg.dim_sizes().get_typed_unchecked( - ctx, - generator, - &llvm_usize.const_zero(), - None, - ) - }; - - if len.get_type().get_bit_width() == 32 { - Some(len.into()) - } else { - Some( - ctx.builder - .build_int_truncate(len, llvm_i32, "len") - .map(Into::into) - .unwrap(), - ) - } + let ndarray = + NDArrayObject::from_value_and_type(generator, ctx, arg, arg_ty); + let len = ndarray.len(generator, ctx); + let len = len.truncate(generator, ctx, Int32, "len"); // TODO: Currently `len()` returns an int32. 
It should have been SizeT + Some(len.value.as_basic_value_enum()) } _ => unreachable!(), } @@ -1621,16 +1794,21 @@ impl<'a> BuiltinBuilder<'a> { codegen_callback: Some(Arc::new(GenCall::new(Box::new( move |ctx, _, fun, args, generator| { let m_ty = fun.0.args[0].ty; - let n_ty = fun.0.args[1].ty; let m_val = args[0].1.clone().to_basic_value_enum(ctx, generator, m_ty)?; + + let n_ty = fun.0.args[1].ty; let n_val = args[1].1.clone().to_basic_value_enum(ctx, generator, n_ty)?; - let func = match prim { - PrimDef::FunMin => builtin_fns::call_min, - PrimDef::FunMax => builtin_fns::call_max, + let kind = match prim { + PrimDef::FunMin => MinOrMax::Min, + PrimDef::FunMax => MinOrMax::Max, _ => unreachable!(), }; - Ok(Some(func(ctx, (m_ty, m_val), (n_ty, n_val)))) + + let m = ScalarObject { dtype: m_ty, value: m_val }; + let n = ScalarObject { dtype: n_ty, value: n_val }; + let result = ScalarObject::min_or_max(ctx, kind, m, n); + Ok(Some(result.value)) }, )))), loc: None, @@ -1672,7 +1850,25 @@ impl<'a> BuiltinBuilder<'a> { let a_ty = fun.0.args[0].ty; let a = args[0].1.clone().to_basic_value_enum(ctx, generator, a_ty)?; - Ok(Some(builtin_fns::call_numpy_max_min(generator, ctx, (a_ty, a), prim.name())?)) + let a = split_scalar_or_ndarray(generator, ctx, a, a_ty).as_ndarray(generator, ctx); + let result = match prim { + PrimDef::FunNpArgmin => a + .argmin_or_argmax(generator, ctx, MinOrMax::Min) + .value + .as_basic_value_enum(), + PrimDef::FunNpArgmax => a + .argmin_or_argmax(generator, ctx, MinOrMax::Max) + .value + .as_basic_value_enum(), + PrimDef::FunNpMin => { + a.min_or_max(generator, ctx, MinOrMax::Min).value.as_basic_value_enum() + } + PrimDef::FunNpMax => { + a.min_or_max(generator, ctx, MinOrMax::Max).value.as_basic_value_enum() + } + _ => unreachable!(), + }; + Ok(Some(result)) }), ) } @@ -1712,13 +1908,32 @@ impl<'a> BuiltinBuilder<'a> { let x1_val = args[0].1.clone().to_basic_value_enum(ctx, generator, x1_ty)?; let x2_val = 
args[1].1.clone().to_basic_value_enum(ctx, generator, x2_ty)?; - let func = match prim { - PrimDef::FunNpMinimum => builtin_fns::call_numpy_minimum, - PrimDef::FunNpMaximum => builtin_fns::call_numpy_maximum, + let kind = match prim { + PrimDef::FunNpMinimum => MinOrMax::Min, + PrimDef::FunNpMaximum => MinOrMax::Max, _ => unreachable!(), }; - Ok(Some(func(generator, ctx, (x1_ty, x1_val), (x2_ty, x2_val))?)) + let x1 = split_scalar_or_ndarray(generator, ctx, x1_val, x1_ty); + let x2 = split_scalar_or_ndarray(generator, ctx, x2_val, x2_ty); + + // NOTE: x1.dtype() and x2.dtype() should be the same + let common_ty = x1.dtype(); + + let result = ScalarOrNDArray::broadcasting_starmap( + generator, + ctx, + &[x1, x2], + common_ty, + |_generator, ctx, _i, scalars| { + let x1 = scalars[0]; + let x2 = scalars[1]; + + let result = ScalarObject::min_or_max(ctx, kind, x1, x2); + Ok(result.value) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }, )))), loc: None, @@ -1729,6 +1944,7 @@ impl<'a> BuiltinBuilder<'a> { fn build_abs_function(&mut self) -> TopLevelDef { let prim = PrimDef::FunAbs; + let num_ty = self.num_ty; // To move into codegen_callback TopLevelDef::Function { name: prim.name().into(), simple_name: prim.simple_name().into(), @@ -1747,11 +1963,17 @@ impl<'a> BuiltinBuilder<'a> { instance_to_stmt: HashMap::default(), resolver: None, codegen_callback: Some(Arc::new(GenCall::new(Box::new( - |ctx, _, fun, args, generator| { + move |ctx, _, fun, args, generator| { let n_ty = fun.0.args[0].ty; let n_val = args[0].1.clone().to_basic_value_enum(ctx, generator, n_ty)?; - Ok(Some(builtin_fns::call_abs(generator, ctx, (n_ty, n_val))?)) + let result = split_scalar_or_ndarray(generator, ctx, n_val, n_ty).map( + generator, + ctx, + num_ty.ty, + |_generator, ctx, _i, scalar| Ok(scalar.abs(ctx).value), + )?; + Ok(Some(result.to_basic_value_enum())) }, )))), loc: None, @@ -1774,13 +1996,23 @@ impl<'a> BuiltinBuilder<'a> { let x_ty = fun.0.args[0].ty; let x_val = 
args[0].1.clone().to_basic_value_enum(ctx, generator, x_ty)?; - let func = match prim { - PrimDef::FunNpIsInf => builtin_fns::call_numpy_isinf, - PrimDef::FunNpIsNan => builtin_fns::call_numpy_isnan, + let function = match prim { + PrimDef::FunNpIsInf => irrt::call_isinf, + PrimDef::FunNpIsNan => irrt::call_isnan, _ => unreachable!(), }; - Ok(Some(func(generator, ctx, (x_ty, x_val))?)) + let result = split_scalar_or_ndarray(generator, ctx, x_val, x_ty).map( + generator, + ctx, + ctx.primitives.bool, + |generator, ctx, _i, scalar| { + let n = scalar.into_float64(ctx); + let n = function(generator, ctx, n); + Ok(n.as_basic_value_enum()) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }), ) } @@ -1838,49 +2070,58 @@ impl<'a> BuiltinBuilder<'a> { let arg_ty = fun.0.args[0].ty; let arg_val = args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - let func = match prim { - PrimDef::FunNpSin => builtin_fns::call_numpy_sin, - PrimDef::FunNpCos => builtin_fns::call_numpy_cos, - PrimDef::FunNpTan => builtin_fns::call_numpy_tan, + let result = split_scalar_or_ndarray(generator, ctx, arg_val, arg_ty).map( + generator, + ctx, + ctx.primitives.float, + |_generator, ctx, _i, scalar| { + let n = scalar.into_float64(ctx); + let n = match prim { + PrimDef::FunNpSin => llvm_intrinsics::call_float_sin(ctx, n, None), + PrimDef::FunNpCos => llvm_intrinsics::call_float_cos(ctx, n, None), + PrimDef::FunNpTan => extern_fns::call_tan(ctx, n, None), - PrimDef::FunNpArcsin => builtin_fns::call_numpy_arcsin, - PrimDef::FunNpArccos => builtin_fns::call_numpy_arccos, - PrimDef::FunNpArctan => builtin_fns::call_numpy_arctan, + PrimDef::FunNpArcsin => extern_fns::call_asin(ctx, n, None), + PrimDef::FunNpArccos => extern_fns::call_acos(ctx, n, None), + PrimDef::FunNpArctan => extern_fns::call_atan(ctx, n, None), - PrimDef::FunNpSinh => builtin_fns::call_numpy_sinh, - PrimDef::FunNpCosh => builtin_fns::call_numpy_cosh, - PrimDef::FunNpTanh => builtin_fns::call_numpy_tanh, + 
PrimDef::FunNpSinh => extern_fns::call_sinh(ctx, n, None), + PrimDef::FunNpCosh => extern_fns::call_cosh(ctx, n, None), + PrimDef::FunNpTanh => extern_fns::call_tanh(ctx, n, None), - PrimDef::FunNpArcsinh => builtin_fns::call_numpy_arcsinh, - PrimDef::FunNpArccosh => builtin_fns::call_numpy_arccosh, - PrimDef::FunNpArctanh => builtin_fns::call_numpy_arctanh, + PrimDef::FunNpArcsinh => extern_fns::call_asinh(ctx, n, None), + PrimDef::FunNpArccosh => extern_fns::call_acosh(ctx, n, None), + PrimDef::FunNpArctanh => extern_fns::call_atanh(ctx, n, None), - PrimDef::FunNpExp => builtin_fns::call_numpy_exp, - PrimDef::FunNpExp2 => builtin_fns::call_numpy_exp2, - PrimDef::FunNpExpm1 => builtin_fns::call_numpy_expm1, + PrimDef::FunNpExp => llvm_intrinsics::call_float_exp(ctx, n, None), + PrimDef::FunNpExp2 => llvm_intrinsics::call_float_exp2(ctx, n, None), + PrimDef::FunNpExpm1 => extern_fns::call_expm1(ctx, n, None), - PrimDef::FunNpLog => builtin_fns::call_numpy_log, - PrimDef::FunNpLog2 => builtin_fns::call_numpy_log2, - PrimDef::FunNpLog10 => builtin_fns::call_numpy_log10, + PrimDef::FunNpLog => llvm_intrinsics::call_float_log(ctx, n, None), + PrimDef::FunNpLog2 => llvm_intrinsics::call_float_log2(ctx, n, None), + PrimDef::FunNpLog10 => llvm_intrinsics::call_float_log10(ctx, n, None), - PrimDef::FunNpSqrt => builtin_fns::call_numpy_sqrt, - PrimDef::FunNpCbrt => builtin_fns::call_numpy_cbrt, + PrimDef::FunNpSqrt => llvm_intrinsics::call_float_sqrt(ctx, n, None), + PrimDef::FunNpCbrt => extern_fns::call_cbrt(ctx, n, None), - PrimDef::FunNpFabs => builtin_fns::call_numpy_fabs, - PrimDef::FunNpRint => builtin_fns::call_numpy_rint, + PrimDef::FunNpFabs => llvm_intrinsics::call_float_fabs(ctx, n, None), + PrimDef::FunNpRint => llvm_intrinsics::call_float_rint(ctx, n, None), - PrimDef::FunSpSpecErf => builtin_fns::call_scipy_special_erf, - PrimDef::FunSpSpecErfc => builtin_fns::call_scipy_special_erfc, + PrimDef::FunSpSpecErf => extern_fns::call_erf(ctx, n, None), + 
PrimDef::FunSpSpecErfc => extern_fns::call_erfc(ctx, n, None), - PrimDef::FunSpSpecGamma => builtin_fns::call_scipy_special_gamma, - PrimDef::FunSpSpecGammaln => builtin_fns::call_scipy_special_gammaln, + PrimDef::FunSpSpecGamma => irrt::call_gamma(ctx, n), + PrimDef::FunSpSpecGammaln => irrt::call_gammaln(ctx, n), - PrimDef::FunSpSpecJ0 => builtin_fns::call_scipy_special_j0, - PrimDef::FunSpSpecJ1 => builtin_fns::call_scipy_special_j1, + PrimDef::FunSpSpecJ0 => irrt::call_j0(ctx, n), + PrimDef::FunSpSpecJ1 => extern_fns::call_j1(ctx, n, None), - _ => unreachable!(), - }; - Ok(Some(func(generator, ctx, (arg_ty, arg_val))?)) + _ => unreachable!(), + }; + Ok(n.as_basic_value_enum()) + }, + )?; + Ok(Some(result.to_basic_value_enum())) }), ) } @@ -1902,20 +2143,20 @@ impl<'a> BuiltinBuilder<'a> { let PrimitiveStore { float, int32, .. } = *self.primitives; - // The argument types of the two input arguments are controlled here. - let (x1_ty, x2_ty) = match prim { + // The argument types of the two input arguments + the return type + let (x1_dtype, x2_dtype, ret_dtype) = match prim { PrimDef::FunNpArctan2 | PrimDef::FunNpCopysign | PrimDef::FunNpFmax | PrimDef::FunNpFmin | PrimDef::FunNpHypot - | PrimDef::FunNpNextAfter => (float, float), - PrimDef::FunNpLdExp => (float, int32), + | PrimDef::FunNpNextAfter => (float, float, float), + PrimDef::FunNpLdExp => (float, int32, float), _ => unreachable!(), }; - let x1_ty = self.new_type_or_ndarray_ty(x1_ty); - let x2_ty = self.new_type_or_ndarray_ty(x2_ty); + let x1_ty = self.new_type_or_ndarray_ty(x1_dtype); + let x2_ty = self.new_type_or_ndarray_ty(x2_dtype); let param_ty = &[(x1_ty.ty, "x1"), (x2_ty.ty, "x2")]; let ret_ty = self.unifier.get_fresh_var(None, None); @@ -1944,78 +2185,78 @@ impl<'a> BuiltinBuilder<'a> { move |ctx, _, fun, args, generator| { let x1_ty = fun.0.args[0].ty; let x1_val = args[0].1.clone().to_basic_value_enum(ctx, generator, x1_ty)?; + let x2_ty = fun.0.args[1].ty; let x2_val = 
args[1].1.clone().to_basic_value_enum(ctx, generator, x2_ty)?; - let func = match prim { - PrimDef::FunNpArctan2 => builtin_fns::call_numpy_arctan2, - PrimDef::FunNpCopysign => builtin_fns::call_numpy_copysign, - PrimDef::FunNpFmax => builtin_fns::call_numpy_fmax, - PrimDef::FunNpFmin => builtin_fns::call_numpy_fmin, - PrimDef::FunNpLdExp => builtin_fns::call_numpy_ldexp, - PrimDef::FunNpHypot => builtin_fns::call_numpy_hypot, - PrimDef::FunNpNextAfter => builtin_fns::call_numpy_nextafter, - _ => unreachable!(), - }; + let x1 = split_scalar_or_ndarray(generator, ctx, x1_val, x1_ty); + let x2 = split_scalar_or_ndarray(generator, ctx, x2_val, x2_ty); - Ok(Some(func(generator, ctx, (x1_ty, x1_val), (x2_ty, x2_val))?)) + let result = ScalarOrNDArray::broadcasting_starmap( + generator, + ctx, + &[x1, x2], + ret_dtype, + |_generator, ctx, _i, scalars| { + let x1 = scalars[0]; + let x2 = scalars[1]; + + // TODO: This looks ugly + let result = match prim { + PrimDef::FunNpArctan2 => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + extern_fns::call_atan2(ctx, x1, x2, None).as_basic_value_enum() + } + PrimDef::FunNpCopysign => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + llvm_intrinsics::call_float_copysign(ctx, x1, x2, None) + .as_basic_value_enum() + } + PrimDef::FunNpFmax => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + llvm_intrinsics::call_float_maxnum(ctx, x1, x2, None) + .as_basic_value_enum() + } + PrimDef::FunNpFmin => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + llvm_intrinsics::call_float_minnum(ctx, x1, x2, None) + .as_basic_value_enum() + } + PrimDef::FunNpHypot => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + extern_fns::call_hypot(ctx, x1, x2, None) + .as_basic_value_enum() + } + PrimDef::FunNpNextAfter => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_float64(ctx); + extern_fns::call_nextafter(ctx, x1, x2, None) + 
.as_basic_value_enum() + } + PrimDef::FunNpLdExp => { + let x1 = x1.into_float64(ctx); + let x2 = x2.into_int32(ctx); + extern_fns::call_ldexp(ctx, x1, x2, None).as_basic_value_enum() + } + _ => unreachable!(), + }; + + Ok(result) + }, + )?; + + Ok(Some(result.to_basic_value_enum())) }, )))), loc: None, } } - /// Build np/sp functions that take as input `NDArray` only - fn build_np_sp_ndarray_function(&mut self, prim: PrimDef) -> TopLevelDef { - debug_assert_prim_is_allowed(prim, &[PrimDef::FunNpTranspose, PrimDef::FunNpReshape]); - - match prim { - PrimDef::FunNpTranspose => { - let ndarray_ty = self.unifier.get_fresh_var_with_range( - &[self.ndarray_num_ty], - Some("T".into()), - None, - ); - create_fn_by_codegen( - self.unifier, - &into_var_map([ndarray_ty]), - prim.name(), - ndarray_ty.ty, - &[(ndarray_ty.ty, "x")], - Box::new(move |ctx, _, fun, args, generator| { - let arg_ty = fun.0.args[0].ty; - let arg_val = - args[0].1.clone().to_basic_value_enum(ctx, generator, arg_ty)?; - Ok(Some(ndarray_transpose(generator, ctx, (arg_ty, arg_val))?)) - }), - ) - } - - // NOTE: on `ndarray_factory_fn_shape_arg_tvar` and - // the `param_ty` for `create_fn_by_codegen`. - // - // Similar to `build_ndarray_from_shape_factory_function` we delegate the responsibility of typechecking - // to [`typecheck::type_inferencer::Inferencer::fold_numpy_function_call_shape_argument`], - // and use a dummy [`TypeVar`] `ndarray_factory_fn_shape_arg_tvar` as a placeholder for `param_ty`. 
- PrimDef::FunNpReshape => create_fn_by_codegen( - self.unifier, - &VarMap::new(), - prim.name(), - self.ndarray_num_ty, - &[(self.ndarray_num_ty, "x"), (self.ndarray_factory_fn_shape_arg_tvar.ty, "shape")], - Box::new(move |ctx, _, fun, args, generator| { - let x1_ty = fun.0.args[0].ty; - let x1_val = args[0].1.clone().to_basic_value_enum(ctx, generator, x1_ty)?; - let x2_ty = fun.0.args[1].ty; - let x2_val = args[1].1.clone().to_basic_value_enum(ctx, generator, x2_ty)?; - Ok(Some(ndarray_reshape(generator, ctx, (x1_ty, x1_val), (x2_ty, x2_val))?)) - }), - ), - - _ => unreachable!(), - } - } - /// Build `np_linalg` and `sp_linalg` functions /// /// The input to these functions must be floating point `NDArray` diff --git a/nac3core/src/toplevel/helper.rs b/nac3core/src/toplevel/helper.rs index 21aeb9db..ece60120 100644 --- a/nac3core/src/toplevel/helper.rs +++ b/nac3core/src/toplevel/helper.rs @@ -51,6 +51,17 @@ pub enum PrimDef { FunNpArray, FunNpEye, FunNpIdentity, + FunNpArange, + + // NumPy view functions + FunNpBroadcastTo, + FunNpReshape, + FunNpTranspose, + + // NumPy NDArray property getters + FunNpSize, + FunNpShape, + FunNpStrides, // Miscellaneous NumPy & SciPy functions FunNpRound, @@ -99,8 +110,6 @@ pub enum PrimDef { FunNpLdExp, FunNpHypot, FunNpNextAfter, - FunNpTranspose, - FunNpReshape, // Linalg functions FunNpDot, @@ -237,6 +246,17 @@ impl PrimDef { PrimDef::FunNpArray => fun("np_array", None), PrimDef::FunNpEye => fun("np_eye", None), PrimDef::FunNpIdentity => fun("np_identity", None), + PrimDef::FunNpArange => fun("np_arange", None), + + // NumPy view functions + PrimDef::FunNpBroadcastTo => fun("np_broadcast_to", None), + PrimDef::FunNpReshape => fun("np_reshape", None), + PrimDef::FunNpTranspose => fun("np_transpose", None), + + // NumPy NDArray property getters + PrimDef::FunNpSize => fun("np_size", None), + PrimDef::FunNpShape => fun("np_shape", None), + PrimDef::FunNpStrides => fun("np_strides", None), // Miscellaneous NumPy & SciPy 
functions PrimDef::FunNpRound => fun("np_round", None), @@ -285,8 +305,6 @@ impl PrimDef { PrimDef::FunNpLdExp => fun("np_ldexp", None), PrimDef::FunNpHypot => fun("np_hypot", None), PrimDef::FunNpNextAfter => fun("np_nextafter", None), - PrimDef::FunNpTranspose => fun("np_transpose", None), - PrimDef::FunNpReshape => fun("np_reshape", None), // Linalg functions PrimDef::FunNpDot => fun("np_dot", None), diff --git a/nac3core/src/toplevel/numpy.rs b/nac3core/src/toplevel/numpy.rs index 63f6173d..015b4eac 100644 --- a/nac3core/src/toplevel/numpy.rs +++ b/nac3core/src/toplevel/numpy.rs @@ -1,4 +1,7 @@ +use std::sync::Arc; + use crate::{ + symbol_resolver::SymbolValue, toplevel::helper::PrimDef, typecheck::{ type_inferencer::PrimitiveStore, @@ -83,3 +86,33 @@ pub fn unpack_ndarray_var_ids(unifier: &mut Unifier, ndarray: Type) -> (TypeVarI pub fn unpack_ndarray_var_tys(unifier: &mut Unifier, ndarray: Type) -> (Type, Type) { unpack_ndarray_tvars(unifier, ndarray).into_iter().map(|v| v.1).collect_tuple().unwrap() } + +/// Extract an ndarray's `ndims` [type][`Type`] in `u64`. Panic if not possible. +/// The `ndims` must only contain 1 value. +#[must_use] +pub fn extract_ndims(unifier: &Unifier, ndims_ty: Type) -> u64 { + let ndims_ty_enum = unifier.get_ty_immutable(ndims_ty); + let TypeEnum::TLiteral { values, .. } = &*ndims_ty_enum else { + panic!("ndims_ty should be a TLiteral"); + }; + + assert_eq!(values.len(), 1, "ndims_ty TLiteral should only contain 1 value"); + + let ndims = values[0].clone(); + u64::try_from(ndims).unwrap() +} + +/// Return an ndarray's `ndims` as a typechecker [`Type`] from its `u64` value. +pub fn create_ndims(unifier: &mut Unifier, ndims: u64) -> Type { + unifier.get_fresh_literal(vec![SymbolValue::U64(ndims)], None) +} + +/// Return the ndims after broadcasting ndarrays of different ndims. +/// +/// Panics if the input list is empty. 
+pub fn get_broadcast_all_ndims(ndims: I) -> u64 +where + I: IntoIterator, +{ + ndims.into_iter().max().unwrap() +} diff --git a/nac3core/src/typecheck/type_inferencer/mod.rs b/nac3core/src/typecheck/type_inferencer/mod.rs index 9ac503a1..3f7713d4 100644 --- a/nac3core/src/typecheck/type_inferencer/mod.rs +++ b/nac3core/src/typecheck/type_inferencer/mod.rs @@ -1,6 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::convert::{From, TryInto}; -use std::iter::once; +use std::iter::{self, once}; use std::{cell::RefCell, sync::Arc}; use super::{ @@ -15,12 +15,13 @@ use crate::{ symbol_resolver::{SymbolResolver, SymbolValue}, toplevel::{ helper::{arraylike_flatten_element_type, arraylike_get_ndims, PrimDef}, - numpy::{make_ndarray_ty, unpack_ndarray_var_tys}, + numpy::{extract_ndims, make_ndarray_ty, unpack_ndarray_var_tys}, TopLevelContext, TopLevelDef, }, typecheck::typedef::Mapping, }; use itertools::{izip, Itertools}; +use nac3parser::ast::Constant; use nac3parser::ast::{ self, fold::{self, Fold}, @@ -1272,7 +1273,7 @@ impl<'a> Inferencer<'a> { arg_ty.obj_id(self.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) }) { // typeof_ndarray_broadcast requires both dtypes to be the same, but ldexp accepts - // (float, int32), so convert it to align with the dtype of the first arg let arg1_ty = if id == &"np_ldexp".into() { if arg1_ty.obj_id(self.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) { let (_, ndims) = unpack_ndarray_var_tys(self.unifier, arg1_ty); @@ -1448,7 +1449,7 @@ impl<'a> Inferencer<'a> { }, })); } - // 2-argument ndarray n-dimensional factory functions + if id == &"np_reshape".into() && args.len() == 2 { let arg0 = self.fold_expr(args.remove(0))?; @@ -1493,6 +1494,51 @@ impl<'a> Inferencer<'a> { }, })); } + + if ["np_shape".into(), "np_strides".into()].contains(id) && args.len() == 1 { + // Output tuple size depends on input ndarray's ndims. 
+ + let ndarray = self.fold_expr(args.remove(0))?; + + let (_, ndims) = unpack_ndarray_var_tys(self.unifier, ndarray.custom.unwrap()); + let ndims = extract_ndims(self.unifier, ndims); + + // Create a tuple of size `ndims` full of int32 + // TODO: Make it usize + + let ret_ty = TypeEnum::TTuple { + ty: iter::repeat(self.primitives.int32).take(ndims as usize).collect_vec(), + is_vararg_ctx: false, + }; + let ret_ty = self.unifier.add_ty(ret_ty); + + let func_ty = TypeEnum::TFunc(FunSignature { + args: vec![FuncArg { + name: "a".into(), + default_value: None, + ty: ndarray.custom.unwrap(), + is_vararg: false, + }], + ret: ret_ty, + vars: VarMap::new(), + }); + let func_ty = self.unifier.add_ty(func_ty); + + return Ok(Some(Located { + location, + custom: Some(ret_ty), + node: ExprKind::Call { + func: Box::new(Located { + custom: Some(func_ty), + location: func.location, + node: ExprKind::Name { id: *id, ctx: *ctx }, + }), + args: vec![ndarray], + keywords: vec![], + }, + })); + } + // 2-argument ndarray n-dimensional creation functions if id == &"np_full".into() && args.len() == 2 { let ExprKind::List { elts, .. } = &args[0].node else { @@ -2210,14 +2256,25 @@ impl<'a> Inferencer<'a> { // We will also take the opportunity to deduce `dims_to_subtract` as well let mut dims_to_subtract: i128 = 0; for index in indices { - if let ExprKind::Slice { lower, upper, step } = &index.node { - for v in [lower.as_ref(), upper.as_ref(), step.as_ref()].iter().flatten() { - self.constrain(v.custom.unwrap(), self.primitives.int32, &v.location)?; + match &index.node { + ExprKind::Slice { lower, upper, step } => { + // Handle slices + for v in [lower.as_ref(), upper.as_ref(), step.as_ref()].iter().flatten() { + self.constrain(v.custom.unwrap(), self.primitives.int32, &v.location)?; + } + } + ExprKind::Constant { value: Constant::Ellipsis, .. } => { + // Handle `...`. Do nothing. + } + ExprKind::Name { id, .. } if id == &"none".into() => { + // Handle `np.newaxis` / `None`. 
+ dims_to_subtract -= 1; + } + _ => { + // Treat anything else as an integer index, and force unify their type to int32. + self.unify(index.custom.unwrap(), self.primitives.int32, &index.location)?; + dims_to_subtract += 1; } - } else { - // Treat anything else as an integer index, and force unify their type to int32. - self.unify(index.custom.unwrap(), self.primitives.int32, &index.location)?; - dims_to_subtract += 1; } } diff --git a/nac3core/src/typecheck/typedef/mod.rs b/nac3core/src/typecheck/typedef/mod.rs index 99a282f2..e125ff80 100644 --- a/nac3core/src/typecheck/typedef/mod.rs +++ b/nac3core/src/typecheck/typedef/mod.rs @@ -342,6 +342,14 @@ impl Unifier { self.unification_table.unioned(a, b) } + /// Determine if a type unions with a type in `tys`. + pub fn unioned_any(&mut self, a: Type, tys: I) -> bool + where + I: IntoIterator, + { + tys.into_iter().any(|ty| self.unioned(a, ty)) + } + pub fn from_shared_unifier(unifier: &SharedUnifier) -> Unifier { let lock = unifier.lock().unwrap(); Unifier { diff --git a/nac3standalone/demo/interpret_demo.py b/nac3standalone/demo/interpret_demo.py index 4f19db95..5a451aa8 100755 --- a/nac3standalone/demo/interpret_demo.py +++ b/nac3standalone/demo/interpret_demo.py @@ -218,8 +218,16 @@ def patch(module): module.np_ldexp = np.ldexp module.np_hypot = np.hypot module.np_nextafter = np.nextafter - module.np_transpose = np.transpose + + # NumPy view functions + module.np_broadcast_to = np.broadcast_to module.np_reshape = np.reshape + module.np_transpose = np.transpose + + # NumPy NDArray property getter functions + module.np_size = np.size + module.np_shape = np.shape + module.np_strides = lambda ndarray: ndarray.strides # SciPy Math functions module.sp_spec_erf = special.erf