Compare commits

...

4 Commits

Author SHA1 Message Date
099971da65 update dependencies 2025-08-01 13:32:01 +08:00
bd7cf9f4ab tests: make doctests work 2025-08-01 11:47:02 +08:00
1656770478 tests: fix missing target init 2025-07-31 16:15:56 +08:00
aacfba8c57 refactor: separate CoreContext out of CodeGenContext 2025-07-31 11:17:55 +08:00
56 changed files with 909 additions and 1286 deletions

39
Cargo.lock generated
View File

@@ -142,9 +142,9 @@ checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268"
[[package]]
name = "clap"
version = "4.5.41"
version = "4.5.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882"
dependencies = [
"clap_builder",
"clap_derive",
@@ -152,9 +152,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.41"
version = "4.5.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966"
dependencies = [
"anstream",
"anstyle",
@@ -564,7 +564,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667"
dependencies = [
"cfg-if",
"windows-targets 0.53.2",
"windows-targets 0.53.3",
]
[[package]]
@@ -975,9 +975,9 @@ dependencies = [
[[package]]
name = "redox_syscall"
version = "0.5.15"
version = "0.5.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e8af0dde094006011e6a740d4879319439489813bd0bcdc7d821beaeeff48ec"
checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
dependencies = [
"bitflags",
]
@@ -1086,9 +1086,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.141"
version = "1.0.142"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
dependencies = [
"itoa",
"memchr",
@@ -1304,9 +1304,9 @@ dependencies = [
[[package]]
name = "toml"
version = "0.9.2"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed0aee96c12fa71097902e0bb061a5e1ebd766a6636bb605ba401c45c1650eac"
checksum = "41ae868b5a0f67631c14589f7e250c1ea2c574ee5ba21c6c8dd4b1485705a5a1"
dependencies = [
"indexmap",
"serde",
@@ -1343,9 +1343,9 @@ checksum = "fcc842091f2def52017664b53082ecbbeb5c7731092bad69d2c63050401dfd64"
[[package]]
name = "trybuild"
version = "1.0.106"
version = "1.0.110"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65af40ad689f2527aebbd37a0a816aea88ff5f774ceabe99de5be02f2f91dae2"
checksum = "32e257d7246e7a9fd015fb0b28b330a8d4142151a33f03e6a497754f4b1f6a8e"
dependencies = [
"dissimilar",
"glob",
@@ -1507,6 +1507,12 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
[[package]]
name = "windows-sys"
version = "0.59.0"
@@ -1522,7 +1528,7 @@ version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets 0.53.2",
"windows-targets 0.53.3",
]
[[package]]
@@ -1543,10 +1549,11 @@ dependencies = [
[[package]]
name = "windows-targets"
version = "0.53.2"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm 0.53.0",
"windows_aarch64_msvc 0.53.0",
"windows_i686_gnu 0.53.0",

6
flake.lock generated
View File

@@ -2,11 +2,11 @@
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1752950548,
"narHash": "sha256-NS6BLD0lxOrnCiEOcvQCDVPXafX1/ek1dfJHX1nUIzc=",
"lastModified": 1753694789,
"narHash": "sha256-cKgvtz6fKuK1Xr5LQW/zOUiAC0oSQoA9nOISB0pJZqM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c87b95e25065c028d31a94f06a62927d18763fdf",
"rev": "dc9637876d0dcc8c9e5e22986b857632effeb727",
"type": "github"
},
"original": {

View File

@@ -42,7 +42,7 @@
};
passthru.cargoLock = cargoLock;
nativeBuildInputs = [ pkgs.python3 (pkgs.wrapClangMulti pkgs.llvmPackages_16.clang) llvm-tools-irrt pkgs.llvmPackages_16.llvm.out pkgs.llvmPackages_16.bintools llvm-nac3 ];
buildInputs = [ pkgs.python3 llvm-nac3 ];
buildInputs = [ pkgs.python3 llvm-nac3 pkgs.stdenv.cc.cc.lib ];
checkInputs = [ (pkgs.python3.withPackages(ps: [ ps.numpy ps.scipy ])) ];
checkPhase =
''

View File

@@ -26,10 +26,8 @@ use nac3core::{
},
},
inkwell::{
AddressSpace, IntPredicate, OptimizationLevel,
context::Context,
AddressSpace, IntPredicate,
module::Linkage,
targets::TargetMachine,
types::{BasicType, IntType},
values::{BasicValueEnum, IntValue, PointerValue, StructValue},
},
@@ -69,9 +67,6 @@ enum ParallelMode {
pub struct ArtiqCodeGenerator<'a> {
name: String,
/// The size of a `size_t` variable in bits.
size_t: u32,
/// Monotonic counter for naming `start`/`stop` variables used by `with parallel` blocks.
name_counter: u32,
@@ -95,14 +90,11 @@ pub struct ArtiqCodeGenerator<'a> {
impl<'a> ArtiqCodeGenerator<'a> {
pub fn new(
name: String,
size_t: IntType<'_>,
timeline: &'a (dyn TimeFns + Sync),
special_ids: SpecialPythonId,
) -> ArtiqCodeGenerator<'a> {
assert!(matches!(size_t.get_bit_width(), 32 | 64));
ArtiqCodeGenerator {
name,
size_t: size_t.get_bit_width(),
name_counter: 0,
start: None,
end: None,
@@ -112,18 +104,6 @@ impl<'a> ArtiqCodeGenerator<'a> {
}
}
#[must_use]
pub fn with_target_machine(
name: String,
ctx: &Context,
target_machine: &TargetMachine,
timeline: &'a (dyn TimeFns + Sync),
special_ids: SpecialPythonId,
) -> ArtiqCodeGenerator<'a> {
let llvm_usize = ctx.ptr_sized_int_type(&target_machine.get_target_data(), None);
Self::new(name, llvm_usize, timeline, special_ids)
}
/// If the generator is currently in a direct-`parallel` block context, emits IR that resets the
/// position of the timeline to the initial timeline position before entering the `parallel`
/// block.
@@ -189,10 +169,6 @@ impl CodeGenerator for ArtiqCodeGenerator<'_> {
&self.name
}
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> {
if self.size_t == 32 { ctx.i32_type() } else { ctx.i64_type() }
}
fn gen_block<'ctx, 'a, 'c, I: Iterator<Item = &'c Stmt<Option<Type>>>>(
&mut self,
ctx: &mut CodeGenContext<'ctx, 'a>,
@@ -492,28 +468,26 @@ fn format_rpc_arg<'ctx>(
// NAC3: NDArray = { usize, usize*, T* }
// libproto_artiq: NDArray = [data[..], dim_sz[..]]
let llvm_usize = ctx.get_size_type();
let (elem_ty, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, arg_ty);
let ndims = extract_ndims(&ctx.unifier, ndims);
let dtype = ctx.get_llvm_type(generator, elem_ty);
let dtype = ctx.get_llvm_type(elem_ty);
let ndarray = NDArrayType::new(ctx, dtype, ndims)
.map_pointer_value(arg.into_pointer_value(), None);
let ndims = llvm_usize.const_int(ndims, false);
let ndims = ctx.size_t.const_int(ndims, false);
// `ndarray.data` is possibly not contiguous, and we need it to be contiguous for
// the reader.
// Turning it into a ContiguousNDArray to get a `data` that is contiguous.
let carray = ndarray.make_contiguous_ndarray(generator, ctx);
let sizeof_usize = llvm_usize.size_of();
let sizeof_usize = ctx.size_t.size_of();
let sizeof_usize =
ctx.builder.build_int_truncate_or_bit_cast(sizeof_usize, llvm_usize, "").unwrap();
ctx.builder.build_int_truncate_or_bit_cast(sizeof_usize, ctx.size_t, "").unwrap();
let sizeof_pdata = dtype.ptr_type(AddressSpace::default()).size_of();
let sizeof_pdata =
ctx.builder.build_int_truncate_or_bit_cast(sizeof_pdata, llvm_usize, "").unwrap();
ctx.builder.build_int_truncate_or_bit_cast(sizeof_pdata, ctx.size_t, "").unwrap();
let sizeof_buf_shape = ctx.builder.build_int_mul(sizeof_usize, ndims, "").unwrap();
let sizeof_buf = ctx.builder.build_int_add(sizeof_buf_shape, sizeof_pdata, "").unwrap();
@@ -574,20 +548,13 @@ fn format_rpc_ret<'ctx>(
// }
let llvm_i8 = ctx.ctx.i8_type();
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let llvm_i8_8 = ctx.ctx.struct_type(&[llvm_i8.array_type(8).into()], false);
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
let llvm_pusize = ctx.size_t.ptr_type(AddressSpace::default());
let rpc_recv = ctx.fn_store.declare_external(
&ctx.module,
"rpc_recv",
Some(llvm_i32.into()),
&[llvm_pi8.into()],
false,
&[],
);
let rpc_recv =
ctx.declare_external("rpc_recv", Some(llvm_i32.into()), &[llvm_pi8.into()], false, &[]);
if ctx.unifier.unioned(ret_ty, ctx.primitives.none) {
ctx.build_call_or_invoke(&rpc_recv, &[llvm_pi8.const_null().into()], "rpc_recv");
@@ -600,11 +567,11 @@ fn format_rpc_ret<'ctx>(
let alloc_bb = ctx.ctx.append_basic_block(current_function, "rpc.continue");
let tail_bb = ctx.ctx.append_basic_block(current_function, "rpc.tail");
let llvm_ret_ty = ctx.get_llvm_abi_type(generator, ret_ty);
let llvm_ret_ty = ctx.get_llvm_abi_type(ret_ty);
let result = match &*ctx.unifier.get_ty_immutable(ret_ty) {
TypeEnum::TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
let num_0 = llvm_usize.const_zero();
let num_0 = ctx.size_t.const_zero();
// Round `val` up to its modulo `power_of_two`
let round_up = |ctx: &mut CodeGenContext<'ctx, '_>,
@@ -633,7 +600,7 @@ fn format_rpc_ret<'ctx>(
// Allocate the resulting ndarray
// A condition after format_rpc_ret ensures this will not be popped this off.
let (dtype, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, ret_ty);
let dtype_llvm = ctx.get_llvm_type(generator, dtype);
let dtype_llvm = ctx.get_llvm_type(dtype);
let ndims = extract_ndims(&ctx.unifier, ndims);
let ndarray = NDArrayType::new(ctx, dtype_llvm, ndims)
.construct_uninitialized(generator, ctx, None);
@@ -649,13 +616,13 @@ fn format_rpc_ret<'ctx>(
// Allocates a buffer for the initial RPC'ed object, which is guaranteed to be
// (4 + 4 * ndims) bytes with 8-byte alignment
let sizeof_usize = llvm_usize.size_of();
let sizeof_usize = ctx.size_t.size_of();
let sizeof_usize =
ctx.builder.build_int_truncate_or_bit_cast(sizeof_usize, llvm_usize, "").unwrap();
ctx.builder.build_int_truncate_or_bit_cast(sizeof_usize, ctx.size_t, "").unwrap();
let sizeof_ptr = llvm_i8.ptr_type(AddressSpace::default()).size_of();
let sizeof_ptr =
ctx.builder.build_int_z_extend_or_bit_cast(sizeof_ptr, llvm_usize, "").unwrap();
ctx.builder.build_int_z_extend_or_bit_cast(sizeof_ptr, ctx.size_t, "").unwrap();
let ndims = ndarray.load_ndims(ctx);
let sizeof_shape = ctx.builder.build_int_mul(ndims, sizeof_usize, "").unwrap();
@@ -687,7 +654,7 @@ fn format_rpc_ret<'ctx>(
.unwrap();
// debug_assert(ndarray_nbytes > 0)
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
if ctx.registry.codegen_options.debug {
let cmp = ctx
.builder
.build_int_compare(IntPredicate::UGT, ndarray_nbytes, num_0, "")
@@ -722,7 +689,7 @@ fn format_rpc_ret<'ctx>(
unsafe { ndarray.create_data(generator, ctx) }; // NOTE: the strides of `ndarray` has also been set to contiguous in `create_data`.
// debug_assert(nelems * sizeof(T) >= ndarray_nbytes)
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
if ctx.registry.codegen_options.debug {
let num_elements = ndarray.size(ctx);
let expected_ndarray_nbytes =
@@ -834,8 +801,8 @@ fn rpc_codegen_callback_fn<'ctx>(
is_async: bool,
) -> Result<Option<BasicValueEnum<'ctx>>, String> {
let int8 = ctx.ctx.i8_type();
let int32 = ctx.ctx.i32_type();
let size_type = ctx.get_size_type();
let int32 = ctx.i32;
let size_type = ctx.size_t;
let ptr_type = int8.ptr_type(AddressSpace::default());
let tag_ptr_type = ctx.ctx.struct_type(&[ptr_type.into(), size_type.into()], false);
@@ -886,11 +853,7 @@ fn rpc_codegen_callback_fn<'ctx>(
let stackptr = call_stacksave(ctx, Some("rpc.stack"));
let args_ptr = ctx
.builder
.build_array_alloca(
ptr_type,
ctx.ctx.i32_type().const_int(arg_length as u64, false),
"argptr",
)
.build_array_alloca(ptr_type, ctx.i32.const_int(arg_length as u64, false), "argptr")
.unwrap();
// -- rpc args handling
@@ -973,7 +936,7 @@ pub fn attributes_writeback<'ctx>(
let host_attributes = host_attributes.downcast_bound::<PyList>(py)?;
let top_levels = ctx.top_level.definitions.read();
let globals = inner_resolver.global_value_ids.read();
let int32 = ctx.ctx.i32_type();
let int32 = ctx.i32;
let zero = int32.const_zero();
let mut values = Vec::new();
let mut scratch_buffer = Vec::new();
@@ -1128,7 +1091,7 @@ fn polymorphic_print<'ctx>(
debug_assert!(!fmt.is_empty());
debug_assert_eq!(fmt.as_bytes().last().unwrap(), &0u8);
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let fmt = ctx.gen_string(generator, fmt);
let fmt = unsafe { fmt.get_field_at_index_unchecked(0) }.into_pointer_value();
@@ -1140,9 +1103,9 @@ fn polymorphic_print<'ctx>(
}
};
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i64 = ctx.ctx.i64_type();
let llvm_usize = ctx.get_size_type();
let llvm_i32 = ctx.i32;
let llvm_i64 = ctx.i64;
let llvm_usize = ctx.size_t;
let suffix = suffix.unwrap_or_default();
@@ -1329,7 +1292,7 @@ fn polymorphic_print<'ctx>(
flush(ctx, generator, &mut fmt, &mut args);
let (dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
let ndarray = NDArrayType::from_unifier_type(generator, ctx, ty)
let ndarray = NDArrayType::from_unifier_type(ctx, ty)
.map_pointer_value(value.into_pointer_value(), None);
let num_0 = llvm_usize.const_zero();

View File

@@ -32,14 +32,14 @@ use tempfile::{self, TempDir};
use nac3core::{
codegen::{
CodeGenLLVMOptions, CodeGenTargetMachineOptions, CodeGenTask, CodeGenerator, FunctionStore,
WithCall, WorkerRegistry, concrete_type::ConcreteTypeStore, gen_func_impl, irrt::load_irrt,
CodeGenOptions, CodeGenTask, CodeGenerator, CoreContext, TargetMachineOptions, WithCall,
WorkerRegistry, concrete_type::ConcreteTypeStore, context_ref, gen_func_impl,
irrt::load_irrt,
},
inkwell::{
OptimizationLevel,
context::Context,
memory_buffer::MemoryBuffer,
module::{FlagBehavior, Linkage, Module},
module::{Linkage, Module},
passes::PassBuilderOptions,
support::is_multithreaded,
targets::{FileType, InitializationConfig, RelocMode, Target, TargetMachine, TargetTriple},
@@ -117,32 +117,18 @@ impl Isa {
/// Returns an instance of [`CodeGenTargetMachineOptions`] representing the target machine
/// options used for compiling to this ISA.
pub fn get_llvm_target_options(self) -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
pub fn get_llvm_target_options(
self,
target_opt_level: OptimizationLevel,
) -> TargetMachineOptions {
TargetMachineOptions {
triple: self.get_llvm_target_triple().as_str().to_string_lossy().into_owned(),
cpu: self.get_llvm_target_cpu(),
features: self.get_llvm_target_features(),
reloc_mode: RelocMode::PIC,
..CodeGenTargetMachineOptions::from_host()
..TargetMachineOptions::from_host(target_opt_level)
}
}
/// Returns an instance of [`TargetMachine`] used in compiling and linking of a program of this
/// ISA.
pub fn create_llvm_target_machine(self, opt_level: OptimizationLevel) -> TargetMachine {
self.get_llvm_target_options()
.create_target_machine(opt_level)
.expect("couldn't create target machine")
}
/// Returns the number of bits in `size_t` for this ISA.
fn get_size_type(self, ctx: &Context) -> u32 {
ctx.ptr_sized_int_type(
&self.create_llvm_target_machine(OptimizationLevel::Default).get_target_data(),
None,
)
.get_bit_width()
}
}
#[derive(Clone)]
@@ -216,7 +202,7 @@ struct Nac3 {
/// Modules registered with NAC3.
modules: Arc<RwLock<Vec<ModuleInfo>>>,
/// LLVM-related options for code generation.
llvm_options: CodeGenLLVMOptions,
codegen_options: CodeGenOptions,
}
create_exception!(nac3artiq, CompileError, exceptions::PyException);
@@ -494,7 +480,7 @@ impl Nac3 {
py: Python<'py>,
link_fn: &dyn Fn(&Module) -> PyResult<T>,
) -> PyResult<T> {
let size_t = self.isa.get_size_type(&Context::create());
let size_t = self.primitive.size_t;
// Cache all imported modules indexed by their path for symbol resolution context
let modules_by_path = LazyCell::new(|| {
@@ -818,8 +804,8 @@ impl Nac3 {
.unwrap();
// Process IRRT
let context = Context::create();
let irrt = load_irrt(&context, resolver.as_ref());
context_ref!(context);
let irrt = load_irrt(context, resolver.as_ref());
let fun_signature =
FunSignature { args: vec![], ret: self.primitive.none, vars: VarMap::new() };
@@ -934,10 +920,8 @@ impl Nac3 {
let threads: Vec<_> = thread_names
.iter()
.map(|s| {
Box::new(ArtiqCodeGenerator::with_target_machine(
Box::new(ArtiqCodeGenerator::new(
s.to_string(),
&context,
&self.get_llvm_target_machine(),
self.time_fns,
self.special_ids.clone(),
))
@@ -947,40 +931,27 @@ impl Nac3 {
let membuffer = membuffers.clone();
let mut has_return = false;
py.allow_threads(|| {
let (registry, handles) =
WorkerRegistry::create_workers(threads, top_level.clone(), &self.llvm_options, &f);
let context = Context::create();
let mut generator = ArtiqCodeGenerator::with_target_machine(
let mut generator = ArtiqCodeGenerator::new(
"main".to_string(),
&context,
&self.get_llvm_target_machine(),
self.time_fns,
self.special_ids.clone(),
);
let module = context.create_module("main");
let fn_store = FunctionStore::default();
let target_machine = self.llvm_options.create_target_machine().unwrap();
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
module.set_triple(&target_machine.get_triple());
module.add_basic_value_flag(
"Debug Info Version",
FlagBehavior::Warning,
context.i32_type().const_int(3, false),
let (registry, handles) = WorkerRegistry::create_workers(
threads,
top_level.clone(),
&self.codegen_options,
&f,
);
module.add_basic_value_flag(
"Dwarf Version",
FlagBehavior::Warning,
context.i32_type().const_int(4, false),
);
let builder = context.create_builder();
let (_, module, _, _) = gen_func_impl(
&context,
context_ref!(context);
let context = CoreContext::new(context, "main", &self.codegen_options.target);
let builder = context.ctx.create_builder();
let (context, _, result) = gen_func_impl(
context,
builder,
&mut generator,
&registry,
builder,
module,
fn_store,
task,
|generator, ctx| {
assert_eq!(instance.body.len(), 1, "toplevel module should have 1 statement");
@@ -1003,9 +974,9 @@ impl Nac3 {
return_obj,
)
},
)
.unwrap();
let buffer = module.write_bitcode_to_memory();
);
result.unwrap();
let buffer = context.module.write_bitcode_to_memory();
let buffer = buffer.as_slice().into();
membuffer.lock().push(buffer);
});
@@ -1064,11 +1035,7 @@ impl Nac3 {
global_option = global.get_next_global();
}
let target_machine = self
.llvm_options
.target
.create_target_machine(self.llvm_options.opt_level)
.expect("couldn't create target machine");
let target_machine = self.codegen_options.target.create_target_machine();
// Strip all unused functions first (necessary even in -O0 to filter out unused IRRT functions)
main.run_passes(
@@ -1081,7 +1048,7 @@ impl Nac3 {
let pass_options = PassBuilderOptions::create();
pass_options.set_merge_functions(true);
let passes = format!("default<O{}>", self.llvm_options.opt_level as u32);
let passes = format!("default<O{}>", self.codegen_options.opt_level);
let result = main.run_passes(passes.as_str(), &target_machine, pass_options);
if let Err(err) = result {
panic!("Failed to run optimization for module `main`: {}", err.to_string());
@@ -1106,12 +1073,6 @@ impl Nac3 {
link_fn(&main)
}
/// Returns an instance of [`TargetMachine`] used in compiling and linking of a program to the
/// target [ISA][isa].
fn get_llvm_target_machine(&self) -> TargetMachine {
self.isa.create_llvm_target_machine(self.llvm_options.opt_level)
}
}
/// Returns the (possibly qualified) path of a class name expression, or [`None`] if the class name
@@ -1285,13 +1246,31 @@ impl Nac3 {
"cortexa9" => Isa::CortexA9,
_ => return Err(exceptions::PyValueError::new_err("invalid ISA")),
};
let opt_level = match std::env::var(ENV_NAC3_OPT_LEVEL) {
Ok(x) if matches!(&*x, "0" | "1" | "2" | "3" | "s" | "z") => x,
Err(std::env::VarError::NotPresent) => String::from("2"),
unknown => {
return Err(exceptions::PyValueError::new_err(format!(
"unknown opt level: {unknown:?}"
)));
}
};
// We always use the `Default` target-specific optimization level,
// since `nac3ld` only supports relocation types that are used in optimized code.
let target_opt_level = OptimizationLevel::Default;
let target_options = isa.get_llvm_target_options(target_opt_level);
let time_fns: &(dyn TimeFns + Sync) = match isa {
Isa::RiscV32G => &timeline::NOW_PINNING_TIME_FNS_64,
Isa::RiscV32IMA => &timeline::NOW_PINNING_TIME_FNS,
Isa::CortexA9 | Isa::Host => &timeline::EXTERN_TIME_FNS,
};
let (primitive, _) =
TopLevelComposer::make_primitives(isa.get_size_type(&Context::create()));
let size_t_bits =
target_options.create_target_machine().get_target_data().get_pointer_byte_size(None)
* 8;
let (primitive, _) = TopLevelComposer::make_primitives(size_t_bits);
let builtins = vec![
(
"now_mu".into(),
@@ -1442,17 +1421,8 @@ impl Nac3 {
string_store.insert(exn_name, id);
}
let opt_level = match std::env::var(ENV_NAC3_OPT_LEVEL).as_deref() {
Ok("0") => OptimizationLevel::None,
Ok("1") => OptimizationLevel::Less,
Ok("2") | Err(std::env::VarError::NotPresent) => OptimizationLevel::Default,
Ok("3") => OptimizationLevel::Aggressive,
unknown => {
return Err(exceptions::PyValueError::new_err(format!(
"unknown opt level: {unknown:?}"
)));
}
};
let codegen_options =
CodeGenOptions { debug: opt_level == "0", target: target_options, opt_level };
Ok(Nac3 {
isa,
@@ -1468,7 +1438,7 @@ impl Nac3 {
deferred_eval_store: DeferredEvaluationStore::new(),
special_ids: SpecialPythonId::default(),
modules: Arc::default(),
llvm_options: CodeGenLLVMOptions { opt_level, target: isa.get_llvm_target_options() },
codegen_options,
})
}
@@ -1529,7 +1499,7 @@ impl Nac3 {
embedding_map: &Bound<'py, PyAny>,
py: Python<'py>,
) -> PyResult<()> {
let target_machine = self.get_llvm_target_machine();
let target_machine = self.codegen_options.target.create_target_machine();
let link_fn = |module: &Module| {
if self.isa == Isa::Host {
let working_directory = self.working_directory.path().to_owned();
@@ -1545,6 +1515,9 @@ impl Nac3 {
let object_mem = target_machine
.write_to_memory_buffer(module, FileType::Object)
.expect("couldn't write module to object file buffer");
if let Some(path) = std::env::var_os("NAC3_EMIT_OBJ") {
std::fs::write(&path, object_mem.as_slice()).map_err(CompileError::new_err)?;
}
if let Ok(dyn_lib) = Linker::ld(object_mem.as_slice()) {
if let Ok(mut file) = fs::File::create(filename) {
file.write_all(&dyn_lib).expect("couldn't write linked library to file");
@@ -1569,7 +1542,7 @@ impl Nac3 {
embedding_map: &Bound<'py, PyAny>,
py: Python<'py>,
) -> PyResult<PyObject> {
let target_machine = self.get_llvm_target_machine();
let target_machine = self.codegen_options.target.create_target_machine();
let link_fn = |module: &Module| {
if self.isa == Isa::Host {
let working_directory = self.working_directory.path().to_owned();

View File

@@ -139,7 +139,7 @@ impl StaticValue for PythonValue {
|| {
Python::with_gil(|py| -> PyResult<BasicValueEnum<'ctx>> {
let id: u32 = self.store_obj.bind(py).call1((&*self.value,))?.extract()?;
let struct_type = ctx.ctx.struct_type(&[ctx.ctx.i32_type().into()], false);
let struct_type = ctx.ctx.struct_type(&[ctx.i32.into()], false);
let global = ctx.module.add_global(
struct_type,
None,
@@ -148,10 +148,10 @@ impl StaticValue for PythonValue {
global.set_constant(true);
// Set linkage of global to private to avoid name collisions
global.set_linkage(Linkage::Private);
global.set_initializer(&ctx.ctx.const_struct(
&[ctx.ctx.i32_type().const_int(u64::from(id), false).into()],
false,
));
global.set_initializer(
&ctx.ctx
.const_struct(&[ctx.i32.const_int(u64::from(id), false).into()], false),
);
Ok(global.as_pointer_value().into())
})
.unwrap()
@@ -168,12 +168,10 @@ impl StaticValue for PythonValue {
) -> Result<BasicValueEnum<'ctx>, String> {
if let Some(val) = self.resolver.id_to_primitive.read().get(&self.id) {
return Ok(match val {
PrimitiveValue::I32(val) => ctx.ctx.i32_type().const_int(*val as u64, false).into(),
PrimitiveValue::I64(val) => ctx.ctx.i64_type().const_int(*val as u64, false).into(),
PrimitiveValue::U32(val) => {
ctx.ctx.i32_type().const_int(u64::from(*val), false).into()
}
PrimitiveValue::U64(val) => ctx.ctx.i64_type().const_int(*val, false).into(),
PrimitiveValue::I32(val) => ctx.i32.const_int(*val as u64, false).into(),
PrimitiveValue::I64(val) => ctx.i64.const_int(*val as u64, false).into(),
PrimitiveValue::U32(val) => ctx.i32.const_int(u64::from(*val), false).into(),
PrimitiveValue::U64(val) => ctx.i64.const_int(*val, false).into(),
PrimitiveValue::F64(val) => ctx.ctx.f64_type().const_float(*val).into(),
PrimitiveValue::Bool(val) => {
ctx.ctx.i8_type().const_int(u64::from(*val), false).into()
@@ -1044,19 +1042,19 @@ impl InnerResolver {
if ty_id == self.primitive_ids.int || ty_id == self.primitive_ids.int32 {
let val: i32 = obj.extract().unwrap();
self.id_to_primitive.write().insert(id, PrimitiveValue::I32(val));
Ok(Some(ctx.ctx.i32_type().const_int(val as u64, false).into()))
Ok(Some(ctx.i32.const_int(val as u64, false).into()))
} else if ty_id == self.primitive_ids.int64 {
let val: i64 = obj.extract().unwrap();
self.id_to_primitive.write().insert(id, PrimitiveValue::I64(val));
Ok(Some(ctx.ctx.i64_type().const_int(val as u64, false).into()))
Ok(Some(ctx.i64.const_int(val as u64, false).into()))
} else if ty_id == self.primitive_ids.uint32 {
let val: u32 = obj.extract().unwrap();
self.id_to_primitive.write().insert(id, PrimitiveValue::U32(val));
Ok(Some(ctx.ctx.i32_type().const_int(u64::from(val), false).into()))
Ok(Some(ctx.i32.const_int(u64::from(val), false).into()))
} else if ty_id == self.primitive_ids.uint64 {
let val: u64 = obj.extract().unwrap();
self.id_to_primitive.write().insert(id, PrimitiveValue::U64(val));
Ok(Some(ctx.ctx.i64_type().const_int(val, false).into()))
Ok(Some(ctx.i64.const_int(val, false).into()))
} else if ty_id == self.primitive_ids.bool {
let val: bool = obj.extract().unwrap();
self.id_to_primitive.write().insert(id, PrimitiveValue::Bool(val));
@@ -1087,14 +1085,14 @@ impl InnerResolver {
}
_ => unreachable!("must be list"),
};
let size_t = ctx.get_size_type();
let size_t = ctx.size_t;
let ty = if len == 0
&& matches!(&*ctx.unifier.get_ty_immutable(elem_ty), TypeEnum::TVar { .. })
{
// The default type for zero-length lists of unknown element type is size_t
size_t.into()
} else {
ctx.get_llvm_type(generator, elem_ty)
ctx.get_llvm_type(elem_ty)
};
let arr_ty = ctx
.ctx
@@ -1176,8 +1174,8 @@ impl InnerResolver {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_ndarray = NDArrayType::from_unifier_type(generator, ctx, ndarray_ty);
let llvm_usize = ctx.size_t;
let llvm_ndarray = NDArrayType::from_unifier_type(ctx, ndarray_ty);
let dtype = llvm_ndarray.element_type();
{
@@ -1301,10 +1299,11 @@ impl InnerResolver {
// will always return a constant size.
let itemsize = ctx
.registry
.llvm_options
.codegen_options
.target
.create_target_machine()
.map(|tm| tm.get_target_data().get_store_size(&dtype))
.unwrap();
.get_target_data()
.get_store_size(&dtype);
assert_ne!(itemsize, 0);
// Create the strides needed for ndarray.strides
@@ -1406,7 +1405,7 @@ impl InnerResolver {
if id == self.primitive_ids.none {
// for option type, just a null ptr
Ok(Some(
ctx.get_llvm_type(generator, option_val_ty)
ctx.get_llvm_type(option_val_ty)
.ptr_type(AddressSpace::default())
.const_null()
.into(),
@@ -1465,11 +1464,8 @@ impl InnerResolver {
let ty = self
.get_obj_type(py, obj, &mut ctx.unifier, &top_level_defs, &ctx.primitives)?
.unwrap();
let ty = ctx
.get_llvm_type(generator, ty)
.into_pointer_type()
.get_element_type()
.into_struct_type();
let ty =
ctx.get_llvm_type(ty).into_pointer_type().get_element_type().into_struct_type();
{
if self.global_value_ids.read().contains_key(&id) {
let global = ctx.module.get_global(&id_str).unwrap_or_else(|| {

View File

@@ -21,8 +21,8 @@ pub struct NowPinningTimeFns64 {}
// values that are each padded to 64-bits.
impl TimeFns for NowPinningTimeFns64 {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> {
let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.i64;
let i32_type = ctx.i32;
let now = ctx
.module
.get_global("now")
@@ -57,8 +57,8 @@ impl TimeFns for NowPinningTimeFns64 {
}
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) {
let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.i32;
let i64_type = ctx.i64;
let i64_32 = i64_type.const_int(32, false);
let time = t.into_int_value();
@@ -99,8 +99,8 @@ impl TimeFns for NowPinningTimeFns64 {
}
fn emit_delay_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, dt: BasicValueEnum<'ctx>) {
let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.i64;
let i32_type = ctx.i32;
let now = ctx
.module
.get_global("now")
@@ -166,7 +166,7 @@ pub struct NowPinningTimeFns {}
impl TimeFns for NowPinningTimeFns {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> {
let i64_type = ctx.ctx.i64_type();
let i64_type = ctx.i64;
let now = ctx
.module
.get_global("now")
@@ -184,8 +184,8 @@ impl TimeFns for NowPinningTimeFns {
}
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) {
let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.i32;
let i64_type = ctx.i64;
let i64_32 = i64_type.const_int(32, false);
let time = t.into_int_value();
@@ -226,8 +226,8 @@ impl TimeFns for NowPinningTimeFns {
}
fn emit_delay_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, dt: BasicValueEnum<'ctx>) {
let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.i32;
let i64_type = ctx.i64;
let i64_32 = i64_type.const_int(32, false);
let now = ctx
.module
@@ -283,16 +283,16 @@ pub struct ExternTimeFns {}
impl TimeFns for ExternTimeFns {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> {
call_extern!(ctx: (ctx.ctx.i64_type()) "now_mu" = "now_mu"()).into()
call_extern!(ctx: (ctx.i64) "now_mu" = "now_mu"()).into()
}
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) {
assert_eq!(t.get_type(), ctx.ctx.i64_type().into());
assert_eq!(t.get_type(), ctx.i64.into());
call_extern!(ctx: void "at_mu" = "at_mu"(t));
}
fn emit_delay_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, dt: BasicValueEnum<'ctx>) {
assert_eq!(dt.get_type(), ctx.ctx.i64_type().into());
assert_eq!(dt.get_type(), ctx.i64.into());
call_extern!(ctx: void "delay_mu" = "delay_mu"(dt));
}
}

View File

@@ -295,9 +295,7 @@ pub fn derive(input: TokenStream) -> TokenStream {
let impl_block = quote! {
impl<'ctx> ::nac3core::codegen::types::structure::StructFields<'ctx> for #ident<'ctx> {
fn new(ctx: impl ::nac3core::inkwell::context::AsContextRef<'ctx>, llvm_usize: ::nac3core::inkwell::types::IntType<'ctx>) -> Self {
let ctx = unsafe { ::nac3core::inkwell::context::ContextRef::new(ctx.as_ctx_ref()) };
fn new(ctx: ::nac3core::inkwell::context::ContextRef<'ctx>, llvm_usize: ::nac3core::inkwell::types::IntType<'ctx>) -> Self {
let mut counter = ::nac3core::codegen::types::structure::FieldIndexCounter::default();
#ident {

View File

@@ -1,5 +1,5 @@
use inkwell::{
FloatPredicate, IntPredicate, OptimizationLevel,
FloatPredicate, IntPredicate,
types::BasicTypeEnum,
values::{BasicValueEnum, IntValue},
};
@@ -43,7 +43,7 @@ pub fn call_len<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(arg_ty, arg): (Type, BasicValueEnum<'ctx>),
) -> Result<IntValue<'ctx>, String> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let range_ty = ctx.primitives.range;
Ok(if ctx.unifier.unioned(arg_ty, range_ty) {
@@ -53,7 +53,7 @@ pub fn call_len<'ctx, G: CodeGenerator + ?Sized>(
} else {
match &*ctx.unifier.get_ty_immutable(arg_ty) {
TypeEnum::TTuple { .. } => {
let tuple = TupleType::from_unifier_type(generator, ctx, arg_ty)
let tuple = TupleType::from_unifier_type(ctx, arg_ty)
.map_struct_value(arg.into_struct_value(), None);
llvm_i32.const_int(tuple.get_type().num_elements().into(), false)
}
@@ -61,7 +61,7 @@ pub fn call_len<'ctx, G: CodeGenerator + ?Sized>(
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
{
let ndarray = NDArrayType::from_unifier_type(generator, ctx, arg_ty)
let ndarray = NDArrayType::from_unifier_type(ctx, arg_ty)
.map_pointer_value(arg.into_pointer_value(), None);
let len = ndarray.len(ctx);
ctx.builder.build_int_truncate_or_bit_cast(len, llvm_i32, "len").unwrap()
@@ -70,7 +70,7 @@ pub fn call_len<'ctx, G: CodeGenerator + ?Sized>(
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
{
let list = ListType::from_unifier_type(generator, ctx, arg_ty)
let list = ListType::from_unifier_type(ctx, arg_ty)
.map_pointer_value(arg.into_pointer_value(), None);
let size = list.load_size(ctx, None);
ctx.builder.build_int_truncate_or_bit_cast(size, llvm_i32, "len").unwrap()
@@ -87,7 +87,7 @@ pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(n_ty, n): (Type, BasicValueEnum<'ctx>),
) -> Result<BasicValueEnum<'ctx>, String> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
Ok(match n {
BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => {
@@ -119,8 +119,7 @@ pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
BasicValueEnum::FloatValue(n) => {
debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float));
let to_int64 =
ctx.builder.build_float_to_signed_int(n, ctx.ctx.i64_type(), "").unwrap();
let to_int64 = ctx.builder.build_float_to_signed_int(n, ctx.i64, "").unwrap();
ctx.builder.build_int_truncate(to_int64, llvm_i32, "conv").map(Into::into).unwrap()
}
@@ -128,14 +127,13 @@ pub fn call_int32<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
generator,
ctx,
NDArrayOut::NewNDArray { dtype: ctx.ctx.i32_type().into() },
NDArrayOut::NewNDArray { dtype: ctx.i32.into() },
|generator, ctx, scalar| call_int32(generator, ctx, (elem_ty, scalar)),
)
.unwrap();
@@ -153,7 +151,7 @@ pub fn call_int64<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(n_ty, n): (Type, BasicValueEnum<'ctx>),
) -> Result<BasicValueEnum<'ctx>, String> {
let llvm_i64 = ctx.ctx.i64_type();
let llvm_i64 = ctx.i64;
Ok(match n {
BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8 | 32) => {
@@ -183,24 +181,20 @@ pub fn call_int64<'ctx, G: CodeGenerator + ?Sized>(
BasicValueEnum::FloatValue(n) => {
debug_assert!(ctx.unifier.unioned(n_ty, ctx.primitives.float));
ctx.builder
.build_float_to_signed_int(n, ctx.ctx.i64_type(), "fptosi")
.map(Into::into)
.unwrap()
ctx.builder.build_float_to_signed_int(n, ctx.i64, "fptosi").map(Into::into).unwrap()
}
BasicValueEnum::PointerValue(n)
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
generator,
ctx,
NDArrayOut::NewNDArray { dtype: ctx.ctx.i64_type().into() },
NDArrayOut::NewNDArray { dtype: ctx.i64.into() },
|generator, ctx, scalar| call_int64(generator, ctx, (elem_ty, scalar)),
)
.unwrap();
@@ -218,7 +212,7 @@ pub fn call_uint32<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(n_ty, n): (Type, BasicValueEnum<'ctx>),
) -> Result<BasicValueEnum<'ctx>, String> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
Ok(match n {
BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8) => {
@@ -255,8 +249,7 @@ pub fn call_uint32<'ctx, G: CodeGenerator + ?Sized>(
.unwrap();
let to_int32 = ctx.builder.build_float_to_signed_int(n, llvm_i32, "").unwrap();
let to_uint64 =
ctx.builder.build_float_to_unsigned_int(n, ctx.ctx.i64_type(), "").unwrap();
let to_uint64 = ctx.builder.build_float_to_unsigned_int(n, ctx.i64, "").unwrap();
ctx.builder
.build_select(
@@ -272,14 +265,13 @@ pub fn call_uint32<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
generator,
ctx,
NDArrayOut::NewNDArray { dtype: ctx.ctx.i32_type().into() },
NDArrayOut::NewNDArray { dtype: ctx.i32.into() },
|generator, ctx, scalar| call_uint32(generator, ctx, (elem_ty, scalar)),
)
.unwrap();
@@ -297,7 +289,7 @@ pub fn call_uint64<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(n_ty, n): (Type, BasicValueEnum<'ctx>),
) -> Result<BasicValueEnum<'ctx>, String> {
let llvm_i64 = ctx.ctx.i64_type();
let llvm_i64 = ctx.i64;
Ok(match n {
BasicValueEnum::IntValue(n) if matches!(n.get_type().get_bit_width(), 1 | 8 | 32) => {
@@ -342,14 +334,13 @@ pub fn call_uint64<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
generator,
ctx,
NDArrayOut::NewNDArray { dtype: ctx.ctx.i64_type().into() },
NDArrayOut::NewNDArray { dtype: ctx.i64.into() },
|generator, ctx, scalar| call_uint64(generator, ctx, (elem_ty, scalar)),
)
.unwrap();
@@ -409,8 +400,7 @@ pub fn call_float<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -437,7 +427,7 @@ pub fn call_round<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "round";
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty).into_int_type();
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(ret_elem_ty).into_int_type();
Ok(match n {
BasicValueEnum::FloatValue(n) => {
@@ -454,8 +444,7 @@ pub fn call_round<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -494,8 +483,7 @@ pub fn call_numpy_round<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -559,8 +547,7 @@ pub fn call_bool<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -590,7 +577,7 @@ pub fn call_floor<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "floor";
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty);
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(ret_elem_ty);
Ok(match n {
BasicValueEnum::FloatValue(n) => {
@@ -611,8 +598,7 @@ pub fn call_floor<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -641,7 +627,7 @@ pub fn call_ceil<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "ceil";
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(generator, ret_elem_ty);
let llvm_ret_elem_ty = ctx.get_llvm_abi_type(ret_elem_ty);
Ok(match n {
BasicValueEnum::FloatValue(n) => {
@@ -662,8 +648,7 @@ pub fn call_ceil<'ctx, G: CodeGenerator + ?Sized>(
if n_ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) =>
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, n_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, n_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, n_ty).map_pointer_value(n, None);
let result = ndarray
.map(
@@ -771,10 +756,8 @@ pub fn call_numpy_minimum<'ctx, G: CodeGenerator + ?Sized>(
ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
}) =>
{
let x1 =
ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1)).to_ndarray(generator, ctx);
let x2 =
ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2)).to_ndarray(generator, ctx);
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1)).to_ndarray(generator, ctx);
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2)).to_ndarray(generator, ctx);
let x1_dtype = arraylike_flatten_element_type(&mut ctx.unifier, x1_ty);
let x2_dtype = arraylike_flatten_element_type(&mut ctx.unifier, x2_ty);
@@ -862,8 +845,8 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
debug_assert!(["np_argmin", "np_argmax", "np_max", "np_min"].contains(&fn_name));
let llvm_int64 = ctx.ctx.i64_type();
let llvm_usize = ctx.get_size_type();
let llvm_int64 = ctx.i64;
let llvm_usize = ctx.size_t;
Ok(match a {
BasicValueEnum::IntValue(_) | BasicValueEnum::FloatValue(_) => {
@@ -892,13 +875,12 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
{
let (elem_ty, _) = unpack_ndarray_var_tys(&mut ctx.unifier, a_ty);
let ndarray =
NDArrayType::from_unifier_type(generator, ctx, a_ty).map_pointer_value(n, None);
let ndarray = NDArrayType::from_unifier_type(ctx, a_ty).map_pointer_value(n, None);
let llvm_dtype = ndarray.get_type().element_type();
let zero = llvm_usize.const_zero();
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
if ctx.registry.codegen_options.debug {
let size = ndarray.size(ctx);
let size_nez =
ctx.builder.build_int_compare(IntPredicate::NE, size, zero, "").unwrap();
@@ -982,7 +964,7 @@ pub fn call_numpy_max_min<'ctx, G: CodeGenerator + ?Sized>(
.build_load(extremum_idx, "")
.map(BasicValueEnum::into_int_value)
.unwrap(),
ctx.ctx.i64_type(),
ctx.i64,
"",
)
.unwrap()
@@ -1036,10 +1018,8 @@ pub fn call_numpy_maximum<'ctx, G: CodeGenerator + ?Sized>(
ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
}) =>
{
let x1 =
ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1)).to_ndarray(generator, ctx);
let x2 =
ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2)).to_ndarray(generator, ctx);
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1)).to_ndarray(generator, ctx);
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2)).to_ndarray(generator, ctx);
let x1_dtype = arraylike_flatten_element_type(&mut ctx.unifier, x1_ty);
let x2_dtype = arraylike_flatten_element_type(&mut ctx.unifier, x2_ty);
@@ -1095,12 +1075,12 @@ where
) -> Option<BasicValueEnum<'ctx>>,
RetElemFn: Fn(&mut CodeGenContext<'ctx, '_>, Type) -> Type,
{
let arg = ScalarOrNDArray::from_value(generator, ctx, (arg_ty, arg_val));
let arg = ScalarOrNDArray::from_value(ctx, (arg_ty, arg_val));
let dtype = arraylike_flatten_element_type(&mut ctx.unifier, arg_ty);
let ret_ty = get_ret_elem_type(ctx, dtype);
let llvm_ret_ty = ctx.get_llvm_type(generator, ret_ty);
let llvm_ret_ty = ctx.get_llvm_type(ret_ty);
let result = arg.map(generator, ctx, llvm_ret_ty, |generator, ctx, scalar| {
let Some(result) = on_scalar(generator, ctx, dtype, scalar) else {
unsupported_type(ctx, fn_name, &[arg_ty])
@@ -1435,8 +1415,8 @@ pub fn call_numpy_arctan2<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_arctan2";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1469,8 +1449,8 @@ pub fn call_numpy_copysign<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_copysign";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1503,8 +1483,8 @@ pub fn call_numpy_fmax<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_fmax";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1537,8 +1517,8 @@ pub fn call_numpy_fmin<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_fmin";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1571,8 +1551,8 @@ pub fn call_numpy_ldexp<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_ldexp";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1586,7 +1566,7 @@ pub fn call_numpy_ldexp<'ctx, G: CodeGenerator + ?Sized>(
match (x1_scalar, x2_scalar) {
(BasicValueEnum::FloatValue(x1_scalar), BasicValueEnum::IntValue(x2_scalar)) => {
debug_assert_eq!(x1.get_dtype(), ctx.ctx.f64_type().into());
debug_assert_eq!(x2.get_dtype(), ctx.ctx.i32_type().into());
debug_assert_eq!(x2.get_dtype(), ctx.i32.into());
Ok(irrt::call_ldexp(ctx, x1_scalar, x2_scalar, None).into())
}
_ => unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty]),
@@ -1607,8 +1587,8 @@ pub fn call_numpy_hypot<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_hypot";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1641,8 +1621,8 @@ pub fn call_numpy_nextafter<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_nextafter";
let x1 = ScalarOrNDArray::from_value(generator, ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(generator, ctx, (x2_ty, x2));
let x1 = ScalarOrNDArray::from_value(ctx, (x1_ty, x1));
let x2 = ScalarOrNDArray::from_value(ctx, (x2_ty, x2));
let result = ScalarOrNDArray::broadcasting_starmap(
generator,
@@ -1676,7 +1656,7 @@ pub fn call_np_linalg_cholesky<'ctx, G: CodeGenerator + ?Sized>(
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1706,11 +1686,11 @@ pub fn call_np_linalg_qr<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_linalg_qr";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1761,11 +1741,11 @@ pub fn call_np_linalg_svd<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_linalg_svd";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1823,7 +1803,7 @@ pub fn call_np_linalg_inv<'ctx, G: CodeGenerator + ?Sized>(
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1854,11 +1834,11 @@ pub fn call_np_linalg_pinv<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_linalg_pinv";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1899,11 +1879,11 @@ pub fn call_sp_linalg_lu<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "sp_linalg_lu";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -1955,7 +1935,7 @@ pub fn call_np_linalg_matrix_power<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_linalg_matrix_power";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else {
unsupported_type(ctx, FN_NAME, &[x1_ty, x2_ty])
@@ -1963,7 +1943,7 @@ pub fn call_np_linalg_matrix_power<'ctx, G: CodeGenerator + ?Sized>(
let (elem_ty, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, x1_ty);
let ndims = extract_ndims(&ctx.unifier, ndims);
let x1_elem_ty = ctx.get_llvm_type(generator, elem_ty);
let x1_elem_ty = ctx.get_llvm_type(elem_ty);
let x1 = NDArrayValue::from_pointer_value(x1, x1_elem_ty, ndims, llvm_usize, None);
if !x1.get_type().element_type().is_float_type() {
@@ -2008,11 +1988,11 @@ pub fn call_np_linalg_det<'ctx, G: CodeGenerator + ?Sized>(
) -> Result<BasicValueEnum<'ctx>, String> {
const FN_NAME: &str = "np_linalg_matrix_power";
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
if !x1.get_type().element_type().is_float_type() {
unsupported_type(ctx, FN_NAME, &[x1_ty]);
@@ -2051,7 +2031,7 @@ pub fn call_sp_linalg_schur<'ctx, G: CodeGenerator + ?Sized>(
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
assert_eq!(x1.get_type().ndims(), 2);
if !x1.get_type().element_type().is_float_type() {
@@ -2099,7 +2079,7 @@ pub fn call_sp_linalg_hessenberg<'ctx, G: CodeGenerator + ?Sized>(
let BasicValueEnum::PointerValue(x1) = x1 else { unsupported_type(ctx, FN_NAME, &[x1_ty]) };
let x1 = NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(x1, None);
let x1 = NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(x1, None);
assert_eq!(x1.get_type().ndims(), 2);
if !x1.get_type().element_type().is_float_type() {

View File

@@ -6,7 +6,7 @@ use std::{
};
use inkwell::{
IntPredicate, OptimizationLevel,
IntPredicate,
basic_block::BasicBlock,
types::{BasicType, BasicTypeEnum},
values::{BasicValueEnum, IntValue, PointerValue, StructValue},
@@ -161,10 +161,10 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
ty: Type,
) -> BasicValueEnum<'ctx> {
match val {
SymbolValue::I32(v) => self.ctx.i32_type().const_int(*v as u64, true).into(),
SymbolValue::I64(v) => self.ctx.i64_type().const_int(*v as u64, true).into(),
SymbolValue::U32(v) => self.ctx.i32_type().const_int(u64::from(*v), false).into(),
SymbolValue::U64(v) => self.ctx.i64_type().const_int(*v, false).into(),
SymbolValue::I32(v) => self.i32.const_int(*v as u64, true).into(),
SymbolValue::I64(v) => self.i64.const_int(*v as u64, true).into(),
SymbolValue::U32(v) => self.i32.const_int(u64::from(*v), false).into(),
SymbolValue::U64(v) => self.i64.const_int(*v, false).into(),
SymbolValue::Bool(v) => self.ctx.i8_type().const_int(u64::from(*v), true).into(),
SymbolValue::Double(v) => self.ctx.f64_type().const_float(*v).into(),
SymbolValue::Str(v) => {
@@ -180,12 +180,12 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
}
SymbolValue::OptionSome(v) => {
let val = self.gen_symbol_val(generator, v, ty);
OptionType::from_unifier_type(generator, self, ty)
OptionType::from_unifier_type(self, ty)
.construct_some_value(generator, self, &val, None)
.as_abi_value(self)
.into()
}
SymbolValue::OptionNone => OptionType::from_unifier_type(generator, self, ty)
SymbolValue::OptionNone => OptionType::from_unifier_type(self, ty)
.construct_empty(generator, self, None)
.as_abi_value(self)
.into(),
@@ -193,32 +193,14 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
}
/// See [`get_llvm_type`].
pub fn get_llvm_type<G: CodeGenerator + ?Sized>(
&mut self,
generator: &G,
ty: Type,
) -> BasicTypeEnum<'ctx> {
get_llvm_type(
self.ctx,
&self.module,
generator,
&mut self.unifier,
self.top_level,
&mut self.type_cache,
ty,
)
pub fn get_llvm_type(&mut self, ty: Type) -> BasicTypeEnum<'ctx> {
get_llvm_type(&self.inner, &mut self.unifier, self.top_level, &mut self.type_cache, ty)
}
/// See [`get_llvm_abi_type`].
pub fn get_llvm_abi_type<G: CodeGenerator + ?Sized>(
&mut self,
generator: &G,
ty: Type,
) -> BasicTypeEnum<'ctx> {
pub fn get_llvm_abi_type(&mut self, ty: Type) -> BasicTypeEnum<'ctx> {
get_llvm_abi_type(
self.ctx,
&self.module,
generator,
&self.inner,
&mut self.unifier,
self.top_level,
&mut self.type_cache,
@@ -244,11 +226,11 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
let ty = if self.unifier.unioned(ty, self.primitives.int32)
|| self.unifier.unioned(ty, self.primitives.uint32)
{
self.ctx.i32_type()
self.i32
} else if self.unifier.unioned(ty, self.primitives.int64)
|| self.unifier.unioned(ty, self.primitives.uint64)
{
self.ctx.i64_type()
self.i64
} else {
codegen_unreachable!(self)
};
@@ -494,9 +476,7 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
}
}
/// Calls a declared function. Use [`ctx.fn_store`] to get a function declaration.
///
/// [`ctx.fn_store`]: CodeGenContext::fn_store
/// Calls a declared function.
pub fn build_call_or_invoke(
&self,
fun: &FunctionDecl<'ctx>,
@@ -506,9 +486,7 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
self.build_call_or_invoke_impl(fun, args, call_name, self.unwind_target)
}
/// Calls a declared function, ignoring unwind info. Use [`ctx.fn_store`] to get a function declaration.
///
/// [`ctx.fn_store`]: CodeGenContext::fn_store
/// Calls a declared function, ignoring unwind info.
pub fn build_call(
&self,
fun: &FunctionDecl<'ctx>,
@@ -537,9 +515,9 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
params: [Option<IntValue<'ctx>>; 3],
loc: Location,
) {
let llvm_i32 = self.ctx.i32_type();
let llvm_i64 = self.ctx.i64_type();
let llvm_exn = ExceptionType::get_instance(generator, self);
let llvm_i32 = self.i32;
let llvm_i64 = self.i64;
let llvm_exn = ExceptionType::get_instance(self);
let zelf = if let Some(exception_val) = self.exception_val {
llvm_exn.map_pointer_value(exception_val, Some("exn"))
@@ -558,7 +536,7 @@ impl<'ctx> CodeGenContext<'ctx, '_> {
.iter()
.map(|p| {
p.map_or(llvm_i64.const_zero(), |v| {
self.builder.build_int_s_extend(v, self.ctx.i64_type(), "sext").unwrap()
self.builder.build_int_s_extend(v, self.i64, "sext").unwrap()
})
})
.collect_array()
@@ -620,7 +598,7 @@ pub fn gen_constructor<'ctx, 'a, G: CodeGenerator>(
// TODO: what about other fields that require alloca?
let fun_id = methods.iter().find(|method| method.0 == "__init__".into()).map(|method| method.2);
let ty = ctx.get_llvm_type(generator, signature.ret).into_pointer_type();
let ty = ctx.get_llvm_type(signature.ret).into_pointer_type();
let zelf_ty: BasicTypeEnum = ty.get_element_type().try_into().unwrap();
let zelf: BasicValueEnum<'ctx> =
ctx.builder.build_alloca(zelf_ty, "alloca").map(Into::into).unwrap();
@@ -813,23 +791,23 @@ pub fn gen_call<'ctx, G: CodeGenerator>(
let ret_type = if ctx.unifier.unioned(sign.ret, ctx.primitives.none) {
None
} else {
Some(ctx.get_llvm_abi_type(generator, sign.ret))
Some(ctx.get_llvm_abi_type(sign.ret))
};
let args_type = obj
.iter()
.map(|a| a.0)
.chain(sign.args.iter().map(|a| a.ty))
.map(|ty| ctx.get_llvm_abi_type(generator, ty));
.map(|ty| ctx.get_llvm_abi_type(ty));
// We must declare the function before codegen.
let f = if is_extern {
let args_type = &args_type.collect_vec();
ctx.fn_store.declare_external(&ctx.module, &symbol, ret_type, args_type, has_varargs, &[])
ctx.declare_external(&symbol, ret_type, args_type, has_varargs, &[])
} else {
// TODO(ivan): reimplement support for variadic arguments as passing lists/tuples
assert!(!has_varargs, "not yet implemented: varargs");
let args_type = &args_type.map(Into::into).collect_vec();
ctx.fn_store.declare_internal(&ctx.module, &symbol, ret_type, args_type, false).0
ctx.declare_internal(&symbol, ret_type, args_type, false).0
};
// Convert boolean parameter values into i1
@@ -893,15 +871,15 @@ pub fn gen_comprehension<'ctx, G: CodeGenerator>(
return Ok(None);
};
let int32 = ctx.ctx.i32_type();
let size_t = ctx.get_size_type();
let int32 = ctx.i32;
let size_t = ctx.size_t;
let zero_size_t = size_t.const_zero();
let zero_32 = int32.const_zero();
let index = generator.gen_var_alloc(ctx, size_t.into(), Some("index.addr"))?;
ctx.builder.build_store(index, zero_size_t).unwrap();
let elem_ty = ctx.get_llvm_type(generator, elt.custom.unwrap());
let elem_ty = ctx.get_llvm_type(elt.custom.unwrap());
let list;
match &*ctx.unifier.get_ty(iter_ty) {
@@ -1109,7 +1087,7 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
} else if ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::List.id())
|| ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::List.id())
{
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
if op.variant == BinopVariant::AugAssign {
todo!("Augmented assignment operators not implemented for lists")
@@ -1134,7 +1112,7 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
};
debug_assert!(ctx.unifier.unioned(elem_ty1, elem_ty2));
let llvm_elem_ty = ctx.get_llvm_type(generator, elem_ty1);
let llvm_elem_ty = ctx.get_llvm_type(elem_ty1);
let sizeof_elem = ctx
.builder
.build_int_truncate_or_bit_cast(llvm_elem_ty.size_of().unwrap(), llvm_usize, "")
@@ -1200,7 +1178,7 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
// [...] * (i where i < 0) => []
let int_val = call_int_smax(ctx, int_val, llvm_usize.const_zero(), None);
let elem_llvm_ty = ctx.get_llvm_type(generator, elem_ty);
let elem_llvm_ty = ctx.get_llvm_type(elem_ty);
let sizeof_elem = ctx
.builder
.build_int_truncate_or_bit_cast(elem_llvm_ty.size_of().unwrap(), llvm_usize, "")
@@ -1252,8 +1230,8 @@ pub fn gen_binop_expr_with_values<'ctx, G: CodeGenerator>(
} else if ty1.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
|| ty2.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id())
{
let left = ScalarOrNDArray::from_value(generator, ctx, (ty1, left_val));
let right = ScalarOrNDArray::from_value(generator, ctx, (ty2, right_val));
let left = ScalarOrNDArray::from_value(ctx, (ty1, left_val));
let right = ScalarOrNDArray::from_value(ctx, (ty2, right_val));
let ty1_dtype = arraylike_flatten_element_type(&mut ctx.unifier, ty1);
let ty2_dtype = arraylike_flatten_element_type(&mut ctx.unifier, ty2);
@@ -1430,7 +1408,7 @@ pub fn gen_unaryop_expr_with_values<'ctx, G: CodeGenerator>(
generator.bool_to_int_type(ctx, not, val.get_type()).into()
} else {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
gen_unaryop_expr_with_values(
generator,
@@ -1485,7 +1463,7 @@ pub fn gen_unaryop_expr_with_values<'ctx, G: CodeGenerator>(
} else if ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::NDArray.id()) {
let (ndarray_dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
let ndarray = NDArrayType::from_unifier_type(generator, ctx, ty)
let ndarray = NDArrayType::from_unifier_type(ctx, ty)
.map_pointer_value(val.into_pointer_value(), None);
// ndarray uses `~` rather than `not` to perform elementwise inversion, convert it before
@@ -1567,10 +1545,9 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
let left_ty_dtype = arraylike_flatten_element_type(&mut ctx.unifier, left_ty);
let right_ty_dtype = arraylike_flatten_element_type(&mut ctx.unifier, right_ty);
let left = ScalarOrNDArray::from_value(generator, ctx, (left_ty, left))
.to_ndarray(generator, ctx);
let right = ScalarOrNDArray::from_value(generator, ctx, (right_ty, right))
.to_ndarray(generator, ctx);
let left = ScalarOrNDArray::from_value(ctx, (left_ty, left)).to_ndarray(generator, ctx);
let right =
ScalarOrNDArray::from_value(ctx, (right_ty, right)).to_ndarray(generator, ctx);
let result_ndarray = NDArrayType::new_broadcast(
ctx,
@@ -1702,7 +1679,7 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
.iter()
.any(|ty| ty.obj_id(&ctx.unifier).is_some_and(|id| id == PrimDef::List.id()))
{
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let gen_list_cmpop = |generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>|
@@ -1877,7 +1854,7 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
}
let llvm_i1 = ctx.ctx.bool_type();
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
// Assume `true` by default
let cmp_addr = generator.gen_var_alloc(ctx, llvm_i1.into(), None).unwrap();
@@ -1981,7 +1958,7 @@ pub fn gen_cmpop_expr_with_values<'ctx, G: CodeGenerator>(
cmp_phi
}
} else if [left_ty, right_ty].iter().any(|ty| matches!(&*ctx.unifier.get_ty_immutable(*ty), TypeEnum::TVar { .. })) {
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
if ctx.registry.codegen_options.debug {
ctx.make_assert(
generator,
ctx.ctx.bool_type().const_all_ones(),
@@ -2060,8 +2037,8 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
expr: &Expr<Option<Type>>,
) -> Result<Option<ValueEnum<'ctx>>, String> {
ctx.current_loc = expr.location;
let int32 = ctx.ctx.i32_type();
let usize = ctx.get_size_type();
let int32 = ctx.i32;
let usize = ctx.size_t;
let zero = int32.const_int(0, false);
let loc = ctx.debug_info.0.create_debug_location(
@@ -2084,7 +2061,7 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.option.obj_id(&ctx.unifier).unwrap() =>
{
OptionType::from_unifier_type(generator, ctx, expr.custom.unwrap())
OptionType::from_unifier_type(ctx, expr.custom.unwrap())
.construct_empty(generator, ctx, None)
.as_abi_value(ctx)
.into()
@@ -2134,12 +2111,12 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
if let TypeEnum::TVar { .. } = &*ctx.unifier.get_ty_immutable(ty) {
None
} else {
Some(ctx.get_llvm_type(generator, ty))
Some(ctx.get_llvm_type(ty))
}
} else {
Some(elements[0].get_type())
};
let length = ctx.get_size_type().const_int(elements.len() as u64, false);
let length = ctx.size_t.const_int(elements.len() as u64, false);
let arr_str_ptr = if let Some(ty) = ty {
ListType::new(ctx, &ty).construct(generator, ctx, length, Some("list"))
} else {
@@ -2380,7 +2357,7 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
let result = if is_none {
None
} else {
let llvm_ty = ctx.get_llvm_type(generator, body_ty);
let llvm_ty = ctx.get_llvm_type(body_ty);
Some(ctx.builder.build_alloca(llvm_ty, "if_exp_result").unwrap())
};
let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
@@ -2560,10 +2537,8 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
ctx.current_loc,
);
ctx.builder.position_at_end(unreachable_block);
let ptr = ctx
.get_llvm_type(generator, key)
.into_pointer_type()
.const_null();
let ptr =
ctx.get_llvm_type(key).into_pointer_type().const_null();
Ok(Some(
ctx.builder
.build_load(ptr, "unwrap_none_unreachable_load")
@@ -2575,11 +2550,9 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
};
}
ValueEnum::Dynamic(BasicValueEnum::PointerValue(ptr)) => {
let option = OptionType::from_pointer_type(
ptr.get_type(),
ctx.get_size_type(),
)
.map_pointer_value(ptr, None);
let option =
OptionType::from_pointer_type(ptr.get_type(), ctx.size_t)
.map_pointer_value(ptr, None);
let not_null = option.is_some(ctx);
ctx.make_assert(
generator,
@@ -2619,7 +2592,7 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
return Ok(None);
};
let v = ListValue::from_pointer_value(v, usize, Some("arr"));
let ty = ctx.get_llvm_type(generator, *ty);
let ty = ctx.get_llvm_type(*ty);
if let ExprKind::Slice { lower, upper, step } = &slice.node {
let one = int32.const_int(1, false);
let size = v.load_size(ctx, None);
@@ -2671,17 +2644,15 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
} else {
return Ok(None);
};
let raw_index = ctx
.builder
.build_int_s_extend(raw_index, ctx.get_size_type(), "sext")
.unwrap();
let raw_index =
ctx.builder.build_int_s_extend(raw_index, ctx.size_t, "sext").unwrap();
// handle negative index
let is_negative = ctx
.builder
.build_int_compare(
IntPredicate::SLT,
raw_index,
ctx.get_size_type().const_zero(),
ctx.size_t.const_zero(),
"is_neg",
)
.unwrap();
@@ -2716,7 +2687,7 @@ pub fn gen_expr<'ctx, G: CodeGenerator>(
let ndarray_ty = value.custom.unwrap();
let ndarray = ndarray.to_basic_value_enum(ctx, generator, ndarray_ty)?;
let ndarray = NDArrayType::from_unifier_type(generator, ctx, ndarray_ty)
let ndarray = NDArrayType::from_unifier_type(ctx, ndarray_ty)
.map_pointer_value(ndarray.into_pointer_value(), None);
let indices = RustNDIndex::from_subscript_expr(generator, ctx, slice)?;
@@ -2862,7 +2833,7 @@ macro_rules! __codegen_call_extern_impl {
let _: &[$crate::inkwell::values::BasicValueEnum<'_>] = &args;
let types = args.map(|a| a.get_type());
let (ret_ty, cast) = $crate::codegen::expr::__handle_return_type($ret_ty);
$(let args = args.into_iter().chain($varargs).collect_vec();)?
$(let args = ::std::vec::Vec::from_iter(args.into_iter().chain($varargs));)?
let result = $crate::codegen::expr::call_extern_c_fn(
$ctx, &$fn_name, ret_ty, &types, &args, $is_varargs, $var_name, &$fn_attrs,
);
@@ -2941,24 +2912,31 @@ macro_rules! __codegen_call_extern_impl {
///
/// Call an external function with some attributes:
///
/// ```ignore
/// let success = ctx.ctx.i32_type().const_zero();
/// ```no_run
/// # use nac3core::codegen::{CodeGenContext, expr::call_extern};
/// # fn test(ctx: &mut CodeGenContext) {
/// let success = ctx.i32.const_zero();
/// call_extern!(ctx: void _ = ["noreturn"] "_Exit"(success));
/// # }
/// ```
///
/// Call a variadic function:
///
/// ```ignore
/// let int = ctx.ctx.i32_type();
/// ```no_run
/// # use nac3core::codegen::{CodeGenContext, expr::call_extern};
/// # use nac3core::inkwell::{values::IntValue, builder::BuilderError};
/// # fn test<'ctx>(ctx: &mut CodeGenContext<'ctx, '_>) -> Result<(), BuilderError> {
/// let int = ctx.i32;
/// let neg_one = int.const_all_ones();
/// let half = ctx.ctx.f32_type().const_float(0.5);
/// let format = ctx.builder.build_global_string_ptr("%d %.2f")?.as_pointer_value();
/// let format = ctx.builder.build_global_string_ptr("%d %.2f", "fmt_str")?.as_pointer_value();
///
/// // unlike positional args, variadic args need an explicit cast to BasicValueEnum
/// let varargs = [neg_one.into(), half.into()];
///
/// // at runtime, prints "-1 0.50"; written = 7
/// let written: IntValue<'ctx> = call_extern!(ctx: int "written" = "printf"(format; ...varargs));
/// # Ok(()) }
/// ```
///
/// [attr-docs]: https://llvm.org/docs/LangRef.html#fnattrs
@@ -2977,9 +2955,9 @@ pub use __codegen_call_extern as call_extern;
/// as it deduces and converts types automatically.
///
/// For repeated function calls and dynamically added external bindings, you might want to use
/// [`FunctionStore::declare_external`] and [`CodeGenContext::build_call_or_invoke`] directly.
/// [`CoreContext::declare_external`] and [`CodeGenContext::build_call_or_invoke`] directly.
///
/// [`FunctionStore::declare_external`]: crate::codegen::FunctionStore::declare_external
/// [`CoreContext::declare_external`]: crate::codegen::CoreContext::declare_external
#[allow(clippy::too_many_arguments, reason = "most users use the call_extern macro instead")]
pub fn call_extern_c_fn<'ctx>(
ctx: &mut CodeGenContext<'ctx, '_>,
@@ -2991,13 +2969,6 @@ pub fn call_extern_c_fn<'ctx>(
value_name: Option<&str>,
fn_attrs: &[&str],
) -> Option<BasicValueEnum<'ctx>> {
let f = ctx.fn_store.declare_external(
&ctx.module,
fn_name,
ret_type,
param_types,
is_c_varargs,
fn_attrs,
);
let f = ctx.declare_external(fn_name, ret_type, param_types, is_c_varargs, fn_attrs);
ctx.build_call(&f, args, value_name.unwrap_or(""))
}

View File

@@ -1,6 +1,4 @@
use inkwell::{
context::Context,
targets::TargetMachine,
types::{BasicTypeEnum, IntType},
values::{BasicValueEnum, IntValue, PointerValue},
};
@@ -26,12 +24,6 @@ pub trait CodeGenerator {
/// Return the module name for the code generator.
fn get_name(&self) -> &str;
/// Return an instance of [`IntType`] corresponding to the type of `size_t` for this instance.
///
/// Prefer using [`CodeGenContext::get_size_type`] if [`CodeGenContext`] is available, as it is
/// equivalent to this function in a more concise syntax.
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx>;
/// Generate function call and returns the function return value.
/// - obj: Optional object for method call.
/// - fun: Function signature and definition ID.
@@ -287,24 +279,12 @@ pub trait CodeGenerator {
pub struct DefaultCodeGenerator {
name: String,
size_t: u32,
}
impl DefaultCodeGenerator {
#[must_use]
pub fn new(name: String, size_t: IntType<'_>) -> DefaultCodeGenerator {
assert!(matches!(size_t.get_bit_width(), 32 | 64));
DefaultCodeGenerator { name, size_t: size_t.get_bit_width() }
}
#[must_use]
pub fn with_target_machine(
name: String,
ctx: &Context,
target_machine: &TargetMachine,
) -> DefaultCodeGenerator {
let llvm_usize = ctx.ptr_sized_int_type(&target_machine.get_target_data(), None);
Self::new(name, llvm_usize)
pub fn new(name: String) -> DefaultCodeGenerator {
DefaultCodeGenerator { name }
}
}
@@ -312,10 +292,4 @@ impl CodeGenerator for DefaultCodeGenerator {
fn get_name(&self) -> &str {
&self.name
}
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> {
// it should be unsigned, but we don't really need unsigned and this could save us from
// having to do a bit cast...
if self.size_t == 32 { ctx.i32_type() } else { ctx.i64_type() }
}
}

View File

@@ -77,7 +77,7 @@ pub fn call_ldexp<'ctx>(
) -> FloatValue<'ctx> {
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
debug_assert_eq!(exp.get_type(), ctx.ctx.i32_type());
debug_assert_eq!(exp.get_type(), ctx.i32);
call_extern!(ctx: llvm_f64 name? = ["nounwind"] "__nac3_ldexp"(arg, exp))
}

View File

@@ -25,9 +25,9 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
src_arr: ListValue<'ctx>,
src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let llvm_pi8 = ctx.ctx.i8_type().ptr_type(AddressSpace::default());
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
assert_eq!(dest_idx.0.get_type(), llvm_i32);
assert_eq!(dest_idx.1.get_type(), llvm_i32);

View File

@@ -1,7 +1,7 @@
use inkwell::{
IntPredicate,
attributes::{Attribute, AttributeLoc},
context::Context,
context::ContextRef,
memory_buffer::MemoryBuffer,
module::Module,
values::{BasicValue, BasicValueEnum, IntValue},
@@ -27,7 +27,10 @@ mod slice;
mod string;
#[must_use]
pub fn load_irrt<'ctx>(ctx: &'ctx Context, symbol_resolver: &dyn SymbolResolver) -> Module<'ctx> {
pub fn load_irrt<'ctx>(
ctx: ContextRef<'ctx>,
symbol_resolver: &dyn SymbolResolver,
) -> Module<'ctx> {
let bitcode_buf = MemoryBuffer::create_from_memory_range(
include_bytes!(concat!(env!("OUT_DIR"), "/irrt.bc")),
"irrt_bitcode_buffer",
@@ -72,7 +75,7 @@ pub fn load_irrt<'ctx>(ctx: &'ctx Context, symbol_resolver: &dyn SymbolResolver)
#[must_use]
pub fn get_usize_dependent_function_name(ctx: &mut CodeGenContext<'_, '_>, name: &str) -> String {
let mut name = name.to_owned();
match ctx.get_size_type().get_bit_width() {
match ctx.size_t.get_bit_width() {
32 => {}
64 => name.push_str("64"),
bit_width => {
@@ -130,7 +133,7 @@ pub fn handle_slice_indices<'ctx, G: CodeGenerator>(
generator: &mut G,
length: IntValue<'ctx>,
) -> Result<Option<(IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>)>, String> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let zero = llvm_i32.const_zero();
let one = llvm_i32.const_int(1, false);

View File

@@ -21,7 +21,7 @@ pub fn call_nac3_ndarray_array_set_and_validate_list_shape<'ctx, G: CodeGenerato
ndims: IntValue<'ctx>,
shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(list.get_type().element_type().unwrap(), ctx.ctx.i8_type().into());
assert_eq!(ndims.get_type(), llvm_usize);
assert_eq!(

View File

@@ -18,7 +18,7 @@ pub fn call_nac3_ndarray_util_assert_shape_no_negative<'ctx, G: CodeGenerator +
ctx: &mut CodeGenContext<'ctx, '_>,
shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(shape.element_type(ctx, generator), llvm_usize.into());
let name =
@@ -36,7 +36,7 @@ pub fn call_nac3_ndarray_util_assert_output_shape_same<'ctx, G: CodeGenerator +
ndarray_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
output_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(ndarray_shape.element_type(ctx, generator), llvm_usize.into());
assert_eq!(output_shape.element_type(ctx, generator), llvm_usize.into());
@@ -53,39 +53,39 @@ pub fn call_nac3_ndarray_util_assert_output_shape_same<'ctx, G: CodeGenerator +
/// Generates a call to `__nac3_ndarray_size`.
///
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the number of elements of an
/// Returns a `usize` value of the number of elements of an
/// `ndarray`, corresponding to the value of `ndarray.size`.
pub fn call_nac3_ndarray_size<'ctx>(
ctx: &mut CodeGenContext<'ctx, '_>,
ndarray: NDArrayValue<'ctx>,
) -> IntValue<'ctx> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_size");
call_extern!(ctx: llvm_usize "size" = name(ndarray.as_abi_value(ctx)))
}
/// Generates a call to `__nac3_ndarray_nbytes`.
///
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the number of bytes consumed by the
/// Returns a `usize` value of the number of bytes consumed by the
/// data of the `ndarray`, corresponding to the value of `ndarray.nbytes`.
pub fn call_nac3_ndarray_nbytes<'ctx>(
ctx: &mut CodeGenContext<'ctx, '_>,
ndarray: NDArrayValue<'ctx>,
) -> IntValue<'ctx> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_nbytes");
call_extern!(ctx: llvm_usize "nbytes" = name(ndarray.as_abi_value(ctx)))
}
/// Generates a call to `__nac3_ndarray_len`.
///
/// Returns a [`usize`][CodeGenerator::get_size_type] value of the size of the topmost dimension of
/// Returns a `usize` value of the size of the topmost dimension of
/// the `ndarray`, corresponding to the value of `ndarray.__len__`.
pub fn call_nac3_ndarray_len<'ctx>(
ctx: &mut CodeGenContext<'ctx, '_>,
ndarray: NDArrayValue<'ctx>,
) -> IntValue<'ctx> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_len");
call_extern!(ctx: llvm_usize "len" = name(ndarray.as_abi_value(ctx)))
}
@@ -112,7 +112,7 @@ pub fn call_nac3_ndarray_get_nth_pelement<'ctx>(
) -> PointerValue<'ctx> {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(index.get_type(), llvm_usize);
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_get_nth_pelement");
@@ -132,7 +132,7 @@ pub fn call_nac3_ndarray_get_pelement_by_indices<'ctx, G: CodeGenerator + ?Sized
) -> PointerValue<'ctx> {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(indices.element_type(ctx, generator), llvm_usize.into());
let name = get_usize_dependent_function_name(ctx, "__nac3_ndarray_get_pelement_by_indices");

View File

@@ -45,7 +45,7 @@ pub fn call_nac3_ndarray_broadcast_shapes<'ctx, G, Shape>(
Shape: TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>
+ TypedArrayLikeMutator<'ctx, G, IntValue<'ctx>>,
{
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(num_shape_entries.get_type(), llvm_usize);
assert!(

View File

@@ -20,7 +20,7 @@ pub fn call_nac3_nditer_initialize<'ctx, G: CodeGenerator + ?Sized>(
ndarray: NDArrayValue<'ctx>,
indices: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(indices.element_type(ctx, generator), llvm_usize.into());
let name = get_usize_dependent_function_name(ctx, "__nac3_nditer_initialize");

View File

@@ -20,7 +20,7 @@ pub fn call_nac3_ndarray_matmul_calculate_shapes<'ctx, G: CodeGenerator + ?Sized
new_b_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
dst_shape: &impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(a_shape.element_type(ctx, generator), llvm_usize.into());
assert_eq!(b_shape.element_type(ctx, generator), llvm_usize.into());

View File

@@ -18,7 +18,7 @@ pub fn call_nac3_ndarray_reshape_resolve_and_check_new_shape<'ctx, G: CodeGenera
new_ndims: IntValue<'ctx>,
new_shape: ArraySliceValue<'ctx>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert_eq!(size.get_type(), llvm_usize);
assert_eq!(new_ndims.get_type(), llvm_usize);

View File

@@ -23,7 +23,7 @@ pub fn call_nac3_ndarray_transpose<'ctx, G: CodeGenerator + ?Sized>(
dst_ndarray: NDArrayValue<'ctx>,
axes: Option<&impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>>,
) {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
assert!(axes.is_none_or(|axes| axes.size(ctx, generator).get_type() == llvm_usize));
assert!(axes.is_none_or(|axes| axes.element_type(ctx, generator) == llvm_usize.into()));

View File

@@ -16,7 +16,7 @@ pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
end: IntValue<'ctx>,
step: IntValue<'ctx>,
) -> IntValue<'ctx> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
assert_eq!(start.get_type(), llvm_i32);
assert_eq!(end.get_type(), llvm_i32);
assert_eq!(step.get_type(), llvm_i32);

View File

@@ -15,7 +15,7 @@ pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>(
generator: &mut G,
length: IntValue<'ctx>,
) -> Result<Option<IntValue<'ctx>>, String> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
assert_eq!(length.get_type(), llvm_i32);
let i = if let Some(v) = generator.gen_expr(ctx, i)? {

View File

@@ -11,13 +11,15 @@ use inkwell::{
AddressSpace,
attributes::{Attribute, AttributeLoc},
builder::Builder,
module::{Linkage, Module},
module::Linkage,
targets::TargetData,
types::{AnyType, BasicMetadataTypeEnum, BasicType, BasicTypeEnum, PointerType},
values::{BasicValueEnum, CallSiteValue, FunctionValue, PointerValue},
};
use itertools::Itertools;
use crate::codegen::{CoreContext, TargetMachineOptions};
const INTERNAL_CALL_CONV: u32 = inkwell::llvm_sys::LLVMCallConv::LLVMFastCallConv as _;
/// An LLVM function declaration.
@@ -72,28 +74,12 @@ fn get_attrs(
a.into_iter().enumerate().filter_map(|(i, attr)| Some((AttributeLoc::Param(i as _), attr?)))
}
/// Functions in an LLVM module, with ABI details encapsulated.
///
/// # Usage
///
/// Construct with [`FunctionStore::default`]. Always keep this in sync
/// with the relevant module; every construction of a [`FunctionStore`]
/// should be right next to some construction of a [`Module`], and vice
/// versa.
///
/// Declare functions using [`declare_external`] or [`declare_internal`].
/// Call the declared function using [`CodeGenContext::build_call`] or [`CodeGenContext::build_call_or_invoke`].
///
/// [`declare_external`]: FunctionStore::declare_external
/// [`declare_internal`]: FunctionStore::declare_internal
/// [`CodeGenContext::build_call`]: crate::codegen::CodeGenContext::build_call
/// [`CodeGenContext::build_call_or_invoke`]: crate::codegen::CodeGenContext::build_call_or_invoke
#[derive(Default)]
pub struct FunctionStore<'ctx> {
pub(super) struct FunctionStore<'ctx> {
functions: HashMap<String, (FunctionValue<'ctx>, FunctionInfo<'ctx>)>,
arch: String,
}
impl<'ctx> FunctionStore<'ctx> {
impl<'ctx> CoreContext<'ctx> {
/// Declares and registers a function that is defined internally.
///
/// Returns a `(decl, value)` pair.
@@ -111,19 +97,20 @@ impl<'ctx> FunctionStore<'ctx> {
/// convention.
pub fn declare_internal(
&mut self,
module: &Module<'ctx>,
name: &str,
ret: Option<BasicTypeEnum<'ctx>>,
params: &[BasicMetadataTypeEnum<'ctx>],
export: bool,
) -> (FunctionDecl<'ctx>, FunctionValue<'ctx>) {
let CoreContext { ctx, module, fn_store, .. } = self;
let mut new_fn = None;
self.functions.entry(name.to_owned()).or_insert_with(|| {
fn_store.functions.entry(name.to_owned()).or_insert_with(|| {
let f = module.add_function(
name,
match ret {
Some(ret) => ret.fn_type(params, false),
None => module.get_context().void_type().fn_type(params, false),
None => ctx.void_type().fn_type(params, false),
},
None,
);
@@ -142,23 +129,23 @@ impl<'ctx> FunctionStore<'ctx> {
///
/// Returns a function declaration. Note that the registered function signature is designed
/// to match the C ABI, so you might see a slightly different function signature in LLVM IR.
pub fn declare_external<'a>(
&'a mut self,
module: &Module<'ctx>,
pub fn declare_external(
&mut self,
name: &str,
ret: Option<BasicTypeEnum<'ctx>>,
params: &[BasicTypeEnum<'ctx>],
is_c_varargs: bool,
fn_attrs: &[&str],
) -> FunctionDecl<'ctx> {
let entry = match self.functions.entry(name.into()) {
let CoreContext { ctx, ref module, ref target, ref mut fn_store, .. } = *self;
let entry = match fn_store.functions.entry(name.into()) {
Entry::Occupied(_) => return FunctionDecl::new(name.into()),
Entry::Vacant(v) => v,
};
let ctx = module.get_context();
let triple = module.get_triple();
let arch = triple.as_str().to_str().unwrap().split('-').next().unwrap();
let arch = &*fn_store.arch;
let layout = target.get_target_data();
let attr_sret = (arch == "x86_64" || arch == "i686" || arch == "riscv32")
.then(|| Attribute::get_named_enum_kind_id("sret"));
@@ -166,7 +153,7 @@ impl<'ctx> FunctionStore<'ctx> {
.then(|| Attribute::get_named_enum_kind_id("byval"));
let get_conv = |attr: Option<u32>, ty, indirect_check: fn(_, _, _) -> bool| TyAndCallConv {
ty,
call_conv: if indirect_check(arch, module, ty) {
call_conv: if indirect_check(arch, &layout, ty) {
ArgCallConv::Indirect(
attr.map(|x| ctx.create_type_attribute(x, AnyType::as_any_type_enum(&ty))),
)
@@ -211,6 +198,15 @@ impl<'ctx> FunctionStore<'ctx> {
entry.insert((f, info));
FunctionDecl::new(name.into())
}
}
impl<'ctx> FunctionStore<'ctx> {
pub(crate) fn new(options: &TargetMachineOptions) -> Self {
Self {
functions: HashMap::default(),
arch: options.triple.split('-').next().unwrap().to_owned(),
}
}
pub(crate) fn do_call<T>(
&self,
@@ -334,58 +330,56 @@ impl<'ctx> FunctionStore<'ctx> {
///
/// Also refer to rustc's impl:
/// <https://github.com/rust-lang/rust/tree/255aa220821c05c3eac7605fce4ea1c9ab2cbdb4/compiler/rustc_target/src/callconv>
fn indirect_ret(arch: &str, module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
fn indirect_ret(arch: &str, layout: &TargetData, ret: BasicTypeEnum<'_>) -> bool {
// LLVM's TargetTriple has methods to access separate components, but inkwell does not
// expose them. We use a rudimentary approach to parse the triple.
match arch {
"x86_64" => x86_64_indirect_ret(module, ret),
"armv7" => arm_indirect_ret(module, ret, false),
"aarch64" => arm_indirect_ret(module, ret, true),
"riscv32" => riscv_indirect_ret(module, ret),
"i686" => x86_indirect_ret(module, ret),
_ => unimplemented!("unsupported arch for extern fn: {arch}"),
"x86_64" => x86_64_indirect_ret(layout, ret),
"armv7" => arm_indirect_ret(layout, ret, false),
"aarch64" => arm_indirect_ret(layout, ret, true),
"riscv32" => riscv_indirect_ret(layout, ret),
"i686" => x86_indirect_ret(layout, ret),
arch => unimplemented!("unsupported arch for extern fn: {arch}"),
}
}
fn indirect_arg(arch: &str, module: &Module<'_>, ty: BasicTypeEnum<'_>) -> bool {
fn indirect_arg(arch: &str, layout: &TargetData, ty: BasicTypeEnum<'_>) -> bool {
// armv7 appears to never pass arguments indirectly at all
arch != "armv7" && indirect_ret(arch, module, ty)
arch != "armv7" && indirect_ret(arch, layout, ty)
}
fn bits_of(module: &Module<'_>, ty: BasicTypeEnum<'_>) -> u64 {
TargetData::create(module.get_data_layout().as_str().to_str().unwrap()).get_bit_size(&ty)
}
fn arm_homogeneous_aggregate(module: &Module<'_>, ty: BasicTypeEnum<'_>) -> Option<u32> {
fn arm_homogeneous_aggregate(layout: &TargetData, ty: BasicTypeEnum<'_>) -> Option<u32> {
// On ARM architectures, returning a struct of exactly 1-4 floats is through registers.
match ty {
BasicTypeEnum::FloatType(_) => Some(1),
BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_) if bits_of(module, ty) <= 64 => {
BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_)
if layout.get_bit_size(&ty) <= 64 =>
{
None
}
BasicTypeEnum::StructType(s) => s
.get_field_types_iter()
.map(|ty| arm_homogeneous_aggregate(module, ty))
.map(|ty| arm_homogeneous_aggregate(layout, ty))
.sum::<Option<u32>>()
.filter(|&n| n <= 4),
_ => unreachable!(),
}
}
fn arm_indirect_ret(module: &Module<'_>, ret: BasicTypeEnum<'_>, aarch64: bool) -> bool {
fn arm_indirect_ret(layout: &TargetData, ret: BasicTypeEnum<'_>, aarch64: bool) -> bool {
!matches!(
ret,
BasicTypeEnum::FloatType(_) | BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_)
) && bits_of(module, ret) > if aarch64 { 128 } else { 32 }
&& arm_homogeneous_aggregate(module, ret).is_none()
) && layout.get_bit_size(&ret) > if aarch64 { 128 } else { 32 }
&& arm_homogeneous_aggregate(layout, ret).is_none()
}
fn riscv_indirect_ret(module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
fn riscv_indirect_ret(layout: &TargetData, ret: BasicTypeEnum<'_>) -> bool {
match ret {
BasicTypeEnum::FloatType(_) | BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_) => {
false
}
_ if bits_of(module, ret) <= 64 => false,
_ if layout.get_bit_size(&ret) <= 64 => false,
BasicTypeEnum::StructType(s) => {
let (mut f, mut i) = (0, 0);
for field in s.get_field_types_iter() {
@@ -401,7 +395,7 @@ fn riscv_indirect_ret(module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
}
}
fn x86_64_indirect_ret(module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
fn x86_64_indirect_ret(layout: &TargetData, ret: BasicTypeEnum<'_>) -> bool {
// There's a lot of logic determining which class each "EIGHTBYTE" (64-bit) component refers to.
// However, if we limit ourselves to:
// - not have unaligned values;
@@ -411,10 +405,10 @@ fn x86_64_indirect_ret(module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
// unless the size of the struct is > 128 bits, where everything is assigned MEMORY.
//
// So for our specific case, `need_sret` is just a size check.
bits_of(module, ret) > 128
layout.get_bit_size(&ret) > 128
}
fn x86_indirect_ret(_module: &Module<'_>, ret: BasicTypeEnum<'_>) -> bool {
fn x86_indirect_ret(_layout: &TargetData, ret: BasicTypeEnum<'_>) -> bool {
// All aggregates are passed indirectly, even those with just 1 element.
ret.is_struct_type() || ret.is_array_type()
}

View File

@@ -95,13 +95,12 @@ pub fn call_memcpy<'ctx>(
dest.get_type().get_element_type().into_int_type().get_bit_width(),
src.get_type().get_element_type().into_int_type().get_bit_width(),
);
debug_assert_eq!(len.get_type(), ctx.get_size_type());
debug_assert_eq!(len.get_type(), ctx.size_t);
let llvm_dest_t = dest.get_type();
let llvm_src_t = src.get_type();
let target_data =
ctx.registry.llvm_options.create_target_machine().map(|tm| tm.get_target_data()).unwrap();
let target_data = ctx.target.get_target_data();
let dest_alignment = target_data.get_abi_alignment(&llvm_dest_t);
let src_alignment = target_data.get_abi_alignment(&llvm_src_t);
@@ -158,7 +157,7 @@ pub fn call_memcpy_generic_array<'ctx>(
) {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let dest_elem_t = dest.get_type().get_element_type();
let src_elem_t = src.get_type().get_element_type();

View File

@@ -1,5 +1,4 @@
use std::{
cell::OnceCell,
collections::{HashMap, HashSet},
ops::ControlFlow,
sync::{
@@ -14,7 +13,7 @@ use inkwell::{
AddressSpace, IntPredicate, OptimizationLevel,
basic_block::BasicBlock,
builder::Builder,
context::Context,
context::{Context, ContextRef},
debug_info::{
AsDIScope, DICompileUnit, DIFlagsConstants, DIScope, DISubprogram, DebugInfoBuilder,
},
@@ -30,7 +29,7 @@ use parking_lot::{Condvar, Mutex};
use nac3parser::ast::{Location, Stmt, StrRef};
use crate::{
codegen::stmt::get_personality,
codegen::{llvm_fns::FunctionStore, stmt::get_personality},
symbol_resolver::{StaticValue, SymbolResolver},
toplevel::{
TopLevelContext, TopLevelDef,
@@ -44,7 +43,7 @@ use crate::{
};
use concrete_type::{ConcreteType, ConcreteTypeEnum, ConcreteTypeStore};
pub use generator::{CodeGenerator, DefaultCodeGenerator};
pub use llvm_fns::{FunctionDecl, FunctionStore};
pub use llvm_fns::FunctionDecl;
use types::{
ExceptionType, ListType, OptionType, ProxyType, RangeType, StringType, TupleType,
ndarray::NDArrayType,
@@ -92,27 +91,20 @@ pub type VarValue<'ctx> = (PointerValue<'ctx>, Option<Arc<dyn StaticValue + Send
/// Additional options for LLVM during codegen.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CodeGenLLVMOptions {
/// The optimization level to apply on the generated LLVM IR.
pub opt_level: OptimizationLevel,
pub struct CodeGenOptions {
/// The optimization level (0/1/2/3/s/z) to apply on the generated LLVM IR.
pub opt_level: String,
/// Whether we should insert debugging statements in the generated code.
pub debug: bool,
/// Options related to the target machine.
pub target: CodeGenTargetMachineOptions,
}
impl CodeGenLLVMOptions {
/// Creates a [`TargetMachine`] using the target options specified by this struct.
///
/// See [`Target::create_target_machine`].
#[must_use]
pub fn create_target_machine(&self) -> Option<TargetMachine> {
self.target.create_target_machine(self.opt_level)
}
pub target: TargetMachineOptions,
}
/// Additional options for code generation for the target machine.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CodeGenTargetMachineOptions {
pub struct TargetMachineOptions {
/// The target machine triple.
pub triple: String,
/// The target machine CPU.
@@ -123,30 +115,40 @@ pub struct CodeGenTargetMachineOptions {
pub reloc_mode: RelocMode,
/// Code model for code generation.
pub code_model: CodeModel,
/// Optimization level for backend/target-specific code generation.
///
/// Note that this does not determine the set of optimization passes that run on LLVM IR.
///
/// # Target-specific notes
///
/// On ARM, GOT entries are created when this level is [`OptimizationLevel::None`], which
/// our linker (`nac3ld`) does not support. You must at least use [`OptimizationLevel::Less`].
pub target_opt_level: OptimizationLevel,
}
impl CodeGenTargetMachineOptions {
impl TargetMachineOptions {
/// Creates an instance of [`CodeGenTargetMachineOptions`] using the triple of the host machine.
/// Other options are set to defaults.
#[must_use]
pub fn from_host_triple() -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
pub fn from_host_triple(level: OptimizationLevel) -> TargetMachineOptions {
TargetMachineOptions {
triple: TargetMachine::get_default_triple().as_str().to_string_lossy().into_owned(),
cpu: String::default(),
features: String::default(),
reloc_mode: RelocMode::Default,
code_model: CodeModel::Default,
target_opt_level: level,
}
}
/// Creates an instance of [`CodeGenTargetMachineOptions`] using the properties of the host
/// machine. Other options are set to defaults.
#[must_use]
pub fn from_host() -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
pub fn from_host(level: OptimizationLevel) -> TargetMachineOptions {
TargetMachineOptions {
cpu: TargetMachine::get_host_cpu_name().to_string(),
features: TargetMachine::get_host_cpu_features().to_string(),
..CodeGenTargetMachineOptions::from_host_triple()
..TargetMachineOptions::from_host_triple(level)
}
}
@@ -154,36 +156,35 @@ impl CodeGenTargetMachineOptions {
///
/// See [`Target::create_target_machine`].
#[must_use]
pub fn create_target_machine(&self, level: OptimizationLevel) -> Option<TargetMachine> {
pub fn create_target_machine(&self) -> TargetMachine {
let triple = TargetTriple::create(self.triple.as_str());
let target = Target::from_triple(&triple).unwrap_or_else(|_| {
panic!("could not create target from target triple {}", self.triple)
let target = Target::from_triple(&triple).unwrap_or_else(|e| {
panic!("could not create target from target triple {}: {e}", self.triple)
});
target.create_target_machine(
&triple,
self.cpu.as_str(),
self.features.as_str(),
level,
self.reloc_mode,
self.code_model,
)
target
.create_target_machine(
&triple,
self.cpu.as_str(),
self.features.as_str(),
self.target_opt_level,
self.reloc_mode,
self.code_model,
)
.expect("could not create target machine")
}
}
pub struct CodeGenContext<'ctx, 'a> {
/// The LLVM context associated with [this context][CodeGenContext].
pub ctx: &'ctx Context,
/// The [`CoreContext`] instance which includes the module and target-specific information.
pub inner: CoreContext<'ctx>,
/// The [Builder] instance for creating LLVM IR statements.
/// The [`Builder`] instance for creating LLVM IR statements.
pub builder: Builder<'ctx>,
/// The [`DebugInfoBuilder`], [compilation unit information][DICompileUnit], and
/// [scope information][DIScope] of this context.
pub debug_info: (DebugInfoBuilder<'ctx>, DICompileUnit<'ctx>, DIScope<'ctx>),
/// The module for which [this context][CodeGenContext] is generating into.
pub module: Module<'ctx>,
/// The [`TopLevelContext`] associated with [this context][CodeGenContext].
pub top_level: &'a TopLevelContext,
pub unifier: Unifier,
@@ -226,38 +227,30 @@ pub struct CodeGenContext<'ctx, 'a> {
pub outer_catch_clauses:
Option<(Vec<Option<BasicValueEnum<'ctx>>>, BasicBlock<'ctx>, PhiValue<'ctx>)>,
// all LLVM function declarations
pub fn_store: FunctionStore<'ctx>,
/// The current source location.
pub current_loc: Location,
/// The cached type of `size_t`.
llvm_usize: OnceCell<IntType<'ctx>>,
}
impl<'ctx> CodeGenContext<'ctx, '_> {
impl<'ctx> std::ops::Deref for CodeGenContext<'ctx, '_> {
type Target = CoreContext<'ctx>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl std::ops::DerefMut for CodeGenContext<'_, '_> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl CodeGenContext<'_, '_> {
/// Whether the [current basic block][Builder::get_insert_block] referenced by `builder`
/// contains a [terminator statement][BasicBlock::get_terminator].
pub fn is_terminated(&self) -> bool {
self.builder.get_insert_block().and_then(BasicBlock::get_terminator).is_some()
}
/// Returns a [`IntType`] representing `size_t` for the compilation target as specified by
/// [`self.registry`][WorkerRegistry].
pub fn get_size_type(&self) -> IntType<'ctx> {
*self.llvm_usize.get_or_init(|| {
self.ctx.ptr_sized_int_type(
&self
.registry
.llvm_options
.create_target_machine()
.map(|tm| tm.get_target_data())
.unwrap(),
None,
)
})
}
}
type Fp = Box<dyn Fn(&Module) + Send + Sync>;
@@ -294,7 +287,7 @@ pub struct WorkerRegistry {
static_value_store: Arc<Mutex<StaticValueStore>>,
/// LLVM-related options for code generation.
pub llvm_options: CodeGenLLVMOptions,
pub codegen_options: CodeGenOptions,
}
impl WorkerRegistry {
@@ -303,7 +296,7 @@ impl WorkerRegistry {
pub fn create_workers<G: CodeGenerator + Send + 'static>(
generators: Vec<Box<G>>,
top_level_ctx: Arc<TopLevelContext>,
llvm_options: &CodeGenLLVMOptions,
codegen_options: &CodeGenOptions,
f: &Arc<WithCall>,
) -> (Arc<WorkerRegistry>, Vec<thread::JoinHandle<()>>) {
let (sender, receiver) = unbounded();
@@ -324,7 +317,7 @@ impl WorkerRegistry {
task_count,
wait_condvar,
top_level_ctx,
llvm_options: llvm_options.clone(),
codegen_options: codegen_options.clone(),
});
let mut handles = Vec::new();
@@ -393,44 +386,26 @@ impl WorkerRegistry {
/// Function executed by worker thread for generating IR for each function.
fn worker_thread<G: CodeGenerator>(&self, generator: &mut G, f: &Arc<WithCall>) {
let context = Context::create();
let mut builder = context.create_builder();
let mut module = context.create_module(generator.get_name());
let mut fn_store = FunctionStore::default();
let target_machine = self.llvm_options.create_target_machine().unwrap();
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
module.set_triple(&target_machine.get_triple());
module.add_basic_value_flag(
"Debug Info Version",
inkwell::module::FlagBehavior::Warning,
context.i32_type().const_int(3, false),
);
module.add_basic_value_flag(
"Dwarf Version",
inkwell::module::FlagBehavior::Warning,
context.i32_type().const_int(4, false),
);
context_ref!(ctx);
let options = &self.codegen_options.target;
let mut context = CoreContext::new(ctx, generator.get_name(), options);
let mut builder = context.ctx.create_builder();
let mut errors = HashSet::new();
while let Some(task) = self.receiver.recv().unwrap() {
match gen_func(&context, generator, self, builder, module, fn_store, task) {
Ok(result) => {
(builder, module, fn_store, _) = result;
}
Err((old_builder, e)) => {
builder = old_builder;
let (context_, builder_, result) = gen_func(context, builder, generator, self, task);
builder = builder_;
context = match result {
Ok(_) => context_,
Err(e) => {
errors.insert(e);
// create a new empty module just to continue codegen and collect errors
module = context.create_module(&format!("{}_recover", generator.get_name()));
let target_machine = self.llvm_options.create_target_machine().unwrap();
module.set_data_layout(&target_machine.get_target_data().get_data_layout());
module.set_triple(&target_machine.get_triple());
fn_store = FunctionStore::default();
CoreContext::new(
context_.ctx,
&format!("{}_recover", generator.get_name()),
&self.codegen_options.target,
)
}
}
};
*self.task_count.lock() -= 1;
self.wait_condvar.notify_all();
}
@@ -440,34 +415,24 @@ impl WorkerRegistry {
errors.into_iter().sorted().join("\n----------\n")
);
let result = module.verify();
let result = context.module.verify();
if let Err(err) = result {
println!("{}", module.print_to_string().to_str().unwrap());
println!("{}", context.module.print_to_string().to_str().unwrap());
panic!("{}", err.to_string())
}
let pass_options = PassBuilderOptions::create();
let target_machine = self
.llvm_options
.target
.create_target_machine(self.llvm_options.opt_level)
.unwrap_or_else(|| {
panic!(
"could not create target machine from properties {:?}",
self.llvm_options.target
)
});
let passes = format!("default<O{}>", self.llvm_options.opt_level as u32);
let result = module.run_passes(passes.as_str(), &target_machine, pass_options);
let passes = format!("default<O{}>", self.codegen_options.opt_level);
let result = context.module.run_passes(passes.as_str(), &context.target, pass_options);
if let Err(err) = result {
panic!(
"Failed to run optimization for module `{}`: {}",
module.get_name().to_str().unwrap(),
context.module.get_name().to_str().unwrap(),
err.to_string()
);
}
f.run(&module);
f.run(&context.module);
let mut lock = self.task_count.lock();
*lock += 1;
self.wait_condvar.notify_all();
@@ -492,10 +457,8 @@ pub struct CodeGenTask {
/// This function is used to obtain the in-memory representation of `ty`, e.g. a `bool` variable
/// would be represented by an `i8`.
#[allow(clippy::too_many_arguments)]
fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
ctx: &'ctx Context,
module: &Module<'ctx>,
generator: &G,
fn get_llvm_type<'ctx>(
ctx: &CoreContext<'ctx>,
unifier: &mut Unifier,
top_level: &TopLevelContext,
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
@@ -514,39 +477,35 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
TObj { obj_id, params, .. } if *obj_id == PrimDef::Option.id() => {
let element_type = get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
*params.iter().next().unwrap().1,
);
OptionType::new_with_generator(generator, ctx, &element_type).as_abi_type().into()
OptionType::new(ctx, &element_type).as_abi_type().into()
}
TObj { obj_id, params, .. } if *obj_id == PrimDef::List.id() => {
let element_type = get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
*params.iter().next().unwrap().1,
);
ListType::new_with_generator(generator, ctx, element_type).as_abi_type().into()
ListType::new(ctx, &element_type).as_abi_type().into()
}
TObj { obj_id, .. } if *obj_id == PrimDef::NDArray.id() => {
let (dtype, ndims) = unpack_ndarray_var_tys(unifier, ty);
let ndims = extract_ndims(unifier, ndims);
let element_type = get_llvm_type(
ctx, module, generator, unifier, top_level, type_cache, dtype,
ctx, unifier, top_level, type_cache, dtype,
);
NDArrayType::new_with_generator(generator, ctx, element_type, ndims).as_abi_type().into()
NDArrayType::new(ctx, element_type, ndims).as_abi_type().into()
}
_ => unreachable!(
@@ -563,10 +522,10 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
};
let name = unifier.stringify(ty);
let ty = if let Some(t) = module.get_struct_type(&name) {
let ty = if let Some(t) = ctx.module.get_struct_type(&name) {
t.ptr_type(AddressSpace::default()).into()
} else {
let struct_type = ctx.opaque_struct_type(&name);
let struct_type = ctx.ctx.opaque_struct_type(&name);
type_cache.insert(
unifier.get_representative(ty),
struct_type.ptr_type(AddressSpace::default()).into(),
@@ -576,8 +535,6 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
.map(|f| {
get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
@@ -597,10 +554,10 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
let fields = ty
.iter()
.map(|ty| {
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, *ty)
get_llvm_type(ctx, unifier, top_level, type_cache, *ty)
})
.collect_vec();
TupleType::new_with_generator(generator, ctx, &fields).as_abi_type().into()
TupleType::new(ctx, &fields).as_abi_type().into()
}
TVirtual { .. } => unimplemented!(),
_ => unreachable!("{}", ty_enum.get_type_name()),
@@ -620,10 +577,8 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
/// be byte-aligned for the variable to be addressable in memory, whereas there is no such
/// restriction for ABI representations.
#[allow(clippy::too_many_arguments)]
fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
ctx: &'ctx Context,
module: &Module<'ctx>,
generator: &G,
fn get_llvm_abi_type<'ctx>(
ctx: &CoreContext<'ctx>,
unifier: &mut Unifier,
top_level: &TopLevelContext,
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
@@ -633,15 +588,15 @@ fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
// If the type is used in the definition of a function, return `i1` instead of `i8` for ABI
// consistency.
if unifier.unioned(ty, primitives.bool) {
ctx.bool_type().into()
ctx.ctx.bool_type().into()
} else {
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, ty)
get_llvm_type(ctx, unifier, top_level, type_cache, ty)
}
}
/// Returns the [`BasicTypeEnum`] representing a `va_list` struct for variadic arguments.
#[allow(dead_code)]
fn get_llvm_valist_type<'ctx>(ctx: &'ctx Context, triple: &TargetTriple) -> BasicTypeEnum<'ctx> {
fn get_llvm_valist_type<'ctx>(ctx: ContextRef<'ctx>, triple: &TargetTriple) -> BasicTypeEnum<'ctx> {
let triple = TargetMachine::normalize_triple(triple);
let triple = triple.as_str().to_str().unwrap();
let arch = triple.split('-').next().unwrap();
@@ -733,18 +688,13 @@ pub fn gen_func_impl<
G: CodeGenerator,
F: FnOnce(&mut G, &mut CodeGenContext) -> Result<(), String>,
>(
context: &'ctx Context,
mut ctx: CoreContext<'ctx>,
builder: Builder<'ctx>,
generator: &mut G,
registry: &WorkerRegistry,
builder: Builder<'ctx>,
module: Module<'ctx>,
mut fn_store: FunctionStore<'ctx>,
task: CodeGenTask,
codegen_function: F,
) -> Result<
(Builder<'ctx>, Module<'ctx>, FunctionStore<'ctx>, FunctionValue<'ctx>),
(Builder<'ctx>, String),
> {
) -> (CoreContext<'ctx>, Builder<'ctx>, Result<FunctionValue<'ctx>, String>) {
let top_level_ctx = registry.top_level_ctx.clone();
let static_value_store = registry.static_value_store.clone();
let (mut unifier, primitives) = {
@@ -789,19 +739,15 @@ pub fn gen_func_impl<
};
let mut type_cache: HashMap<_, _> = [
(primitives.int32, context.i32_type().into()),
(primitives.int64, context.i64_type().into()),
(primitives.uint32, context.i32_type().into()),
(primitives.uint64, context.i64_type().into()),
(primitives.float, context.f64_type().into()),
(primitives.bool, context.i8_type().into()),
(primitives.str, {
StringType::new_with_generator(generator, context).as_abi_type().into()
}),
(primitives.range, RangeType::new_with_generator(generator, context).as_abi_type().into()),
(primitives.exception, {
ExceptionType::new_with_generator(generator, context).as_abi_type().into()
}),
(primitives.int32, ctx.i32.into()),
(primitives.int64, ctx.i64.into()),
(primitives.uint32, ctx.i32.into()),
(primitives.uint64, ctx.i64.into()),
(primitives.float, ctx.ctx.f64_type().into()),
(primitives.bool, ctx.ctx.i8_type().into()),
(primitives.str, { StringType::new(&ctx).as_abi_type().into() }),
(primitives.range, RangeType::new(&ctx).as_abi_type().into()),
(primitives.exception, { ExceptionType::new(&ctx).as_abi_type().into() }),
]
.iter()
.copied()
@@ -822,9 +768,7 @@ pub fn gen_func_impl<
None
} else {
Some(get_llvm_abi_type(
context,
&module,
generator,
&ctx,
&mut unifier,
top_level_ctx.as_ref(),
&mut type_cache,
@@ -846,9 +790,7 @@ pub fn gen_func_impl<
.iter()
.map(|arg| {
get_llvm_abi_type(
context,
&module,
generator,
&ctx,
&mut unifier,
top_level_ctx.as_ref(),
&mut type_cache,
@@ -862,31 +804,23 @@ pub fn gen_func_impl<
let symbol = &task.symbol_name;
// This module is independent from the module spawning this codegen task,
// so we must redefine the function from scratch.
let (_, fn_val) =
fn_store.declare_internal(&module, symbol, ret_type, &params_type, task.export_symbol);
let (_, fn_val) = ctx.declare_internal(symbol, ret_type, &params_type, task.export_symbol);
if let Some(personality) = get_personality(&top_level_ctx, &module) {
if let Some(personality) = get_personality(&top_level_ctx, &ctx) {
fn_val.set_personality_function(personality);
}
let init_bb = context.append_basic_block(fn_val, "init");
let init_bb = ctx.ctx.append_basic_block(fn_val, "init");
builder.position_at_end(init_bb);
let body_bb = context.append_basic_block(fn_val, "body");
let body_bb = ctx.ctx.append_basic_block(fn_val, "body");
// Store non-vararg argument values into local variables
let mut var_assignment = HashMap::new();
for (n, arg) in params.iter().enumerate().filter(|(_, arg)| !arg.is_vararg) {
let param = fn_val.get_nth_param(n as u32).unwrap();
let local_type = get_llvm_type(
context,
&module,
generator,
&mut unifier,
top_level_ctx.as_ref(),
&mut type_cache,
arg.ty,
);
let local_type =
get_llvm_type(&ctx, &mut unifier, top_level_ctx.as_ref(), &mut type_cache, arg.ty);
let alloca =
builder.build_alloca(local_type, &format!("{}.addr", &arg.name.to_string())).unwrap();
@@ -896,7 +830,7 @@ pub fn gen_func_impl<
let param_val = param.into_int_value();
if expected_ty.get_bit_width() == 8 && param_val.get_type().get_bit_width() == 1 {
bool_to_int_type(&builder, param_val, context.i8_type())
bool_to_int_type(&builder, param_val, ctx.ctx.i8_type())
} else {
param_val
}
@@ -925,7 +859,9 @@ pub fn gen_func_impl<
builder.build_unconditional_branch(body_bb).unwrap();
builder.position_at_end(body_bb);
let (dibuilder, compile_unit) = module.create_debug_info_builder(
let is_optimized = registry.codegen_options.opt_level != "0";
let (dibuilder, compile_unit) = ctx.module.create_debug_info_builder(
/* allow_unresolved */ true,
/* language */ inkwell::debug_info::DWARFSourceLanguage::Python,
/* filename */
@@ -935,7 +871,7 @@ pub fn gen_func_impl<
.map_or_else(|| "<nac3_internal>".to_string(), |f| f.location.file.0.to_string()),
/* directory */ "",
/* producer */ "NAC3",
/* is_optimized */ registry.llvm_options.opt_level != OptimizationLevel::None,
/* is_optimized */ is_optimized,
/* compiler command line flags */ "",
/* runtime_ver */ 0,
/* split_name */ "",
@@ -970,12 +906,22 @@ pub fn gen_func_impl<
/* is_definition */ true,
/* scope_line */ row as u32,
/* flags */ inkwell::debug_info::DIFlags::PUBLIC,
/* is_optimized */ registry.llvm_options.opt_level != OptimizationLevel::None,
/* is_optimized */ is_optimized,
);
fn_val.set_subprogram(func_scope);
let debug_info = (dibuilder, compile_unit, func_scope.as_debug_info_scope());
let loc = debug_info.0.create_debug_location(
ctx.ctx,
row as u32,
col as u32,
func_scope.as_debug_info_scope(),
None,
);
builder.set_current_debug_location(loc);
let mut code_gen_context = CodeGenContext {
ctx: context,
inner: ctx,
resolver: task.resolver,
top_level: top_level_ctx.as_ref(),
calls: task.calls,
@@ -992,35 +938,13 @@ pub fn gen_func_impl<
init_bb,
exception_val: Option::default(),
builder,
module,
unifier,
static_value_store,
fn_store,
current_loc: Location::default(),
debug_info: (dibuilder, compile_unit, func_scope.as_debug_info_scope()),
llvm_usize: OnceCell::default(),
debug_info,
};
let target_llvm_usize = context.ptr_sized_int_type(
&registry.llvm_options.create_target_machine().map(|tm| tm.get_target_data()).unwrap(),
None,
);
let generator_llvm_usize = generator.get_size_type(context);
assert_eq!(
generator_llvm_usize, target_llvm_usize,
"CodeGenerator (size_t = {generator_llvm_usize}) is not compatible with CodeGen Target (size_t = {target_llvm_usize})",
);
let loc = code_gen_context.debug_info.0.create_debug_location(
context,
row as u32,
col as u32,
func_scope.as_debug_info_scope(),
None,
);
code_gen_context.builder.set_current_debug_location(loc);
let result = codegen_function(generator, &mut code_gen_context);
let result = codegen_function(generator, &mut code_gen_context).map(|()| fn_val);
// after static analysis, only void functions can have no return at the end.
if !code_gen_context.is_terminated() {
@@ -1030,46 +954,28 @@ pub fn gen_func_impl<
code_gen_context.builder.unset_current_debug_location();
code_gen_context.debug_info.0.finalize();
let CodeGenContext { builder, module, fn_store, .. } = code_gen_context;
if let Err(e) = result {
return Err((builder, e));
}
Ok((builder, module, fn_store, fn_val))
let CodeGenContext { inner, builder, .. } = code_gen_context;
(inner, builder, result)
}
/// Generates LLVM IR for a function.
///
/// * `context` - The [LLVM Context][`Context`] used in generating the function body.
/// * `context` - The [`CoreContext`] we are inserting into.
/// * `builder` - The [`Builder`] used for generating LLVM IR.
/// * `generator` - The [`CodeGenerator`] for generating various program constructs.
/// * `registry` - The [`WorkerRegistry`] responsible for monitoring this function generation task.
/// * `builder` - The [`Builder`] used for generating LLVM IR.
/// * `module` - The [`Module`] of which the generated LLVM function will be inserted into.
/// * `task` - The [`CodeGenTask`] associated with this function generation task.
///
pub fn gen_func<'ctx, G: CodeGenerator>(
context: &'ctx Context,
context: CoreContext<'ctx>,
builder: Builder<'ctx>,
generator: &mut G,
registry: &WorkerRegistry,
builder: Builder<'ctx>,
module: Module<'ctx>,
fn_store: FunctionStore<'ctx>,
task: CodeGenTask,
) -> Result<
(Builder<'ctx>, Module<'ctx>, FunctionStore<'ctx>, FunctionValue<'ctx>),
(Builder<'ctx>, String),
> {
) -> (CoreContext<'ctx>, Builder<'ctx>, Result<FunctionValue<'ctx>, String>) {
let body = task.body.clone();
gen_func_impl(
context,
generator,
registry,
builder,
module,
fn_store,
task,
|generator, ctx| generator.gen_block(ctx, body.iter()),
)
gen_func_impl(context, builder, generator, registry, task, |generator, ctx| {
generator.gen_block(ctx, body.iter())
})
}
/// Converts the value of a boolean-like value `value` into an arbitrary [`IntType`].
@@ -1120,10 +1026,8 @@ fn gen_in_range_check<'ctx>(
stop: IntValue<'ctx>,
step: IntValue<'ctx>,
) -> IntValue<'ctx> {
let sign = ctx
.builder
.build_int_compare(IntPredicate::SGT, step, ctx.ctx.i32_type().const_zero(), "")
.unwrap();
let sign =
ctx.builder.build_int_compare(IntPredicate::SGT, step, ctx.i32.const_zero(), "").unwrap();
let lo = ctx
.builder
.build_select(sign, value, stop, "")
@@ -1194,7 +1098,7 @@ pub fn type_aligned_alloca<'ctx, G: CodeGenerator + ?Sized>(
let llvm_i8 = ctx.ctx.i8_type();
let llvm_pi8 = llvm_i8.ptr_type(AddressSpace::default());
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let align_ty = align_ty.into();
let size = ctx.builder.build_int_truncate_or_bit_cast(size, llvm_usize, "").unwrap();
@@ -1210,7 +1114,7 @@ pub fn type_aligned_alloca<'ctx, G: CodeGenerator + ?Sized>(
let alignment = get_type_alignment(align_ty);
let alignment = ctx.builder.build_int_truncate_or_bit_cast(alignment, llvm_usize, "").unwrap();
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None {
if ctx.registry.codegen_options.debug {
let alignment_bitcount = llvm_intrinsics::call_int_ctpop(ctx, alignment, None);
ctx.make_assert(
@@ -1241,3 +1145,84 @@ pub fn type_aligned_alloca<'ctx, G: CodeGenerator + ?Sized>(
.map(BasicValueEnum::into_pointer_value)
.unwrap()
}
/// Contains all global LLVM state that is independent from Python.
pub struct CoreContext<'ctx> {
    /// The associated LLVM context.
    pub ctx: ContextRef<'ctx>,
    /// The LLVM module that we are generating into.
    pub module: Module<'ctx>,
    /// The `TargetMachine` that we are compiling for.
    pub target: TargetMachine,
    /// The `usize`/`size_t` integer type. Pointer-sized on all supported platforms.
    ///
    /// Cached from the target machine's data layout at construction time (see
    /// [`CoreContext::new`]), so it is always consistent with `target`.
    pub size_t: IntType<'ctx>,
    /// The 32-bit integer type.
    pub i32: IntType<'ctx>,
    /// The 64-bit integer type.
    pub i64: IntType<'ctx>,
    /// Wrapped function declarations.
    ///
    /// Belongs here because it needs to capture all function declarations to the module;
    /// wrapping a new `FunctionStore` around a non-empty `Module` is a logical error.
    fn_store: FunctionStore<'ctx>,
}
impl<'ctx> CoreContext<'ctx> {
    /// Constructs a [`CoreContext`].
    ///
    /// Creates a module named `module_name` targeting the machine described by `options`, and
    /// caches the commonly-used integer types, including the pointer-sized `size_t` derived
    /// from the target's data layout.
    #[must_use]
    pub fn new(ctx: ContextRef<'ctx>, module_name: &str, options: &TargetMachineOptions) -> Self {
        let module = ctx.create_module(module_name);
        let target = options.create_target_machine();

        // Query the target data once; it determines both the pointer-sized integer type and the
        // module's data layout.
        let target_data = target.get_target_data();
        let size_t = ctx.ptr_sized_int_type(&target_data, None);
        let i32 = ctx.i32_type();
        let i64 = ctx.i64_type();
        let fn_store = FunctionStore::new(options);

        module.set_data_layout(&target_data.get_data_layout());
        module.set_triple(&target.get_triple());

        // Module flags required for the DWARF debug information emitted during codegen.
        module.add_basic_value_flag(
            "Debug Info Version",
            inkwell::module::FlagBehavior::Warning,
            i32.const_int(3, false),
        );
        module.add_basic_value_flag(
            "Dwarf Version",
            inkwell::module::FlagBehavior::Warning,
            i32.const_int(4, false),
        );

        Self { ctx, module, target, size_t, i32, i64, fn_store }
    }
}
/// Constructs a [`ContextRef`].
///
/// This is a macro because it needs to declare and borrow from a local [`Context`]; a plain
/// function could not hand out a `ContextRef` that borrows from its own local.
///
/// # Example
///
/// ```
/// # use nac3core::codegen::context_ref;
/// // Constructs a ContextRef named `ctx`.
/// context_ref!(ctx);
/// ```
#[doc(hidden)]
#[macro_export]
macro_rules! __codegen_context_ref {
    ($name:pat) => {
        // Macro hygiene keeps this internal `ctx` binding distinct from `$name`, even when the
        // caller also names its binding `ctx`; the owning `Context` stays alive (shadowed) for
        // the rest of the caller's scope so the `ContextRef` remains valid.
        let ctx = $crate::inkwell::context::Context::create();
        let $name = $crate::codegen::__make_context_ref(&ctx);
    };
}
// Enforces that the ContextRef borrows from the Context.
#[doc(hidden)]
#[must_use]
pub fn __make_context_ref(ctx: &Context) -> ContextRef<'_> {
    // SAFETY: the raw handle is obtained from `ctx`, and the signature ties the returned
    // `ContextRef` to `ctx`'s lifetime, so it cannot outlive the `Context` it was created from.
    // Note: `ctx` is already a reference, so no extra borrow is needed for `as_ctx_ref`.
    unsafe { ContextRef::new(inkwell::context::AsContextRef::as_ctx_ref(ctx)) }
}
#[doc(inline)]
pub use __codegen_context_ref as context_ref;

View File

@@ -37,7 +37,7 @@ pub fn gen_ndarray_empty<'ctx>(
let shape_arg = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
let (dtype, ndims) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_dtype = context.get_llvm_type(dtype);
let ndims = extract_ndims(&context.unifier, ndims);
let shape = parse_numpy_int_sequence(generator, context, (shape_ty, shape_arg));
@@ -62,7 +62,7 @@ pub fn gen_ndarray_zeros<'ctx>(
let shape_arg = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
let (dtype, ndims) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_dtype = context.get_llvm_type(dtype);
let ndims = extract_ndims(&context.unifier, ndims);
let shape = parse_numpy_int_sequence(generator, context, (shape_ty, shape_arg));
@@ -87,7 +87,7 @@ pub fn gen_ndarray_ones<'ctx>(
let shape_arg = args[0].1.clone().to_basic_value_enum(context, generator, shape_ty)?;
let (dtype, ndims) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_dtype = context.get_llvm_type(dtype);
let ndims = extract_ndims(&context.unifier, ndims);
let shape = parse_numpy_int_sequence(generator, context, (shape_ty, shape_arg));
@@ -115,7 +115,7 @@ pub fn gen_ndarray_full<'ctx>(
args[1].1.clone().to_basic_value_enum(context, generator, fill_value_ty)?;
let (dtype, ndims) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_dtype = context.get_llvm_type(dtype);
let ndims = extract_ndims(&context.unifier, ndims);
let shape = parse_numpy_int_sequence(generator, context, (shape_ty, shape_arg));
@@ -162,7 +162,7 @@ pub fn gen_ndarray_array<'ctx>(
let ndims = extract_ndims(&context.unifier, ndims);
let copy = generator.bool_to_i1(context, copy_arg.into_int_value());
let ndarray = NDArrayType::from_unifier_type(generator, context, fun.0.ret)
let ndarray = NDArrayType::from_unifier_type(context, fun.0.ret)
.construct_numpy_array(generator, context, (obj_ty, obj_arg), copy, None)
.atleast_nd(generator, context, ndims);
@@ -207,8 +207,8 @@ pub fn gen_ndarray_eye<'ctx>(
let (dtype, _) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_usize = context.get_size_type();
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_usize = context.size_t;
let llvm_dtype = context.get_llvm_type(dtype);
let nrows = context
.builder
@@ -244,8 +244,8 @@ pub fn gen_ndarray_identity<'ctx>(
let (dtype, _) = unpack_ndarray_var_tys(&mut context.unifier, fun.0.ret);
let llvm_usize = context.get_size_type();
let llvm_dtype = context.get_llvm_type(generator, dtype);
let llvm_usize = context.size_t;
let llvm_dtype = context.get_llvm_type(dtype);
let n = context
.builder
@@ -271,7 +271,7 @@ pub fn gen_ndarray_copy<'ctx>(
let this_arg =
obj.as_ref().unwrap().1.clone().to_basic_value_enum(context, generator, this_ty)?;
let this = NDArrayType::from_unifier_type(generator, context, this_ty)
let this = NDArrayType::from_unifier_type(context, this_ty)
.map_pointer_value(this_arg.into_pointer_value(), None);
let ndarray = this.make_copy(generator, context);
Ok(ndarray.as_abi_value(context))
@@ -294,7 +294,7 @@ pub fn gen_ndarray_fill<'ctx>(
let value_ty = fun.0.args[0].ty;
let value_arg = args[0].1.clone().to_basic_value_enum(context, generator, value_ty)?;
let this = NDArrayType::from_unifier_type(generator, context, this_ty)
let this = NDArrayType::from_unifier_type(context, this_ty)
.map_pointer_value(this_arg.into_pointer_value(), None);
this.fill(generator, context, value_arg);
Ok(())
@@ -317,9 +317,9 @@ pub fn ndarray_dot<'ctx, G: CodeGenerator + ?Sized>(
match (x1, x2) {
(BasicValueEnum::PointerValue(n1), BasicValueEnum::PointerValue(n2)) => {
let a =
NDArrayType::from_unifier_type(generator, ctx, x1_ty).map_pointer_value(n1, None);
NDArrayType::from_unifier_type(ctx, x1_ty).map_pointer_value(n1, None);
let b =
NDArrayType::from_unifier_type(generator, ctx, x2_ty).map_pointer_value(n2, None);
NDArrayType::from_unifier_type(ctx, x2_ty).map_pointer_value(n2, None);
// TODO: General `np.dot()` https://numpy.org/doc/stable/reference/generated/numpy.dot.html.
assert_eq!(a.get_type().ndims(), 1);
@@ -340,7 +340,7 @@ pub fn ndarray_dot<'ctx, G: CodeGenerator + ?Sized>(
ctx.current_loc,
);
let dtype_llvm = ctx.get_llvm_type(generator, common_dtype);
let dtype_llvm = ctx.get_llvm_type(common_dtype);
let result = ctx.builder.build_alloca(dtype_llvm, "np_dot_result").unwrap();
ctx.builder.build_store(result, dtype_llvm.const_zero()).unwrap();

View File

@@ -2,7 +2,6 @@ use inkwell::{
IntPredicate,
basic_block::BasicBlock,
builder::Builder,
module::Module,
types::{BasicMetadataTypeEnum, BasicType, BasicTypeEnum},
values::{BasicValue, BasicValueEnum, FunctionValue, IntValue, PointerValue},
};
@@ -27,7 +26,7 @@ use super::{
},
};
use crate::{
codegen::llvm_fns::FunctionDecl,
codegen::{CoreContext, llvm_fns::FunctionDecl},
symbol_resolver::ValueEnum,
toplevel::{DefinitionId, TopLevelContext, TopLevelDef},
typecheck::{
@@ -38,13 +37,15 @@ use crate::{
pub(crate) fn get_personality<'ctx>(
top_level: &TopLevelContext,
module: &Module<'ctx>,
ctx: &CoreContext<'ctx>,
) -> Option<FunctionValue<'ctx>> {
let sym = top_level.personality_symbol.as_ref()?;
// The personality is the only symbol where we do not use our external function ABI handling.
Some(module.get_function(sym).unwrap_or_else(|| {
module.add_function(sym, module.get_context().i32_type().fn_type(&[], true), None)
}))
Some(
ctx.module
.get_function(sym)
.unwrap_or_else(|| ctx.module.add_function(sym, ctx.i32.fn_type(&[], true), None)),
)
}
/// See [`CodeGenerator::gen_var_alloc`].
@@ -121,7 +122,7 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
Ok(Some(match &pattern.node {
ExprKind::Name { id, .. } => match ctx.var_assignment.get(id) {
None => {
let ptr_ty = ctx.get_llvm_type(generator, pattern.custom.unwrap());
let ptr_ty = ctx.get_llvm_type(pattern.custom.unwrap());
let ptr = generator.gen_var_alloc(ctx, ptr_ty, name)?;
ctx.var_assignment.insert(*id, (ptr, None, 0));
ptr
@@ -145,10 +146,7 @@ pub fn gen_store_target<'ctx, G: CodeGenerator>(
unsafe {
ctx.builder.build_in_bounds_gep(
ptr,
&[
ctx.ctx.i32_type().const_zero(),
ctx.ctx.i32_type().const_int(index as u64, false),
],
&[ctx.i32.const_zero(), ctx.i32.const_int(index as u64, false)],
name.unwrap_or(""),
)
}
@@ -217,7 +215,7 @@ pub fn gen_assign_target_list<'ctx, G: CodeGenerator>(
value: ValueEnum<'ctx>,
value_ty: Type,
) -> Result<(), String> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
match &*ctx.unifier.get_ty(value_ty) {
TypeEnum::TTuple { ty: tuple_tys, .. } => {
// Deconstruct the tuple `value`
@@ -318,8 +316,7 @@ pub fn gen_assign_target_list<'ctx, G: CodeGenerator>(
codegen_unreachable!(ctx);
};
let rhs_list =
ListValue::from_pointer_value(list_ptr, ctx.get_size_type(), Some("rhs_list"));
let rhs_list = ListValue::from_pointer_value(list_ptr, ctx.size_t, Some("rhs_list"));
let rhs_size = rhs_list.load_size(ctx, Some("rhs_size"));
let starred_idx =
targets.iter().position(|t| matches!(t.node, ExprKind::Starred { .. }));
@@ -397,7 +394,7 @@ pub fn gen_assign_target_list<'ctx, G: CodeGenerator>(
.unwrap();
// Allocate a new list for the starred target and copy the data from the rhs list into it
let llvm_array_ty = ctx.get_llvm_type(generator, *ty);
let llvm_array_ty = ctx.get_llvm_type(*ty);
let new_list = ListType::new(ctx, &llvm_array_ty).construct(
generator,
ctx,
@@ -502,7 +499,7 @@ pub fn gen_setitem<'ctx, G: CodeGenerator>(
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
{
// Handle list item assignment
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let target_item_ty = iter_type_vars(list_params).next().unwrap().ty;
let target = generator
@@ -528,7 +525,7 @@ pub fn gen_setitem<'ctx, G: CodeGenerator>(
value.to_basic_value_enum(ctx, generator, value_ty)?.into_pointer_value();
let value = ListValue::from_pointer_value(value, llvm_usize, None);
let target_item_ty = ctx.get_llvm_type(generator, target_item_ty);
let target_item_ty = ctx.get_llvm_type(target_item_ty);
let size = value.load_size(ctx, None);
let Some(src_ind) =
handle_slice_indices(&None, &None, &None, ctx, generator, size)?
@@ -553,18 +550,12 @@ pub fn gen_setitem<'ctx, G: CodeGenerator>(
.unwrap()
.to_basic_value_enum(ctx, generator, key_ty)?
.into_int_value();
let index =
ctx.builder.build_int_s_extend(index, ctx.get_size_type(), "sext").unwrap();
let index = ctx.builder.build_int_s_extend(index, ctx.size_t, "sext").unwrap();
// handle negative index
let is_negative = ctx
.builder
.build_int_compare(
IntPredicate::SLT,
index,
ctx.get_size_type().const_zero(),
"is_neg",
)
.build_int_compare(IntPredicate::SLT, index, ctx.size_t.const_zero(), "is_neg")
.unwrap();
let adjusted = ctx.builder.build_int_add(index, len, "adjusted").unwrap();
let index = ctx
@@ -624,12 +615,12 @@ pub fn gen_setitem<'ctx, G: CodeGenerator>(
// # ...and finally copy 1-1 from value to target.
// ```
let target = NDArrayType::from_unifier_type(generator, ctx, target_ty)
let target = NDArrayType::from_unifier_type(ctx, target_ty)
.map_pointer_value(target.into_pointer_value(), None);
let target = target.index(generator, ctx, &key);
let value = ScalarOrNDArray::from_value(generator, ctx, (value_ty, value))
.to_ndarray(generator, ctx);
let value =
ScalarOrNDArray::from_value(ctx, (value_ty, value)).to_ndarray(generator, ctx);
let broadcast_ndims =
[target.get_type().ndims(), value.get_type().ndims()].into_iter().max().unwrap();
@@ -666,8 +657,8 @@ pub fn gen_for<G: CodeGenerator>(
// if so, remove the static value as it may not be correct in this branch
let var_assignment = ctx.var_assignment.clone();
let int32 = ctx.ctx.i32_type();
let size_t = ctx.get_size_type();
let int32 = ctx.i32;
let size_t = ctx.size_t;
let zero = int32.const_zero();
let current = ctx.builder.get_insert_block().and_then(BasicBlock::get_parent).unwrap();
let body_bb = ctx.ctx.append_basic_block(current, "for.body");
@@ -1422,15 +1413,10 @@ pub fn final_proxy<'ctx>(
/// Inserts the declaration of the builtin function with the specified `symbol` name, and returns
/// the function.
pub fn get_builtins<'ctx, G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
symbol: &str,
) -> FunctionDecl<'ctx> {
let raise_arg = [ctx.get_llvm_type(generator, ctx.primitives.exception)];
pub fn get_builtins<'ctx>(ctx: &mut CodeGenContext<'ctx, '_>, symbol: &str) -> FunctionDecl<'ctx> {
let raise_arg = [ctx.get_llvm_type(ctx.primitives.exception)];
let noreturn = ["noreturn"];
ctx.fn_store.declare_external(
&ctx.module,
ctx.declare_external(
symbol,
None,
match symbol {
@@ -1455,7 +1441,7 @@ pub fn exn_constructor<'ctx>(
) -> Result<Option<BasicValueEnum<'ctx>>, String> {
let (zelf_ty, zelf) = obj.unwrap();
let zelf = zelf.to_basic_value_enum(ctx, generator, zelf_ty)?.into_pointer_value();
let int32 = ctx.ctx.i32_type();
let int32 = ctx.i32;
let zero = int32.const_zero();
let zelf_id = if let TypeEnum::TObj { obj_id, .. } = &*ctx.unifier.get_ty(zelf_ty) {
obj_id.0
@@ -1484,7 +1470,7 @@ pub fn exn_constructor<'ctx>(
ctx.builder.build_store(ptr, msg).unwrap();
for i in &[6, 7, 8] {
let value = if args.is_empty() {
ctx.ctx.i64_type().const_zero().into()
ctx.i64.const_zero().into()
} else {
args.remove(0).1.to_basic_value_enum(ctx, generator, ctx.primitives.int64)?
};
@@ -1531,11 +1517,11 @@ pub fn gen_raise<'ctx, G: CodeGenerator + ?Sized>(
let fun_name = ctx.gen_string(generator, current_fun.get_name().to_str().unwrap());
exception.store_func(ctx, fun_name);
let raise = get_builtins(generator, ctx, "__nac3_raise");
let raise = get_builtins(ctx, "__nac3_raise");
let exception = *exception;
ctx.build_call_or_invoke(&raise, &[exception.as_abi_value(ctx).into()], "raise");
} else {
let resume = get_builtins(generator, ctx, "__nac3_resume");
let resume = get_builtins(ctx, "__nac3_resume");
ctx.build_call_or_invoke(&resume, &[], "resume");
}
ctx.builder.build_unreachable().unwrap();
@@ -1552,8 +1538,8 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
};
// if we need to generate anything related to exception, we must have personality defined
let personality = get_personality(ctx.top_level, &ctx.module).unwrap();
let exception_type = ctx.get_llvm_type(generator, ctx.primitives.exception);
let personality = get_personality(ctx.top_level, ctx).unwrap();
let exception_type = ctx.get_llvm_type(ctx.primitives.exception);
let ptr_type = ctx.ctx.i8_type().ptr_type(inkwell::AddressSpace::default());
let current_block = ctx.builder.get_insert_block().unwrap();
let current_fun = current_block.get_parent().unwrap();
@@ -1625,9 +1611,8 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
};
let exception_name = format!("{}:{}", ctx.resolver.get_exception_id(obj_id.0), exn_name);
let exn_id = ctx.resolver.get_string_id(&exception_name);
let exn_id_global =
ctx.module.add_global(ctx.ctx.i32_type(), None, &format!("exn.{exn_id}"));
exn_id_global.set_initializer(&ctx.ctx.i32_type().const_int(exn_id as u64, false));
let exn_id_global = ctx.module.add_global(ctx.i32, None, &format!("exn.{exn_id}"));
exn_id_global.set_initializer(&ctx.i32.const_int(exn_id as u64, false));
clauses.push(Some(exn_id_global.as_pointer_value().as_basic_value_enum()));
}
let mut all_clauses = clauses.clone();
@@ -1687,8 +1672,8 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
&mut redirect_lambda
as &mut dyn FnMut(&mut CodeGenContext<'ctx, 'a>, BasicBlock<'ctx>, BasicBlock<'ctx>)
};
let resume = get_builtins(generator, ctx, "__nac3_resume");
let end_catch = get_builtins(generator, ctx, "__nac3_end_catch");
let resume = get_builtins(ctx, "__nac3_resume");
let end_catch = get_builtins(ctx, "__nac3_end_catch");
if let Some((continue_target, break_target)) = ctx.loop_target.take() {
let break_proxy = ctx.ctx.append_basic_block(current_fun, "try.break");
let continue_proxy = ctx.ctx.append_basic_block(current_fun, "try.continue");
@@ -1723,7 +1708,7 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
} else {
ctx.builder.position_at_end(dispatcher);
unsafe {
let zero = ctx.ctx.i32_type().const_zero();
let zero = ctx.i32.const_zero();
let exnid_ptr = ctx
.builder
.build_gep(exn.as_basic_value().into_pointer_value(), &[zero, zero], "exnidptr")
@@ -1737,7 +1722,7 @@ pub fn gen_try<'ctx, 'a, G: CodeGenerator>(
let handler_bb = ctx.ctx.append_basic_block(current_fun, "try.handler");
ctx.builder.position_at_end(handler_bb);
if let Some(name) = name {
let exn_ty = ctx.get_llvm_type(generator, type_.as_ref().unwrap().custom.unwrap());
let exn_ty = ctx.get_llvm_type(type_.as_ref().unwrap().custom.unwrap());
let exn_store = generator.gen_var_alloc(ctx, exn_ty, Some("try.exn_store.addr"))?;
ctx.var_assignment.insert(*name, (exn_store, None, 0));
ctx.builder.build_store(exn_store, exn.as_basic_value()).unwrap();
@@ -1960,8 +1945,8 @@ pub fn gen_with<'ctx, 'a, G: CodeGenerator>(
};
// copied and trimmed from gen_try, to cover try (setup, enter)..finally (exit)
let personality = get_personality(ctx.top_level, &ctx.module).unwrap();
let exception_type = ctx.get_llvm_type(generator, ctx.primitives.exception);
let personality = get_personality(ctx.top_level, ctx).unwrap();
let exception_type = ctx.get_llvm_type(ctx.primitives.exception);
let ptr_type = ctx.ctx.i8_type().ptr_type(inkwell::AddressSpace::default());
let current_block = ctx.builder.get_insert_block().unwrap();
let current_fun = current_block.get_parent().unwrap();
@@ -2028,8 +2013,8 @@ pub fn gen_with<'ctx, 'a, G: CodeGenerator>(
let redirect = &mut final_proxy_lambda
as &mut dyn FnMut(&mut CodeGenContext<'ctx, 'a>, BasicBlock<'ctx>, BasicBlock<'ctx>);
let resume = get_builtins(generator, ctx, "__nac3_resume");
let end_catch = get_builtins(generator, ctx, "__nac3_end_catch");
let resume = get_builtins(ctx, "__nac3_resume");
let end_catch = get_builtins(ctx, "__nac3_end_catch");
if let Some((continue_target, break_target)) = ctx.loop_target.take() {
let break_proxy = ctx.ctx.append_basic_block(current_fun, "with.break");
let continue_proxy = ctx.ctx.append_basic_block(current_fun, "with.continue");
@@ -2250,7 +2235,7 @@ pub fn gen_stmt<G: CodeGenerator>(
} else {
return Ok(());
};
let exc = ExceptionType::get_instance(generator, ctx)
let exc = ExceptionType::get_instance(ctx)
.map_pointer_value(exc.into_pointer_value(), None);
gen_raise(generator, ctx, Some(&exc), stmt.location);
} else {

View File

@@ -18,12 +18,13 @@ use nac3parser::{
};
use super::{
CodeGenContext, CodeGenLLVMOptions, CodeGenTargetMachineOptions, CodeGenTask, CodeGenerator,
DefaultCodeGenerator, WithCall, WorkerRegistry,
CodeGenContext, CodeGenOptions, CodeGenTask, CodeGenerator, DefaultCodeGenerator,
TargetMachineOptions, WithCall, WorkerRegistry,
concrete_type::ConcreteTypeStore,
types::{ListType, ProxyType, RangeType, ndarray::NDArrayType},
};
use crate::{
codegen::{CoreContext, context_ref},
symbol_resolver::{SymbolResolver, ValueEnum},
toplevel::{
DefinitionId, FunInstance, TopLevelContext, TopLevelDef,
@@ -90,6 +91,16 @@ impl SymbolResolver for Resolver {
}
}
fn codegen_options() -> CodeGenOptions {
Target::initialize_native(&InitializationConfig::default()).unwrap();
// We want things like debug assertions, but we otherwise want to run on optimized code.
CodeGenOptions {
opt_level: String::from("2"),
debug: true,
target: TargetMachineOptions::from_host_triple(OptimizationLevel::Default),
}
}
#[test]
#[named]
fn test_primitives() {
@@ -100,7 +111,6 @@ fn test_primitives() {
"};
let statements = parse_program(source, FileName::default()).unwrap();
let context = inkwell::context::Context::create();
let composer = TopLevelComposer::new(Vec::new(), Vec::new(), ComposerConfig::default(), 64).0;
let mut unifier = composer.unifier.clone();
let primitives = composer.primitives_ty;
@@ -111,7 +121,7 @@ fn test_primitives() {
Arc::new(Resolver { id_to_type: HashMap::new(), id_to_def: RwLock::new(HashMap::new()) })
as Arc<dyn SymbolResolver + Send + Sync>;
let threads = vec![DefaultCodeGenerator::new("test".into(), context.i64_type()).into()];
let threads = vec![DefaultCodeGenerator::new("test".into()).into()];
let signature = FunSignature {
args: vec![
FuncArg {
@@ -190,13 +200,8 @@ fn test_primitives() {
);
})));
Target::initialize_all(&InitializationConfig::default());
let llvm_options = CodeGenLLVMOptions {
opt_level: OptimizationLevel::Default,
target: CodeGenTargetMachineOptions::from_host_triple(),
};
let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, &llvm_options, &f);
let (registry, handles) =
WorkerRegistry::create_workers(threads, top_level, &codegen_options(), &f);
registry.add_task(task);
registry.wait_tasks_complete(handles);
}
@@ -215,7 +220,6 @@ fn test_simple_call() {
"};
let statements_2 = parse_program(source_2, FileName::default()).unwrap();
let context = inkwell::context::Context::create();
let composer = TopLevelComposer::new(Vec::new(), Vec::new(), ComposerConfig::default(), 64).0;
let mut unifier = composer.unifier.clone();
let primitives = composer.primitives_ty;
@@ -264,7 +268,7 @@ fn test_simple_call() {
unreachable!()
}
let threads = vec![DefaultCodeGenerator::new("test".into(), context.i64_type()).into()];
let threads = vec![DefaultCodeGenerator::new("test".into()).into()];
let mut function_data = FunctionData {
resolver: resolver.clone(),
bound_variables: Vec::new(),
@@ -344,48 +348,36 @@ fn test_simple_call() {
);
})));
Target::initialize_all(&InitializationConfig::default());
let llvm_options = CodeGenLLVMOptions {
opt_level: OptimizationLevel::Default,
target: CodeGenTargetMachineOptions::from_host_triple(),
};
let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, &llvm_options, &f);
let (registry, handles) =
WorkerRegistry::create_workers(threads, top_level, &codegen_options(), &f);
registry.add_task(task);
registry.wait_tasks_complete(handles);
}
#[test]
fn test_classes_list_type_new() {
let ctx = inkwell::context::Context::create();
let generator = DefaultCodeGenerator::new(String::new(), ctx.i64_type());
let llvm_i32 = ctx.i32_type();
let llvm_usize = generator.get_size_type(&ctx);
let llvm_list = ListType::new_with_generator(&generator, &ctx, llvm_i32.into());
assert!(ListType::is_representable(llvm_list.as_abi_type(), llvm_usize).is_ok());
context_ref!(ctx);
let ctx = CoreContext::new(ctx, "test_classes_list_type_new", &codegen_options().target);
let llvm_i32 = ctx.i32;
let llvm_list = ListType::new(&ctx, &llvm_i32);
assert!(ListType::is_representable(llvm_list.as_abi_type(), ctx.size_t).is_ok());
}
#[test]
fn test_classes_range_type_new() {
let ctx = inkwell::context::Context::create();
let generator = DefaultCodeGenerator::new(String::new(), ctx.i64_type());
let llvm_usize = generator.get_size_type(&ctx);
let llvm_range = RangeType::new_with_generator(&generator, &ctx);
context_ref!(ctx);
let ctx = CoreContext::new(ctx, "test_classes_range_type_new", &codegen_options().target);
let llvm_usize = ctx.size_t;
let llvm_range = RangeType::new(&ctx);
assert!(RangeType::is_representable(llvm_range.as_abi_type(), llvm_usize).is_ok());
}
#[test]
fn test_classes_ndarray_type_new() {
let ctx = inkwell::context::Context::create();
let generator = DefaultCodeGenerator::new(String::new(), ctx.i64_type());
let llvm_i32 = ctx.i32_type();
let llvm_usize = generator.get_size_type(&ctx);
let llvm_ndarray = NDArrayType::new_with_generator(&generator, &ctx, llvm_i32.into(), 2);
context_ref!(ctx);
let ctx = CoreContext::new(ctx, "test_classes_ndarray_type_new", &codegen_options().target);
let llvm_usize = ctx.size_t;
let llvm_i32 = ctx.i32;
let llvm_ndarray = NDArrayType::new(&ctx, llvm_i32.into(), 2);
assert!(NDArrayType::is_representable(llvm_ndarray.as_abi_type(), llvm_usize).is_ok());
}

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -13,7 +13,7 @@ use super::{
structure::{StructField, StructFields, StructProxyType, check_struct_type_matches_fields},
};
use crate::{
codegen::{CodeGenContext, CodeGenerator, values::ExceptionValue},
codegen::{CoreContext, CodeGenContext, CodeGenerator, values::ExceptionValue},
typecheck::typedef::{Type, TypeEnum},
};
@@ -63,16 +63,13 @@ pub struct ExceptionStructFields<'ctx> {
impl<'ctx> ExceptionType<'ctx> {
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
#[must_use]
fn fields(
ctx: impl AsContextRef<'ctx>,
llvm_usize: IntType<'ctx>,
) -> ExceptionStructFields<'ctx> {
fn fields(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> ExceptionStructFields<'ctx> {
ExceptionStructFields::new(ctx, llvm_usize)
}
/// Creates an LLVM type corresponding to the expected structure of an `Exception`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
const NAME: &str = "Exception";
assert!(ctx.get_struct_type("str").is_some());
@@ -88,7 +85,7 @@ impl<'ctx> ExceptionType<'ctx> {
}
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_str = Self::llvm_type(ctx, llvm_usize);
Self { ty: llvm_str, llvm_usize }
@@ -96,17 +93,8 @@ impl<'ctx> ExceptionType<'ctx> {
/// Creates an instance of [`ExceptionType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
/// Creates an instance of [`ExceptionType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates an [`ExceptionType`] from a [unifier type][Type].
@@ -117,7 +105,7 @@ impl<'ctx> ExceptionType<'ctx> {
matches!(&*ctx.unifier.get_ty_immutable(ty), TypeEnum::TObj { obj_id, .. } if *obj_id == ctx.primitives.exception.obj_id(&ctx.unifier).unwrap())
);
Self::new_impl(ctx.ctx, ctx.get_size_type())
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates an [`ExceptionType`] from a [`StructType`] representing an `Exception`.
@@ -137,13 +125,10 @@ impl<'ctx> ExceptionType<'ctx> {
/// Returns an instance of [`ExceptionType`] by obtaining the LLVM representation of the builtin
/// `Exception` type.
#[must_use]
pub fn get_instance<G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
) -> Self {
pub fn get_instance(ctx: &mut CodeGenContext<'ctx, '_>) -> Self {
Self::from_pointer_type(
ctx.get_llvm_type(generator, ctx.primitives.exception).into_pointer_type(),
ctx.get_size_type(),
ctx.get_llvm_type(ctx.primitives.exception).into_pointer_type(),
ctx.size_t,
)
}

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace, IntPredicate, OptimizationLevel,
context::Context,
AddressSpace, IntPredicate,
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -11,7 +11,7 @@ use nac3core_derive::StructFields;
use super::ProxyType;
use crate::{
codegen::{
CodeGenContext, CodeGenerator,
CodeGenContext, CodeGenerator, CoreContext,
types::structure::{
FieldIndexCounter, StructField, StructFields, StructProxyType,
check_struct_type_matches_fields,
@@ -66,7 +66,7 @@ impl<'ctx> ListType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of a `List`.
#[must_use]
fn llvm_type(
ctx: &'ctx Context,
ctx: ContextRef<'ctx>,
element_type: Option<BasicTypeEnum<'ctx>>,
llvm_usize: IntType<'ctx>,
) -> PointerType<'ctx> {
@@ -79,7 +79,7 @@ impl<'ctx> ListType<'ctx> {
}
fn new_impl(
ctx: &'ctx Context,
ctx: ContextRef<'ctx>,
element_type: Option<BasicTypeEnum<'ctx>>,
llvm_usize: IntType<'ctx>,
) -> Self {
@@ -90,42 +90,19 @@ impl<'ctx> ListType<'ctx> {
/// Creates an instance of [`ListType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, element_type: &impl BasicType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, Some(element_type.as_basic_type_enum()), ctx.get_size_type())
}
/// Creates an instance of [`ListType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
element_type: BasicTypeEnum<'ctx>,
) -> Self {
Self::new_impl(ctx, Some(element_type.as_basic_type_enum()), generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>, element_type: &impl BasicType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, Some(element_type.as_basic_type_enum()), ctx.size_t)
}
/// Creates an instance of [`ListType`] with an unknown element type.
#[must_use]
pub fn new_untyped(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, None, ctx.get_size_type())
}
/// Creates an instance of [`ListType`] with an unknown element type.
#[must_use]
pub fn new_untyped_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, None, generator.get_size_type(ctx))
Self::new_impl(ctx.ctx, None, ctx.size_t)
}
/// Creates an [`ListType`] from a [unifier type][Type].
#[must_use]
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: Type,
) -> Self {
pub fn from_unifier_type(ctx: &mut CodeGenContext<'ctx, '_>, ty: Type) -> Self {
// Check unifier type and extract `item_type`
let elem_type = match &*ctx.unifier.get_ty_immutable(ty) {
TypeEnum::TObj { obj_id, params, .. }
@@ -137,11 +114,11 @@ impl<'ctx> ListType<'ctx> {
_ => panic!("Expected `list` type, but got {}", ctx.unifier.stringify(ty)),
};
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let llvm_elem_type = if let TypeEnum::TVar { .. } = &*ctx.unifier.get_ty_immutable(ty) {
None
} else {
Some(ctx.get_llvm_type(generator, elem_type))
Some(ctx.get_llvm_type(elem_type))
};
Self::new_impl(ctx.ctx, llvm_elem_type, llvm_usize)
@@ -243,7 +220,7 @@ impl<'ctx> ListType<'ctx> {
let len = ctx.builder.build_int_z_extend(len, self.llvm_usize, "").unwrap();
// Generate a runtime assertion if allocating a non-empty list with unknown element type
if ctx.registry.llvm_options.opt_level == OptimizationLevel::None && self.item.is_none() {
if ctx.registry.codegen_options.debug && self.item.is_none() {
let len_eqz = ctx
.builder
.build_int_compare(IntPredicate::EQ, len, self.llvm_usize.const_zero(), "")

View File

@@ -19,15 +19,14 @@ use crate::{
};
/// Get the expected `dtype` and `ndims` of the ndarray returned by `np_array(<list>)`.
fn get_list_object_dtype_and_ndims<'ctx, G: CodeGenerator + ?Sized>(
generator: &G,
fn get_list_object_dtype_and_ndims<'ctx>(
ctx: &mut CodeGenContext<'ctx, '_>,
list_ty: Type,
) -> (BasicTypeEnum<'ctx>, u64) {
let dtype = arraylike_flatten_element_type(&mut ctx.unifier, list_ty);
let ndims = arraylike_get_ndims(&mut ctx.unifier, list_ty);
(ctx.get_llvm_type(generator, dtype), ndims)
(ctx.get_llvm_type(dtype), ndims)
}
impl<'ctx> NDArrayType<'ctx> {
@@ -39,7 +38,7 @@ impl<'ctx> NDArrayType<'ctx> {
(list_ty, list): (Type, ListValue<'ctx>),
name: Option<&'ctx str>,
) -> <Self as ProxyType<'ctx>>::Value {
let (dtype, ndims_int) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
let (dtype, ndims_int) = get_list_object_dtype_and_ndims(ctx, list_ty);
assert!(self.ndims >= ndims_int);
assert_eq!(dtype, self.dtype);
@@ -86,7 +85,7 @@ impl<'ctx> NDArrayType<'ctx> {
//
// If `list` is `list[list[T]]` or worse, copy.
let (dtype, ndims) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
let (dtype, ndims) = get_list_object_dtype_and_ndims(ctx, list_ty);
if ndims == 1 {
// `list` is not nested
assert_eq!(ndims, 1);
@@ -137,7 +136,7 @@ impl<'ctx> NDArrayType<'ctx> {
) -> <Self as ProxyType<'ctx>>::Value {
assert_eq!(copy.get_type(), ctx.ctx.bool_type());
let (dtype, ndims) = get_list_object_dtype_and_ndims(generator, ctx, list_ty);
let (dtype, ndims) = get_list_object_dtype_and_ndims(ctx, list_ty);
let ndarray = gen_if_else_expr_callback(
generator,
@@ -220,7 +219,7 @@ impl<'ctx> NDArrayType<'ctx> {
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.list.obj_id(&ctx.unifier).unwrap() =>
{
let list = ListType::from_unifier_type(generator, ctx, object_ty)
let list = ListType::from_unifier_type(ctx, object_ty)
.map_pointer_value(object.into_pointer_value(), None);
self.construct_numpy_array_list_impl(generator, ctx, (object_ty, list), copy, name)
}
@@ -228,7 +227,7 @@ impl<'ctx> NDArrayType<'ctx> {
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
{
let ndarray = NDArrayType::from_unifier_type(generator, ctx, object_ty)
let ndarray = NDArrayType::from_unifier_type(ctx, object_ty)
.map_pointer_value(object.into_pointer_value(), None);
self.construct_numpy_array_ndarray_impl(generator, ctx, ndarray, copy, name)
}

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -9,7 +9,7 @@ use itertools::Itertools;
use nac3core_derive::StructFields;
use crate::codegen::{
CodeGenContext, CodeGenerator,
CoreContext, CodeGenContext, CodeGenerator,
types::{
ProxyType,
structure::{StructField, StructFields, StructProxyType, check_struct_type_matches_fields},
@@ -34,23 +34,20 @@ pub struct ShapeEntryStructFields<'ctx> {
impl<'ctx> ShapeEntryType<'ctx> {
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
#[must_use]
fn fields(
ctx: impl AsContextRef<'ctx>,
llvm_usize: IntType<'ctx>,
) -> ShapeEntryStructFields<'ctx> {
fn fields(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> ShapeEntryStructFields<'ctx> {
ShapeEntryStructFields::new(ctx, llvm_usize)
}
/// Creates an LLVM type corresponding to the expected structure of a `ShapeEntry`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys =
Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_ty = Self::llvm_type(ctx, llvm_usize);
Self { ty: llvm_ty, llvm_usize }
@@ -58,17 +55,8 @@ impl<'ctx> ShapeEntryType<'ctx> {
/// Creates an instance of [`ShapeEntryType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
/// Creates an instance of [`ShapeEntryType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates a [`ShapeEntryType`] from a [`StructType`] representing an `ShapeEntry`.

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::Context,
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -10,7 +10,7 @@ use nac3core_derive::StructFields;
use crate::{
codegen::{
CodeGenContext, CodeGenerator,
CoreContext, CodeGenContext, CodeGenerator,
types::{
ProxyType,
structure::{
@@ -71,7 +71,7 @@ impl<'ctx> ContiguousNDArrayType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of an `NDArray`.
#[must_use]
fn llvm_type(
ctx: &'ctx Context,
ctx: ContextRef<'ctx>,
item: BasicTypeEnum<'ctx>,
llvm_usize: IntType<'ctx>,
) -> PointerType<'ctx> {
@@ -81,7 +81,11 @@ impl<'ctx> ContiguousNDArrayType<'ctx> {
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, item: BasicTypeEnum<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(
ctx: ContextRef<'ctx>,
item: BasicTypeEnum<'ctx>,
llvm_usize: IntType<'ctx>,
) -> Self {
let llvm_cndarray = Self::llvm_type(ctx, item, llvm_usize);
Self { ty: llvm_cndarray, item, llvm_usize }
@@ -89,32 +93,21 @@ impl<'ctx> ContiguousNDArrayType<'ctx> {
/// Creates an instance of [`ContiguousNDArrayType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, item: &impl BasicType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, item.as_basic_type_enum(), ctx.get_size_type())
}
/// Creates an instance of [`ContiguousNDArrayType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
item: BasicTypeEnum<'ctx>,
) -> Self {
Self::new_impl(ctx, item, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>, item: &impl BasicType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, item.as_basic_type_enum(), ctx.size_t)
}
/// Creates an [`ContiguousNDArrayType`] from a [unifier type][Type].
#[must_use]
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: Type,
) -> Self {
let (dtype, _) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
let llvm_dtype = ctx.get_llvm_type(generator, dtype);
let llvm_dtype = ctx.get_llvm_type(dtype);
Self::new_impl(ctx.ctx, llvm_dtype, ctx.get_size_type())
Self::new_impl(ctx.ctx, llvm_dtype, ctx.size_t)
}
/// Creates an [`ContiguousNDArrayType`] from a [`StructType`] representing an `NDArray`.

View File

@@ -21,12 +21,12 @@ fn ndarray_zero_value<'ctx, G: CodeGenerator + ?Sized>(
.iter()
.any(|ty| ctx.unifier.unioned(dtype, *ty))
{
ctx.ctx.i32_type().const_zero().into()
ctx.i32.const_zero().into()
} else if [ctx.primitives.int64, ctx.primitives.uint64]
.iter()
.any(|ty| ctx.unifier.unioned(dtype, *ty))
{
ctx.ctx.i64_type().const_zero().into()
ctx.i64.const_zero().into()
} else if ctx.unifier.unioned(dtype, ctx.primitives.float) {
ctx.ctx.f64_type().const_zero().into()
} else if ctx.unifier.unioned(dtype, ctx.primitives.bool) {
@@ -49,13 +49,13 @@ fn ndarray_one_value<'ctx, G: CodeGenerator + ?Sized>(
.any(|ty| ctx.unifier.unioned(dtype, *ty))
{
let is_signed = ctx.unifier.unioned(dtype, ctx.primitives.int32);
ctx.ctx.i32_type().const_int(1, is_signed).into()
ctx.i32.const_int(1, is_signed).into()
} else if [ctx.primitives.int64, ctx.primitives.uint64]
.iter()
.any(|ty| ctx.unifier.unioned(dtype, *ty))
{
let is_signed = ctx.unifier.unioned(dtype, ctx.primitives.int64);
ctx.ctx.i64_type().const_int(1, is_signed).into()
ctx.i64.const_int(1, is_signed).into()
} else if ctx.unifier.unioned(dtype, ctx.primitives.float) {
ctx.ctx.f64_type().const_float(1.0).into()
} else if ctx.unifier.unioned(dtype, ctx.primitives.bool) {
@@ -114,11 +114,11 @@ impl<'ctx> NDArrayType<'ctx> {
name: Option<&'ctx str>,
) -> <Self as ProxyType<'ctx>>::Value {
assert_eq!(
ctx.get_llvm_type(generator, dtype),
ctx.get_llvm_type(dtype),
self.dtype,
"Expected LLVM dtype={} but got {}",
self.dtype.print_to_string(),
ctx.get_llvm_type(generator, dtype).print_to_string(),
ctx.get_llvm_type(dtype).print_to_string(),
);
let fill_value = ndarray_zero_value(generator, ctx, dtype);
@@ -136,11 +136,11 @@ impl<'ctx> NDArrayType<'ctx> {
name: Option<&'ctx str>,
) -> <Self as ProxyType<'ctx>>::Value {
assert_eq!(
ctx.get_llvm_type(generator, dtype),
ctx.get_llvm_type(dtype),
self.dtype,
"Expected LLVM dtype={} but got {}",
self.dtype.print_to_string(),
ctx.get_llvm_type(generator, dtype).print_to_string(),
ctx.get_llvm_type(dtype).print_to_string(),
);
let fill_value = ndarray_one_value(generator, ctx, dtype);
@@ -161,11 +161,11 @@ impl<'ctx> NDArrayType<'ctx> {
name: Option<&'ctx str>,
) -> <Self as ProxyType<'ctx>>::Value {
assert_eq!(
ctx.get_llvm_type(generator, dtype),
ctx.get_llvm_type(dtype),
self.dtype,
"Expected LLVM dtype={} but got {}",
self.dtype.print_to_string(),
ctx.get_llvm_type(generator, dtype).print_to_string(),
ctx.get_llvm_type(dtype).print_to_string(),
);
assert_eq!(nrows.get_type(), self.llvm_usize);
assert_eq!(ncols.get_type(), self.llvm_usize);

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -9,7 +9,7 @@ use itertools::Itertools;
use nac3core_derive::StructFields;
use crate::codegen::{
CodeGenContext, CodeGenerator,
CoreContext, CodeGenContext, CodeGenerator,
types::{
ProxyType,
structure::{StructField, StructFields, StructProxyType, check_struct_type_matches_fields},
@@ -36,38 +36,27 @@ pub struct NDIndexStructFields<'ctx> {
impl<'ctx> NDIndexType<'ctx> {
#[must_use]
fn fields(
ctx: impl AsContextRef<'ctx>,
llvm_usize: IntType<'ctx>,
) -> NDIndexStructFields<'ctx> {
fn fields(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> NDIndexStructFields<'ctx> {
NDIndexStructFields::new(ctx, llvm_usize)
}
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys =
Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_ndindex = Self::llvm_type(ctx, llvm_usize);
Self { ty: llvm_ndindex, llvm_usize }
}
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
#[must_use]
@@ -132,7 +121,7 @@ impl<'ctx> NDIndexType<'ctx> {
ndindices.ptr_offset_unchecked(
ctx,
generator,
&ctx.ctx.i64_type().const_int(u64::try_from(i).unwrap(), false),
&ctx.i64.const_int(u64::try_from(i).unwrap(), false),
None,
)
};

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{BasicValue, IntValue, PointerValue, StructValue},
};
@@ -14,8 +14,8 @@ use super::{
};
use crate::{
codegen::{
CoreContext, CodeGenContext, CodeGenerator,
values::{TypedArrayLikeMutator, ndarray::NDArrayValue},
{CodeGenContext, CodeGenerator},
},
toplevel::{helper::extract_ndims, numpy::unpack_ndarray_var_tys},
typecheck::typedef::Type,
@@ -64,16 +64,13 @@ pub struct NDArrayStructFields<'ctx> {
impl<'ctx> NDArrayType<'ctx> {
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
#[must_use]
fn fields(
ctx: impl AsContextRef<'ctx>,
llvm_usize: IntType<'ctx>,
) -> NDArrayStructFields<'ctx> {
fn fields(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> NDArrayStructFields<'ctx> {
NDArrayStructFields::new(ctx, llvm_usize)
}
/// Creates an LLVM type corresponding to the expected structure of an `NDArray`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys =
Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();
@@ -81,7 +78,7 @@ impl<'ctx> NDArrayType<'ctx> {
}
fn new_impl(
ctx: &'ctx Context,
ctx: ContextRef<'ctx>,
dtype: BasicTypeEnum<'ctx>,
ndims: u64,
llvm_usize: IntType<'ctx>,
@@ -93,19 +90,8 @@ impl<'ctx> NDArrayType<'ctx> {
/// Creates an instance of [`NDArrayType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, dtype: BasicTypeEnum<'ctx>, ndims: u64) -> Self {
Self::new_impl(ctx.ctx, dtype, ndims, ctx.get_size_type())
}
/// Creates an instance of [`NDArrayType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
dtype: BasicTypeEnum<'ctx>,
ndims: u64,
) -> Self {
Self::new_impl(ctx, dtype, ndims, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>, dtype: BasicTypeEnum<'ctx>, ndims: u64) -> Self {
Self::new_impl(ctx.ctx, dtype, ndims, ctx.size_t)
}
/// Creates an instance of [`NDArrayType`] as a result of a broadcast operation over one or more
@@ -122,58 +108,25 @@ impl<'ctx> NDArrayType<'ctx> {
ctx.ctx,
dtype,
inputs.iter().map(NDArrayType::ndims).max().unwrap(),
ctx.get_size_type(),
)
}
/// Creates an instance of [`NDArrayType`] as a result of a broadcast operation over one or more
/// `ndarray` operands.
#[must_use]
pub fn new_broadcast_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
dtype: BasicTypeEnum<'ctx>,
inputs: &[NDArrayType<'ctx>],
) -> Self {
assert!(!inputs.is_empty());
Self::new_impl(
ctx,
dtype,
inputs.iter().map(NDArrayType::ndims).max().unwrap(),
generator.get_size_type(ctx),
ctx.size_t,
)
}
/// Creates an instance of [`NDArrayType`] with `ndims` of 0.
#[must_use]
pub fn new_unsized(ctx: &CodeGenContext<'ctx, '_>, dtype: BasicTypeEnum<'ctx>) -> Self {
Self::new_impl(ctx.ctx, dtype, 0, ctx.get_size_type())
}
/// Creates an instance of [`NDArrayType`] with `ndims` of 0.
#[must_use]
pub fn new_unsized_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
dtype: BasicTypeEnum<'ctx>,
) -> Self {
Self::new_impl(ctx, dtype, 0, generator.get_size_type(ctx))
Self::new_impl(ctx.ctx, dtype, 0, ctx.size_t)
}
/// Creates an [`NDArrayType`] from a [unifier type][Type].
#[must_use]
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: Type,
) -> Self {
pub fn from_unifier_type(ctx: &mut CodeGenContext<'ctx, '_>, ty: Type) -> Self {
let (dtype, ndims) = unpack_ndarray_var_tys(&mut ctx.unifier, ty);
let llvm_dtype = ctx.get_llvm_type(generator, dtype);
let llvm_dtype = ctx.get_llvm_type(dtype);
let ndims = extract_ndims(&ctx.unifier, ndims);
Self::new_impl(ctx.ctx, llvm_dtype, ndims, ctx.get_size_type())
Self::new_impl(ctx.ctx, llvm_dtype, ndims, ctx.size_t)
}
/// Creates an [`NDArrayType`] from a [`StructType`] representing an `NDArray`.
@@ -325,7 +278,7 @@ impl<'ctx> NDArrayType<'ctx> {
let ndarray = Self::new(ctx, self.dtype, shape.len() as u64)
.construct_uninitialized(generator, ctx, name);
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
// Write shape
let ndarray_shape = ndarray.shape();
@@ -360,7 +313,7 @@ impl<'ctx> NDArrayType<'ctx> {
let ndarray = Self::new(ctx, self.dtype, shape.len() as u64)
.construct_uninitialized(generator, ctx, name);
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
// Write shape
let ndarray_shape = ndarray.shape();

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -9,7 +9,7 @@ use itertools::Itertools;
use nac3core_derive::StructFields;
use crate::codegen::{
CodeGenContext, CodeGenerator, irrt,
CoreContext, CodeGenContext, CodeGenerator, irrt,
types::{
ProxyType,
structure::{StructField, StructFields, StructProxyType, check_struct_type_matches_fields},
@@ -47,20 +47,20 @@ pub struct NDIterStructFields<'ctx> {
impl<'ctx> NDIterType<'ctx> {
/// Returns an instance of [`StructFields`] containing all field accessors for this type.
#[must_use]
fn fields(ctx: impl AsContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> NDIterStructFields<'ctx> {
fn fields(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> NDIterStructFields<'ctx> {
NDIterStructFields::new(ctx, llvm_usize)
}
/// Creates an LLVM type corresponding to the expected structure of an `NDIter`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys =
Self::fields(ctx, llvm_usize).into_iter().map(|field| field.1).collect_vec();
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_nditer = Self::llvm_type(ctx, llvm_usize);
Self { ty: llvm_nditer, llvm_usize }
@@ -68,17 +68,8 @@ impl<'ctx> NDIterType<'ctx> {
/// Creates an instance of [`NDIter`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
/// Creates an instance of [`NDIter`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates an [`NDIterType`] from a [`StructType`] representing an `NDIter`.

View File

@@ -1,13 +1,12 @@
use inkwell::{
AddressSpace,
context::Context,
types::{BasicType, BasicTypeEnum, IntType, PointerType},
values::{BasicValue, BasicValueEnum, PointerValue},
};
use super::ProxyType;
use crate::{
codegen::{CodeGenContext, CodeGenerator, values::OptionValue},
codegen::{CoreContext, CodeGenContext, CodeGenerator, values::OptionValue},
typecheck::typedef::{Type, TypeEnum, iter_type_vars},
};
@@ -33,27 +32,13 @@ impl<'ctx> OptionType<'ctx> {
/// Creates an instance of [`OptionType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, element_type: &impl BasicType<'ctx>) -> Self {
Self::new_impl(element_type, ctx.get_size_type())
}
/// Creates an instance of [`OptionType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
element_type: &impl BasicType<'ctx>,
) -> Self {
Self::new_impl(element_type, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>, element_type: &impl BasicType<'ctx>) -> Self {
Self::new_impl(element_type, ctx.size_t)
}
/// Creates an [`OptionType`] from a [unifier type][Type].
#[must_use]
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: Type,
) -> Self {
pub fn from_unifier_type(ctx: &mut CodeGenContext<'ctx, '_>, ty: Type) -> Self {
// Check unifier type and extract `element_type`
let elem_type = match &*ctx.unifier.get_ty_immutable(ty) {
TypeEnum::TObj { obj_id, params, .. }
@@ -65,8 +50,8 @@ impl<'ctx> OptionType<'ctx> {
_ => panic!("Expected `option` type, but got {}", ctx.unifier.stringify(ty)),
};
let llvm_usize = ctx.get_size_type();
let llvm_elem_type = ctx.get_llvm_type(generator, elem_type);
let llvm_usize = ctx.size_t;
let llvm_elem_type = ctx.get_llvm_type(elem_type);
Self::new_impl(&llvm_elem_type, llvm_usize)
}

View File

@@ -1,16 +1,13 @@
use inkwell::{
AddressSpace,
context::Context,
context::ContextRef,
types::{AnyTypeEnum, ArrayType, BasicType, BasicTypeEnum, IntType, PointerType},
values::{ArrayValue, PointerValue},
};
use super::ProxyType;
use crate::{
codegen::{
values::RangeValue,
{CodeGenContext, CodeGenerator},
},
codegen::{CoreContext, CodeGenContext, CodeGenerator, values::RangeValue},
typecheck::typedef::{Type, TypeEnum},
};
@@ -24,13 +21,13 @@ pub struct RangeType<'ctx> {
impl<'ctx> RangeType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of a `Range`.
#[must_use]
fn llvm_type(ctx: &'ctx Context) -> PointerType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>) -> PointerType<'ctx> {
// typedef int32_t Range[3];
let llvm_i32 = ctx.i32_type();
llvm_i32.array_type(3).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_range = Self::llvm_type(ctx);
RangeType { ty: llvm_range, llvm_usize }
@@ -38,17 +35,8 @@ impl<'ctx> RangeType<'ctx> {
/// Creates an instance of [`RangeType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
/// Creates an instance of [`RangeType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates an [`RangeType`] from a [unifier type][Type].

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::Context,
context::ContextRef,
types::{BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{GlobalValue, IntValue, PointerValue, StructValue},
};
@@ -12,7 +12,7 @@ use super::{
ProxyType,
structure::{StructField, StructFields, check_struct_type_matches_fields},
};
use crate::codegen::{CodeGenContext, CodeGenerator, values::StringValue};
use crate::codegen::{CoreContext, CodeGenContext, values::StringValue};
/// Proxy type for a `str` type in LLVM.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
@@ -41,7 +41,7 @@ impl<'ctx> StringType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of a `str`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> StructType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> StructType<'ctx> {
const NAME: &str = "str";
if let Some(t) = ctx.get_struct_type(NAME) {
@@ -54,7 +54,7 @@ impl<'ctx> StringType<'ctx> {
}
}
fn new_impl(ctx: &'ctx Context, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_str = Self::llvm_type(ctx, llvm_usize);
Self { ty: llvm_str, llvm_usize }
@@ -62,17 +62,8 @@ impl<'ctx> StringType<'ctx> {
/// Creates an instance of [`StringType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type())
}
/// Creates an instance of [`StringType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>) -> Self {
Self::new_impl(ctx.ctx, ctx.size_t)
}
/// Creates an [`StringType`] from a [`StructType`] representing a `str`.
@@ -108,7 +99,7 @@ impl<'ctx> StringType<'ctx> {
.build_global_string_ptr(v, "const")
.map(GlobalValue::as_pointer_value)
.unwrap();
let size = ctx.get_size_type().const_int(v.len() as u64, false);
let size = ctx.size_t.const_int(v.len() as u64, false);
self.map_struct_value(
self.as_abi_type().const_named_struct(&[str_ptr.into(), size.into()]),
name,

View File

@@ -2,7 +2,7 @@ use std::marker::PhantomData;
use inkwell::{
AddressSpace,
context::AsContextRef,
context::ContextRef,
types::{BasicTypeEnum, IntType, PointerType, StructType},
values::{AggregateValueEnum, BasicValue, BasicValueEnum, IntValue, PointerValue, StructValue},
};
@@ -65,7 +65,7 @@ pub trait StructProxyType<'ctx>: ProxyType<'ctx, Base = PointerType<'ctx>> {
/// ```
pub trait StructFields<'ctx>: Eq + Copy {
/// Creates an instance of [`StructFields`] using the given `ctx` and `size_t` types.
fn new(ctx: impl AsContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self;
fn new(ctx: ContextRef<'ctx>, llvm_usize: IntType<'ctx>) -> Self;
/// Returns a [`Vec`] that contains the fields of the structure in the order as they appear in
/// the type definition.
@@ -177,7 +177,7 @@ where
unsafe {
ctx.builder.build_in_bounds_gep(
pobj,
&[idx, &[ctx.ctx.i32_type().const_int(u64::from(self.index), false)]].concat(),
&[idx, &[ctx.i32.const_int(u64::from(self.index), false)]].concat(),
"",
)
}

View File

@@ -1,5 +1,5 @@
use inkwell::{
context::Context,
context::ContextRef,
types::{BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{BasicValueEnum, PointerValue, StructValue},
};
@@ -7,7 +7,7 @@ use itertools::Itertools;
use super::ProxyType;
use crate::{
codegen::{CodeGenContext, CodeGenerator, values::TupleValue},
codegen::{CoreContext, CodeGenContext, values::TupleValue},
typecheck::typedef::{Type, TypeEnum},
};
@@ -20,12 +20,12 @@ pub struct TupleType<'ctx> {
impl<'ctx> TupleType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of a tuple.
#[must_use]
fn llvm_type(ctx: &'ctx Context, tys: &[BasicTypeEnum<'ctx>]) -> StructType<'ctx> {
fn llvm_type(ctx: ContextRef<'ctx>, tys: &[BasicTypeEnum<'ctx>]) -> StructType<'ctx> {
ctx.struct_type(tys, false)
}
fn new_impl(
ctx: &'ctx Context,
ctx: ContextRef<'ctx>,
tys: &[BasicTypeEnum<'ctx>],
llvm_usize: IntType<'ctx>,
) -> Self {
@@ -36,39 +36,25 @@ impl<'ctx> TupleType<'ctx> {
/// Creates an instance of [`TupleType`].
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, tys: &[impl BasicType<'ctx>]) -> Self {
pub fn new(ctx: &CoreContext<'ctx>, tys: &[impl BasicType<'ctx>]) -> Self {
Self::new_impl(
ctx.ctx,
&tys.iter().map(BasicType::as_basic_type_enum).collect_vec(),
ctx.get_size_type(),
ctx.size_t,
)
}
/// Creates an instance of [`TupleType`].
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
tys: &[BasicTypeEnum<'ctx>],
) -> Self {
Self::new_impl(ctx, tys, generator.get_size_type(ctx))
}
/// Creates an [`TupleType`] from a [unifier type][Type].
#[must_use]
pub fn from_unifier_type<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: Type,
) -> Self {
let llvm_usize = ctx.get_size_type();
pub fn from_unifier_type(ctx: &mut CodeGenContext<'ctx, '_>, ty: Type) -> Self {
let llvm_usize = ctx.size_t;
// Sanity check on object type.
let TypeEnum::TTuple { ty: tys, .. } = &*ctx.unifier.get_ty_immutable(ty) else {
panic!("Expected type to be a TypeEnum::TTuple, got {}", ctx.unifier.stringify(ty));
};
let llvm_tys = tys.iter().map(|ty| ctx.get_llvm_type(generator, *ty)).collect_vec();
let llvm_tys = tys.iter().map(|ty| ctx.get_llvm_type(*ty)).collect_vec();
Self { ty: Self::llvm_type(ctx.ctx, &llvm_tys), llvm_usize }
}

View File

@@ -1,6 +1,6 @@
use inkwell::{
AddressSpace,
context::{AsContextRef, Context, ContextRef},
context::ContextRef,
types::{AnyTypeEnum, BasicType, BasicTypeEnum, IntType, PointerType, StructType},
values::{IntValue, PointerValue, StructValue},
};
@@ -9,7 +9,7 @@ use itertools::Itertools;
use nac3core_derive::StructFields;
use crate::codegen::{
CodeGenContext, CodeGenerator,
CoreContext, CodeGenContext, CodeGenerator,
types::{
ProxyType,
structure::{
@@ -46,8 +46,7 @@ pub struct SliceStructFields<'ctx> {
impl<'ctx> SliceStructFields<'ctx> {
/// Creates a new instance of [`SliceStructFields`] with a custom integer type for its range values.
#[must_use]
pub fn new_sized(ctx: &impl AsContextRef<'ctx>, int_ty: IntType<'ctx>) -> Self {
let ctx = unsafe { ContextRef::new(ctx.as_ctx_ref()) };
pub fn new_sized(ctx: ContextRef<'ctx>, int_ty: IntType<'ctx>) -> Self {
let mut counter = FieldIndexCounter::default();
SliceStructFields {
@@ -64,8 +63,8 @@ impl<'ctx> SliceStructFields<'ctx> {
impl<'ctx> SliceType<'ctx> {
/// Creates an LLVM type corresponding to the expected structure of a `Slice`.
#[must_use]
fn llvm_type(ctx: &'ctx Context, int_ty: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys = SliceStructFields::new_sized(&int_ty.get_context(), int_ty)
fn llvm_type(ctx: ContextRef<'ctx>, int_ty: IntType<'ctx>) -> PointerType<'ctx> {
let field_tys = SliceStructFields::new_sized(ctx, int_ty)
.into_iter()
.map(|field| field.1)
.collect_vec();
@@ -73,7 +72,7 @@ impl<'ctx> SliceType<'ctx> {
ctx.struct_type(&field_tys, false).ptr_type(AddressSpace::default())
}
fn new_impl(ctx: &'ctx Context, int_ty: IntType<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
fn new_impl(ctx: ContextRef<'ctx>, int_ty: IntType<'ctx>, llvm_usize: IntType<'ctx>) -> Self {
let llvm_ty = Self::llvm_type(ctx, int_ty);
Self { ty: llvm_ty, int_ty, llvm_usize }
@@ -81,33 +80,14 @@ impl<'ctx> SliceType<'ctx> {
/// Creates an instance of [`SliceType`] with `int_ty` as its backing integer type.
#[must_use]
pub fn new(ctx: &CodeGenContext<'ctx, '_>, int_ty: IntType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, int_ty, ctx.get_size_type())
}
/// Creates an instance of [`SliceType`] with `int_ty` as its backing integer type.
#[must_use]
pub fn new_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
int_ty: IntType<'ctx>,
) -> Self {
Self::new_impl(ctx, int_ty, generator.get_size_type(ctx))
pub fn new(ctx: &CoreContext<'ctx>, int_ty: IntType<'ctx>) -> Self {
Self::new_impl(ctx.ctx, int_ty, ctx.size_t)
}
/// Creates an instance of [`SliceType`] with `usize` as its backing integer type.
#[must_use]
pub fn new_usize(ctx: &CodeGenContext<'ctx, '_>) -> Self {
Self::new_impl(ctx.ctx, ctx.get_size_type(), ctx.get_size_type())
}
/// Creates an instance of [`SliceType`] with `usize` as its backing integer type.
#[must_use]
pub fn new_usize_with_generator<G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &'ctx Context,
) -> Self {
Self::new_impl(ctx, generator.get_size_type(ctx), generator.get_size_type(ctx))
Self::new_impl(ctx.ctx, ctx.size_t, ctx.size_t)
}
/// Creates an [`SliceType`] from a [`StructType`] representing a `slice`.
@@ -280,7 +260,7 @@ impl<'ctx> StructProxyType<'ctx> for SliceType<'ctx> {
type StructFields = SliceStructFields<'ctx>;
fn get_fields(&self) -> Self::StructFields {
SliceStructFields::new_sized(&self.ty.get_context(), self.int_ty)
SliceStructFields::new_sized(self.ty.get_context(), self.int_ty)
}
}

View File

@@ -418,7 +418,7 @@ impl<'ctx> ArrayLikeIndexer<'ctx> for ArraySliceValue<'ctx> {
idx: &IntValue<'ctx>,
name: Option<&str>,
) -> PointerValue<'ctx> {
debug_assert_eq!(idx.get_type(), ctx.get_size_type());
debug_assert_eq!(idx.get_type(), ctx.size_t);
let size = self.size(ctx, generator);
let in_range = ctx.builder.build_int_compare(IntPredicate::ULT, *idx, size, "").unwrap();

View File

@@ -62,7 +62,7 @@ impl<'ctx> ExceptionValue<'ctx> {
/// Stores the ID of the exception name into this instance.
pub fn store_name(&self, ctx: &mut CodeGenContext<'ctx, '_>, name: IntValue<'ctx>) {
debug_assert_eq!(name.get_type(), ctx.ctx.i32_type());
debug_assert_eq!(name.get_type(), ctx.i32);
self.name_field().store(ctx, self.value, name, self.name);
}
@@ -93,7 +93,7 @@ impl<'ctx> ExceptionValue<'ctx> {
ctx: &mut CodeGenContext<'ctx, '_>,
location: Location,
) {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let filename = ctx.gen_string(generator, location.file.0);
self.store_file(ctx, filename);
@@ -150,7 +150,7 @@ impl<'ctx> ExceptionValue<'ctx> {
///
/// If the parameter does not exist, pass `i64 0` in the parameter slot.
pub fn store_params(&self, ctx: &mut CodeGenContext<'ctx, '_>, params: &[IntValue<'ctx>; 3]) {
debug_assert!(params.iter().all(|p| p.get_type() == ctx.ctx.i64_type()));
debug_assert!(params.iter().all(|p| p.get_type() == ctx.i64));
[self.param0_field(), self.param1_field(), self.param2_field()]
.into_iter()

View File

@@ -107,7 +107,7 @@ impl<'ctx> ListValue<'ctx> {
/// Stores the `size` of this `list` into this instance.
pub fn store_size(&self, ctx: &mut CodeGenContext<'ctx, '_>, size: IntValue<'ctx>) {
debug_assert_eq!(size.get_type(), ctx.get_size_type());
debug_assert_eq!(size.get_type(), ctx.size_t);
self.len_field().store(ctx, self.value, size, self.name);
}
@@ -215,7 +215,7 @@ impl<'ctx> ArrayLikeIndexer<'ctx> for ListDataProxy<'ctx, '_> {
idx: &IntValue<'ctx>,
name: Option<&str>,
) -> PointerValue<'ctx> {
debug_assert_eq!(idx.get_type(), ctx.get_size_type());
debug_assert_eq!(idx.get_type(), ctx.size_t);
let size = self.size(ctx, generator);
let in_range = ctx.builder.build_int_compare(IntPredicate::ULT, *idx, size, "").unwrap();

View File

@@ -164,7 +164,7 @@ fn broadcast_shapes<'ctx, G, Shape>(
Shape: TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>>
+ TypedArrayLikeMutator<'ctx, G, IntValue<'ctx>>,
{
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let llvm_shape_ty = ShapeEntryType::new(ctx);
assert!(
@@ -219,7 +219,7 @@ impl<'ctx> NDArrayType<'ctx> {
) -> BroadcastAllResult<'ctx, G> {
assert!(!ndarrays.is_empty());
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
// Infer the broadcast output ndims.
let broadcast_ndims_int =

View File

@@ -253,7 +253,7 @@ impl<'ctx> RustNDIndex<'ctx> {
// Set `dst_ndindex_ptr->data`
match self {
RustNDIndex::SingleElement(in_index) => {
let index_ptr = ctx.builder.build_alloca(ctx.ctx.i32_type(), "").unwrap();
let index_ptr = ctx.builder.build_alloca(ctx.i32, "").unwrap();
ctx.builder.build_store(index_ptr, *in_index).unwrap();
dst_ndindex.store_data(
@@ -263,7 +263,7 @@ impl<'ctx> RustNDIndex<'ctx> {
}
RustNDIndex::Slice(in_rust_slice) => {
let user_slice_ptr =
SliceType::new(ctx, ctx.ctx.i32_type()).alloca_var(generator, ctx, None);
SliceType::new(ctx, ctx.i32).alloca_var(generator, ctx, None);
in_rust_slice.write_to_slice(ctx, user_slice_ptr);
dst_ndindex.store_data(

View File

@@ -35,8 +35,8 @@ fn matmul_at_least_2d<'ctx, G: CodeGenerator>(
let lhs_dtype = arraylike_flatten_element_type(&mut ctx.unifier, in_a_ty);
let rhs_dtype = arraylike_flatten_element_type(&mut ctx.unifier, in_b_ty);
let llvm_usize = ctx.get_size_type();
let llvm_dst_dtype = ctx.get_llvm_type(generator, dst_dtype);
let llvm_usize = ctx.size_t;
let llvm_dst_dtype = ctx.get_llvm_type(dst_dtype);
// Deduce ndims of the result of matmul.
let ndims_int = max(in_a.ndims, in_b.ndims);
@@ -130,7 +130,7 @@ fn matmul_at_least_2d<'ctx, G: CodeGenerator>(
let at_row = i64::try_from(ndims_int - 2).unwrap();
let at_col = i64::try_from(ndims_int - 1).unwrap();
let dst_dtype_llvm = ctx.get_llvm_type(generator, dst_dtype);
let dst_dtype_llvm = ctx.get_llvm_type(dst_dtype);
let dst_zero = dst_dtype_llvm.const_zero();
dst.foreach(generator, ctx, |generator, ctx, _, hdl| {
@@ -288,7 +288,7 @@ impl<'ctx> NDArrayValue<'ctx> {
// Postprocessing on the result to remove prepended/appended axes.
let mut postindices = vec![];
let zero = ctx.ctx.i32_type().const_zero();
let zero = ctx.i32.const_zero();
if self.ndims == 1 {
// Remove the prepended 1

View File

@@ -94,7 +94,7 @@ impl<'ctx> NDArrayValue<'ctx> {
/// Stores the number of dimensions `ndims` into this instance.
pub fn store_ndims(&self, ctx: &mut CodeGenContext<'ctx, '_>, ndims: IntValue<'ctx>) {
debug_assert_eq!(ndims.get_type(), ctx.get_size_type());
debug_assert_eq!(ndims.get_type(), ctx.size_t);
self.ndims_field().store(ctx, self.value, ndims, self.name);
}
@@ -110,7 +110,7 @@ impl<'ctx> NDArrayValue<'ctx> {
/// Stores the size of each element `itemsize` into this instance.
pub fn store_itemsize(&self, ctx: &mut CodeGenContext<'ctx, '_>, itemsize: IntValue<'ctx>) {
debug_assert_eq!(itemsize.get_type(), ctx.get_size_type());
debug_assert_eq!(itemsize.get_type(), ctx.size_t);
self.itemsize_field().store(ctx, self.value, itemsize, self.name);
}
@@ -355,7 +355,7 @@ impl<'ctx> NDArrayValue<'ctx> {
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
) -> TupleValue<'ctx> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let objects = (0..self.ndims)
.map(|i| {
@@ -385,7 +385,7 @@ impl<'ctx> NDArrayValue<'ctx> {
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
) -> TupleValue<'ctx> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_i32 = ctx.i32;
let objects = (0..self.ndims)
.map(|i| {
@@ -420,7 +420,7 @@ impl<'ctx> NDArrayValue<'ctx> {
) -> Option<BasicValueEnum<'ctx>> {
if self.is_unsized() {
// NOTE: `np.size(self) == 0` here is never possible.
let zero = ctx.get_size_type().const_zero();
let zero = ctx.size_t.const_zero();
let value = unsafe { self.data().get_unchecked(ctx, generator, &zero, None) };
Some(value)
@@ -792,7 +792,7 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> ArrayLikeIndexer<'ctx, Index>
indices: &Index,
name: Option<&str>,
) -> PointerValue<'ctx> {
assert_eq!(indices.element_type(ctx, generator), ctx.get_size_type().into());
assert_eq!(indices.element_type(ctx, generator), ctx.size_t.into());
let indices = TypedArrayLikeAdapter::from(
indices.as_slice_value(ctx, generator),
@@ -825,7 +825,7 @@ impl<'ctx, Index: UntypedArrayLikeAccessor<'ctx>> ArrayLikeIndexer<'ctx, Index>
indices: &Index,
name: Option<&str>,
) -> PointerValue<'ctx> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let indices_size = indices.size(ctx, generator);
let ndims = self.0.load_ndims(ctx);
@@ -951,8 +951,7 @@ impl<'ctx> ScalarOrNDArray<'ctx> {
/// If `object` is an ndarray, [`ScalarOrNDArray::NDArray`].
///
/// For everything else, it is wrapped with [`ScalarOrNDArray::Scalar`].
pub fn from_value<G: CodeGenerator + ?Sized>(
generator: &mut G,
pub fn from_value(
ctx: &mut CodeGenContext<'ctx, '_>,
(object_ty, object): (Type, BasicValueEnum<'ctx>),
) -> ScalarOrNDArray<'ctx> {
@@ -960,7 +959,7 @@ impl<'ctx> ScalarOrNDArray<'ctx> {
TypeEnum::TObj { obj_id, .. }
if *obj_id == ctx.primitives.ndarray.obj_id(&ctx.unifier).unwrap() =>
{
let ndarray = NDArrayType::from_unifier_type(generator, ctx, object_ty)
let ndarray = NDArrayType::from_unifier_type(ctx, object_ty)
.map_pointer_value(object.into_pointer_value(), None);
ScalarOrNDArray::NDArray(ndarray)
}

View File

@@ -30,7 +30,7 @@ pub fn parse_numpy_int_sequence<'ctx, G: CodeGenerator + ?Sized>(
ctx: &mut CodeGenContext<'ctx, '_>,
(input_seq_ty, input_seq): (Type, BasicValueEnum<'ctx>),
) -> impl TypedArrayLikeAccessor<'ctx, G, IntValue<'ctx>> + use<'ctx, G> {
let llvm_usize = ctx.get_size_type();
let llvm_usize = ctx.size_t;
let zero = llvm_usize.const_zero();
let one = llvm_usize.const_int(1, false);
@@ -41,7 +41,7 @@ pub fn parse_numpy_int_sequence<'ctx, G: CodeGenerator + ?Sized>(
{
// 1. A list of `int32`; e.g., `np.empty([600, 800, 3])`
let input_seq = ListType::from_unifier_type(generator, ctx, input_seq_ty)
let input_seq = ListType::from_unifier_type(ctx, input_seq_ty)
.map_pointer_value(input_seq.into_pointer_value(), None);
let len = input_seq.load_size(ctx, None);
@@ -85,7 +85,7 @@ pub fn parse_numpy_int_sequence<'ctx, G: CodeGenerator + ?Sized>(
TypeEnum::TTuple { .. } => {
// 2. A tuple of ints; e.g., `np.empty((600, 800, 3))`
let input_seq = TupleType::from_unifier_type(generator, ctx, input_seq_ty)